code (string, 22–1.05M chars) | apis (list, 1–3.31k items) | extract_api (string, 75–3.25M chars)
---|---|---|
import unittest
from resthelper.utils import build_restful_url
class TestBuildUrl(unittest.TestCase):
def test_is_restful_https_url(self):
url = build_restful_url('https://jenkins1.tttech.com',
'testuser', '/rest/1.0/request')
self.assertEqual(url,
'https://[email protected]/rest/1.0/request')
def test_is_restful_http_url(self):
url = build_restful_url('http://jenkins1.tttech.com',
'testuser', '/rest/1.0/request')
self.assertEqual(url,
'http://[email protected]/rest/1.0/request')
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"resthelper.utils.build_restful_url"
]
| [((672, 687), 'unittest.main', 'unittest.main', ([], {}), '()\n', (685, 687), False, 'import unittest\n'), ((160, 245), 'resthelper.utils.build_restful_url', 'build_restful_url', (['"""https://jenkins1.tttech.com"""', '"""testuser"""', '"""/rest/1.0/request"""'], {}), "('https://jenkins1.tttech.com', 'testuser',\n '/rest/1.0/request')\n", (177, 245), False, 'from resthelper.utils import build_restful_url\n'), ((428, 513), 'resthelper.utils.build_restful_url', 'build_restful_url', (['"""http://jenkins1.tttech.com"""', '"""testuser"""', '"""/rest/1.0/request"""'], {}), "('http://jenkins1.tttech.com', 'testuser', '/rest/1.0/request'\n )\n", (445, 513), False, 'from resthelper.utils import build_restful_url\n')] |
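The two assertions above pin the expected output exactly: the scheme and host from the base URL, the username inserted before the host, and the request path appended. The sketch below is a hypothetical re-implementation inferred only from those assertions; the real `resthelper.utils.build_restful_url` is not included in this row, so treat the body as illustrative.

# Hypothetical sketch, derived only from the expected values in the tests above.
from urllib.parse import urlsplit, urlunsplit

def build_restful_url(base_url, username, request_path):
    parts = urlsplit(base_url)                        # split scheme and host
    netloc = "{}@{}".format(username, parts.netloc)   # prepend "user@"
    return urlunsplit((parts.scheme, netloc, request_path, "", ""))

assert build_restful_url("https://jenkins1.tttech.com", "testuser",
                          "/rest/1.0/request") == \
    "https://[email protected]/rest/1.0/request"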
""" python-rq based backend
This backend will send your messages asynchronously with python-rq.
Before using this backend, make sure that django-rq is installed and
configured.
Usage
-----
In settings.py
SENDSMS_BACKEND = 'sendsms.backends.rq.SmsBackend'
RQ_SENDSMS_BACKEND = 'actual.backend.to.use.SmsBackend'
"""
from sendsms.api import get_connection
from sendsms.backends.base import BaseSmsBackend
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django_rq import job
RQ_SENDSMS_BACKEND = getattr(settings, 'RQ_SENDSMS_BACKEND', None)
if not RQ_SENDSMS_BACKEND:
raise ImproperlyConfigured('Set RQ_SENDSMS_BACKEND')
@job
def send_messages(messages):
connection = get_connection(RQ_SENDSMS_BACKEND)
connection.send_messages(messages)
class SmsBackend(BaseSmsBackend):
def send_messages(self, messages):
send_messages.delay(messages)
| [
"sendsms.api.get_connection",
"django.core.exceptions.ImproperlyConfigured"
]
| [((641, 687), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""Set RQ_SENDSMS_BACKEND"""'], {}), "('Set RQ_SENDSMS_BACKEND')\n", (661, 687), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((741, 775), 'sendsms.api.get_connection', 'get_connection', (['RQ_SENDSMS_BACKEND'], {}), '(RQ_SENDSMS_BACKEND)\n', (755, 775), False, 'from sendsms.api import get_connection\n')] |
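The backend above never sends anything itself: `send_messages.delay(messages)` enqueues the work, and a python-rq worker later calls the real backend named in `RQ_SENDSMS_BACKEND`. The stand-in below mimics that enqueue/worker split with a plain in-memory queue so the control flow can be run without Django, Redis, or rq; `task_queue`, `delay`, and `deliver` are illustrative names, not part of either library.

from collections import deque

task_queue = deque()               # stands in for the Redis-backed rq queue

def delay(func, *args):           # stands in for the .delay() added by @job
    task_queue.append((func, args))

def deliver(messages):            # stands in for the real backend's send_messages
    for message in messages:
        print("sending:", message)

delay(deliver, ["hello", "world"])    # web-request side: enqueue and return at once

while task_queue:                     # worker side: pop tasks and execute them
    func, args = task_queue.popleft()
    func(*args)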
import json
import sys
def compatible_loads(json_data):
"""
    Function json.loads in Python 3.0 - 3.5 can't handle bytes, so this function handles it.
:param json_data:
:return: unicode (str if it's python 3)
"""
if isinstance(json_data, bytes) and (3, 0) <= sys.version_info < (3, 6):
json_data = json_data.decode("utf-8")
return json.loads(json_data)
def get_massage_from_io_error(error):
"""
    :param error: an IOError instance
:return: error message
"""
if sys.version_info >= (3, 0):
return error.strerror
else:
return error.message
| [
"json.loads"
]
| [((378, 399), 'json.loads', 'json.loads', (['json_data'], {}), '(json_data)\n', (388, 399), False, 'import json\n')] |
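A short usage example (not part of the original snippet) for the two helpers defined above: `compatible_loads` accepts both `str` and `bytes`, and `get_massage_from_io_error` picks the attribute that exists on the running interpreter.

data_as_str = '{"status": "ok"}'
data_as_bytes = data_as_str.encode("utf-8")
assert compatible_loads(data_as_str) == {"status": "ok"}
assert compatible_loads(data_as_bytes) == {"status": "ok"}

try:
    open("this-file-does-not-exist.txt")
except IOError as error:
    print(get_massage_from_io_error(error))   # e.g. "No such file or directory"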
import sys,time
def sprint(str):
for c in str + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(3./90)
from colorama import Fore, Back, Style
sprint (Fore.RED + "แแแแแ แฏแแแ. tool-แ แจแแฅแแแแแแ แแแแแ แงแแคแแแแ-DaduVoke-แแก แแแแ @2021")
import socket
import _thread
import time
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Core(object):
ipurl=0
mode=1024
menu1=False
f=None
network_speed="แกแแฉแฅแแ แ"
menu2=False
def GetData(self, url):
self.url = url
try:
self.ipurl = socket.gethostbyname(self.url)
except Exception as e:
print ("แแฅแแแ แแ แแกแฌแแ แแ แจแแแงแแแแแ IP แแ URL")
exit(0)
Core.ipurl=self.ipurl
print (22*" ",bcolors.OKGREEN,"=/=\=\=/=\=/=\=/=\=/=\=/=\=/=\=/=\=/VokeScaner=/=\=\=/=\=/=\=/=\=/=\=/=\=/=\=/=\=",bcolors.OKGREEN)
sprint('แแแฎแแแ แแแ แฉแแแ 1 แแ 2')
while Core.menu1 is not True:
choice = input("\n1 - แแแแแ\n2 - แแ แซแแแ\n")
if choice == "1":
Core.mode=1024
menu=True
break
elif choice == "2":
Core.mode=64000
menu = True
break
else:
sprint("แแแฎแแแ แแแ แฉแแแ แแแ แแแแ แแ แแแแ แ. แแ แแแ แแแแก แแแกแแจแแแแแ แขแแ แแแแแแจแ แแแแแแงแแแแ แแ แซแแแแแ 1 แแ 2")
while Core.menu2 is not True:
sprint("แแแแ แ แแขแแแ! แแแฎแแแ แแแ แฉแแแ แแแแแงแแแแแฃแแ แแแขแแ แแแขแแก แกแแฉแฅแแ แ (0.05(1) 0.03(2))")
choice = input("\n1 - แแแแแ \n2 - แแ แซแแแ\n")
if choice == "1":
Core.network_speed=0.05
menu2=True
break
elif choice == "2":
Core.network_speed=0.3
menu2 = True
break
else:
print("แแแฎแแแ แแแ แฉแแแ แแแ แแแแ แแ แแแแ แ. แแ แแแ แแแแก แแแกแแจแแแแแ แขแแ แแแแแแจแ แแแแแแงแแแแ แแ แซแแแแแ 1 แแ 2")
def Start_Scan(self, port_start, port_end):
Core.f = open(Core.ipurl, "a")
try:
for x in range(port_start,port_end):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
res = sock.connect_ex((Core.ipurl,x))
                if res == 0:
tmp="แแแ แขแ",x,"แแแฎแกแแแแแ", socket.getservbyport(x)
tmp1=str(tmp[0])+" "+str(tmp[1])+" "+str(tmp[2])+" "+str(tmp[3])
print(bcolors.OKGREEN,tmp1)
Core.f.write(str(tmp)+"\n")
Core.f.close()
except Exception as e:
print (e)
try:
scan = Core()
scan.GetData(input("แฉแแฌแแ แแ IP แแ แแแกแแแแ แแ URL\n"))
print(bcolors.WARNING,"แกแแฎแจแแ แ:",Core.mode,"\n แกแแแแแแ:",Core.ipurl,"\n แกแแแแแ แแก แกแแฉแฅแแ แ:",Core.network_speed,bcolors.ENDC)
print(bcolors.BOLD,"แแแฎแแแ แแแแชแแแแ แ แแแแแแแแ แฌแแแ...",bcolors.ENDC)
for count in range(0,Core.mode):
time.sleep(Core.network_speed)
_thread.start_new_thread(scan.Start_Scan, (count,count+1))
if count > Core.mode:
exit(0)
except Exception as e:
print (e)
| [
"socket.gethostbyname",
"socket.getservbyport",
"socket.socket",
"time.sleep",
"sys.stdout.flush",
"_thread.start_new_thread",
"sys.stdout.write"
]
| [((75, 94), 'sys.stdout.write', 'sys.stdout.write', (['c'], {}), '(c)\n', (91, 94), False, 'import sys, time\n'), ((103, 121), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (119, 121), False, 'import sys, time\n'), ((130, 150), 'time.sleep', 'time.sleep', (['(3.0 / 90)'], {}), '(3.0 / 90)\n', (140, 150), False, 'import time\n'), ((3340, 3370), 'time.sleep', 'time.sleep', (['Core.network_speed'], {}), '(Core.network_speed)\n', (3350, 3370), False, 'import time\n'), ((3382, 3443), '_thread.start_new_thread', '_thread.start_new_thread', (['scan.Start_Scan', '(count, count + 1)'], {}), '(scan.Start_Scan, (count, count + 1))\n', (3406, 3443), False, 'import _thread\n'), ((803, 833), 'socket.gethostbyname', 'socket.gethostbyname', (['self.url'], {}), '(self.url)\n', (823, 833), False, 'import socket\n'), ((2489, 2538), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (2502, 2538), False, 'import socket\n'), ((2702, 2725), 'socket.getservbyport', 'socket.getservbyport', (['x'], {}), '(x)\n', (2722, 2725), False, 'import socket\n')] |
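The scanner above is built around `socket.connect_ex`, which returns 0 when a TCP connection succeeds. The self-contained check below shows the same technique in a compact form; the host and port list are placeholders, and the explicit timeout avoids the long hangs the original can hit on filtered ports.

import socket

def is_port_open(host, port, timeout=1.0):
    # A TCP "connect scan": connect_ex returns 0 on success, an errno otherwise.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(timeout)
        return sock.connect_ex((host, port)) == 0

for port in (22, 80, 443):
    print(port, "open" if is_port_open("127.0.0.1", port) else "closed")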
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from copy import deepcopy
from math import sqrt
import numpy as np
from .unit_cell_lattice import UnitCell, UnitCellLattice
from ..geometry import Cube
from ..tiling import CubicTiling
from ..transform_func import ScaleFunc, RotateFunc
from ...util.util import ListHasPoint
class DiamondLattice(UnitCellLattice):
RefIAD = sqrt(3) / 4
# === STANDARD CONSTRUCTOR
def __init__(self, IAD):
RefUnitCellShape = Cube(1, BotBackLeftCorner=np.array([0, 0, 0], dtype=float))
RefUnitCellTiling = CubicTiling(RefUnitCellShape)
RefFracPositions = [np.array([0.0, 0.0, 0.0]),
np.array([0.5, 0.5, 0.0]),
np.array([0.0, 0.5, 0.5]),
np.array([0.5, 0.0, 0.5]),
np.array([0.25, 0.25, 0.25]),
np.array([0.25, 0.75, 0.75]),
np.array([0.75, 0.25, 0.75]),
np.array([0.75, 0.75, 0.25])]
RefUnitCell = UnitCell(RefUnitCellTiling, RefFracPositions)
UnitCellLattice.__init__(self, RefUnitCell)
self._IAD = DiamondLattice.RefIAD # IAD is set correctly after calling applyTransF
self.applyTransF(ScaleFunc(IAD / DiamondLattice.RefIAD))
self._NthNeighbors = [[[np.array([0.25, 0.25, 0.25]),
np.array([-0.25, -0.25, 0.25]),
np.array([-0.25, 0.25, -0.25]),
np.array([0.25, -0.25, -0.25])],
[np.array([-0.25, -0.25, -0.25]),
np.array([0.25, 0.25, -0.25]),
np.array([0.25, -0.25, 0.25]),
np.array([-0.25, 0.25, 0.25])]],
[[np.array([0.0, 0.5, 0.5]),
np.array([0.0, 0.5, -0.5]),
np.array([0.0, -0.5, 0.5]),
np.array([0.0, -0.5, -0.5]),
np.array([0.5, 0.5, 0.0]),
np.array([0.5, 0.0, 0.5]),
np.array([0.5, -0.5, 0.0]),
np.array([0.5, 0.0, -0.5]),
np.array([-0.5, 0.5, 0.0]),
np.array([-0.5, 0.0, 0.5]),
np.array([-0.5, -0.5, 0.0]),
np.array([-0.5, 0.0, -0.5])],
[np.array([0.0, 0.5, 0.5]),
np.array([0.0, 0.5, -0.5]),
np.array([0.0, -0.5, 0.5]),
np.array([0.0, -0.5, -0.5]),
np.array([0.5, 0.5, 0.0]),
np.array([0.5, 0.0, 0.5]),
np.array([0.5, -0.5, 0.0]),
np.array([0.5, 0.0, -0.5]),
np.array([-0.5, 0.5, 0.0]),
np.array([-0.5, 0.0, 0.5]),
np.array([-0.5, -0.5, 0.0]),
np.array([-0.5, 0.0, -0.5])]]]
self._typeDict = {0: 0, 3: 1}
self._relativePositions = {0: np.array([0.0, 0.0, 0.0]), 3: np.array([0.25, 0.25, 0.25])}
# === CONSTRUCTOR - Aligned with {100}
@classmethod
def alignedWith100(cls, IAD):
return cls(IAD) # Default implementation
# === CONSTRUCTOR - Aligned with {110}
@classmethod
def aligndWith110(cls, IAD):
result = cls(IAD)
thetaX = 0
thetaY = np.pi * 0.25
thetaZ = 0
result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
return result
# === CONSTRUCTOR - Aligned with {111}
@classmethod
def alignedWith111(cls, IAD, blnTrianglesAlignedWithX=True):
result = cls(IAD)
thetaX = -np.pi * 0.25
thetaY = -np.arctan2(-sqrt(2), 2)
thetaZ = (np.pi * 0.5 if blnTrianglesAlignedWithX else 0)
result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
return result
# === CONSTRUCTOR - Aligned with {xyz}
@classmethod
def alignedWith(cls, IAD, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return cls(IAD)
elif MI in ['110', '101', '011']:
return cls.aligndWith110(IAD)
elif MI == '111':
return cls.alignedWith111(IAD)
else:
result = cls(IAD)
a = np.array([0.0, 0.0, 1.0])
b = np.array([float(MI[0]), float(MI[1]), float(MI[2])])
axis = np.cross(a, b)
angle = np.arccos(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
result.applyTransF(RotateFunc.fromAxisAngle(axis, angle))
return result
return ValueError('DiamondLattice.alignedWith: Input direction is not correct.')
# === MANIPULATION METHODS
def applyTransF(self, TransF):
if isinstance(TransF, ScaleFunc):
if TransF.isIsometric:
self._IAD *= TransF.Scale[0]
else:
raise ValueError('DiamondLattice.applyTransF: Can only scale isometrically')
UnitCellLattice.applyTransF(self, TransF)
# === AUXILIARY METHODS
def _getPointType(self, P):
return (int(round(P[0] * 4)) + int(round(P[1] * 4)) + int(round(P[2] * 4))) % 4
# === PROPERTY EVALUATION METHODS
# NOTE: inherited from UnitCellLattice
# def isOnLattice(self,P):
def areNeighbors(self, P1, P2):
return np.linalg.norm(P2 - P1) <= self.IAD
def getNeighbors(self, P, layer=1):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
if PType not in self._typeDict.keys():
raise ValueError('DiamondLattice.getNeighbors Should never reach here!')
if layer > len(self._NthNeighbors):
self._calculateNeighbors(layer)
NBs = deepcopy(self._NthNeighbors[layer - 1][self._typeDict[PType]])
for NeighP in NBs:
NeighP += RefP
self._convertFromReference(NeighP)
return NBs
def _calculateNeighbors(self, layer):
NList = []
for k, v in self._typeDict.items():
tmp = [np.array([0, 0, 0], dtype=float)]
for nb in self._NthNeighbors:
tmp.extend(nb[v])
NList.append(tmp)
for _ in range(layer - len(self._NthNeighbors)):
tmp = [[] for _ in self._typeDict.keys()]
for k, v in self._typeDict.items():
for P in self._NthNeighbors[len(self._NthNeighbors) - 1][v]:
PType = self._getPointType(P + self._relativePositions[k])
for Q in self._NthNeighbors[0][self._typeDict[PType]]:
N = P + Q
if not ListHasPoint(NList[v], N, 0.001 * DiamondLattice.RefIAD):
tmp[v].append(N)
NList[v].append(N)
self._NthNeighbors.append(tmp)
def isASite(self, P):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
return PType == 0
def isBSite(self, P):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
return PType == 3
def setDesign(self, D, AType, BType):
for i, P in enumerate(D.Canvas.Points):
if self.isASite(P):
D.setContent(i, AType)
elif self.isBSite(P):
D.setContent(i, BType)
else:
raise ValueError('setDesign can not set site not on lattice')
# === BASIC QUERY METHODS
@property
def IAD(self):
return self._IAD
@property
def Diamond100LayerSpacing(self):
return self.IAD / sqrt(3)
@property
def Diamond110LayerSpacing(self):
return self.IAD * sqrt(2) / sqrt(3)
@property
def Diamond111LayerSpacing(self):
return self.IAD * 4 / 3
@property
def Diamond112LayerSpacing(self):
return self.IAD * sqrt(2) / 3
def getLayerSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return self.Diamond100LayerSpacing
elif MI in ['110', '101', '011']:
return self.Diamond110LayerSpacing
elif MI == '111':
return self.Diamond111LayerSpacing
elif MI in ['112', '121', '211']:
return self.Diamond112LayerSpacing
else:
raise NotImplementedError('DiamondLattice.getLayerSpacing: Input direction is not supported.')
return ValueError('DiamondLattice.getLayerSpacing: Input direction is not correct.')
def getShellSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001', '110', '101', '011', '111']:
return self.IAD * sqrt(8) / sqrt(3)
elif MI in ['112', '121', '211']:
return self.IAD * sqrt(2) / sqrt(3)
else:
raise NotImplementedError('DiamondLattice.getShellSpacing: Input direction is not supported.')
return ValueError('The input direction is not correct.')
def getUniqueLayerCount(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return 4
elif MI in ['110', '101', '011']:
return 2
elif MI == '111':
return 3
elif MI in ['112', '121', '211']:
return 6
else:
raise NotImplementedError('DiamondLattice.getUniqueLayerCount: Input direction is not supported.')
return ValueError('The input direction is not correct.')
| [
"copy.deepcopy",
"numpy.cross",
"math.sqrt",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm"
]
| [((1104, 1111), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (1108, 1111), False, 'from math import sqrt\n'), ((6940, 7002), 'copy.deepcopy', 'deepcopy', (['self._NthNeighbors[layer - 1][self._typeDict[PType]]'], {}), '(self._NthNeighbors[layer - 1][self._typeDict[PType]])\n', (6948, 7002), False, 'from copy import deepcopy\n'), ((1350, 1375), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1358, 1375), True, 'import numpy as np\n'), ((1405, 1430), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (1413, 1430), True, 'import numpy as np\n'), ((1460, 1485), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (1468, 1485), True, 'import numpy as np\n'), ((1515, 1540), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (1523, 1540), True, 'import numpy as np\n'), ((1570, 1598), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (1578, 1598), True, 'import numpy as np\n'), ((1628, 1656), 'numpy.array', 'np.array', (['[0.25, 0.75, 0.75]'], {}), '([0.25, 0.75, 0.75])\n', (1636, 1656), True, 'import numpy as np\n'), ((1686, 1714), 'numpy.array', 'np.array', (['[0.75, 0.25, 0.75]'], {}), '([0.75, 0.25, 0.75])\n', (1694, 1714), True, 'import numpy as np\n'), ((1744, 1772), 'numpy.array', 'np.array', (['[0.75, 0.75, 0.25]'], {}), '([0.75, 0.75, 0.25])\n', (1752, 1772), True, 'import numpy as np\n'), ((4081, 4106), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4089, 4106), True, 'import numpy as np\n'), ((4111, 4139), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (4119, 4139), True, 'import numpy as np\n'), ((6542, 6565), 'numpy.linalg.norm', 'np.linalg.norm', (['(P2 - P1)'], {}), '(P2 - P1)\n', (6556, 6565), True, 'import numpy as np\n'), ((8815, 8822), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (8819, 8822), False, 'from math import sqrt\n'), ((8912, 8919), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (8916, 8919), False, 'from math import sqrt\n'), ((1230, 1262), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'float'}), '([0, 0, 0], dtype=float)\n', (1238, 1262), True, 'import numpy as np\n'), ((7248, 7280), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'float'}), '([0, 0, 0], dtype=float)\n', (7256, 7280), True, 'import numpy as np\n'), ((8902, 8909), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (8906, 8909), False, 'from math import sqrt\n'), ((9084, 9091), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (9088, 9091), False, 'from math import sqrt\n'), ((2083, 2111), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (2091, 2111), True, 'import numpy as np\n'), ((2145, 2175), 'numpy.array', 'np.array', (['[-0.25, -0.25, 0.25]'], {}), '([-0.25, -0.25, 0.25])\n', (2153, 2175), True, 'import numpy as np\n'), ((2209, 2239), 'numpy.array', 'np.array', (['[-0.25, 0.25, -0.25]'], {}), '([-0.25, 0.25, -0.25])\n', (2217, 2239), True, 'import numpy as np\n'), ((2273, 2303), 'numpy.array', 'np.array', (['[0.25, -0.25, -0.25]'], {}), '([0.25, -0.25, -0.25])\n', (2281, 2303), True, 'import numpy as np\n'), ((2338, 2369), 'numpy.array', 'np.array', (['[-0.25, -0.25, -0.25]'], {}), '([-0.25, -0.25, -0.25])\n', (2346, 2369), True, 'import numpy as np\n'), ((2403, 2432), 'numpy.array', 'np.array', (['[0.25, 0.25, -0.25]'], {}), '([0.25, 0.25, -0.25])\n', (2411, 2432), True, 'import numpy as np\n'), ((2466, 2495), 'numpy.array', 'np.array', (['[0.25, -0.25, 
0.25]'], {}), '([0.25, -0.25, 0.25])\n', (2474, 2495), True, 'import numpy as np\n'), ((2529, 2558), 'numpy.array', 'np.array', (['[-0.25, 0.25, 0.25]'], {}), '([-0.25, 0.25, 0.25])\n', (2537, 2558), True, 'import numpy as np\n'), ((2594, 2619), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (2602, 2619), True, 'import numpy as np\n'), ((2653, 2679), 'numpy.array', 'np.array', (['[0.0, 0.5, -0.5]'], {}), '([0.0, 0.5, -0.5])\n', (2661, 2679), True, 'import numpy as np\n'), ((2713, 2739), 'numpy.array', 'np.array', (['[0.0, -0.5, 0.5]'], {}), '([0.0, -0.5, 0.5])\n', (2721, 2739), True, 'import numpy as np\n'), ((2773, 2800), 'numpy.array', 'np.array', (['[0.0, -0.5, -0.5]'], {}), '([0.0, -0.5, -0.5])\n', (2781, 2800), True, 'import numpy as np\n'), ((2834, 2859), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (2842, 2859), True, 'import numpy as np\n'), ((2893, 2918), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (2901, 2918), True, 'import numpy as np\n'), ((2952, 2978), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.0]'], {}), '([0.5, -0.5, 0.0])\n', (2960, 2978), True, 'import numpy as np\n'), ((3012, 3038), 'numpy.array', 'np.array', (['[0.5, 0.0, -0.5]'], {}), '([0.5, 0.0, -0.5])\n', (3020, 3038), True, 'import numpy as np\n'), ((3072, 3098), 'numpy.array', 'np.array', (['[-0.5, 0.5, 0.0]'], {}), '([-0.5, 0.5, 0.0])\n', (3080, 3098), True, 'import numpy as np\n'), ((3132, 3158), 'numpy.array', 'np.array', (['[-0.5, 0.0, 0.5]'], {}), '([-0.5, 0.0, 0.5])\n', (3140, 3158), True, 'import numpy as np\n'), ((3192, 3219), 'numpy.array', 'np.array', (['[-0.5, -0.5, 0.0]'], {}), '([-0.5, -0.5, 0.0])\n', (3200, 3219), True, 'import numpy as np\n'), ((3253, 3280), 'numpy.array', 'np.array', (['[-0.5, 0.0, -0.5]'], {}), '([-0.5, 0.0, -0.5])\n', (3261, 3280), True, 'import numpy as np\n'), ((3315, 3340), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (3323, 3340), True, 'import numpy as np\n'), ((3374, 3400), 'numpy.array', 'np.array', (['[0.0, 0.5, -0.5]'], {}), '([0.0, 0.5, -0.5])\n', (3382, 3400), True, 'import numpy as np\n'), ((3434, 3460), 'numpy.array', 'np.array', (['[0.0, -0.5, 0.5]'], {}), '([0.0, -0.5, 0.5])\n', (3442, 3460), True, 'import numpy as np\n'), ((3494, 3521), 'numpy.array', 'np.array', (['[0.0, -0.5, -0.5]'], {}), '([0.0, -0.5, -0.5])\n', (3502, 3521), True, 'import numpy as np\n'), ((3555, 3580), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (3563, 3580), True, 'import numpy as np\n'), ((3614, 3639), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (3622, 3639), True, 'import numpy as np\n'), ((3673, 3699), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.0]'], {}), '([0.5, -0.5, 0.0])\n', (3681, 3699), True, 'import numpy as np\n'), ((3733, 3759), 'numpy.array', 'np.array', (['[0.5, 0.0, -0.5]'], {}), '([0.5, 0.0, -0.5])\n', (3741, 3759), True, 'import numpy as np\n'), ((3793, 3819), 'numpy.array', 'np.array', (['[-0.5, 0.5, 0.0]'], {}), '([-0.5, 0.5, 0.0])\n', (3801, 3819), True, 'import numpy as np\n'), ((3853, 3879), 'numpy.array', 'np.array', (['[-0.5, 0.0, 0.5]'], {}), '([-0.5, 0.0, 0.5])\n', (3861, 3879), True, 'import numpy as np\n'), ((3913, 3940), 'numpy.array', 'np.array', (['[-0.5, -0.5, 0.0]'], {}), '([-0.5, -0.5, 0.0])\n', (3921, 3940), True, 'import numpy as np\n'), ((3974, 4001), 'numpy.array', 'np.array', (['[-0.5, 0.0, -0.5]'], {}), '([-0.5, 0.0, -0.5])\n', (3982, 4001), True, 'import 
numpy as np\n'), ((4786, 4793), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (4790, 4793), False, 'from math import sqrt\n'), ((10040, 10047), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (10044, 10047), False, 'from math import sqrt\n'), ((5458, 5483), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (5466, 5483), True, 'import numpy as np\n'), ((5580, 5594), 'numpy.cross', 'np.cross', (['a', 'b'], {}), '(a, b)\n', (5588, 5594), True, 'import numpy as np\n'), ((10030, 10037), 'math.sqrt', 'sqrt', (['(8)'], {}), '(8)\n', (10034, 10037), False, 'from math import sqrt\n'), ((10138, 10145), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (10142, 10145), False, 'from math import sqrt\n'), ((10128, 10135), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (10132, 10135), False, 'from math import sqrt\n'), ((5629, 5641), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (5635, 5641), True, 'import numpy as np\n'), ((5645, 5662), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (5659, 5662), True, 'import numpy as np\n'), ((5665, 5682), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (5679, 5682), True, 'import numpy as np\n')] |
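A quick numerical check (not part of the original module) of the constants hard-coded above: the first-shell neighbour offsets are (±0.25, ±0.25, ±0.25) in the reference cell, so `RefIAD` must equal sqrt(3)/4, and the second shell at offsets like (0, 0.5, 0.5) sits farther out.

from math import sqrt, isclose
import numpy as np

first_shell = np.array([0.25, 0.25, 0.25])
assert isclose(np.linalg.norm(first_shell), sqrt(3) / 4)   # equals DiamondLattice.RefIAD

second_shell = np.array([0.0, 0.5, 0.5])
assert np.linalg.norm(second_shell) > np.linalg.norm(first_shell)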
from util.infinity import INFINITY
from tc_python.arule import ARule
from t_core.rollbacker import Rollbacker
from t_core.resolver import Resolver
class XRule(ARule):
'''
Applies the transformation on one match with roll-back capability.
'''
def __init__(self, LHS, RHS, max_iterations=INFINITY):
'''
Applies the transformation on one match with roll-back capability.
@param LHS: The pre-condition pattern (LHS + NACs).
@param RHS: The post-condition pattern (RHS).
'''
# external_matches_only=True because further matches of this rule are only processed after a roll-back
super(XRule, self).__init__(LHS, RHS)
self.M.max = max_iterations
self.I.max_iterations = max_iterations
self.B = Rollbacker(condition=LHS, max_iterations=max_iterations)
def packet_in(self, packet):
self.exception = None
self.is_success = False
# Checkpoint the original packet
self.B.packet_in(packet)
if not self.B.is_success:
self.exception = self.B.exception
return packet
# Match
packet = self.M.packet_in(packet)
if not self.M.is_success:
packet = self.B.restore(packet)
if self.M.exception:
self.exception = self.M.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Choose one match
packet = self.I.packet_in(packet)
if not self.I.is_success:
packet = self.B.restore(packet)
if self.I.exception:
self.exception = self.I.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Rewrite
packet = self.W.packet_in(packet)
if not self.W.is_success:
packet = self.B.restore(packet)
if self.W.exception:
self.exception = self.W.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
self.is_success = True
return packet
def next_in(self, packet):
self.exception = None
self.is_success = False
packet = self.B.next_in(packet)
if not self.B.is_success:
self.exception = self.B.exception
return packet
# Choose the next match
packet = self.I.packet_in(packet)
if not self.I.is_success:
packet = self.B.next_in(packet)
if self.I.exception:
self.exception = self.I.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Rewrite
packet = self.W.packet_in(packet)
if not self.W.is_success:
packet = self.B.next_in(packet)
if self.W.exception:
self.exception = self.W.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Output success packet
self.is_success = True
return packet
class XRule_r(XRule):
'''
Applies the transformation on one match with roll-back capability.
'''
def __init__(self, LHS, RHS, external_matches_only=False, custom_resolution=lambda packet: False):
'''
Applies the transformation on one match with roll-back capability.
@param LHS: The pre-condition pattern (LHS + NACs).
@param RHS: The post-condition pattern (RHS).
@param external_matches_only: Resolve conflicts ignoring the matches found in this ARule.
@param custom_resolution: Override the default resolution function.
'''
super(XRule_r, self).__init__()
self.R = Resolver(external_matches_only=external_matches_only,
custom_resolution=custom_resolution)
def packet_in(self, packet):
packet = super(XRule_r, self).packet_in(packet)
# is_success is True
if self.exception is None:
# Resolve any conflicts if necessary
packet = self.R.packet_in(packet)
if not self.R.is_success:
self.exception = self.R.exception
return packet
# Output success packet
else:
self.is_success = False
return packet
def next_in(self, packet):
packet = super(XRule_r, self).next_in(packet)
# is_success is True
if self.exception is None:
# Resolve any conflicts if necessary
packet = self.R.packet_in(packet)
if not self.R.is_success:
self.exception = self.R.exception
return packet
# Output success packet
else:
self.is_success = False
return packet
| [
"t_core.resolver.Resolver",
"t_core.rollbacker.Rollbacker"
]
| [((823, 879), 't_core.rollbacker.Rollbacker', 'Rollbacker', ([], {'condition': 'LHS', 'max_iterations': 'max_iterations'}), '(condition=LHS, max_iterations=max_iterations)\n', (833, 879), False, 'from t_core.rollbacker import Rollbacker\n'), ((3988, 4083), 't_core.resolver.Resolver', 'Resolver', ([], {'external_matches_only': 'external_matches_only', 'custom_resolution': 'custom_resolution'}), '(external_matches_only=external_matches_only, custom_resolution=\n custom_resolution)\n', (3996, 4083), False, 'from t_core.resolver import Resolver\n')] |
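In `packet_in` above the order is: checkpoint the packet (`B`), then match (`M`), choose one match (`I`), and rewrite (`W`), restoring the checkpoint as soon as any phase fails. The toy sketch below replays that control flow with plain functions so it can be read in isolation; it does not use t_core, and the phase functions are made up.

import copy

def run_phases(packet, phases):
    checkpoint = copy.deepcopy(packet)       # what the Rollbacker's packet_in provides
    for phase in phases:
        ok, packet = phase(packet)
        if not ok:                          # any failure: restore and report it
            return False, checkpoint
    return True, packet

match   = lambda p: (True, p + ["matched"])
choose  = lambda p: (True, p + ["chosen"])
rewrite = lambda p: (False, p)              # simulate a failing rewrite

ok, result = run_phases(["packet"], [match, choose, rewrite])
print(ok, result)                           # False ['packet'] -- original restored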
from bs4 import BeautifulSoup
import requests
import re
def retrieveText():
print("Parsing text from online target")
url = "https://www.whitehouse.gov/the-press-office/2017/10/16/remarks-president-trump-and-senate-majority-leader-mitch-mcconnell-joint"
response = requests.get(url)
soup = BeautifulSoup(response.content, "lxml")
textwrapper = soup.find("div", { "class" : "field-item" })
textel = textwrapper.find_all("p", { "class" : None })
textstripped = []
for element in textel:
stripped = element.text.replace("\r", "\n").replace("\r", "").replace("\n", "").replace("Q ", "0002reporter: ").replace("THE PRESIDENT: ", "0001president: ").strip()
if "P.M." not in stripped and "A.M." not in stripped:
textstripped.append(stripped)
# print(textstripped)
return textstripped | [
"bs4.BeautifulSoup",
"requests.get"
]
| [((277, 294), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (289, 294), False, 'import requests\n'), ((306, 345), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""lxml"""'], {}), "(response.content, 'lxml')\n", (319, 345), False, 'from bs4 import BeautifulSoup\n')] |
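The function above depends on one live whitehouse.gov page. The snippet below exercises the same BeautifulSoup calls on an inline HTML string (made up to resemble that page) and uses the stdlib "html.parser", so it runs without network access or lxml; classless paragraphs are selected here by checking `p.get("class")` rather than the `{"class": None}` filter used above.

from bs4 import BeautifulSoup

html = """
<div class="field-item">
  <p class="time">1:32 P.M. EDT</p>
  <p>THE PRESIDENT:  Thank you very much.</p>
  <p>Q    And a question from the press.</p>
</div>
"""

soup = BeautifulSoup(html, "html.parser")
wrapper = soup.find("div", {"class": "field-item"})
paragraphs = [p.text.strip() for p in wrapper.find_all("p") if not p.get("class")]
print(paragraphs)   # the paragraph with class="time" is filtered out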
"""
Django settings for city-infrastructure-platform project.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import sentry_sdk
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import gettext_lazy as _
from helusers.defaults import SOCIAL_AUTH_PIPELINE # noqa: F401
from sentry_sdk.integrations.django import DjangoIntegration
from .utils import git_version
# Set up .env file
checkout_dir = environ.Path(__file__) - 2
assert os.path.exists(checkout_dir("manage.py"))
parent_dir = checkout_dir.path("..")
if parent_dir() != "/" and os.path.isdir(parent_dir("etc")):
env_file = parent_dir("etc/env")
default_var_root = parent_dir("var")
else:
env_file = checkout_dir(".env")
default_var_root = checkout_dir("var")
BASE_DIR = checkout_dir()
env = environ.Env(
DEBUG=(bool, False),
TIER=(str, "dev"), # one of: prod, qa, stage, test, dev
SECRET_KEY=(str, ""),
VAR_ROOT=(str, default_var_root),
ALLOWED_HOSTS=(list, []),
TRUST_X_FORWARDED_HOST=(bool, False),
DATABASE_URL=(
str,
"postgis:///city-infrastructure-platform",
),
CACHE_URL=(str, "locmemcache://"),
EMAIL_URL=(str, "consolemail://"),
SENTRY_DSN=(str, ""),
AZURE_DEPLOYMENT=(bool, False),
AZURE_ACCOUNT_KEY=(str, False),
AZURE_CONTAINER=(str, False),
AZURE_ACCOUNT_NAME=(str, False),
OIDC_AUTHENTICATION_ENABLED=(bool, True),
SOCIAL_AUTH_TUNNISTAMO_KEY=(str, None),
SOCIAL_AUTH_TUNNISTAMO_SECRET=(str, None),
OIDC_API_TOKEN_AUTH_AUDIENCE=(str, None),
OIDC_API_TOKEN_AUTH_ISSUER=(str, None),
TOKEN_AUTH_MAX_TOKEN_AGE=(int, 600),
OIDC_ENDPOINT=(str, None),
HELUSERS_ADGROUPS_CLAIM=(str, "groups"),
LOGGING_AUTH_DEBUG=(bool, False),
OVERLAY_SOURCE_URL=(str, "https://geoserver.hel.fi/geoserver/city-infra/wms"),
BASEMAP_SOURCE_URL=(str, "https://kartta.hel.fi/ws/geoserver/avoindata/wms"),
STATIC_URL=(str, "/static/"),
MEDIA_URL=(str, "/media/"),
)
if os.path.exists(env_file):
env.read_env(env_file)
SOCIAL_AUTH_TUNNISTAMO_KEY = env("SOCIAL_AUTH_TUNNISTAMO_KEY")
SOCIAL_AUTH_TUNNISTAMO_SECRET = env("SOCIAL_AUTH_TUNNISTAMO_SECRET")
HELUSERS_ADGROUPS_CLAIM = env("HELUSERS_ADGROUPS_CLAIM")
SOCIAL_AUTH_ID_TOKEN_IN_END_SESSION = False
if env("OIDC_ENDPOINT"):
SOCIAL_AUTH_TUNNISTAMO_OIDC_ENDPOINT = env("OIDC_ENDPOINT")
OIDC_API_TOKEN_AUTH = {
"AUDIENCE": env("OIDC_API_TOKEN_AUTH_AUDIENCE"),
"ISSUER": env("OIDC_API_TOKEN_AUTH_ISSUER"),
}
# General settings
DEBUG = env("DEBUG")
OIDC_AUTHENTICATION_ENABLED = env("OIDC_AUTHENTICATION_ENABLED")
TIER = env("TIER")
SECRET_KEY = env("SECRET_KEY")
if DEBUG and not SECRET_KEY:
SECRET_KEY = "xxx"
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
if OIDC_AUTHENTICATION_ENABLED and (
not SOCIAL_AUTH_TUNNISTAMO_KEY
or not SOCIAL_AUTH_TUNNISTAMO_SECRET
or not OIDC_API_TOKEN_AUTH["AUDIENCE"]
or not OIDC_API_TOKEN_AUTH["ISSUER"]
):
raise ImproperlyConfigured("Authentication not configured properly")
CACHES = {"default": env.cache()}
vars().update(env.email_url()) # EMAIL_BACKEND etc.
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"timestamped_named": {
"format": "%(asctime)s %(name)s %(levelname)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "timestamped_named",
},
# Just for reference, not used
"blackhole": {"class": "logging.NullHandler"},
},
"loggers": {
"django": {"handlers": ["console"], "level": "INFO"},
"helusers": {
"handlers": ["console"],
"level": "DEBUG" if env("LOGGING_AUTH_DEBUG") else "INFO",
"propagate": False,
},
},
}
# Application definition
DJANGO_APPS = [
"helusers",
"social_django",
"helusers.apps.HelusersAdminConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.gis",
]
THIRD_PARTY_APPS = [
"django_extensions",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
"drf_yasg",
"django_filters",
"auditlog",
]
LOCAL_APPS = [
"users.apps.UsersConfig",
"traffic_control.apps.TrafficControlConfig",
"map.apps.MapConfig",
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
AUTHENTICATION_BACKENDS = (
"helusers.tunnistamo_oidc.TunnistamoOIDCAuth",
"django.contrib.auth.backends.ModelBackend",
)
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "/admin/"
LOGOUT_REDIRECT_URL = "/admin/login/"
SOCIAL_AUTH_TUNNISTAMO_AUTH_EXTRA_ARGUMENTS = {"ui_locales": "fi"}
WAGTAIL_SITE_NAME = _("City Infrastructure Platform")
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
MIDDLEWARE = [
"deployment.middleware.HealthCheckMiddleware",
"azure_client_ip.middleware.AzureClientIPMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.locale.LocaleMiddleware",
"auditlog.middleware.AuditlogMiddleware",
]
ROOT_URLCONF = "city-infrastructure-platform.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [checkout_dir("templates"), checkout_dir("map-view/build")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "city-infrastructure-platform.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "fi"
LANGUAGES = [("fi", _("Finnish")), ("en", _("English"))]
TIME_ZONE = "Europe/Helsinki"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
var_root = env.path("VAR_ROOT")
STATIC_ROOT = var_root("static")
MEDIA_ROOT = var_root("media")
STATIC_URL = env("STATIC_URL")
MEDIA_URL = env("MEDIA_URL")
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
STATICFILES_DIRS = [checkout_dir("map-view/build/static")]
# Whether to trust X-Forwarded-Host headers for all purposes
# where Django would need to make use of its own hostname
# fe. generating absolute URLs pointing to itself
# Most often used in reverse proxy setups
USE_X_FORWARDED_HOST = env("TRUST_X_FORWARDED_HOST")
# Django REST Framework
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
"helusers.oidc.ApiTokenAuthentication",
"rest_framework.authentication.TokenAuthentication",
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
"PAGE_SIZE": 20,
"OIDC_LEEWAY": env("TOKEN_AUTH_MAX_TOKEN_AGE"),
"GROUP_CLAIM_NAME": "groups",
}
# django-cors
if DEBUG:
CORS_ORIGIN_ALLOW_ALL = True
# Azure CLIENT_IP middleware
AZURE_DEPLOYMENT = env.bool("AZURE_DEPLOYMENT")
if AZURE_DEPLOYMENT:
AZURE_ACCOUNT_KEY = env.str("AZURE_ACCOUNT_KEY")
AZURE_CONTAINER = env.str("AZURE_CONTAINER")
AZURE_ACCOUNT_NAME = env.str("AZURE_ACCOUNT_NAME")
DEFAULT_FILE_STORAGE = "storages.backends.azure_storage.AzureStorage"
# Sentry-SDK
SENTRY_DSN = env.str("SENTRY_DSN")
VERSION = git_version()
if SENTRY_DSN:
sentry_sdk.init(dsn=SENTRY_DSN, integrations=[DjangoIntegration()], release=VERSION)
# Custom settings
SRID = 3879 # the spatial reference id used for geometries
OVERLAY_SOURCE_URL = env.str("OVERLAY_SOURCE_URL")
BASEMAP_SOURCE_URL = env.str("BASEMAP_SOURCE_URL")
LOCALE_PATHS = [
"./templates/locale",
]
| [
"os.path.exists",
"environ.Path",
"sentry_sdk.integrations.django.DjangoIntegration",
"django.utils.translation.gettext_lazy",
"django.core.exceptions.ImproperlyConfigured",
"environ.Env"
]
| [((987, 2068), 'environ.Env', 'environ.Env', ([], {'DEBUG': '(bool, False)', 'TIER': "(str, 'dev')", 'SECRET_KEY': "(str, '')", 'VAR_ROOT': '(str, default_var_root)', 'ALLOWED_HOSTS': '(list, [])', 'TRUST_X_FORWARDED_HOST': '(bool, False)', 'DATABASE_URL': "(str, 'postgis:///city-infrastructure-platform')", 'CACHE_URL': "(str, 'locmemcache://')", 'EMAIL_URL': "(str, 'consolemail://')", 'SENTRY_DSN': "(str, '')", 'AZURE_DEPLOYMENT': '(bool, False)', 'AZURE_ACCOUNT_KEY': '(str, False)', 'AZURE_CONTAINER': '(str, False)', 'AZURE_ACCOUNT_NAME': '(str, False)', 'OIDC_AUTHENTICATION_ENABLED': '(bool, True)', 'SOCIAL_AUTH_TUNNISTAMO_KEY': '(str, None)', 'SOCIAL_AUTH_TUNNISTAMO_SECRET': '(str, None)', 'OIDC_API_TOKEN_AUTH_AUDIENCE': '(str, None)', 'OIDC_API_TOKEN_AUTH_ISSUER': '(str, None)', 'TOKEN_AUTH_MAX_TOKEN_AGE': '(int, 600)', 'OIDC_ENDPOINT': '(str, None)', 'HELUSERS_ADGROUPS_CLAIM': "(str, 'groups')", 'LOGGING_AUTH_DEBUG': '(bool, False)', 'OVERLAY_SOURCE_URL': "(str, 'https://geoserver.hel.fi/geoserver/city-infra/wms')", 'BASEMAP_SOURCE_URL': "(str, 'https://kartta.hel.fi/ws/geoserver/avoindata/wms')", 'STATIC_URL': "(str, '/static/')", 'MEDIA_URL': "(str, '/media/')"}), "(DEBUG=(bool, False), TIER=(str, 'dev'), SECRET_KEY=(str, ''),\n VAR_ROOT=(str, default_var_root), ALLOWED_HOSTS=(list, []),\n TRUST_X_FORWARDED_HOST=(bool, False), DATABASE_URL=(str,\n 'postgis:///city-infrastructure-platform'), CACHE_URL=(str,\n 'locmemcache://'), EMAIL_URL=(str, 'consolemail://'), SENTRY_DSN=(str,\n ''), AZURE_DEPLOYMENT=(bool, False), AZURE_ACCOUNT_KEY=(str, False),\n AZURE_CONTAINER=(str, False), AZURE_ACCOUNT_NAME=(str, False),\n OIDC_AUTHENTICATION_ENABLED=(bool, True), SOCIAL_AUTH_TUNNISTAMO_KEY=(\n str, None), SOCIAL_AUTH_TUNNISTAMO_SECRET=(str, None),\n OIDC_API_TOKEN_AUTH_AUDIENCE=(str, None), OIDC_API_TOKEN_AUTH_ISSUER=(\n str, None), TOKEN_AUTH_MAX_TOKEN_AGE=(int, 600), OIDC_ENDPOINT=(str,\n None), HELUSERS_ADGROUPS_CLAIM=(str, 'groups'), LOGGING_AUTH_DEBUG=(\n bool, False), OVERLAY_SOURCE_URL=(str,\n 'https://geoserver.hel.fi/geoserver/city-infra/wms'),\n BASEMAP_SOURCE_URL=(str,\n 'https://kartta.hel.fi/ws/geoserver/avoindata/wms'), STATIC_URL=(str,\n '/static/'), MEDIA_URL=(str, '/media/'))\n", (998, 2068), False, 'import environ\n'), ((2178, 2202), 'os.path.exists', 'os.path.exists', (['env_file'], {}), '(env_file)\n', (2192, 2202), False, 'import os\n'), ((5003, 5036), 'django.utils.translation.gettext_lazy', '_', (['"""City Infrastructure Platform"""'], {}), "('City Infrastructure Platform')\n", (5004, 5036), True, 'from django.utils.translation import gettext_lazy as _\n'), ((615, 637), 'environ.Path', 'environ.Path', (['__file__'], {}), '(__file__)\n', (627, 637), False, 'import environ\n'), ((3142, 3204), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""Authentication not configured properly"""'], {}), "('Authentication not configured properly')\n", (3162, 3204), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((7208, 7220), 'django.utils.translation.gettext_lazy', '_', (['"""Finnish"""'], {}), "('Finnish')\n", (7209, 7220), True, 'from django.utils.translation import gettext_lazy as _\n'), ((7230, 7242), 'django.utils.translation.gettext_lazy', '_', (['"""English"""'], {}), "('English')\n", (7231, 7242), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9118, 9137), 'sentry_sdk.integrations.django.DjangoIntegration', 'DjangoIntegration', ([], {}), '()\n', (9135, 9137), False, 'from sentry_sdk.integrations.django 
import DjangoIntegration\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2021/7/25 19:30
# @author : Mo
# @function: predict model, multi-class text classification
# compatible with Linux
import platform
import json
import sys
import os
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
path_sys = os.path.join(path_root, "pytorch_nlu", "pytorch_textclassification")
print(path_root)
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from tcPredict import TextClassificationPredict
if __name__ == "__main__":
path_config = "../output/text_classification/model_ERNIE/tc.config"
tcp = TextClassificationPredict(path_config)
texts = [{"text": "ๅนณไนๅฟ๏ผๅค็งฐๆญๅท๏ผ้ถๅฑไบๅนฟ่ฅฟๅฃฎๆ่ชๆฒปๅบๆกๆๅธ๏ผไฝไบๅนฟ่ฅฟไธๅ้จ๏ผๆกๆๅธไธๅ้จ๏ผไธไธด้ๅฑฑๅฟ๏ผๅๆฅๆญๅนณ๏ผ่ฅฟๅๆฏ้ป้ณๆ๏ผๅ่ฟๆญๅ๏ผๆป้ข็งฏ1919.34ๅนณๆนๅ
ฌ้ใ"},
{"text": "ๅนณไนๅฟไธป่ฆๆ
ๆธธๆฏ็นๆๆฆๆดฅๅๅนดๅคๆฆใๅทๆฐด็ณๆฏ่ใไปๅฎถๆธฉๆณใๆกๆฑ้ฃๆฏๅบใๆผๆฑ้ฃๆฏๅบ็ญ๏ผๅนณไนๅฟไธบๆผๆฑๅ็็น๏ผๅนณไนไปฅๅ็งฐๆผๆฑ๏ผไปฅๅ็งฐๆกๆฑ๏ผๆฏ่ๅ็ๅคงๆกๆๆ
ๆธธๅบไนไธใ"},
{"text": "ๅฐๅฒญ็ฒ็๏ผๆญๆฐดๆถ่น๏ผ็ฏ็ปๆๅนณไธญใ้ๅนด็ไนๅญ๏ผๅคๅฃซๅ้ถ็ใ็ๆดป่ช่ง่ชๆฒป๏ผๅญฆไน ่ชๅ่ชๅจใไบ่ฒๅนถ้๏ผๆ่ๅนถ็จใ่ฟๆฅๆฐๆฝฎๆต๏ผๅปบ่ฎพๆฐๅนณไธญ"},
{"text": "ๆกๆๅฑฑๆฐด็ฒๅคฉไธ, ้ณๆๅฑฑๆฐด็ฒๆกๆ"},
]
res = tcp.predict(texts, logits_type="sigmoid")
print(res)
while True:
print("่ฏท่พๅ
ฅ:")
question = input()
res = tcp.predict([{"text": question}], logits_type="sigmoid")
print(res)
| [
"os.path.dirname",
"os.path.join",
"tcPredict.TextClassificationPredict"
]
| [((276, 344), 'os.path.join', 'os.path.join', (['path_root', '"""pytorch_nlu"""', '"""pytorch_textclassification"""'], {}), "(path_root, 'pytorch_nlu', 'pytorch_textclassification')\n", (288, 344), False, 'import os\n'), ((566, 604), 'tcPredict.TextClassificationPredict', 'TextClassificationPredict', (['path_config'], {}), '(path_config)\n', (591, 604), False, 'from tcPredict import TextClassificationPredict\n'), ((228, 253), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (243, 253), False, 'import os\n')] |
import unittest
from stringsheet.parser import create_spreadsheet_values
from stringsheet.parser import create_language_sheet_values
from stringsheet.parser import parse_resources
class BaseTestCase(unittest.TestCase):
def setUp(self):
self.resources = parse_resources('test-resources/res')
class CreateSpreadsheetValuesTestCase(BaseTestCase):
def setUp(self):
super(CreateSpreadsheetValuesTestCase, self).setUp()
self.values = create_spreadsheet_values(self.resources)
def test_rows_are_valid(self):
rows = [
['id', 'comment', 'default', 'de', 'pl', 'zh-rCN', 'zh-rTW'],
['a_string', '', 'A string', '', '', '', ''],
['partly_added', '', 'Partly added', 'Partly added (de)', '', '',
''],
['string', 'String with comment', 'String', 'String (de)',
'String (pl)', 'String (zh-rCN)', 'String (zh-rTW)'],
['string_2', '', 'String 2', '', '', '', ''],
['array[0]', 'Item comment', 'First', '', '', '', ''],
['array[1]', '', 'Second', '', '', '', ''],
['array_comment[0]', 'Array comment', 'Some item', '', '', '', ''],
['array_comment[1]', 'Array comment', 'More items', '', '', '', ''],
['array_comment[2]', 'Comment', 'More', '', '', '', ''],
['plural{zero}', 'Parent comment', 'Other', '', '', '', ''],
['plural{one}', 'Parent comment', 'One', '', '', '', ''],
['plural{two}', 'Parent comment', 'Other', '', '', '', ''],
['plural{few}', 'Parent comment', 'Other', '', '', '', ''],
['plural{many}', 'Parent comment', 'Other', '', '', '', ''],
['plural{other}', 'Comment', 'Other', '', '', '', ''],
['plurals{zero}', 'Item comment', 'Zero', '', '', '', ''],
['plurals{one}', '', 'One', '', '', '', ''],
['plurals{two}', '', 'Two', '', '', '', ''],
['plurals{few}', '', 'Few', '', '', '', ''],
['plurals{many}', '', 'Many', '', '', '', ''],
['plurals{other}', '', 'Other', '', '', '', ''],
]
self.assertEqual(len(rows), len(self.values))
for index, row in enumerate(rows):
self.assertEqual(row, self.values[index])
class CreateLanguageSpreadsheetValuesTestCase(BaseTestCase):
def setUp(self):
super(CreateLanguageSpreadsheetValuesTestCase, self).setUp()
self.values = create_language_sheet_values(self.resources, 'de')
def test_rows_are_valid(self):
rows = [
['id', 'comment', 'default', 'de'],
['a_string', '', 'A string', ''],
['partly_added', '', 'Partly added', 'Partly added (de)'],
['string', 'String with comment', 'String', 'String (de)'],
['string_2', '', 'String 2', ''],
['array[0]', 'Item comment', 'First', ''],
['array[1]', '', 'Second', ''],
['array_comment[0]', 'Array comment', 'Some item', ''],
['array_comment[1]', 'Array comment', 'More items', ''],
['array_comment[2]', 'Comment', 'More', ''],
['plural{zero}', 'Parent comment', 'Other', ''],
['plural{one}', 'Parent comment', 'One', ''],
['plural{two}', 'Parent comment', 'Other', ''],
['plural{few}', 'Parent comment', 'Other', ''],
['plural{many}', 'Parent comment', 'Other', ''],
['plural{other}', 'Comment', 'Other', ''],
['plurals{zero}', 'Item comment', 'Zero', ''],
['plurals{one}', '', 'One', ''],
['plurals{two}', '', 'Two', ''],
['plurals{few}', '', 'Few', ''],
['plurals{many}', '', 'Many', ''],
['plurals{other}', '', 'Other', ''],
]
self.assertEqual(len(rows), len(self.values))
for index, row in enumerate(rows):
self.assertEqual(row, self.values[index])
class CreateTemplateSpreadsheetValuesTestCase(BaseTestCase):
def setUp(self):
super(CreateTemplateSpreadsheetValuesTestCase, self).setUp()
self.values = create_language_sheet_values(self.resources, 'Template')
def test_rows_are_valid(self):
rows = [
['id', 'comment', 'default', 'language-id'],
['a_string', '', 'A string', ''],
['partly_added', '', 'Partly added', ''],
['string', 'String with comment', 'String', ''],
['string_2', '', 'String 2', ''],
['array[0]', 'Item comment', 'First', ''],
['array[1]', '', 'Second', ''],
['array_comment[0]', 'Array comment', 'Some item', ''],
['array_comment[1]', 'Array comment', 'More items', ''],
['array_comment[2]', 'Comment', 'More', ''],
['plural{zero}', 'Parent comment', 'Other', ''],
['plural{one}', 'Parent comment', 'One', ''],
['plural{two}', 'Parent comment', 'Other', ''],
['plural{few}', 'Parent comment', 'Other', ''],
['plural{many}', 'Parent comment', 'Other', ''],
['plural{other}', 'Comment', 'Other', ''],
['plurals{zero}', 'Item comment', 'Zero', ''],
['plurals{one}', '', 'One', ''],
['plurals{two}', '', 'Two', ''],
['plurals{few}', '', 'Few', ''],
['plurals{many}', '', 'Many', ''],
['plurals{other}', '', 'Other', ''],
]
self.assertEqual(len(rows), len(self.values))
for index, row in enumerate(rows):
self.assertEqual(row, self.values[index])
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"stringsheet.parser.create_spreadsheet_values",
"stringsheet.parser.parse_resources",
"stringsheet.parser.create_language_sheet_values"
]
| [((5629, 5644), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5642, 5644), False, 'import unittest\n'), ((268, 305), 'stringsheet.parser.parse_resources', 'parse_resources', (['"""test-resources/res"""'], {}), "('test-resources/res')\n", (283, 305), False, 'from stringsheet.parser import parse_resources\n'), ((465, 506), 'stringsheet.parser.create_spreadsheet_values', 'create_spreadsheet_values', (['self.resources'], {}), '(self.resources)\n', (490, 506), False, 'from stringsheet.parser import create_spreadsheet_values\n'), ((2462, 2512), 'stringsheet.parser.create_language_sheet_values', 'create_language_sheet_values', (['self.resources', '"""de"""'], {}), "(self.resources, 'de')\n", (2490, 2512), False, 'from stringsheet.parser import create_language_sheet_values\n'), ((4123, 4179), 'stringsheet.parser.create_language_sheet_values', 'create_language_sheet_values', (['self.resources', '"""Template"""'], {}), "(self.resources, 'Template')\n", (4151, 4179), False, 'from stringsheet.parser import create_language_sheet_values\n')] |
import os, sys
import numpy as np
from sedflow import obs as Obs
from sedflow import train as Train
from provabgs import infer as Infer
from provabgs import models as Models
####################################################
# input
####################################################
sample = sys.argv[1]
itrain = int(sys.argv[2])
nhidden = int(sys.argv[3])
nblocks = int(sys.argv[4])
niter = int(sys.argv[5])
i0 = int(sys.argv[6])
i1 = int(sys.argv[7])
####################################################
# compile NSA failures
####################################################
# u, g, r, i, z, sigma_u, sigma_g, sigma_r, sigma_i, sigma_z, redshift
y_nsa = Obs.load_nsa_data(test_set=False)
igals = np.load('/scratch/network/chhahn/sedflow/nsa_fail/fail.igals.npy')
# convert to flux
y_flux = Train.mag2flux(y_nsa[:,:5])
y_ivar = Train.sigma_mag2flux(y_nsa[:,5:10], y_nsa[:,:5])**-2
y_zred = y_nsa[:,-1]
####################################################
# setup inference
####################################################
# SPS parameter priors
prior_sps = Infer.load_priors([
Infer.UniformPrior(7., 12.5, label='sed'),
Infer.FlatDirichletPrior(4, label='sed'), # flat dirichilet priors
Infer.UniformPrior(0., 1., label='sed'), # burst fraction
Infer.UniformPrior(1e-2, 13.27, label='sed'), # tburst
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust1
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust2
Infer.UniformPrior(-2., 1., label='sed') # uniform priors on dust_index
])
# SPS model
m_sps = Models.NMF(burst=True, emulator=True)
def run_mcmc(i_obs):
# desi MCMC object
nsa_mcmc = Infer.nsaMCMC(model=m_sps, prior=prior_sps)
fmcmc = os.path.join('/scratch/network/chhahn/sedflow/nsa_fail',
'mcmc.nsa.%i.hdf5' % i_obs)
if not os.path.isfile(fmcmc):
print('%s running' % os.path.basename(fmcmc))
if not np.all(np.isfinite(y_flux[i_obs])):
print('NaN photometry', y_flux[i_obs])
return None
if not np.all(np.isfinite(y_ivar[i_obs])):
print('NaN ivar', y_ivar[i_obs])
return None
# run MCMC
zeus_chain = nsa_mcmc.run(
bands='sdss', # u, g, r, i, z
photo_obs=y_flux[i_obs],
photo_ivar_obs=y_ivar[i_obs],
zred=y_zred[i_obs],
vdisp=0.,
sampler='zeus',
nwalkers=30,
burnin=0,
opt_maxiter=2000,
niter=niter,
progress=True,
writeout=fmcmc)
else:
print('%s already exists' % os.path.basename(fmcmc))
return None
for i in range(i0, i1+1):
run_mcmc(igals[i])
| [
"provabgs.models.NMF",
"sedflow.train.mag2flux",
"provabgs.infer.nsaMCMC",
"provabgs.infer.LogUniformPrior",
"provabgs.infer.UniformPrior",
"sedflow.obs.load_nsa_data",
"os.path.join",
"sedflow.train.sigma_mag2flux",
"os.path.isfile",
"provabgs.infer.FlatDirichletPrior",
"numpy.isfinite",
"os.path.basename",
"numpy.load"
]
| [((689, 722), 'sedflow.obs.load_nsa_data', 'Obs.load_nsa_data', ([], {'test_set': '(False)'}), '(test_set=False)\n', (706, 722), True, 'from sedflow import obs as Obs\n'), ((732, 798), 'numpy.load', 'np.load', (['"""/scratch/network/chhahn/sedflow/nsa_fail/fail.igals.npy"""'], {}), "('/scratch/network/chhahn/sedflow/nsa_fail/fail.igals.npy')\n", (739, 798), True, 'import numpy as np\n'), ((828, 856), 'sedflow.train.mag2flux', 'Train.mag2flux', (['y_nsa[:, :5]'], {}), '(y_nsa[:, :5])\n', (842, 856), True, 'from sedflow import train as Train\n'), ((1863, 1900), 'provabgs.models.NMF', 'Models.NMF', ([], {'burst': '(True)', 'emulator': '(True)'}), '(burst=True, emulator=True)\n', (1873, 1900), True, 'from provabgs import models as Models\n'), ((865, 915), 'sedflow.train.sigma_mag2flux', 'Train.sigma_mag2flux', (['y_nsa[:, 5:10]', 'y_nsa[:, :5]'], {}), '(y_nsa[:, 5:10], y_nsa[:, :5])\n', (885, 915), True, 'from sedflow import train as Train\n'), ((1962, 2005), 'provabgs.infer.nsaMCMC', 'Infer.nsaMCMC', ([], {'model': 'm_sps', 'prior': 'prior_sps'}), '(model=m_sps, prior=prior_sps)\n', (1975, 2005), True, 'from provabgs import infer as Infer\n'), ((2023, 2111), 'os.path.join', 'os.path.join', (['"""/scratch/network/chhahn/sedflow/nsa_fail"""', "('mcmc.nsa.%i.hdf5' % i_obs)"], {}), "('/scratch/network/chhahn/sedflow/nsa_fail', 'mcmc.nsa.%i.hdf5' %\n i_obs)\n", (2035, 2111), False, 'import os, sys\n'), ((1129, 1171), 'provabgs.infer.UniformPrior', 'Infer.UniformPrior', (['(7.0)', '(12.5)'], {'label': '"""sed"""'}), "(7.0, 12.5, label='sed')\n", (1147, 1171), True, 'from provabgs import infer as Infer\n'), ((1180, 1220), 'provabgs.infer.FlatDirichletPrior', 'Infer.FlatDirichletPrior', (['(4)'], {'label': '"""sed"""'}), "(4, label='sed')\n", (1204, 1220), True, 'from provabgs import infer as Infer\n'), ((1265, 1306), 'provabgs.infer.UniformPrior', 'Infer.UniformPrior', (['(0.0)', '(1.0)'], {'label': '"""sed"""'}), "(0.0, 1.0, label='sed')\n", (1283, 1306), True, 'from provabgs import infer as Infer\n'), ((1342, 1386), 'provabgs.infer.UniformPrior', 'Infer.UniformPrior', (['(0.01)', '(13.27)'], {'label': '"""sed"""'}), "(0.01, 13.27, label='sed')\n", (1360, 1386), True, 'from provabgs import infer as Infer\n'), ((1408, 1458), 'provabgs.infer.LogUniformPrior', 'Infer.LogUniformPrior', (['(4.5e-05)', '(0.015)'], {'label': '"""sed"""'}), "(4.5e-05, 0.015, label='sed')\n", (1429, 1458), True, 'from provabgs import infer as Infer\n'), ((1501, 1551), 'provabgs.infer.LogUniformPrior', 'Infer.LogUniformPrior', (['(4.5e-05)', '(0.015)'], {'label': '"""sed"""'}), "(4.5e-05, 0.015, label='sed')\n", (1522, 1551), True, 'from provabgs import infer as Infer\n'), ((1594, 1635), 'provabgs.infer.UniformPrior', 'Infer.UniformPrior', (['(0.0)', '(3.0)'], {'label': '"""sed"""'}), "(0.0, 3.0, label='sed')\n", (1612, 1635), True, 'from provabgs import infer as Infer\n'), ((1676, 1717), 'provabgs.infer.UniformPrior', 'Infer.UniformPrior', (['(0.0)', '(3.0)'], {'label': '"""sed"""'}), "(0.0, 3.0, label='sed')\n", (1694, 1717), True, 'from provabgs import infer as Infer\n'), ((1758, 1800), 'provabgs.infer.UniformPrior', 'Infer.UniformPrior', (['(-2.0)', '(1.0)'], {'label': '"""sed"""'}), "(-2.0, 1.0, label='sed')\n", (1776, 1800), True, 'from provabgs import infer as Infer\n'), ((2133, 2154), 'os.path.isfile', 'os.path.isfile', (['fmcmc'], {}), '(fmcmc)\n', (2147, 2154), False, 'import os, sys\n'), ((2186, 2209), 'os.path.basename', 'os.path.basename', (['fmcmc'], {}), '(fmcmc)\n', (2202, 2209), False, 'import os, sys\n'), 
((2238, 2264), 'numpy.isfinite', 'np.isfinite', (['y_flux[i_obs]'], {}), '(y_flux[i_obs])\n', (2249, 2264), True, 'import numpy as np\n'), ((2374, 2400), 'numpy.isfinite', 'np.isfinite', (['y_ivar[i_obs]'], {}), '(y_ivar[i_obs])\n', (2385, 2400), True, 'import numpy as np\n'), ((2985, 3008), 'os.path.basename', 'os.path.basename', (['fmcmc'], {}), '(fmcmc)\n', (3001, 3008), False, 'import os, sys\n')] |
import json
import time
import re
from .keyvalue_provider import KeyValueProvider
from .gcloud_artifact_store import GCloudArtifactStore
from .util import timeit
class GSProvider(KeyValueProvider):
def __init__(self, config, blocking_auth=True, verbose=10, store=None):
self.config = config
self.bucket = config.get('bucket', 'studioml-meta')
self.meta_store = GCloudArtifactStore(config, verbose)
super(GSProvider, self).__init__(
config,
blocking_auth,
verbose,
store)
@timeit
def _get(self, key, shallow=False):
bucket = self.meta_store._get_bucket_obj()
retval = {}
if shallow:
blob_iterator = bucket.list_blobs(
prefix=key, delimiter='/')
bloblist = list(blob_iterator)
blobnames = {b.name for b in bloblist}
prefixes = blob_iterator.prefixes
suffixes = [re.sub('^' + key, '', p) for p in prefixes | blobnames]
retval = set({})
for s in suffixes:
if s.endswith('/'):
retval.add(s[:-1])
else:
retval.add(s)
return retval
else:
blob_iterator = bucket.list_blobs(prefix=key)
for blob in blob_iterator:
suffix = re.sub('^' + key, '', blob.name)
if suffix == '':
return json.loads(blob.download_as_string())
path = suffix.split('/')
path = [p for p in path if p != '']
current_dict = retval
for subdir in path[:-1]:
if subdir != '':
if subdir not in current_dict.keys():
current_dict[subdir] = {}
current_dict = current_dict[subdir]
try:
current_dict[path[-1]] = json.loads(
blob.download_as_string())
except BaseException:
pass
if not any(retval):
return None
else:
return retval
def _delete(self, key):
self.meta_store._delete_file(key)
def _set(self, key, value):
no_retries = 10
sleep_time = 1
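        # Retry the upload a few times, pausing between failed attempts.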
for i in range(no_retries):
try:
self.meta_store._get_bucket_obj().blob(key) \
.upload_from_string(json.dumps(value))
break
except BaseException as e:
self.logger.error('uploading data raised an exception:')
self.logger.exception(e)
time.sleep(sleep_time)
| [
"re.sub",
"json.dumps",
"time.sleep"
]
| [((2686, 2708), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (2696, 2708), False, 'import time\n'), ((958, 982), 're.sub', 're.sub', (["('^' + key)", '""""""', 'p'], {}), "('^' + key, '', p)\n", (964, 982), False, 'import re\n'), ((1370, 1402), 're.sub', 're.sub', (["('^' + key)", '""""""', 'blob.name'], {}), "('^' + key, '', blob.name)\n", (1376, 1402), False, 'import re\n'), ((2479, 2496), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (2489, 2496), False, 'import json\n')] |
import logging
from urllib.parse import unquote, urlparse
from pathlib import PurePosixPath
import requests
from requests.exceptions import ReadTimeout, ConnectionError, HTTPError
from django.core.management.base import BaseCommand
from django.core.files.base import ContentFile
from places.models import Place, Image
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class Command(BaseCommand):
help = 'Uploads data for a place'
def add_arguments(self, parser):
parser.add_argument('data_urls', nargs='+', type=str)
def handle(self, *args, **options):
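        # For each JSON URL: create the Place if it does not exist yet,
        # then download and attach its images.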
for url in options['data_urls']:
response = requests.get(url)
response.raise_for_status()
place_data = response.json()
new_place, created = Place.objects.get_or_create(
title=place_data['title'],
defaults={
'short_description': place_data['description_short'],
'long_description': place_data['description_long'],
'longitude': place_data['coordinates']['lng'],
'latitude': place_data['coordinates']['lat']
}
)
if created:
logging.info(f'Place "{new_place.title}" created')
else:
logging.info(f'Place "{new_place.title}" already exists')
for image_position, image_url in enumerate(place_data['imgs']):
try:
response = requests.get(image_url)
response.raise_for_status()
except (ReadTimeout, ConnectionError, HTTPError) as exception:
logging.exception(exception)
continue
new_image, _ = Image.objects.get_or_create(
place=new_place,
position=image_position
)
image_content = ContentFile(response.content)
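                # Derive the file name from the last component of the URL path.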
image_name = PurePosixPath(unquote(urlparse(image_url).path)).parts[-1]
new_image.image.save(image_name, image_content)
logging.info(f'Image {image_name} for place "{new_place.title}" uploaded')
| [
"logging.basicConfig",
"django.core.files.base.ContentFile",
"urllib.parse.urlparse",
"requests.get",
"logging.exception",
"places.models.Image.objects.get_or_create",
"logging.info",
"places.models.Place.objects.get_or_create"
]
| [((321, 396), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(message)s"""', 'level': 'logging.INFO'}), "(format='%(levelname)s:%(message)s', level=logging.INFO)\n", (340, 396), False, 'import logging\n'), ((670, 687), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (682, 687), False, 'import requests\n'), ((802, 1084), 'places.models.Place.objects.get_or_create', 'Place.objects.get_or_create', ([], {'title': "place_data['title']", 'defaults': "{'short_description': place_data['description_short'], 'long_description':\n place_data['description_long'], 'longitude': place_data['coordinates'][\n 'lng'], 'latitude': place_data['coordinates']['lat']}"}), "(title=place_data['title'], defaults={\n 'short_description': place_data['description_short'],\n 'long_description': place_data['description_long'], 'longitude':\n place_data['coordinates']['lng'], 'latitude': place_data['coordinates']\n ['lat']})\n", (829, 1084), False, 'from places.models import Place, Image\n'), ((1251, 1301), 'logging.info', 'logging.info', (['f"""Place "{new_place.title}" created"""'], {}), '(f\'Place "{new_place.title}" created\')\n', (1263, 1301), False, 'import logging\n'), ((1336, 1393), 'logging.info', 'logging.info', (['f"""Place "{new_place.title}" already exists"""'], {}), '(f\'Place "{new_place.title}" already exists\')\n', (1348, 1393), False, 'import logging\n'), ((1784, 1853), 'places.models.Image.objects.get_or_create', 'Image.objects.get_or_create', ([], {'place': 'new_place', 'position': 'image_position'}), '(place=new_place, position=image_position)\n', (1811, 1853), False, 'from places.models import Place, Image\n'), ((1944, 1973), 'django.core.files.base.ContentFile', 'ContentFile', (['response.content'], {}), '(response.content)\n', (1955, 1973), False, 'from django.core.files.base import ContentFile\n'), ((2142, 2216), 'logging.info', 'logging.info', (['f"""Image {image_name} for place "{new_place.title}" uploaded"""'], {}), '(f\'Image {image_name} for place "{new_place.title}" uploaded\')\n', (2154, 2216), False, 'import logging\n'), ((1523, 1546), 'requests.get', 'requests.get', (['image_url'], {}), '(image_url)\n', (1535, 1546), False, 'import requests\n'), ((1694, 1722), 'logging.exception', 'logging.exception', (['exception'], {}), '(exception)\n', (1711, 1722), False, 'import logging\n'), ((2025, 2044), 'urllib.parse.urlparse', 'urlparse', (['image_url'], {}), '(image_url)\n', (2033, 2044), False, 'from urllib.parse import unquote, urlparse\n')] |
# -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions and class for listing commands such as ls and du."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import fnmatch
import sys
import six
from gslib.cloud_api import EncryptionException
from gslib.exception import CommandException
from gslib.plurality_checkable_iterator import PluralityCheckableIterator
from gslib.storage_url import GenerationFromUrlAndString
from gslib.utils.constants import S3_ACL_MARKER_GUID
from gslib.utils.constants import S3_DELETE_MARKER_GUID
from gslib.utils.constants import S3_MARKER_GUIDS
from gslib.utils.constants import UTF8
from gslib.utils.system_util import IS_WINDOWS
from gslib.utils.translation_helper import AclTranslation
from gslib.utils import text_util
from gslib.wildcard_iterator import StorageUrlFromString
ENCRYPTED_FIELDS = [
'md5Hash',
'crc32c',
]
UNENCRYPTED_FULL_LISTING_FIELDS = [
'acl',
'cacheControl',
'componentCount',
'contentDisposition',
'contentEncoding',
'contentLanguage',
'contentType',
'customTime',
'kmsKeyName',
'customerEncryption',
'etag',
'eventBasedHold',
'generation',
'metadata',
'metageneration',
'retentionExpirationTime',
'size',
'storageClass',
'temporaryHold',
'timeCreated',
'timeDeleted',
'timeStorageClassUpdated',
'updated',
]
def MakeMetadataLine(label, value, indent=1):
"""Returns a string with a vertically aligned label and value.
Labels of the same indentation level will start at the same column. Values
will all start at the same column (unless the combined left-indent and
label length is excessively long). If a value spans multiple lines,
indentation will only be applied to the first line. Example output from
several calls:
Label1: Value (default indent of 1 was used)
Sublabel1: Value (used indent of 2 here)
Label2: Value
Args:
label: The label to print in the first column.
value: The value to print in the second column.
indent: (4 * indent) spaces will be placed before the label.
Returns:
A string with a vertically aligned label and value.
"""
return '{}{}'.format(((' ' * indent * 4) + label + ':').ljust(28), value)
def PrintBucketHeader(bucket_listing_ref): # pylint: disable=unused-argument
"""Default function for printing headers for buckets.
Header is printed prior to listing the contents of the bucket.
Args:
bucket_listing_ref: BucketListingRef of type BUCKET.
"""
pass
def PrintDir(bucket_listing_ref):
"""Default function for printing buckets or prefixes.
Args:
bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
"""
text_util.print_to_fd(bucket_listing_ref.url_string)
# pylint: disable=unused-argument
def PrintDirSummary(num_bytes, bucket_listing_ref):
"""Off-by-default function for printing buckets or prefix size summaries.
Args:
num_bytes: Number of bytes contained in the directory.
bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
"""
pass
def PrintDirHeader(bucket_listing_ref):
"""Default function for printing headers for prefixes.
Header is printed prior to listing the contents of the prefix.
Args:
bucket_listing_ref: BucketListingRef of type PREFIX.
"""
text_util.print_to_fd('{}:'.format(bucket_listing_ref.url_string))
def PrintNewLine():
"""Default function for printing new lines between directories."""
text_util.print_to_fd()
# pylint: disable=too-many-statements
def PrintFullInfoAboutObject(bucket_listing_ref, incl_acl=True):
"""Print full info for given object (like what displays for gsutil ls -L).
Args:
bucket_listing_ref: BucketListingRef being listed.
Must have ref_type OBJECT and a populated root_object
with the desired fields.
incl_acl: True if ACL info should be output.
Returns:
Tuple (number of objects, object_length)
Raises:
Exception: if calling bug encountered.
"""
url_str = bucket_listing_ref.url_string
storage_url = StorageUrlFromString(url_str)
obj = bucket_listing_ref.root_object
if (obj.metadata and
S3_DELETE_MARKER_GUID in obj.metadata.additionalProperties):
num_bytes = 0
num_objs = 0
url_str += '<DeleteMarker>'
else:
num_bytes = obj.size
num_objs = 1
text_util.print_to_fd('{}:'.format(url_str))
if obj.timeCreated:
text_util.print_to_fd(
MakeMetadataLine('Creation time',
obj.timeCreated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.updated:
text_util.print_to_fd(
MakeMetadataLine('Update time',
obj.updated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if (obj.timeStorageClassUpdated and
obj.timeStorageClassUpdated != obj.timeCreated):
text_util.print_to_fd(
MakeMetadataLine(
'Storage class update time',
obj.timeStorageClassUpdated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.storageClass:
text_util.print_to_fd(MakeMetadataLine('Storage class', obj.storageClass))
if obj.temporaryHold:
text_util.print_to_fd(MakeMetadataLine('Temporary Hold', 'Enabled'))
if obj.eventBasedHold:
text_util.print_to_fd(MakeMetadataLine('Event-Based Hold', 'Enabled'))
if obj.retentionExpirationTime:
text_util.print_to_fd(
MakeMetadataLine(
'Retention Expiration',
obj.retentionExpirationTime.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.kmsKeyName:
text_util.print_to_fd(MakeMetadataLine('KMS key', obj.kmsKeyName))
if obj.cacheControl:
text_util.print_to_fd(MakeMetadataLine('Cache-Control', obj.cacheControl))
if obj.contentDisposition:
text_util.print_to_fd(
MakeMetadataLine('Content-Disposition', obj.contentDisposition))
if obj.contentEncoding:
text_util.print_to_fd(
MakeMetadataLine('Content-Encoding', obj.contentEncoding))
if obj.contentLanguage:
text_util.print_to_fd(
MakeMetadataLine('Content-Language', obj.contentLanguage))
text_util.print_to_fd(MakeMetadataLine('Content-Length', obj.size))
text_util.print_to_fd(MakeMetadataLine('Content-Type', obj.contentType))
if obj.componentCount:
text_util.print_to_fd(
MakeMetadataLine('Component-Count', obj.componentCount))
if obj.customTime:
text_util.print_to_fd(MakeMetadataLine('Custom-Time', obj.customTime))
if obj.timeDeleted:
text_util.print_to_fd(
MakeMetadataLine('Noncurrent time',
obj.timeDeleted.strftime('%a, %d %b %Y %H:%M:%S GMT')))
marker_props = {}
if obj.metadata and obj.metadata.additionalProperties:
non_marker_props = []
for add_prop in obj.metadata.additionalProperties:
if add_prop.key not in S3_MARKER_GUIDS:
non_marker_props.append(add_prop)
else:
marker_props[add_prop.key] = add_prop.value
if non_marker_props:
text_util.print_to_fd(MakeMetadataLine('Metadata', ''))
for ap in non_marker_props:
ap_key = '{}'.format(ap.key)
ap_value = '{}'.format(ap.value)
meta_data_line = MakeMetadataLine(ap_key, ap_value, indent=2)
text_util.print_to_fd(meta_data_line)
if obj.customerEncryption:
if not obj.crc32c:
text_util.print_to_fd(MakeMetadataLine('Hash (crc32c)', 'encrypted'))
if not obj.md5Hash:
text_util.print_to_fd(MakeMetadataLine('Hash (md5)', 'encrypted'))
text_util.print_to_fd(
MakeMetadataLine('Encryption algorithm',
obj.customerEncryption.encryptionAlgorithm))
text_util.print_to_fd(
MakeMetadataLine('Encryption key SHA256',
obj.customerEncryption.keySha256))
if obj.crc32c:
text_util.print_to_fd(MakeMetadataLine('Hash (crc32c)', obj.crc32c))
if obj.md5Hash:
text_util.print_to_fd(MakeMetadataLine('Hash (md5)', obj.md5Hash))
text_util.print_to_fd(MakeMetadataLine('ETag', obj.etag.strip('"\'')))
if obj.generation:
generation_str = GenerationFromUrlAndString(storage_url, obj.generation)
text_util.print_to_fd(MakeMetadataLine('Generation', generation_str))
if obj.metageneration:
text_util.print_to_fd(MakeMetadataLine('Metageneration',
obj.metageneration))
if incl_acl:
# JSON API won't return acls as part of the response unless we have
# full control scope
if obj.acl:
text_util.print_to_fd(
MakeMetadataLine('ACL', AclTranslation.JsonFromMessage(obj.acl)))
elif S3_ACL_MARKER_GUID in marker_props:
text_util.print_to_fd(
MakeMetadataLine('ACL', marker_props[S3_ACL_MARKER_GUID]))
else:
# Empty ACLs are possible with Bucket Policy Only and no longer imply
# ACCESS DENIED anymore.
text_util.print_to_fd(MakeMetadataLine('ACL', '[]'))
return (num_objs, num_bytes)
def PrintObject(bucket_listing_ref):
"""Default printing function for objects.
Args:
bucket_listing_ref: BucketListingRef of type OBJECT.
Returns:
(num_objects, num_bytes).
"""
try:
text_util.print_to_fd(bucket_listing_ref.url_string)
except IOError as e:
# Windows throws an IOError 0 here for object names containing Unicode
# chars. Ignore it.
if not (IS_WINDOWS and e.errno == 0):
raise
return (1, 0)
class LsHelper(object):
"""Helper class for ls and du."""
def __init__(self,
iterator_func,
logger,
print_object_func=PrintObject,
print_dir_func=PrintDir,
print_dir_header_func=PrintDirHeader,
print_bucket_header_func=PrintBucketHeader,
print_dir_summary_func=PrintDirSummary,
print_newline_func=PrintNewLine,
all_versions=False,
should_recurse=False,
exclude_patterns=None,
fields=('name',),
list_subdir_contents=True):
"""Initializes the helper class to prepare for listing.
Args:
iterator_func: Function for instantiating iterator.
Inputs-
url_string- Url string to iterate on. May include
wildcards.
all_versions=False- If true, iterate over all object
versions.
logger: Logger for outputting warnings / errors.
print_object_func: Function for printing objects.
print_dir_func: Function for printing buckets/prefixes.
print_dir_header_func: Function for printing header line for buckets
or prefixes.
print_bucket_header_func: Function for printing header line for buckets
or prefixes.
print_dir_summary_func: Function for printing size summaries about
buckets/prefixes.
print_newline_func: Function for printing new lines between dirs.
all_versions: If true, list all object versions.
should_recurse: If true, recursively listing buckets/prefixes.
exclude_patterns: Patterns to exclude when listing.
fields: Fields to request from bucket listings; this should
include all fields that need to be populated in
objects so they can be listed. Can be set to None
to retrieve all object fields. Defaults to short
listing fields.
list_subdir_contents: If true, return the directory and any contents,
otherwise return only the directory itself.
"""
self._iterator_func = iterator_func
self.logger = logger
self._print_object_func = print_object_func
self._print_dir_func = print_dir_func
self._print_dir_header_func = print_dir_header_func
self._print_bucket_header_func = print_bucket_header_func
self._print_dir_summary_func = print_dir_summary_func
self._print_newline_func = print_newline_func
self.all_versions = all_versions
self.should_recurse = should_recurse
self.exclude_patterns = exclude_patterns
self.bucket_listing_fields = fields
self.list_subdir_contents = list_subdir_contents
def ExpandUrlAndPrint(self, url):
"""Iterates over the given URL and calls print functions.
Args:
url: StorageUrl to iterate over.
Returns:
(num_objects, num_bytes) total number of objects and bytes iterated.
"""
num_objects = 0
num_dirs = 0
num_bytes = 0
print_newline = False
if url.IsBucket() or self.should_recurse:
# IsBucket() implies a top-level listing.
if url.IsBucket():
self._print_bucket_header_func(url)
return self._RecurseExpandUrlAndPrint(url.url_string,
print_initial_newline=False)
else:
# User provided a prefix or object URL, but it's impossible to tell
# which until we do a listing and see what matches.
top_level_iterator = PluralityCheckableIterator(
self._iterator_func(
url.CreatePrefixUrl(wildcard_suffix=None),
all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=self.bucket_listing_fields))
plurality = top_level_iterator.HasPlurality()
try:
top_level_iterator.PeekException()
except EncryptionException:
# Detailed listing on a single object can perform a GetObjectMetadata
# call, which raises if a matching encryption key isn't found.
# Re-iterate without requesting encrypted fields.
top_level_iterator = PluralityCheckableIterator(
self._iterator_func(
url.CreatePrefixUrl(wildcard_suffix=None),
all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=UNENCRYPTED_FULL_LISTING_FIELDS))
plurality = top_level_iterator.HasPlurality()
for blr in top_level_iterator:
if self._MatchesExcludedPattern(blr):
continue
if blr.IsObject():
nd = 0
no, nb = self._print_object_func(blr)
print_newline = True
elif blr.IsPrefix():
if print_newline:
self._print_newline_func()
else:
print_newline = True
if plurality and self.list_subdir_contents:
self._print_dir_header_func(blr)
elif plurality and not self.list_subdir_contents:
print_newline = False
expansion_url_str = StorageUrlFromString(
blr.url_string).CreatePrefixUrl(
wildcard_suffix='*' if self.list_subdir_contents else None)
nd, no, nb = self._RecurseExpandUrlAndPrint(expansion_url_str)
self._print_dir_summary_func(nb, blr)
else:
# We handle all buckets at the top level, so this should never happen.
raise CommandException(
'Sub-level iterator returned a CsBucketListingRef of type Bucket')
num_objects += no
num_dirs += nd
num_bytes += nb
return num_dirs, num_objects, num_bytes
def _RecurseExpandUrlAndPrint(self, url_str, print_initial_newline=True):
"""Iterates over the given URL string and calls print functions.
Args:
url_str: String describing StorageUrl to iterate over.
Must be of depth one or higher.
print_initial_newline: If true, print a newline before recursively
expanded prefixes.
Returns:
(num_objects, num_bytes) total number of objects and bytes iterated.
"""
num_objects = 0
num_dirs = 0
num_bytes = 0
for blr in self._iterator_func(
'%s' % url_str, all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=self.bucket_listing_fields):
if self._MatchesExcludedPattern(blr):
continue
if blr.IsObject():
nd = 0
no, nb = self._print_object_func(blr)
elif blr.IsPrefix():
if self.should_recurse:
if print_initial_newline:
self._print_newline_func()
else:
print_initial_newline = True
self._print_dir_header_func(blr)
expansion_url_str = StorageUrlFromString(
blr.url_string).CreatePrefixUrl(wildcard_suffix='*')
nd, no, nb = self._RecurseExpandUrlAndPrint(expansion_url_str)
self._print_dir_summary_func(nb, blr)
else:
nd, no, nb = 1, 0, 0
self._print_dir_func(blr)
else:
# We handle all buckets at the top level, so this should never happen.
raise CommandException(
'Sub-level iterator returned a bucketListingRef of type Bucket')
num_dirs += nd
num_objects += no
num_bytes += nb
return num_dirs, num_objects, num_bytes
def _MatchesExcludedPattern(self, blr):
"""Checks bucket listing reference against patterns to exclude.
Args:
blr: BucketListingRef to check.
Returns:
True if reference matches a pattern and should be excluded.
"""
if self.exclude_patterns:
tomatch = six.ensure_str(blr.url_string)
for pattern in self.exclude_patterns:
if fnmatch.fnmatch(tomatch, six.ensure_str(pattern)):
return True
return False
| [
"gslib.utils.translation_helper.AclTranslation.JsonFromMessage",
"gslib.wildcard_iterator.StorageUrlFromString",
"gslib.exception.CommandException",
"six.ensure_str",
"gslib.utils.text_util.print_to_fd",
"gslib.storage_url.GenerationFromUrlAndString"
]
| [((3413, 3465), 'gslib.utils.text_util.print_to_fd', 'text_util.print_to_fd', (['bucket_listing_ref.url_string'], {}), '(bucket_listing_ref.url_string)\n', (3434, 3465), False, 'from gslib.utils import text_util\n'), ((4177, 4200), 'gslib.utils.text_util.print_to_fd', 'text_util.print_to_fd', ([], {}), '()\n', (4198, 4200), False, 'from gslib.utils import text_util\n'), ((4798, 4827), 'gslib.wildcard_iterator.StorageUrlFromString', 'StorageUrlFromString', (['url_str'], {}), '(url_str)\n', (4818, 4827), False, 'from gslib.wildcard_iterator import StorageUrlFromString\n'), ((8748, 8803), 'gslib.storage_url.GenerationFromUrlAndString', 'GenerationFromUrlAndString', (['storage_url', 'obj.generation'], {}), '(storage_url, obj.generation)\n', (8774, 8803), False, 'from gslib.storage_url import GenerationFromUrlAndString\n'), ((9820, 9872), 'gslib.utils.text_util.print_to_fd', 'text_util.print_to_fd', (['bucket_listing_ref.url_string'], {}), '(bucket_listing_ref.url_string)\n', (9841, 9872), False, 'from gslib.utils import text_util\n'), ((18051, 18081), 'six.ensure_str', 'six.ensure_str', (['blr.url_string'], {}), '(blr.url_string)\n', (18065, 18081), False, 'import six\n'), ((7908, 7945), 'gslib.utils.text_util.print_to_fd', 'text_util.print_to_fd', (['meta_data_line'], {}), '(meta_data_line)\n', (7929, 7945), False, 'from gslib.utils import text_util\n'), ((9219, 9258), 'gslib.utils.translation_helper.AclTranslation.JsonFromMessage', 'AclTranslation.JsonFromMessage', (['obj.acl'], {}), '(obj.acl)\n', (9249, 9258), False, 'from gslib.utils.translation_helper import AclTranslation\n'), ((17550, 17636), 'gslib.exception.CommandException', 'CommandException', (['"""Sub-level iterator returned a bucketListingRef of type Bucket"""'], {}), "(\n 'Sub-level iterator returned a bucketListingRef of type Bucket')\n", (17566, 17636), False, 'from gslib.exception import CommandException\n'), ((18162, 18185), 'six.ensure_str', 'six.ensure_str', (['pattern'], {}), '(pattern)\n', (18176, 18185), False, 'import six\n'), ((15778, 15866), 'gslib.exception.CommandException', 'CommandException', (['"""Sub-level iterator returned a CsBucketListingRef of type Bucket"""'], {}), "(\n 'Sub-level iterator returned a CsBucketListingRef of type Bucket')\n", (15794, 15866), False, 'from gslib.exception import CommandException\n'), ((15399, 15435), 'gslib.wildcard_iterator.StorageUrlFromString', 'StorageUrlFromString', (['blr.url_string'], {}), '(blr.url_string)\n', (15419, 15435), False, 'from gslib.wildcard_iterator import StorageUrlFromString\n'), ((17153, 17189), 'gslib.wildcard_iterator.StorageUrlFromString', 'StorageUrlFromString', (['blr.url_string'], {}), '(blr.url_string)\n', (17173, 17189), False, 'from gslib.wildcard_iterator import StorageUrlFromString\n')] |
import os
from tqdm import tqdm
import cv2
import numpy as np
#pre process test data:
path = "raw_test_data/"
list_width = []
list_height = []
list_image = []
def pre_process():
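    # First pass: record every image's width/height to find the maximum size.
    # Second pass: zero-pad each image to a square whose side is a multiple
    # of 16 and write it to test_data/.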
print("analyze images")
for Files in tqdm(os.listdir(path)):
if "jpg" in Files:
#print(Files)
img = cv2.imread(path + Files, 1)
height, width, chan = img.shape
#print(width)
#print(height)
list_width.append(width)
list_height.append(height)
max_width = np.max(list_width)
max_height = np.max(list_height)
if max_height == max_width :
print("max height == max width")
print("format images: ")
for image in tqdm(os.listdir(path)):
if "jpg" in image:
#print(image)
img = cv2.imread(path + image, 1)
height, width, chan = img.shape
new_height = (round(max_height/16)+1)*16 # image dimension needs to be a multiple of 16
new_width = new_height # image needs to be squared
delta_width = new_width - width
delta_height = new_height - height
#print("delta height",delta_height)
#print("delta width",delta_width)
pad_img = cv2.copyMakeBorder(img, 0, delta_height, 0, delta_width, cv2.BORDER_CONSTANT,None, value = 0)
#list_image.append(pad_img)
cv2.imwrite("test_data/"+image, pad_img)
pre_process()
for image in list_image:
print(image.shape)
| [
"cv2.imwrite",
"os.listdir",
"cv2.copyMakeBorder",
"numpy.max",
"cv2.imread"
]
| [((539, 557), 'numpy.max', 'np.max', (['list_width'], {}), '(list_width)\n', (545, 557), True, 'import numpy as np\n'), ((575, 594), 'numpy.max', 'np.max', (['list_height'], {}), '(list_height)\n', (581, 594), True, 'import numpy as np\n'), ((230, 246), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (240, 246), False, 'import os\n'), ((720, 736), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (730, 736), False, 'import os\n'), ((320, 347), 'cv2.imread', 'cv2.imread', (['(path + Files)', '(1)'], {}), '(path + Files, 1)\n', (330, 347), False, 'import cv2\n'), ((810, 837), 'cv2.imread', 'cv2.imread', (['(path + image)', '(1)'], {}), '(path + image, 1)\n', (820, 837), False, 'import cv2\n'), ((1265, 1362), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', '(0)', 'delta_height', '(0)', 'delta_width', 'cv2.BORDER_CONSTANT', 'None'], {'value': '(0)'}), '(img, 0, delta_height, 0, delta_width, cv2.\n BORDER_CONSTANT, None, value=0)\n', (1283, 1362), False, 'import cv2\n'), ((1411, 1453), 'cv2.imwrite', 'cv2.imwrite', (["('test_data/' + image)", 'pad_img'], {}), "('test_data/' + image, pad_img)\n", (1422, 1453), False, 'import cv2\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for creating RPC clusters on localhost."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import portpicker
import tensorflow as tf
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return their servers."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]}
cs = tf.train.ClusterSpec(cluster_dict)
workers = [
tf.train.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)]
ps_servers = [
tf.train.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)]
return workers, ps_servers
class CreateLocalClusterTest(tf.test.TestCase):
def testCreateLocalCluster(self):
workers, _ = create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
var0 = tf.Variable(0.0)
with tf.device("/job:ps/task:1"):
var1 = tf.Variable(1.0)
worker_sessions[0].run([var0.initializer, var1.initializer])
with tf.device("/job:ps/task:0"):
var2 = tf.Variable(2.0)
with tf.device("/job:ps/task:1"):
var3 = tf.Variable(3.0)
worker_sessions[1].run([var2.initializer, var3.initializer])
# Read values back in the opposite session
self.assertAllEqual(0.0, var0.eval(session=worker_sessions[1]))
self.assertAllEqual(1.0, var1.eval(session=worker_sessions[1]))
self.assertAllEqual(2.0, var2.eval(session=worker_sessions[0]))
self.assertAllEqual(3.0, var3.eval(session=worker_sessions[0]))
class CreateLocalClusterBenchmark(tf.test.Benchmark):
def benchmarkCreateLocalCluster(self):
deltas = []
iters = 5
for _ in range(iters):
start_time = time.time()
create_local_cluster(num_workers=1, num_ps=10)
end_time = time.time()
deltas.append(end_time - start_time)
median_deltas = np.median(deltas)
print(
"\n\nbenchmark_create_local_cluster_1_worker_10_ps. "
"iterations: %d, median wall time: %g\n\n" % (iters, median_deltas))
self.report_benchmark(
iters=iters,
wall_time=median_deltas,
name="benchmark_create_local_cluster_1_worker_10_ps")
class PartitionedVariablesBenchmark(tf.test.Benchmark):
def benchmark_create_1000_partitions_with_100_parameter_servers(self):
workers, _ = create_local_cluster(num_workers=1, num_ps=100)
worker_sessions = [tf.Session(w.target) for w in workers]
worker = worker_sessions[0]
partition_sizes = (1, 512, 1024*32, 1024*128)
partitioned = []
for partition_size in partition_sizes:
# max_shard_bytes is 4, shape is 1000*partition_size float32s which should
# partition into 1000 shards, each containing partition_size float32s.
print("Building partitioned variable with %d floats per partition"
% partition_size)
with tf.device(tf.train.replica_device_setter(ps_tasks=100)):
partitioned_ix = tf.get_variable(
"partitioned_%d" % partition_size,
shape=[1000 * partition_size],
dtype=tf.float32,
# Each partition to have exactly N float32s
partitioner=tf.variable_axis_size_partitioner(
max_shard_bytes=4 * partition_size))
# Concatenates along axis 0
partitioned.append(tf.convert_to_tensor(partitioned_ix))
tf.global_variables_initializer().run(session=worker)
for ix, partition_size in enumerate(partition_sizes):
print("Running benchmark having partitions with %d floats"
% partition_size)
self.run_op_benchmark(
worker,
partitioned[ix],
name=("read_concat_1000_partitions_from_"
"100_parameter_servers_partsize_%d_floats" % partition_size))
if __name__ == "__main__":
tf.test.main()
| [
"tensorflow.train.ClusterSpec",
"tensorflow.device",
"numpy.median",
"tensorflow.train.Server",
"tensorflow.Variable",
"tensorflow.Session",
"tensorflow.test.main",
"tensorflow.global_variables_initializer",
"tensorflow.variable_axis_size_partitioner",
"tensorflow.train.replica_device_setter",
"tensorflow.convert_to_tensor",
"portpicker.pick_unused_port",
"time.time"
]
| [((1362, 1396), 'tensorflow.train.ClusterSpec', 'tf.train.ClusterSpec', (['cluster_dict'], {}), '(cluster_dict)\n', (1382, 1396), True, 'import tensorflow as tf\n'), ((4919, 4933), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (4931, 4933), True, 'import tensorflow as tf\n'), ((1085, 1114), 'portpicker.pick_unused_port', 'portpicker.pick_unused_port', ([], {}), '()\n', (1112, 1114), False, 'import portpicker\n'), ((1158, 1187), 'portpicker.pick_unused_port', 'portpicker.pick_unused_port', ([], {}), '()\n', (1185, 1187), False, 'import portpicker\n'), ((1418, 1506), 'tensorflow.train.Server', 'tf.train.Server', (['cs'], {'job_name': '"""worker"""', 'protocol': 'protocol', 'task_index': 'ix', 'start': '(True)'}), "(cs, job_name='worker', protocol=protocol, task_index=ix,\n start=True)\n", (1433, 1506), True, 'import tensorflow as tf\n'), ((1573, 1658), 'tensorflow.train.Server', 'tf.train.Server', (['cs'], {'job_name': '"""ps"""', 'protocol': 'protocol', 'task_index': 'ix', 'start': '(True)'}), "(cs, job_name='ps', protocol=protocol, task_index=ix, start=True\n )\n", (1588, 1658), True, 'import tensorflow as tf\n'), ((2992, 3009), 'numpy.median', 'np.median', (['deltas'], {}), '(deltas)\n', (3001, 3009), True, 'import numpy as np\n'), ((1899, 1919), 'tensorflow.Session', 'tf.Session', (['w.target'], {}), '(w.target)\n', (1909, 1919), True, 'import tensorflow as tf\n'), ((1947, 1974), 'tensorflow.device', 'tf.device', (['"""/job:ps/task:0"""'], {}), "('/job:ps/task:0')\n", (1956, 1974), True, 'import tensorflow as tf\n'), ((1989, 2005), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {}), '(0.0)\n', (2000, 2005), True, 'import tensorflow as tf\n'), ((2015, 2042), 'tensorflow.device', 'tf.device', (['"""/job:ps/task:1"""'], {}), "('/job:ps/task:1')\n", (2024, 2042), True, 'import tensorflow as tf\n'), ((2057, 2073), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (2068, 2073), True, 'import tensorflow as tf\n'), ((2148, 2175), 'tensorflow.device', 'tf.device', (['"""/job:ps/task:0"""'], {}), "('/job:ps/task:0')\n", (2157, 2175), True, 'import tensorflow as tf\n'), ((2190, 2206), 'tensorflow.Variable', 'tf.Variable', (['(2.0)'], {}), '(2.0)\n', (2201, 2206), True, 'import tensorflow as tf\n'), ((2216, 2243), 'tensorflow.device', 'tf.device', (['"""/job:ps/task:1"""'], {}), "('/job:ps/task:1')\n", (2225, 2243), True, 'import tensorflow as tf\n'), ((2258, 2274), 'tensorflow.Variable', 'tf.Variable', (['(3.0)'], {}), '(3.0)\n', (2269, 2274), True, 'import tensorflow as tf\n'), ((2834, 2845), 'time.time', 'time.time', ([], {}), '()\n', (2843, 2845), False, 'import time\n'), ((2916, 2927), 'time.time', 'time.time', ([], {}), '()\n', (2925, 2927), False, 'import time\n'), ((3524, 3544), 'tensorflow.Session', 'tf.Session', (['w.target'], {}), '(w.target)\n', (3534, 3544), True, 'import tensorflow as tf\n'), ((4476, 4509), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4507, 4509), True, 'import tensorflow as tf\n'), ((3993, 4037), 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', ([], {'ps_tasks': '(100)'}), '(ps_tasks=100)\n', (4023, 4037), True, 'import tensorflow as tf\n'), ((4433, 4469), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['partitioned_ix'], {}), '(partitioned_ix)\n', (4453, 4469), True, 'import tensorflow as tf\n'), ((4282, 4351), 'tensorflow.variable_axis_size_partitioner', 'tf.variable_axis_size_partitioner', ([], {'max_shard_bytes': '(4 * partition_size)'}), 
'(max_shard_bytes=4 * partition_size)\n', (4315, 4351), True, 'import tensorflow as tf\n')] |
import functools
import glob
import itertools
import logging
import os
from progressbar import progressbar
import re
import requests
from typing import List
class ValueSingleDispatch:
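    """Dispatch calls to handler functions registered under distinct keys."""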
def __init__(self):
self._handlers = dict()
def register(self, key):
def decorator(fn: callable):
if key in self._handlers:
raise KeyError(key)
self._handlers[key] = fn
return fn
return decorator
def call(self, key, *args, **kwargs):
if key not in self._handlers:
raise KeyError(key)
return self._handlers[key](*args, **kwargs)
def valid_keys(self):
return self._handlers.keys()
def alphanumeric_glob(pattern: str):
"""Glob and sort alpahnumerically. Limitations: exactly one `*', no `?', file names with single extention."""
matches = glob.glob(pattern)
asterisk_pos = pattern.find('*')
matches.sort(key=lambda name: int(name[asterisk_pos:name.rfind('.')]))
return matches
def findall_in_files(pattern: re.Pattern, filenames: List[str], encoding: str) -> re.Match:
"""Generator"""
for filename in filenames:
logging.debug('util.findall_in_files: input file %s', filename)
with open(filename, 'rb') as ifile:
for match in pattern.findall(ifile.read().decode(encoding)):
logging.debug('util.findall_in_files(): match: file = %s, text = %s', filename, match)
yield match
def make_pattern(url_regex: str, extentions: List[str]) -> re.Pattern:
if extentions:
ext_regex = '({})'.format('|'.join(extentions))
else:
ext_regex = '()'
return re.compile(url_regex.format(extentions=ext_regex))
def download_by_pattern(url_regex: str, filenames: List[str], output_dir: str, *, extentions=[], encoding='windows-1251', limit=None):
logging.debug('util.download_by_pattern(): pattern = %s, extentions = %s', url_regex, extentions)
pattern = make_pattern(url_regex, extentions)
matches = findall_in_files(pattern, filenames, encoding)
if limit is not None:
matches = itertools.islice(matches, limit)
matches = list(matches)
logging.info('util.download_by_pattern(): %d matches', len(matches))
os.makedirs(output_dir, exist_ok=True)
downloads = 0
    # TODO statistics by extension
for idx, (url, ext) in progressbar(enumerate(matches), max_value=len(matches)):
local_name = '{:07d}'.format(idx) + '_' + os.path.basename(url)
try:
download(url, os.path.join(output_dir, local_name))
downloads += 1
except Exception as e:
            logging.warning('util.download_by_pattern(): unhandled exception: url = %s, e = %s', url, e)
logging.info('util.download_by_pattern(): %d successful downloads', downloads)
if downloads < len(matches):
logging.warning('util.download_by_pattern(): %d downloads failed, see log for warnings', len(matches) - downloads)
def download(url: str, local_path: str) -> bool:
logging.debug('util.download(): url = %s, local = %s', url, local_path)
req = requests.get(url)
with open(local_path, 'wb') as ofile:
ofile.write(req.content)
| [
"itertools.islice",
"logging.debug",
"os.makedirs",
"os.path.join",
"logging.warning",
"requests.get",
"os.path.basename",
"logging.info",
"glob.glob"
]
| [((862, 880), 'glob.glob', 'glob.glob', (['pattern'], {}), '(pattern)\n', (871, 880), False, 'import glob\n'), ((1860, 1961), 'logging.debug', 'logging.debug', (['"""util.download_by_pattern(): pattern = %s, extentions = %s"""', 'url_regex', 'extentions'], {}), "('util.download_by_pattern(): pattern = %s, extentions = %s',\n url_regex, extentions)\n", (1873, 1961), False, 'import logging\n'), ((2256, 2294), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (2267, 2294), False, 'import os\n'), ((2754, 2832), 'logging.info', 'logging.info', (['"""util.download_by_pattern(): %d successful downloads"""', 'downloads'], {}), "('util.download_by_pattern(): %d successful downloads', downloads)\n", (2766, 2832), False, 'import logging\n'), ((3043, 3114), 'logging.debug', 'logging.debug', (['"""util.download(): url = %s, local = %s"""', 'url', 'local_path'], {}), "('util.download(): url = %s, local = %s', url, local_path)\n", (3056, 3114), False, 'import logging\n'), ((3125, 3142), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3137, 3142), False, 'import requests\n'), ((1164, 1227), 'logging.debug', 'logging.debug', (['"""util.findall_in_files: input file %s"""', 'filename'], {}), "('util.findall_in_files: input file %s', filename)\n", (1177, 1227), False, 'import logging\n'), ((2113, 2145), 'itertools.islice', 'itertools.islice', (['matches', 'limit'], {}), '(matches, limit)\n', (2129, 2145), False, 'import itertools\n'), ((2482, 2503), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (2498, 2503), False, 'import os\n'), ((1361, 1451), 'logging.debug', 'logging.debug', (['"""util.findall_in_files(): match: file = %s, text = %s"""', 'filename', 'match'], {}), "('util.findall_in_files(): match: file = %s, text = %s',\n filename, match)\n", (1374, 1451), False, 'import logging\n'), ((2543, 2579), 'os.path.join', 'os.path.join', (['output_dir', 'local_name'], {}), '(output_dir, local_name)\n', (2555, 2579), False, 'import os\n'), ((2651, 2758), 'logging.warning', 'logging.warning', (['"""util.download_by_pattern(): unhandled exception: url = %s, e = %s"""', 'match_url', 'e'], {}), "(\n 'util.download_by_pattern(): unhandled exception: url = %s, e = %s',\n match_url, e)\n", (2666, 2758), False, 'import logging\n')] |
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def read_dataset_from_npy(path):
""" Read dataset from .npy file
"""
data = np.load(path, allow_pickle=True)
return data[()]['X'], data[()]['y'], data[()]['train_idx'], data[()]['test_idx']
def read_dataset(ucr_root_dir, dataset_name, shot):
""" Read univariate dataset from UCR
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
y_train = df_train.values[:, 0].astype(np.int64)
y_test = df_test.values[:, 0].astype(np.int64)
y = np.concatenate((y_train, y_test))
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test))
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
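    # Keep at most `shot` training examples per class (few-shot selection).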
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
X[np.isnan(X)] = 0
std_ = X.std(axis=1, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=1, keepdims=True)) / std_
# add a dimension to make it multivariate with one dimension
X = X.reshape((X.shape[0], 1, X.shape[1]))
return X, y, train_idx, test_idx
def read_multivariate_dataset(root_dir, dataset_name, shot):
""" Read multivariate dataset
"""
X = np.load(os.path.join(root_dir, dataset_name+".npy"), allow_pickle=True)
y = np.loadtxt(os.path.join(root_dir, dataset_name+'_label.txt'))
y = y.astype(np.int64)
dim = X[0].shape[0]
max_length = 0
for _X in X:
if _X.shape[1] > max_length:
max_length = _X.shape[1]
X_list = []
for i in range(len(X)):
_X = np.zeros((dim, max_length))
_X[:, :X[i].shape[1]] = X[i]
X_list.append(_X)
X = np.array(X_list, dtype=np.float32)
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
std_ = X.std(axis=2, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=2, keepdims=True)) / std_
return X, y, train_idx, test_idx
def read_X(ucr_root_dir, dataset_name):
""" Read the raw time-series
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test), axis=0)
return X
class Logger:
def __init__(self, f):
self.f = f
def log(self, content):
print(content)
self.f.write(content + '\n')
self.f.flush()
| [
"sklearn.preprocessing.LabelEncoder",
"numpy.unique",
"os.path.join",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"numpy.concatenate",
"numpy.load",
"numpy.random.shuffle"
]
| [((185, 217), 'numpy.load', 'np.load', (['path'], {'allow_pickle': '(True)'}), '(path, allow_pickle=True)\n', (192, 217), True, 'import numpy as np\n'), ((423, 463), 'os.path.join', 'os.path.join', (['ucr_root_dir', 'dataset_name'], {}), '(ucr_root_dir, dataset_name)\n', (435, 463), False, 'import os\n'), ((783, 816), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (797, 816), True, 'import numpy as np\n'), ((826, 840), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (838, 840), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1154, 1187), 'numpy.concatenate', 'np.concatenate', (['(X_train, X_test)'], {}), '((X_train, X_test))\n', (1168, 1187), True, 'import numpy as np\n'), ((1240, 1262), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (1257, 1262), True, 'import numpy as np\n'), ((2396, 2430), 'numpy.array', 'np.array', (['X_list'], {'dtype': 'np.float32'}), '(X_list, dtype=np.float32)\n', (2404, 2430), True, 'import numpy as np\n'), ((2441, 2455), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2453, 2455), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2547, 2569), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (2564, 2569), True, 'import numpy as np\n'), ((3093, 3133), 'os.path.join', 'os.path.join', (['ucr_root_dir', 'dataset_name'], {}), '(ucr_root_dir, dataset_name)\n', (3105, 3133), False, 'import os\n'), ((3615, 3656), 'numpy.concatenate', 'np.concatenate', (['(X_train, X_test)'], {'axis': '(0)'}), '((X_train, X_test), axis=0)\n', (3629, 3656), True, 'import numpy as np\n'), ((491, 545), 'os.path.join', 'os.path.join', (['dataset_dir', "(dataset_name + '_TRAIN.tsv')"], {}), "(dataset_dir, dataset_name + '_TRAIN.tsv')\n", (503, 545), False, 'import os\n'), ((594, 647), 'os.path.join', 'os.path.join', (['dataset_dir', "(dataset_name + '_TEST.tsv')"], {}), "(dataset_dir, dataset_name + '_TEST.tsv')\n", (606, 647), False, 'import os\n'), ((1537, 1548), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (1545, 1548), True, 'import numpy as np\n'), ((1943, 1988), 'os.path.join', 'os.path.join', (['root_dir', "(dataset_name + '.npy')"], {}), "(root_dir, dataset_name + '.npy')\n", (1955, 1988), False, 'import os\n'), ((2026, 2077), 'os.path.join', 'os.path.join', (['root_dir', "(dataset_name + '_label.txt')"], {}), "(root_dir, dataset_name + '_label.txt')\n", (2038, 2077), False, 'import os\n'), ((2297, 2324), 'numpy.zeros', 'np.zeros', (['(dim, max_length)'], {}), '((dim, max_length))\n', (2305, 2324), True, 'import numpy as np\n'), ((3161, 3215), 'os.path.join', 'os.path.join', (['dataset_dir', "(dataset_name + '_TRAIN.tsv')"], {}), "(dataset_dir, dataset_name + '_TRAIN.tsv')\n", (3173, 3215), False, 'import os\n'), ((3264, 3317), 'os.path.join', 'os.path.join', (['dataset_dir', "(dataset_name + '_TEST.tsv')"], {}), "(dataset_dir, dataset_name + '_TEST.tsv')\n", (3276, 3317), False, 'import os\n'), ((1372, 1384), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1381, 1384), True, 'import numpy as np\n'), ((2679, 2691), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2688, 2691), True, 'import numpy as np\n')] |
from aiopylimit import AIOPyRateLimit
from aiopylimit import AIOPyRateLimitException
import asynctest
import asyncio
class TestPyLimit(asynctest.TestCase):
async def test_exception(self):
limit = AIOPyRateLimit(10, 10)
await self.assertAsyncRaises(AIOPyRateLimitException,
limit.attempt('test_namespace'))
async def test_throttle(self):
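        # The first 10 attempts inside the 10-second window succeed; later
        # attempts are throttled until the window expires.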
AIOPyRateLimit.init(redis_host="localhost", redis_port=6379,
force_new_connection=True)
limit = AIOPyRateLimit(10, 10)
for x in range(0, 20):
await asyncio.sleep(.5)
if x < 10:
self.assertTrue(await limit.attempt('test_namespace'))
else:
self.assertFalse(await limit.attempt('test_namespace'))
await asyncio.sleep(6)
self.assertTrue(await limit.attempt('test_namespace'))
async def test_peek(self):
AIOPyRateLimit.init(redis_host="localhost", redis_port=6379,
force_new_connection=True)
limit = AIOPyRateLimit(10, 10)
for x in range(0, 10):
self.assertTrue(await limit.attempt('test_namespace2'))
self.assertTrue(await limit.is_rate_limited('test_namespace2'))
await asyncio.sleep(10)
self.assertFalse(await limit.is_rate_limited('test_namespace2'))
| [
"aiopylimit.AIOPyRateLimit",
"aiopylimit.AIOPyRateLimit.init",
"asyncio.sleep"
]
| [((210, 232), 'aiopylimit.AIOPyRateLimit', 'AIOPyRateLimit', (['(10)', '(10)'], {}), '(10, 10)\n', (224, 232), False, 'from aiopylimit import AIOPyRateLimit\n'), ((409, 500), 'aiopylimit.AIOPyRateLimit.init', 'AIOPyRateLimit.init', ([], {'redis_host': '"""localhost"""', 'redis_port': '(6379)', 'force_new_connection': '(True)'}), "(redis_host='localhost', redis_port=6379,\n force_new_connection=True)\n", (428, 500), False, 'from aiopylimit import AIOPyRateLimit\n'), ((541, 563), 'aiopylimit.AIOPyRateLimit', 'AIOPyRateLimit', (['(10)', '(10)'], {}), '(10, 10)\n', (555, 563), False, 'from aiopylimit import AIOPyRateLimit\n'), ((949, 1040), 'aiopylimit.AIOPyRateLimit.init', 'AIOPyRateLimit.init', ([], {'redis_host': '"""localhost"""', 'redis_port': '(6379)', 'force_new_connection': '(True)'}), "(redis_host='localhost', redis_port=6379,\n force_new_connection=True)\n", (968, 1040), False, 'from aiopylimit import AIOPyRateLimit\n'), ((1081, 1103), 'aiopylimit.AIOPyRateLimit', 'AIOPyRateLimit', (['(10)', '(10)'], {}), '(10, 10)\n', (1095, 1103), False, 'from aiopylimit import AIOPyRateLimit\n'), ((829, 845), 'asyncio.sleep', 'asyncio.sleep', (['(6)'], {}), '(6)\n', (842, 845), False, 'import asyncio\n'), ((1289, 1306), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (1302, 1306), False, 'import asyncio\n'), ((613, 631), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (626, 631), False, 'import asyncio\n')] |
# coding: utf-8
import sys
import shutil
import requests
import wx
from pathlib import Path
from urllib.parse import urljoin, urlsplit
from tempfile import TemporaryFile
from zipfile import ZipFile
from bookworm import typehints as t
from bookworm import app
from bookworm.http_tools import RemoteJsonResource, HttpResource
from bookworm.ocr_engines.tesseract_ocr_engine import (
TesseractOcrEngine,
get_tesseract_path,
)
from bookworm.logger import logger
log = logger.getChild(__name__)
BRANCH = "develop"
TESSERACT_VERSION_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/version"
if app.arch == "x86":
TESSERACT_ENGINE_DOWNLOAD_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/tesseract_x86.zip"
else:
TESSERACT_ENGINE_DOWNLOAD_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/tesseract_x64.zip"
FAST_TRAINEDDATA_DOWNLOAD_URL = "https://raw.githubusercontent.com/tesseract-ocr/tessdata_fast/main/{lang_code}.traineddata"
BEST_TRAINEDDATA_DOWNLOAD_URL = "https://raw.githubusercontent.com/tesseract-ocr/tessdata_best/main/{lang_code}.traineddata"
def get_downloadable_languages():
return (
"afr",
"sqi",
"amh",
"ara",
"hye",
"asm",
"aze_cyrl",
"aze",
"ben",
"eus",
"bel",
"bos",
"bre",
"bul",
"mya",
"cat",
"ceb",
"chr",
"chi_sim",
"hrv",
"ces",
"dan",
"nld",
"dzo",
"eng",
"epo",
"est",
"fao",
"fil",
"fin",
"fra",
"glg",
"kat_old",
"kat",
"deu",
"ell",
"guj",
"heb",
"hin",
"hun",
"isl",
"ind",
"gle",
"ita_old",
"ita",
"jpn_vert",
"jpn",
"jav",
"kan",
"kaz",
"khm",
"kor_vert",
"kor",
"kmr",
"kir",
"lao",
"lav",
"lit",
"ltz",
"mkd",
"msa",
"mal",
"mlt",
"mri",
"mar",
"mon",
"nep",
"nor",
"ori",
"pus",
"fas",
"pol",
"por",
"pan",
"que",
"ron",
"rus",
"gla",
"srp_latn",
"srp",
"snd",
"sin",
"slk",
"slv",
"spa_old",
"spa",
"sun",
"swa",
"swe",
"tgk",
"tam",
"tat",
"tel",
"tha",
"bod",
"tir",
"ton",
"tur",
"ukr",
"urd",
"uig",
"uzb_cyrl",
"uzb",
"vie",
"cym",
"fry",
"yid",
"yor",
)
def is_tesseract_available():
return sys.platform == "win32" and TesseractOcrEngine.check()
def get_tessdata():
return get_tesseract_path() / "tessdata"
def get_language_path(language):
return Path(get_tessdata(), f"{language}.traineddata")
def is_new_tesseract_version_available():
remote_version = requests.get(TESSERACT_VERSION_URL).text
return TesseractOcrEngine.get_tesseract_version() != remote_version
def download_tesseract_engine(progress_dlg):
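    # Download the engine zip to a temporary file, extract it into the
    # bundled Tesseract directory, and report progress through the dialog.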
tesseract_directory = get_tesseract_path()
callback = lambda prog: progress_dlg.Update(prog.percentage, prog.user_message)
try:
dl_request = HttpResource(TESSERACT_ENGINE_DOWNLOAD_URL).download()
progress_dlg.set_abort_callback(dl_request.cancel)
with TemporaryFile() as dlfile:
dl_request.download_to_file(dlfile, callback)
if dl_request.is_cancelled():
return
with progress_dlg.PulseContinuously(_("Extracting file...")):
with ZipFile(dlfile, "r") as zfile:
tesseract_directory.mkdir(parents=True, exist_ok=True)
zfile.extractall(path=tesseract_directory)
wx.GetApp().mainFrame.notify_user(
# Translators: title of a messagebox
_("Success"),
# Translators: content of a messagebox
_("Tesseract engine downloaded successfully"),
)
return True
except ConnectionError:
log.debug("Failed to download tesseract OCR engine.", exc_info=True)
wx.GetApp().mainFrame.notify_user(
# Translators: title of a messagebox
_("Connection Error"),
_(
"Could not download Tesseract OCR Engine.\nPlease check your internet and try again."
),
icon=wx.ICON_ERROR,
)
except:
log.exception(
"An error occurred while installing the Tesseract OCr Engine", exc_info=True
)
wx.GetApp().mainFrame.notify_user(
_("Error"),
_("Could not install the Tesseract OCR engine.\nPlease try again."),
icon=wx.ICON_WARNING,
)
def download_language(lang_code, variant, target_file, progress_dlg):
url_prefix = (
BEST_TRAINEDDATA_DOWNLOAD_URL
if variant == "best"
else FAST_TRAINEDDATA_DOWNLOAD_URL
)
download_url = url_prefix.format(lang_code=lang_code)
callback = lambda prog: progress_dlg.Update(prog.percentage, prog.user_message)
dl_request = HttpResource(download_url).download()
progress_dlg.set_abort_callback(dl_request.cancel)
dl_request.download_to_filesystem(target_file, callback)
return not dl_request.is_cancelled()
def remove_tesseract():
tesseract_path = get_tesseract_path()
shutil.rmtree(tesseract_path, ignore_errors=False)
| [
"bookworm.ocr_engines.tesseract_ocr_engine.TesseractOcrEngine.check",
"zipfile.ZipFile",
"wx.GetApp",
"bookworm.logger.logger.getChild",
"bookworm.ocr_engines.tesseract_ocr_engine.get_tesseract_path",
"bookworm.ocr_engines.tesseract_ocr_engine.TesseractOcrEngine.get_tesseract_version",
"requests.get",
"shutil.rmtree",
"tempfile.TemporaryFile",
"bookworm.http_tools.HttpResource"
]
| [((473, 498), 'bookworm.logger.logger.getChild', 'logger.getChild', (['__name__'], {}), '(__name__)\n', (488, 498), False, 'from bookworm.logger import logger\n'), ((3422, 3442), 'bookworm.ocr_engines.tesseract_ocr_engine.get_tesseract_path', 'get_tesseract_path', ([], {}), '()\n', (3440, 3442), False, 'from bookworm.ocr_engines.tesseract_ocr_engine import TesseractOcrEngine, get_tesseract_path\n'), ((5696, 5716), 'bookworm.ocr_engines.tesseract_ocr_engine.get_tesseract_path', 'get_tesseract_path', ([], {}), '()\n', (5714, 5716), False, 'from bookworm.ocr_engines.tesseract_ocr_engine import TesseractOcrEngine, get_tesseract_path\n'), ((5721, 5771), 'shutil.rmtree', 'shutil.rmtree', (['tesseract_path'], {'ignore_errors': '(False)'}), '(tesseract_path, ignore_errors=False)\n', (5734, 5771), False, 'import shutil\n'), ((2983, 3009), 'bookworm.ocr_engines.tesseract_ocr_engine.TesseractOcrEngine.check', 'TesseractOcrEngine.check', ([], {}), '()\n', (3007, 3009), False, 'from bookworm.ocr_engines.tesseract_ocr_engine import TesseractOcrEngine, get_tesseract_path\n'), ((3043, 3063), 'bookworm.ocr_engines.tesseract_ocr_engine.get_tesseract_path', 'get_tesseract_path', ([], {}), '()\n', (3061, 3063), False, 'from bookworm.ocr_engines.tesseract_ocr_engine import TesseractOcrEngine, get_tesseract_path\n'), ((3236, 3271), 'requests.get', 'requests.get', (['TESSERACT_VERSION_URL'], {}), '(TESSERACT_VERSION_URL)\n', (3248, 3271), False, 'import requests\n'), ((3288, 3330), 'bookworm.ocr_engines.tesseract_ocr_engine.TesseractOcrEngine.get_tesseract_version', 'TesseractOcrEngine.get_tesseract_version', ([], {}), '()\n', (3328, 3330), False, 'from bookworm.ocr_engines.tesseract_ocr_engine import TesseractOcrEngine, get_tesseract_path\n'), ((3684, 3699), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (3697, 3699), False, 'from tempfile import TemporaryFile\n'), ((5454, 5480), 'bookworm.http_tools.HttpResource', 'HttpResource', (['download_url'], {}), '(download_url)\n', (5466, 5480), False, 'from bookworm.http_tools import RemoteJsonResource, HttpResource\n'), ((3557, 3600), 'bookworm.http_tools.HttpResource', 'HttpResource', (['TESSERACT_ENGINE_DOWNLOAD_URL'], {}), '(TESSERACT_ENGINE_DOWNLOAD_URL)\n', (3569, 3600), False, 'from bookworm.http_tools import RemoteJsonResource, HttpResource\n'), ((3929, 3949), 'zipfile.ZipFile', 'ZipFile', (['dlfile', '"""r"""'], {}), "(dlfile, 'r')\n", (3936, 3949), False, 'from zipfile import ZipFile\n'), ((4106, 4117), 'wx.GetApp', 'wx.GetApp', ([], {}), '()\n', (4115, 4117), False, 'import wx\n'), ((4469, 4480), 'wx.GetApp', 'wx.GetApp', ([], {}), '()\n', (4478, 4480), False, 'import wx\n'), ((4904, 4915), 'wx.GetApp', 'wx.GetApp', ([], {}), '()\n', (4913, 4915), False, 'import wx\n')] |
from django.db import models
# Create your models here.
class CommonFieldsMixin(models.Model):
"""Add created_at and updated_at fields."""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, null=True)
class Meta:
"""Define metadata options."""
abstract = True
class Category(CommonFieldsMixin):
    name = models.CharField(max_length=250, null=False, unique=True)
class Notes(CommonFieldsMixin):
    title = models.CharField(max_length=250, null=False, unique=False)
    body = models.TextField(null=False)
    category = models.ForeignKey(Category, on_delete=models.CASCADE, default=None)
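# Example usage (illustrative only; names and values below are hypothetical,
# e.g. from a Django shell):
#     work = Category.objects.create(name="Work")
#     Notes.objects.create(title="Standup", body="Discussed blockers", category=work)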
| [
"django.db.models.DateTimeField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
]
| [((164, 203), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (184, 203), False, 'from django.db import models\n'), ((221, 267), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)'}), '(auto_now=True, null=True)\n', (241, 267), False, 'from django.db import models\n'), ((397, 454), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'null': '(False)', 'unique': '(True)'}), '(max_length=250, null=False, unique=True)\n', (413, 454), False, 'from django.db import models\n'), ((499, 557), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'null': '(False)', 'unique': '(False)'}), '(max_length=250, null=False, unique=False)\n', (515, 557), False, 'from django.db import models\n'), ((567, 595), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(False)'}), '(null=False)\n', (583, 595), False, 'from django.db import models\n'), ((611, 678), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Category'], {'on_delete': 'models.CASCADE', 'default': 'None'}), '(Category, on_delete=models.CASCADE, default=None)\n', (628, 678), False, 'from django.db import models\n')] |
# seq2seq LSTM (no-convolutional model) for time series prediction
import numpy as np
import torch
import torchvision
import torch.utils.data as data
import torchvision.transforms as transforms
import pandas as pd
import h5py
import os
import sys
import json
import time
import pdb
from jma_timeseries_dataset import *
from scaler import *
from train_valid_epoch_tsconv import *
from utils import Logger
from opts_ts import parse_opts
def count_parameters(model,f):
for name,p in model.named_parameters():
f.write("name,"+name+", Trainable, "+str(p.requires_grad)+",#params, "+str(p.numel())+"\n")
Nparam = sum(p.numel() for p in model.parameters())
Ntrain = sum(p.numel() for p in model.parameters() if p.requires_grad)
f.write("Number of params:"+str(Nparam)+", Trainable parameters:"+str(Ntrain)+"\n")
print("Number of params:"+str(Nparam)+", Trainable parameters:"+str(Ntrain)+"\n")
if __name__ == '__main__':
# parse command-line options
opt = parse_opts()
print(opt)
# create result dir
if not os.path.exists(opt.result_path):
os.mkdir(opt.result_path)
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
json.dump(vars(opt), opt_file)
# generic log file
logfile = open(os.path.join(opt.result_path, 'log_run.txt'),'w')
logfile.write('Start time:'+time.ctime()+'\n')
tstart = time.time()
# model information
modelinfo = open(os.path.join(opt.result_path, 'model_info.txt'),'w')
# prepare scaler for data
if opt.data_scaling == 'linear':
scl = LinearScaler()
if opt.data_scaling == 'root':
scl = RootScaler()
if not opt.no_train:
# loading datasets
train_dataset = JMATSConvDataset(csv_data=opt.train_data_path,
csv_anno=opt.train_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
resize=opt.data_resize,
transform=None)
valid_dataset = JMATSConvDataset(csv_data=opt.valid_data_path,
csv_anno=opt.valid_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
resize=opt.data_resize,
transform=None)
#tstdata = next(iter(train_dataset))
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=opt.batch_size,
num_workers=7,
drop_last=True,
shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
batch_size=opt.batch_size,
num_workers=7,
drop_last=True,
shuffle=False)
if opt.model_name == 'seq2seq':
# lstm seq2seq model
CONV_HID_DIM = 32
INPUT_DIM = 1 + CONV_HID_DIM
OUTPUT_DIM = 1
HID_DIM = 512
N_LAYERS = 3
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
from models.seq2seq_convlstm_ts import *
enc = Encoder(INPUT_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2SeqConv(enc, dec, CONV_HID_DIM, device='cuda').cuda()
if opt.transfer_path != 'None':
# Use pretrained weights for transfer learning
print('loading pretrained model:',opt.transfer_path)
model = torch.load(opt.transfer_path)
modelinfo.write('Model Structure \n')
modelinfo.write(str(model))
count_parameters(model,modelinfo)
# modelinfo.close()
if opt.loss_function == 'MSE':
loss_fn = torch.nn.MSELoss()
# Type of optimizers adam/rmsprop
if opt.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
elif opt.optimizer == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters(), lr=opt.learning_rate)
# learning rate scheduler
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=opt.lr_decay)
# Prep logger
train_logger = Logger(
os.path.join(opt.result_path, 'train.log'),
['epoch', 'loss', 'lr'])
train_batch_logger = Logger(
os.path.join(opt.result_path, 'train_batch.log'),
['epoch', 'batch', 'loss', 'lr'])
valid_logger = Logger(
os.path.join(opt.result_path, 'valid.log'),
['epoch', 'loss'])
# training
for epoch in range(1,opt.n_epochs+1):
if epoch < 10:
# freeze conv_encoder for first 10 epochs
submodel = next(iter(model.children()))
for param in submodel.parameters():
param.requires_grad = False
else:
# unfreeze conv_encoder for the rest
submodel = next(iter(model.children()))
for param in submodel.parameters():
param.requires_grad = True
count_parameters(model,modelinfo)
#import pdb;pdb.set_trace()
# step scheduler
scheduler.step()
# training & validation
train_epoch(epoch,opt.n_epochs,train_loader,model,loss_fn,optimizer,
train_logger,train_batch_logger,opt,scl)
valid_epoch(epoch,opt.n_epochs,valid_loader,model,loss_fn,
valid_logger,opt,scl)
if epoch % opt.checkpoint == 0:
# save the trained model for every checkpoint
# (1) as binary
torch.save(model,os.path.join(opt.result_path,
'trained_seq2seq_epoch%03d.model' % epoch))
# (2) as state dictionary
torch.save(model.state_dict(),
os.path.join(opt.result_path,
'trained_seq2seq_epoch%03d.dict' % epoch))
# save the trained model
# (1) as binary
torch.save(model,os.path.join(opt.result_path, 'trained_seq2seq.model'))
# (2) as state dictionary
torch.save(model.state_dict(),
os.path.join(opt.result_path, 'trained_seq2seq.dict'))
# test datasets if specified
if opt.test:
if opt.no_train:
#load pretrained model from results directory
model_fname = os.path.join(opt.result_path, opt.test_model_fname)
print('loading pretrained model:',model_fname)
model = torch.load(model_fname)
loss_fn = torch.nn.MSELoss()
# prepare loader
test_dataset = JMATSConvDataset(csv_data=opt.test_data_path,
csv_anno=opt.test_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
transform=None)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
# batch_size=opt.batch_size,
batch_size=3, # small batch size used
num_workers=7,
drop_last=True,
shuffle=False)
# testing for the trained model
test_epoch(test_loader,model,loss_fn,opt,scl)
# output elapsed time
logfile.write('End time: '+time.ctime()+'\n')
tend = time.time()
tdiff = float(tend-tstart)/3600.0
logfile.write('Elapsed time[hours]: %f \n' % tdiff)
| [
"os.path.exists",
"time.ctime",
"torch.load",
"os.path.join",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.MSELoss",
"os.mkdir",
"torch.utils.data.DataLoader",
"time.time",
"opts_ts.parse_opts"
]
| [((999, 1011), 'opts_ts.parse_opts', 'parse_opts', ([], {}), '()\n', (1009, 1011), False, 'from opts_ts import parse_opts\n'), ((1406, 1417), 'time.time', 'time.time', ([], {}), '()\n', (1415, 1417), False, 'import time\n'), ((8428, 8439), 'time.time', 'time.time', ([], {}), '()\n', (8437, 8439), False, 'import time\n'), ((1062, 1093), 'os.path.exists', 'os.path.exists', (['opt.result_path'], {}), '(opt.result_path)\n', (1076, 1093), False, 'import os\n'), ((1103, 1128), 'os.mkdir', 'os.mkdir', (['opt.result_path'], {}), '(opt.result_path)\n', (1111, 1128), False, 'import os\n'), ((1292, 1336), 'os.path.join', 'os.path.join', (['opt.result_path', '"""log_run.txt"""'], {}), "(opt.result_path, 'log_run.txt')\n", (1304, 1336), False, 'import os\n'), ((1464, 1511), 'os.path.join', 'os.path.join', (['opt.result_path', '"""model_info.txt"""'], {}), "(opt.result_path, 'model_info.txt')\n", (1476, 1511), False, 'import os\n'), ((2668, 2795), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'opt.batch_size', 'num_workers': '(7)', 'drop_last': '(True)', 'shuffle': '(True)'}), '(dataset=train_dataset, batch_size=opt.\n batch_size, num_workers=7, drop_last=True, shuffle=True)\n', (2695, 2795), False, 'import torch\n'), ((3023, 3151), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'valid_dataset', 'batch_size': 'opt.batch_size', 'num_workers': '(7)', 'drop_last': '(True)', 'shuffle': '(False)'}), '(dataset=valid_dataset, batch_size=opt.\n batch_size, num_workers=7, drop_last=True, shuffle=False)\n', (3050, 3151), False, 'import torch\n'), ((4709, 4784), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(1)', 'gamma': 'opt.lr_decay'}), '(optimizer, step_size=1, gamma=opt.lr_decay)\n', (4740, 4784), False, 'import torch\n'), ((7820, 7934), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': '(3)', 'num_workers': '(7)', 'drop_last': '(True)', 'shuffle': '(False)'}), '(dataset=test_dataset, batch_size=3, num_workers\n =7, drop_last=True, shuffle=False)\n', (7847, 7934), False, 'import torch\n'), ((1148, 1190), 'os.path.join', 'os.path.join', (['opt.result_path', '"""opts.json"""'], {}), "(opt.result_path, 'opts.json')\n", (1160, 1190), False, 'import os\n'), ((4094, 4123), 'torch.load', 'torch.load', (['opt.transfer_path'], {}), '(opt.transfer_path)\n', (4104, 4123), False, 'import torch\n'), ((4346, 4364), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (4362, 4364), False, 'import torch\n'), ((4863, 4905), 'os.path.join', 'os.path.join', (['opt.result_path', '"""train.log"""'], {}), "(opt.result_path, 'train.log')\n", (4875, 4905), False, 'import os\n'), ((4993, 5041), 'os.path.join', 'os.path.join', (['opt.result_path', '"""train_batch.log"""'], {}), "(opt.result_path, 'train_batch.log')\n", (5005, 5041), False, 'import os\n'), ((5132, 5174), 'os.path.join', 'os.path.join', (['opt.result_path', '"""valid.log"""'], {}), "(opt.result_path, 'valid.log')\n", (5144, 5174), False, 'import os\n'), ((6843, 6897), 'os.path.join', 'os.path.join', (['opt.result_path', '"""trained_seq2seq.model"""'], {}), "(opt.result_path, 'trained_seq2seq.model')\n", (6855, 6897), False, 'import os\n'), ((6991, 7044), 'os.path.join', 'os.path.join', (['opt.result_path', '"""trained_seq2seq.dict"""'], {}), "(opt.result_path, 'trained_seq2seq.dict')\n", (7003, 7044), False, 'import os\n'), ((7206, 7257), 'os.path.join', 
'os.path.join', (['opt.result_path', 'opt.test_model_fname'], {}), '(opt.result_path, opt.test_model_fname)\n', (7218, 7257), False, 'import os\n'), ((7337, 7360), 'torch.load', 'torch.load', (['model_fname'], {}), '(model_fname)\n', (7347, 7360), False, 'import torch\n'), ((7383, 7401), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (7399, 7401), False, 'import torch\n'), ((1374, 1386), 'time.ctime', 'time.ctime', ([], {}), '()\n', (1384, 1386), False, 'import time\n'), ((8398, 8410), 'time.ctime', 'time.ctime', ([], {}), '()\n', (8408, 8410), False, 'import time\n'), ((6408, 6480), 'os.path.join', 'os.path.join', (['opt.result_path', "('trained_seq2seq_epoch%03d.model' % epoch)"], {}), "(opt.result_path, 'trained_seq2seq_epoch%03d.model' % epoch)\n", (6420, 6480), False, 'import os\n'), ((6647, 6718), 'os.path.join', 'os.path.join', (['opt.result_path', "('trained_seq2seq_epoch%03d.dict' % epoch)"], {}), "(opt.result_path, 'trained_seq2seq_epoch%03d.dict' % epoch)\n", (6659, 6718), False, 'import os\n')] |
import inspect
import sys
from abc import ABC, abstractmethod
from enum import Enum
from typing import List, Union, Type, Optional, Dict, Any
from pydantic import BaseModel
from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException
from .node import Node
__all__ = ['Tag', 'HeaderTag', 'FooterTag', 'BrTag', 'UlTag', 'LiTag', 'FormTag',
'SectionTag', 'InputTagAttrs', 'InputTag', 'FormTagAttrs', 'PTag', 'ATag',
'ATagAttrs', 'get_tag_cls', 'SectionTagAttrs', 'LiTagAttrs', 'InputTagType']
class Tag(BaseModel, ABC):
class Config:
tag_name: str = None
attrs: Any = None
children: List[Union['Tag', str]] = []
@abstractmethod
def render(self) -> str:
pass
@classmethod
def from_node(cls, node: Node) -> 'Tag':
if node.tag != cls.Config.tag_name:
raise NodeTagMismatchException(
f'Expected tag <{cls.Config.tag_name}>, received <{node.tag}>')
attrs = cls.get_attrs(node)
children = []
for node_child in node.children:
if isinstance(node_child, str):
children.append(node_child)
else:
child_tag_cls = get_tag_cls(node_child.tag)
children.append(child_tag_cls.from_node(node_child))
return cls(attrs=attrs, children=children)
@classmethod
def get_attrs(cls, node: Node):
return None
class HeaderTag(Tag):
class Config:
tag_name = 'header'
def __init__(self, children: List[str] = None, **data):
children = children or []
if len(children) > 1 or children and not isinstance(children[0], str):
raise ONEmSDKException('<header> must have max 1 text child')
super(HeaderTag, self).__init__(children=children)
def render(self):
if len(self.children) == 1:
return self.children[0]
return ''
def data(self):
return None
HeaderTag.update_forward_refs()
class FooterTag(Tag):
class Config:
tag_name = 'footer'
def __init__(self, children: List[str] = None, **data):
children = children or []
if len(children) > 1 or children and not isinstance(children[0], str):
raise ONEmSDKException('<footer> must have max 1 text child')
super(FooterTag, self).__init__(children=children)
def render(self):
if len(self.children) == 1:
return self.children[0]
return ''
def data(self):
return None
FooterTag.update_forward_refs()
class InputTagType(str, Enum):
# standard HTML5 input values
text = 'text'
date = 'date'
number = 'number'
hidden = 'hidden'
email = 'email'
url = 'url'
# not standard
datetime = 'datetime'
location = 'location'
class InputTagAttrs(BaseModel):
# standard HTML5 attributes
type: InputTagType
min: Union[int, float] = None
minlength: int = None
max: Union[int, float] = None
maxlength: int = None
step: int = None
value: str = None # only for type="hidden"
pattern: str = None
# not standard
min_error: str = None
minlength_error: str = None
max_error: str = None
maxlength_error: str = None
class InputTag(Tag):
class Config:
tag_name = 'input'
attrs: InputTagAttrs
def __init__(self, attrs: InputTagAttrs, **data):
super(InputTag, self).__init__(attrs=attrs)
@classmethod
def get_attrs(cls, node: Node):
return InputTagAttrs(
type=node.attrs.get('type'),
min=node.attrs.get('min'),
min_error=node.attrs.get('min-error'),
minlength=node.attrs.get('minlength'),
minlength_error=node.attrs.get('minlength-error'),
max=node.attrs.get('max'),
max_error=node.attrs.get('max-error'),
maxlength=node.attrs.get('maxlength'),
maxlength_error=node.attrs.get('maxlength-error'),
step=node.attrs.get('step'),
value=node.attrs.get('value'),
pattern=node.attrs.get('pattern'),
)
def render(self):
return ''
def data(self) -> Optional[Dict[str, str]]:
return None
InputTag.update_forward_refs()
class LabelTag(Tag):
class Config:
tag_name = 'label'
def __init__(self, children: List[str] = None, **data):
children = children or []
if len(children) > 1 or children and not isinstance(children[0], str):
raise ONEmSDKException('<label> must have max 1 text child')
super(LabelTag, self).__init__(children=children)
def render(self):
return self.children[0]
LabelTag.update_forward_refs()
class ATagAttrs(BaseModel):
href: str
method: Optional[str] = 'GET'
class ATag(Tag):
class Config:
tag_name: str = 'a'
attrs: ATagAttrs
def __init__(self, attrs: ATagAttrs, children: List[str]):
if len(children) != 1 or not isinstance(children[0], str):
raise ONEmSDKException('<a> must have 1 text child')
super(ATag, self).__init__(attrs=attrs, children=children)
@classmethod
def get_attrs(cls, node: Node) -> ATagAttrs:
return ATagAttrs(href=node.attrs.get('href'),
method=node.attrs.get('method') or 'GET')
def render(self):
return self.children[0]
def data(self) -> Dict[str, str]:
return {
**self.attrs.dict(),
'text': self.children[0]
}
ATag.update_forward_refs()
class LiTagAttrs(BaseModel):
value: Optional[str]
text_search: Optional[str]
class LiTag(Tag):
class Config:
tag_name = 'li'
attrs: LiTagAttrs
def __init__(self, children: List[Union[ATag, str]], attrs: LiTagAttrs = None):
if len(children) != 1 or not isinstance(children[0], (str, ATag)):
raise ONEmSDKException('<li> must have 1 (text or <a>) child')
if attrs is None:
attrs = LiTagAttrs()
super(LiTag, self).__init__(attrs=attrs, children=children)
@classmethod
def get_attrs(cls, node: Node):
return LiTagAttrs(
value=node.attrs.get('value'),
text_search=node.attrs.get('text-search'),
)
def render(self):
if isinstance(self.children[0], ATag):
return self.children[0].render()
return self.children[0]
LiTag.update_forward_refs()
class UlTag(Tag):
class Config:
tag_name = 'ul'
def __init__(self, children: List[LiTag], **data):
if not children or not isinstance(children[0], LiTag):
raise ONEmSDKException('<ul> must have min 1 <li> child')
super(UlTag, self).__init__(children=children)
def render(self):
return '\n'.join([child.render() for child in self.children])
UlTag.update_forward_refs()
class PTag(Tag):
class Config:
tag_name = 'p'
def __init__(self, children: List[str] = None, **data):
children = children or []
if len(children) > 1 or children and not isinstance(children[0], str):
raise ONEmSDKException('<p> must have max 1 text child')
super(PTag, self).__init__(children=children)
def render(self):
if len(self.children) == 1:
return self.children[0]
return ''
def data(self):
return {
'text': self.children[0],
'href': None,
'data': None
}
PTag.update_forward_refs()
class BrTag(Tag):
class Config:
tag_name = 'br'
def __init__(self, **data):
super(BrTag, self).__init__()
def render(self):
return '\n'
def data(self):
return {
'text': '\n',
'data': None,
'href': None
}
BrTag.update_forward_refs()
class SectionTagAttrs(BaseModel):
header: Optional[str]
footer: Optional[str]
name: Optional[str]
auto_select: bool = False
multi_select: bool = False
numbered: bool = False
chunking_footer: Optional[str]
confirmation_label: Optional[str]
method: Optional[str]
required: Optional[bool]
status_exclude: Optional[bool]
status_prepend: Optional[bool]
url: Optional[str]
validate_type_error: Optional[str]
validate_type_error_footer: Optional[str]
validate_url: Optional[str]
class SectionTag(Tag):
class Config:
tag_name = 'section'
attrs: SectionTagAttrs
def __init__(self, attrs: SectionTagAttrs = None, children: List = None):
children = children or []
allowed_children = (FooterTag, HeaderTag, UlTag, PTag,
InputTag, LabelTag, BrTag, str)
for child in children:
if not isinstance(child, allowed_children):
raise ONEmSDKException(
f'<{child.Config.tag_name}> cannot be child for <section>')
super(SectionTag, self).__init__(attrs=attrs, children=children)
def render(self, exclude_header: bool = False, exclude_footer: bool = False):
        # Add a temporary \n to simplify the spacing logic below
rendered_children = ['\n']
for child in self.children:
if isinstance(child, HeaderTag) and exclude_header:
# Do not include header
continue
if isinstance(child, FooterTag) and exclude_footer:
# Do not include footer
continue
if isinstance(child, str):
text = child
else:
text = child.render()
if text:
if isinstance(child, PTag) or isinstance(child, UlTag):
if rendered_children[-1] != '\n':
rendered_children.append('\n')
rendered_children.append(text)
rendered_children.append('\n')
else:
rendered_children.append(text)
# Remove the temporary \n
del rendered_children[0]
if rendered_children and rendered_children[-1] == '\n':
del rendered_children[-1]
return ''.join(rendered_children)
@classmethod
def get_attrs(cls, node: Node) -> SectionTagAttrs:
return SectionTagAttrs(
header=node.attrs.get('header'),
footer=node.attrs.get('footer'),
name=node.attrs.get('name'),
# Note that boolean attributes in HTML are evaluated to True if they are
# present (their actual value does not matter). They are evaluated to False
# only when they are missing
auto_select='auto-select' in node.attrs,
multi_select='multi-select' in node.attrs,
numbered='numbered' in node.attrs,
chunking_footer=node.attrs.get('chunking-footer'),
confirmation_label=node.attrs.get('confirmation-label'),
method=node.attrs.get('method'),
required='required' in node.attrs,
status_exclude='status-exclude' in node.attrs,
status_prepend='status-prepend' in node.attrs,
url=node.attrs.get('url'),
validate_type_error=node.attrs.get('validate-type-error'),
validate_type_error_footer=node.attrs.get('validate-type-error-footer'),
validate_url=node.attrs.get('validate-url'),
)
SectionTag.update_forward_refs()
class FormTagAttrs(BaseModel):
header: Optional[str]
footer: Optional[str]
action: str
method: str = 'POST'
completion_status_show: bool = False
completion_status_in_header: bool = False
skip_confirmation: bool = False
class FormTag(Tag):
class Config:
tag_name = 'form'
attrs: FormTagAttrs
children: List[SectionTag]
def __init__(self, attrs: FormTagAttrs, children: List[SectionTag]):
if not children:
raise ONEmSDKException('<form> must have at least 1 child')
for child in children:
if not isinstance(child, SectionTag):
raise ONEmSDKException('<form> can have only <section> children')
if not child.attrs.name:
raise ONEmSDKException('<form> can contain only named <section> tags. '
'Please add a unique "name" attribute in each form '
'section.')
super(FormTag, self).__init__(attrs=attrs, children=children)
@classmethod
def get_attrs(cls, node: Node):
return FormTagAttrs(
header=node.attrs.get('header'),
footer=node.attrs.get('footer'),
action=node.attrs.get('action'),
method=node.attrs.get('method') or 'POST',
completion_status_show='completion-status-show' in node.attrs,
completion_status_in_header='completion-status-in-header' in node.attrs,
skip_confirmation='skip-confirmation' in node.attrs,
)
def render(self):
return '\n'.join([child.render() for child in self.children])
FormTag.update_forward_refs()
_map_tag_cls = {}
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj) and issubclass(obj, Tag):
_map_tag_cls[obj.Config.tag_name] = obj
def get_tag_cls(tag_name: str) -> Type[Tag]:
global _map_tag_cls
try:
return _map_tag_cls[tag_name]
except KeyError:
raise ONEmSDKException(f'Tag <{tag_name}> is not supported')
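# Example usage (illustrative; assumes `node` is a parsed Node tree built
# elsewhere in the SDK):
#     tag_cls = get_tag_cls(node.tag)   # e.g. SectionTag for a <section> node
#     tag = tag_cls.from_node(node)     # recursively wraps child nodes in Tag objects
#     text = tag.render()               # plain-text rendering of the tree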
| [
"onemsdk.exceptions.NodeTagMismatchException",
"inspect.isclass",
"inspect.getmembers",
"onemsdk.exceptions.ONEmSDKException"
]
| [((13146, 13187), 'inspect.getmembers', 'inspect.getmembers', (['sys.modules[__name__]'], {}), '(sys.modules[__name__])\n', (13164, 13187), False, 'import inspect\n'), ((13196, 13216), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (13211, 13216), False, 'import inspect\n'), ((859, 952), 'onemsdk.exceptions.NodeTagMismatchException', 'NodeTagMismatchException', (['f"""Expected tag <{cls.Config.tag_name}>, received <{node.tag}>"""'], {}), "(\n f'Expected tag <{cls.Config.tag_name}>, received <{node.tag}>')\n", (883, 952), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n'), ((1689, 1744), 'onemsdk.exceptions.ONEmSDKException', 'ONEmSDKException', (['"""<header> must have max 1 text child"""'], {}), "('<header> must have max 1 text child')\n", (1705, 1744), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n'), ((2254, 2309), 'onemsdk.exceptions.ONEmSDKException', 'ONEmSDKException', (['"""<footer> must have max 1 text child"""'], {}), "('<footer> must have max 1 text child')\n", (2270, 2309), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n'), ((4528, 4582), 'onemsdk.exceptions.ONEmSDKException', 'ONEmSDKException', (['"""<label> must have max 1 text child"""'], {}), "('<label> must have max 1 text child')\n", (4544, 4582), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n'), ((5043, 5089), 'onemsdk.exceptions.ONEmSDKException', 'ONEmSDKException', (['"""<a> must have 1 text child"""'], {}), "('<a> must have 1 text child')\n", (5059, 5089), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n'), ((5915, 5971), 'onemsdk.exceptions.ONEmSDKException', 'ONEmSDKException', (['"""<li> must have 1 (text or <a>) child"""'], {}), "('<li> must have 1 (text or <a>) child')\n", (5931, 5971), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n'), ((6666, 6717), 'onemsdk.exceptions.ONEmSDKException', 'ONEmSDKException', (['"""<ul> must have min 1 <li> child"""'], {}), "('<ul> must have min 1 <li> child')\n", (6682, 6717), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n'), ((7148, 7198), 'onemsdk.exceptions.ONEmSDKException', 'ONEmSDKException', (['"""<p> must have max 1 text child"""'], {}), "('<p> must have max 1 text child')\n", (7164, 7198), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n'), ((11920, 11973), 'onemsdk.exceptions.ONEmSDKException', 'ONEmSDKException', (['"""<form> must have at least 1 child"""'], {}), "('<form> must have at least 1 child')\n", (11936, 11973), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n'), ((13445, 13499), 'onemsdk.exceptions.ONEmSDKException', 'ONEmSDKException', (['f"""Tag <{tag_name}> is not supported"""'], {}), "(f'Tag <{tag_name}> is not supported')\n", (13461, 13499), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n'), ((8847, 8923), 'onemsdk.exceptions.ONEmSDKException', 'ONEmSDKException', (['f"""<{child.Config.tag_name}> cannot be child for <section>"""'], {}), "(f'<{child.Config.tag_name}> cannot be child for <section>')\n", (8863, 8923), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n'), ((12077, 12136), 'onemsdk.exceptions.ONEmSDKException', 'ONEmSDKException', (['"""<form> can have only <section> children"""'], {}), "('<form> can have only <section> children')\n", 
(12093, 12136), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n'), ((12196, 12330), 'onemsdk.exceptions.ONEmSDKException', 'ONEmSDKException', (['"""<form> can contain only named <section> tags. Please add a unique "name" attribute in each form section."""'], {}), '(\n \'<form> can contain only named <section> tags. Please add a unique "name" attribute in each form section.\'\n )\n', (12212, 12330), False, 'from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException\n')] |
import json
import hashlib
import logging
import os
import threading
import time
import datetime
import itertools
import peewee
from passlib.apps import custom_app_context as pwd_context
from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase
from flask.ext.login import UserMixin, AnonymousUserMixin
import psycopg2
from redash import utils, settings, redis_connection
from redash.query_runner import get_query_runner
class Database(object):
def __init__(self):
self.database_config = dict(settings.DATABASE_CONFIG)
self.database_config['register_hstore'] = False
self.database_name = self.database_config.pop('name')
self.database = PostgresqlExtDatabase(self.database_name, **self.database_config)
self.app = None
self.pid = os.getpid()
def init_app(self, app):
self.app = app
self.register_handlers()
def connect_db(self):
self._check_pid()
self.database.connect()
def close_db(self, exc):
self._check_pid()
if not self.database.is_closed():
self.database.close()
def _check_pid(self):
current_pid = os.getpid()
if self.pid != current_pid:
logging.info("New pid detected (%d!=%d); resetting database lock.", self.pid, current_pid)
self.pid = os.getpid()
self.database._conn_lock = threading.Lock()
def register_handlers(self):
self.app.before_request(self.connect_db)
self.app.teardown_request(self.close_db)
db = Database()
class BaseModel(peewee.Model):
class Meta:
database = db.database
@classmethod
def get_by_id(cls, model_id):
return cls.get(cls.id == model_id)
def pre_save(self, created):
pass
def post_save(self, created):
        # Handler for post_save operations. Override if needed.
pass
def save(self, *args, **kwargs):
pk_value = self._get_pk_value()
created = kwargs.get('force_insert', False) or not bool(pk_value)
self.pre_save(created)
super(BaseModel, self).save(*args, **kwargs)
self.post_save(created)
class ModelTimestampsMixin(BaseModel):
updated_at = DateTimeTZField(default=datetime.datetime.now)
created_at = DateTimeTZField(default=datetime.datetime.now)
def pre_save(self, created):
super(ModelTimestampsMixin, self).pre_save(created)
self.updated_at = datetime.datetime.now()
class PermissionsCheckMixin(object):
def has_permission(self, permission):
return self.has_permissions((permission,))
def has_permissions(self, permissions):
has_permissions = reduce(lambda a, b: a and b,
map(lambda permission: permission in self.permissions,
permissions),
True)
return has_permissions
class AnonymousUser(AnonymousUserMixin, PermissionsCheckMixin):
@property
def permissions(self):
return []
class ApiUser(UserMixin, PermissionsCheckMixin):
def __init__(self, api_key):
self.id = api_key
def __repr__(self):
return u"<ApiUser: {}>".format(self.id)
@property
def permissions(self):
return ['view_query']
class Group(BaseModel):
DEFAULT_PERMISSIONS = ['create_dashboard', 'create_query', 'edit_dashboard', 'edit_query',
'view_query', 'view_source', 'execute_query']
id = peewee.PrimaryKeyField()
name = peewee.CharField(max_length=100)
permissions = ArrayField(peewee.CharField, default=DEFAULT_PERMISSIONS)
tables = ArrayField(peewee.CharField)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'groups'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'permissions': self.permissions,
'tables': self.tables,
'created_at': self.created_at
}
def __unicode__(self):
return unicode(self.id)
class User(ModelTimestampsMixin, BaseModel, UserMixin, PermissionsCheckMixin):
DEFAULT_GROUPS = ['default']
id = peewee.PrimaryKeyField()
name = peewee.CharField(max_length=320)
email = peewee.CharField(max_length=320, index=True, unique=True)
password_hash = peewee.CharField(max_length=128, null=True)
groups = ArrayField(peewee.CharField, default=DEFAULT_GROUPS)
class Meta:
db_table = 'users'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'email': self.email,
'updated_at': self.updated_at,
'created_at': self.created_at
}
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
self._allowed_tables = None
@property
def permissions(self):
# TODO: this should be cached.
return list(itertools.chain(*[g.permissions for g in
Group.select().where(Group.name << self.groups)]))
@property
def allowed_tables(self):
        # TODO: cache this as well
if self._allowed_tables is None:
self._allowed_tables = set([t.lower() for t in itertools.chain(*[g.tables for g in
Group.select().where(Group.name << self.groups)])])
return self._allowed_tables
@classmethod
def get_by_email(cls, email):
return cls.get(cls.email == email)
def __unicode__(self):
return '%r, %r' % (self.name, self.email)
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return self.password_hash and pwd_context.verify(password, self.password_hash)
class ActivityLog(BaseModel):
QUERY_EXECUTION = 1
id = peewee.PrimaryKeyField()
user = peewee.ForeignKeyField(User)
type = peewee.IntegerField()
activity = peewee.TextField()
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'activity_log'
def to_dict(self):
return {
'id': self.id,
'user': self.user.to_dict(),
'type': self.type,
'activity': self.activity,
'created_at': self.created_at
}
def __unicode__(self):
return unicode(self.id)
class DataSource(BaseModel):
id = peewee.PrimaryKeyField()
name = peewee.CharField(unique=True)
type = peewee.CharField()
options = peewee.TextField()
queue_name = peewee.CharField(default="queries")
scheduled_queue_name = peewee.CharField(default="queries")
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'data_sources'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'type': self.type,
'syntax': self.query_runner.syntax
}
def get_schema(self, refresh=False):
key = "data_source:schema:{}".format(self.id)
cache = None
if not refresh:
cache = redis_connection.get(key)
if cache is None:
query_runner = self.query_runner
schema = sorted(query_runner.get_schema(), key=lambda t: t['name'])
redis_connection.set(key, json.dumps(schema))
else:
schema = json.loads(cache)
return schema
@property
def query_runner(self):
return get_query_runner(self.type, self.options)
@classmethod
def all(cls):
return cls.select().order_by(cls.id.asc())
class QueryResult(BaseModel):
id = peewee.PrimaryKeyField()
data_source = peewee.ForeignKeyField(DataSource)
query_hash = peewee.CharField(max_length=32, index=True)
query = peewee.TextField()
data = peewee.TextField()
runtime = peewee.FloatField()
retrieved_at = DateTimeTZField()
class Meta:
db_table = 'query_results'
def to_dict(self):
return {
'id': self.id,
'query_hash': self.query_hash,
'query': self.query,
'data': json.loads(self.data),
'data_source_id': self._data.get('data_source', None),
'runtime': self.runtime,
'retrieved_at': self.retrieved_at
}
@classmethod
def unused(cls):
week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
unused_results = cls.select().where(Query.id == None, cls.retrieved_at < week_ago)\
.join(Query, join_type=peewee.JOIN_LEFT_OUTER)
return unused_results
@classmethod
def get_latest(cls, data_source, query, max_age=0):
query_hash = utils.gen_query_hash(query)
if max_age == -1:
query = cls.select().where(cls.query_hash == query_hash,
cls.data_source == data_source).order_by(cls.retrieved_at.desc())
else:
query = cls.select().where(cls.query_hash == query_hash, cls.data_source == data_source,
peewee.SQL("retrieved_at + interval '%s second' >= now() at time zone 'utc'",
max_age)).order_by(cls.retrieved_at.desc())
return query.first()
@classmethod
def store_result(cls, data_source_id, query_hash, query, data, run_time, retrieved_at):
query_result = cls.create(query_hash=query_hash,
query=query,
runtime=run_time,
data_source=data_source_id,
retrieved_at=retrieved_at,
data=data)
logging.info("Inserted query (%s) data; id=%s", query_hash, query_result.id)
updated_count = Query.update(latest_query_data=query_result).\
where(Query.query_hash==query_hash, Query.data_source==data_source_id).\
execute()
logging.info("Updated %s queries with result (%s).", updated_count, query_hash)
return query_result
def __unicode__(self):
return u"%d | %s | %s" % (self.id, self.query_hash, self.retrieved_at)
def should_schedule_next(previous_iteration, now, schedule):
if schedule.isdigit():
ttl = int(schedule)
next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)
else:
hour, minute = schedule.split(':')
hour, minute = int(hour), int(minute)
        # The following logic is needed for cases like the following:
        # - The query is scheduled to run at 23:59.
        # - The scheduler wakes up at 00:01.
        # - A naive comparison of the timestamps would skip the execution.
normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)
if normalized_previous_iteration > previous_iteration:
previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)
next_iteration = (previous_iteration + datetime.timedelta(days=1)).replace(hour=hour, minute=minute)
return now > next_iteration
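# Worked example (illustrative values, not part of the original module):
# - schedule="3600": a query last run at 10:00 becomes due once `now` passes 11:00.
# - schedule="23:59": a query last run Monday at 23:59 becomes due once `now` passes
#   Tuesday 23:59, even if the scheduler only wakes up a few minutes after midnight.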
class Query(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
data_source = peewee.ForeignKeyField(DataSource)
latest_query_data = peewee.ForeignKeyField(QueryResult, null=True)
name = peewee.CharField(max_length=255)
description = peewee.CharField(max_length=4096, null=True)
query = peewee.TextField()
query_hash = peewee.CharField(max_length=32)
api_key = peewee.CharField(max_length=40)
user_email = peewee.CharField(max_length=360, null=True)
user = peewee.ForeignKeyField(User)
last_modified_by = peewee.ForeignKeyField(User, null=True, related_name="modified_queries")
is_archived = peewee.BooleanField(default=False, index=True)
schedule = peewee.CharField(max_length=10, null=True)
class Meta:
db_table = 'queries'
def to_dict(self, with_stats=False, with_visualizations=False, with_user=True):
d = {
'id': self.id,
'latest_query_data_id': self._data.get('latest_query_data', None),
'name': self.name,
'description': self.description,
'query': self.query,
'query_hash': self.query_hash,
'schedule': self.schedule,
'api_key': self.api_key,
'is_archived': self.is_archived,
'updated_at': self.updated_at,
'created_at': self.created_at,
'data_source_id': self._data.get('data_source', None)
}
if with_user:
d['user'] = self.user.to_dict()
d['last_modified_by'] = self.last_modified_by.to_dict()
else:
d['user_id'] = self._data['user']
if with_stats:
d['retrieved_at'] = self.retrieved_at
d['runtime'] = self.runtime
if with_visualizations:
d['visualizations'] = [vis.to_dict(with_query=False)
for vis in self.visualizations]
return d
def archive(self):
self.is_archived = True
self.schedule = None
for vis in self.visualizations:
for w in vis.widgets:
w.delete_instance()
self.save()
@classmethod
def all_queries(cls):
q = Query.select(Query, User, QueryResult.retrieved_at, QueryResult.runtime)\
.join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER)\
.switch(Query).join(User)\
.where(Query.is_archived==False)\
.group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)\
.order_by(cls.created_at.desc())
return q
@classmethod
def outdated_queries(cls):
queries = cls.select(cls, QueryResult.retrieved_at, DataSource)\
.join(QueryResult)\
.switch(Query).join(DataSource)\
.where(cls.schedule != None)
now = datetime.datetime.utcnow().replace(tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=0, name=None))
outdated_queries = {}
for query in queries:
if should_schedule_next(query.latest_query_data.retrieved_at, now, query.schedule):
key = "{}:{}".format(query.query_hash, query.data_source.id)
outdated_queries[key] = query
return outdated_queries.values()
@classmethod
def search(cls, term):
        # This is a very naive implementation of search, to be replaced with a PostgreSQL full-text-search solution.
where = (cls.name**u"%{}%".format(term)) | (cls.description**u"%{}%".format(term))
if term.isdigit():
where |= cls.id == term
where &= cls.is_archived == False
return cls.select().where(where).order_by(cls.created_at.desc())
@classmethod
def recent(cls, user_id):
# TODO: instead of t2 here, we should define table_alias for Query table
return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")).\
join(Event, on=(Query.id == peewee.SQL("t2.object_id::integer"))).\
where(Event.action << ('edit', 'execute', 'edit_name', 'edit_description', 'view_source')).\
where(Event.user == user_id).\
where(~(Event.object_id >> None)).\
where(Event.object_type == 'query'). \
where(cls.is_archived == False).\
group_by(Event.object_id, Query.id).\
order_by(peewee.SQL("count(0) desc"))
@classmethod
def update_instance(cls, query_id, **kwargs):
if 'query' in kwargs:
kwargs['query_hash'] = utils.gen_query_hash(kwargs['query'])
update = cls.update(**kwargs).where(cls.id == query_id)
return update.execute()
def pre_save(self, created):
super(Query, self).pre_save(created)
self.query_hash = utils.gen_query_hash(self.query)
self._set_api_key()
if self.last_modified_by is None:
self.last_modified_by = self.user
def post_save(self, created):
if created:
self._create_default_visualizations()
def _create_default_visualizations(self):
table_visualization = Visualization(query=self, name="Table",
description='',
type="TABLE", options="{}")
table_visualization.save()
def _set_api_key(self):
if not self.api_key:
self.api_key = hashlib.sha1(
u''.join((str(time.time()), self.query, str(self._data['user']), self.name)).encode('utf-8')).hexdigest()
@property
def runtime(self):
return self.latest_query_data.runtime
@property
def retrieved_at(self):
return self.latest_query_data.retrieved_at
def __unicode__(self):
return unicode(self.id)
class Dashboard(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
slug = peewee.CharField(max_length=140, index=True)
name = peewee.CharField(max_length=100)
user_email = peewee.CharField(max_length=360, null=True)
user = peewee.ForeignKeyField(User)
layout = peewee.TextField()
dashboard_filters_enabled = peewee.BooleanField(default=False)
is_archived = peewee.BooleanField(default=False, index=True)
class Meta:
db_table = 'dashboards'
def to_dict(self, with_widgets=False):
layout = json.loads(self.layout)
if with_widgets:
widgets = Widget.select(Widget, Visualization, Query, User)\
.where(Widget.dashboard == self.id)\
.join(Visualization, join_type=peewee.JOIN_LEFT_OUTER)\
.join(Query, join_type=peewee.JOIN_LEFT_OUTER)\
.join(User, join_type=peewee.JOIN_LEFT_OUTER)
widgets = {w.id: w.to_dict() for w in widgets}
            # The following is a workaround for cases when the widget object gets deleted without the dashboard layout
            # being updated. This happens for users with old databases that didn't have a foreign key relationship between
            # visualizations and widgets.
            # It's temporary until a better solution is implemented (we should probably move the position information
            # to the widget).
widgets_layout = []
for row in layout:
new_row = []
for widget_id in row:
widget = widgets.get(widget_id, None)
if widget:
new_row.append(widget)
widgets_layout.append(new_row)
# widgets_layout = map(lambda row: map(lambda widget_id: widgets.get(widget_id, None), row), layout)
else:
widgets_layout = None
return {
'id': self.id,
'slug': self.slug,
'name': self.name,
'user_id': self._data['user'],
'layout': layout,
'dashboard_filters_enabled': self.dashboard_filters_enabled,
'widgets': widgets_layout,
'updated_at': self.updated_at,
'created_at': self.created_at
}
@classmethod
def get_by_slug(cls, slug):
return cls.get(cls.slug == slug)
@classmethod
def recent(cls, user_id):
return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")). \
join(Event, on=(Dashboard.id == peewee.SQL("t2.object_id::integer"))). \
where(Event.action << ('edit', 'view')).\
where(Event.user == user_id). \
where(~(Event.object_id >> None)). \
where(Event.object_type == 'dashboard'). \
group_by(Event.object_id, Dashboard.id). \
order_by(peewee.SQL("count(0) desc"))
def save(self, *args, **kwargs):
if not self.slug:
self.slug = utils.slugify(self.name)
tries = 1
while self.select().where(Dashboard.slug == self.slug).first() is not None:
self.slug = utils.slugify(self.name) + "_{0}".format(tries)
tries += 1
super(Dashboard, self).save(*args, **kwargs)
def __unicode__(self):
return u"%s=%s" % (self.id, self.name)
class Visualization(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
type = peewee.CharField(max_length=100)
query = peewee.ForeignKeyField(Query, related_name='visualizations')
name = peewee.CharField(max_length=255)
description = peewee.CharField(max_length=4096, null=True)
options = peewee.TextField()
class Meta:
db_table = 'visualizations'
def to_dict(self, with_query=True):
d = {
'id': self.id,
'type': self.type,
'name': self.name,
'description': self.description,
'options': json.loads(self.options),
'updated_at': self.updated_at,
'created_at': self.created_at
}
if with_query:
d['query'] = self.query.to_dict()
return d
def __unicode__(self):
return u"%s %s" % (self.id, self.type)
class Widget(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
visualization = peewee.ForeignKeyField(Visualization, related_name='widgets', null=True)
text = peewee.TextField(null=True)
width = peewee.IntegerField()
options = peewee.TextField()
dashboard = peewee.ForeignKeyField(Dashboard, related_name='widgets', index=True)
    # unused; kept for backward compatibility:
type = peewee.CharField(max_length=100, null=True)
query_id = peewee.IntegerField(null=True)
class Meta:
db_table = 'widgets'
def to_dict(self):
d = {
'id': self.id,
'width': self.width,
'options': json.loads(self.options),
'dashboard_id': self._data['dashboard'],
'text': self.text,
'updated_at': self.updated_at,
'created_at': self.created_at
}
if self.visualization and self.visualization.id:
d['visualization'] = self.visualization.to_dict()
return d
def __unicode__(self):
return u"%s" % self.id
def delete_instance(self, *args, **kwargs):
layout = json.loads(self.dashboard.layout)
layout = map(lambda row: filter(lambda w: w != self.id, row), layout)
layout = filter(lambda row: len(row) > 0, layout)
self.dashboard.layout = json.dumps(layout)
self.dashboard.save()
super(Widget, self).delete_instance(*args, **kwargs)
class Event(BaseModel):
user = peewee.ForeignKeyField(User, related_name="events", null=True)
action = peewee.CharField()
object_type = peewee.CharField()
object_id = peewee.CharField(null=True)
additional_properties = peewee.TextField(null=True)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'events'
def __unicode__(self):
return u"%s,%s,%s,%s" % (self._data['user'], self.action, self.object_type, self.object_id)
@classmethod
def record(cls, event):
user = event.pop('user_id')
action = event.pop('action')
object_type = event.pop('object_type')
object_id = event.pop('object_id', None)
created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))
additional_properties = json.dumps(event)
event = cls.create(user=user, action=action, object_type=object_type, object_id=object_id,
additional_properties=additional_properties, created_at=created_at)
return event
all_models = (DataSource, User, QueryResult, Query, Dashboard, Visualization, Widget, ActivityLog, Group, Event)
def init_db():
Group.insert(name='admin', permissions=['admin'], tables=['*']).execute()
Group.insert(name='default', permissions=Group.DEFAULT_PERMISSIONS, tables=['*']).execute()
def create_db(create_tables, drop_tables):
db.connect_db()
for model in all_models:
if drop_tables and model.table_exists():
# TODO: submit PR to peewee to allow passing cascade option to drop_table.
db.database.execute_sql('DROP TABLE %s CASCADE' % model._meta.db_table)
if create_tables and not model.table_exists():
model.create_table()
db.close_db(None)
| [
"playhouse.postgres_ext.ArrayField",
"redash.utils.slugify",
"passlib.apps.custom_app_context.verify",
"datetime.timedelta",
"logging.info",
"redash.redis_connection.get",
"redash.utils.gen_query_hash",
"peewee.SQL",
"threading.Lock",
"json.dumps",
"peewee.TextField",
"passlib.apps.custom_app_context.encrypt",
"os.getpid",
"peewee.PrimaryKeyField",
"peewee.CharField",
"json.loads",
"psycopg2.tz.FixedOffsetTimezone",
"peewee.IntegerField",
"playhouse.postgres_ext.DateTimeTZField",
"peewee.FloatField",
"time.time",
"peewee.BooleanField",
"datetime.datetime.utcnow",
"peewee.ForeignKeyField",
"playhouse.postgres_ext.PostgresqlExtDatabase",
"datetime.datetime.now",
"redash.query_runner.get_query_runner"
]
| [((2232, 2278), 'playhouse.postgres_ext.DateTimeTZField', 'DateTimeTZField', ([], {'default': 'datetime.datetime.now'}), '(default=datetime.datetime.now)\n', (2247, 2278), False, 'from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase\n'), ((2296, 2342), 'playhouse.postgres_ext.DateTimeTZField', 'DateTimeTZField', ([], {'default': 'datetime.datetime.now'}), '(default=datetime.datetime.now)\n', (2311, 2342), False, 'from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase\n'), ((3514, 3538), 'peewee.PrimaryKeyField', 'peewee.PrimaryKeyField', ([], {}), '()\n', (3536, 3538), False, 'import peewee\n'), ((3550, 3582), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (3566, 3582), False, 'import peewee\n'), ((3601, 3658), 'playhouse.postgres_ext.ArrayField', 'ArrayField', (['peewee.CharField'], {'default': 'DEFAULT_PERMISSIONS'}), '(peewee.CharField, default=DEFAULT_PERMISSIONS)\n', (3611, 3658), False, 'from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase\n'), ((3672, 3700), 'playhouse.postgres_ext.ArrayField', 'ArrayField', (['peewee.CharField'], {}), '(peewee.CharField)\n', (3682, 3700), False, 'from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase\n'), ((3718, 3764), 'playhouse.postgres_ext.DateTimeTZField', 'DateTimeTZField', ([], {'default': 'datetime.datetime.now'}), '(default=datetime.datetime.now)\n', (3733, 3764), False, 'from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase\n'), ((4225, 4249), 'peewee.PrimaryKeyField', 'peewee.PrimaryKeyField', ([], {}), '()\n', (4247, 4249), False, 'import peewee\n'), ((4261, 4293), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(320)'}), '(max_length=320)\n', (4277, 4293), False, 'import peewee\n'), ((4306, 4363), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(320)', 'index': '(True)', 'unique': '(True)'}), '(max_length=320, index=True, unique=True)\n', (4322, 4363), False, 'import peewee\n'), ((4384, 4427), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(128)', 'null': '(True)'}), '(max_length=128, null=True)\n', (4400, 4427), False, 'import peewee\n'), ((4441, 4493), 'playhouse.postgres_ext.ArrayField', 'ArrayField', (['peewee.CharField'], {'default': 'DEFAULT_GROUPS'}), '(peewee.CharField, default=DEFAULT_GROUPS)\n', (4451, 4493), False, 'from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase\n'), ((5938, 5962), 'peewee.PrimaryKeyField', 'peewee.PrimaryKeyField', ([], {}), '()\n', (5960, 5962), False, 'import peewee\n'), ((5974, 6002), 'peewee.ForeignKeyField', 'peewee.ForeignKeyField', (['User'], {}), '(User)\n', (5996, 6002), False, 'import peewee\n'), ((6014, 6035), 'peewee.IntegerField', 'peewee.IntegerField', ([], {}), '()\n', (6033, 6035), False, 'import peewee\n'), ((6051, 6069), 'peewee.TextField', 'peewee.TextField', ([], {}), '()\n', (6067, 6069), False, 'import peewee\n'), ((6087, 6133), 'playhouse.postgres_ext.DateTimeTZField', 'DateTimeTZField', ([], {'default': 'datetime.datetime.now'}), '(default=datetime.datetime.now)\n', (6102, 6133), False, 'from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase\n'), ((6516, 6540), 'peewee.PrimaryKeyField', 'peewee.PrimaryKeyField', ([], {}), '()\n', (6538, 6540), False, 'import peewee\n'), ((6552, 6581), 'peewee.CharField', 'peewee.CharField', ([], {'unique': '(True)'}), '(unique=True)\n', 
(6568, 6581), False, 'import peewee\n'), ((6593, 6611), 'peewee.CharField', 'peewee.CharField', ([], {}), '()\n', (6609, 6611), False, 'import peewee\n'), ((6626, 6644), 'peewee.TextField', 'peewee.TextField', ([], {}), '()\n', (6642, 6644), False, 'import peewee\n'), ((6662, 6697), 'peewee.CharField', 'peewee.CharField', ([], {'default': '"""queries"""'}), "(default='queries')\n", (6678, 6697), False, 'import peewee\n'), ((6725, 6760), 'peewee.CharField', 'peewee.CharField', ([], {'default': '"""queries"""'}), "(default='queries')\n", (6741, 6760), False, 'import peewee\n'), ((6778, 6824), 'playhouse.postgres_ext.DateTimeTZField', 'DateTimeTZField', ([], {'default': 'datetime.datetime.now'}), '(default=datetime.datetime.now)\n', (6793, 6824), False, 'from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase\n'), ((7766, 7790), 'peewee.PrimaryKeyField', 'peewee.PrimaryKeyField', ([], {}), '()\n', (7788, 7790), False, 'import peewee\n'), ((7809, 7843), 'peewee.ForeignKeyField', 'peewee.ForeignKeyField', (['DataSource'], {}), '(DataSource)\n', (7831, 7843), False, 'import peewee\n'), ((7861, 7904), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(32)', 'index': '(True)'}), '(max_length=32, index=True)\n', (7877, 7904), False, 'import peewee\n'), ((7917, 7935), 'peewee.TextField', 'peewee.TextField', ([], {}), '()\n', (7933, 7935), False, 'import peewee\n'), ((7947, 7965), 'peewee.TextField', 'peewee.TextField', ([], {}), '()\n', (7963, 7965), False, 'import peewee\n'), ((7980, 7999), 'peewee.FloatField', 'peewee.FloatField', ([], {}), '()\n', (7997, 7999), False, 'import peewee\n'), ((8019, 8036), 'playhouse.postgres_ext.DateTimeTZField', 'DateTimeTZField', ([], {}), '()\n', (8034, 8036), False, 'from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase\n'), ((11334, 11358), 'peewee.PrimaryKeyField', 'peewee.PrimaryKeyField', ([], {}), '()\n', (11356, 11358), False, 'import peewee\n'), ((11377, 11411), 'peewee.ForeignKeyField', 'peewee.ForeignKeyField', (['DataSource'], {}), '(DataSource)\n', (11399, 11411), False, 'import peewee\n'), ((11436, 11482), 'peewee.ForeignKeyField', 'peewee.ForeignKeyField', (['QueryResult'], {'null': '(True)'}), '(QueryResult, null=True)\n', (11458, 11482), False, 'import peewee\n'), ((11494, 11526), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (11510, 11526), False, 'import peewee\n'), ((11545, 11589), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(4096)', 'null': '(True)'}), '(max_length=4096, null=True)\n', (11561, 11589), False, 'import peewee\n'), ((11602, 11620), 'peewee.TextField', 'peewee.TextField', ([], {}), '()\n', (11618, 11620), False, 'import peewee\n'), ((11638, 11669), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (11654, 11669), False, 'import peewee\n'), ((11684, 11715), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (11700, 11715), False, 'import peewee\n'), ((11733, 11776), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(360)', 'null': '(True)'}), '(max_length=360, null=True)\n', (11749, 11776), False, 'import peewee\n'), ((11788, 11816), 'peewee.ForeignKeyField', 'peewee.ForeignKeyField', (['User'], {}), '(User)\n', (11810, 11816), False, 'import peewee\n'), ((11840, 11912), 'peewee.ForeignKeyField', 'peewee.ForeignKeyField', (['User'], {'null': '(True)', 'related_name': '"""modified_queries"""'}), "(User, 
null=True, related_name='modified_queries')\n", (11862, 11912), False, 'import peewee\n'), ((11931, 11977), 'peewee.BooleanField', 'peewee.BooleanField', ([], {'default': '(False)', 'index': '(True)'}), '(default=False, index=True)\n', (11950, 11977), False, 'import peewee\n'), ((11993, 12035), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(10)', 'null': '(True)'}), '(max_length=10, null=True)\n', (12009, 12035), False, 'import peewee\n'), ((17104, 17128), 'peewee.PrimaryKeyField', 'peewee.PrimaryKeyField', ([], {}), '()\n', (17126, 17128), False, 'import peewee\n'), ((17140, 17184), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(140)', 'index': '(True)'}), '(max_length=140, index=True)\n', (17156, 17184), False, 'import peewee\n'), ((17196, 17228), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (17212, 17228), False, 'import peewee\n'), ((17246, 17289), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(360)', 'null': '(True)'}), '(max_length=360, null=True)\n', (17262, 17289), False, 'import peewee\n'), ((17301, 17329), 'peewee.ForeignKeyField', 'peewee.ForeignKeyField', (['User'], {}), '(User)\n', (17323, 17329), False, 'import peewee\n'), ((17343, 17361), 'peewee.TextField', 'peewee.TextField', ([], {}), '()\n', (17359, 17361), False, 'import peewee\n'), ((17394, 17428), 'peewee.BooleanField', 'peewee.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (17413, 17428), False, 'import peewee\n'), ((17447, 17493), 'peewee.BooleanField', 'peewee.BooleanField', ([], {'default': '(False)', 'index': '(True)'}), '(default=False, index=True)\n', (17466, 17493), False, 'import peewee\n'), ((20465, 20489), 'peewee.PrimaryKeyField', 'peewee.PrimaryKeyField', ([], {}), '()\n', (20487, 20489), False, 'import peewee\n'), ((20501, 20533), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (20517, 20533), False, 'import peewee\n'), ((20546, 20606), 'peewee.ForeignKeyField', 'peewee.ForeignKeyField', (['Query'], {'related_name': '"""visualizations"""'}), "(Query, related_name='visualizations')\n", (20568, 20606), False, 'import peewee\n'), ((20618, 20650), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (20634, 20650), False, 'import peewee\n'), ((20669, 20713), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(4096)', 'null': '(True)'}), '(max_length=4096, null=True)\n', (20685, 20713), False, 'import peewee\n'), ((20728, 20746), 'peewee.TextField', 'peewee.TextField', ([], {}), '()\n', (20744, 20746), False, 'import peewee\n'), ((21354, 21378), 'peewee.PrimaryKeyField', 'peewee.PrimaryKeyField', ([], {}), '()\n', (21376, 21378), False, 'import peewee\n'), ((21399, 21471), 'peewee.ForeignKeyField', 'peewee.ForeignKeyField', (['Visualization'], {'related_name': '"""widgets"""', 'null': '(True)'}), "(Visualization, related_name='widgets', null=True)\n", (21421, 21471), False, 'import peewee\n'), ((21483, 21510), 'peewee.TextField', 'peewee.TextField', ([], {'null': '(True)'}), '(null=True)\n', (21499, 21510), False, 'import peewee\n'), ((21523, 21544), 'peewee.IntegerField', 'peewee.IntegerField', ([], {}), '()\n', (21542, 21544), False, 'import peewee\n'), ((21559, 21577), 'peewee.TextField', 'peewee.TextField', ([], {}), '()\n', (21575, 21577), False, 'import peewee\n'), ((21594, 21663), 'peewee.ForeignKeyField', 'peewee.ForeignKeyField', (['Dashboard'], {'related_name': '"""widgets"""', 'index': 
'(True)'}), "(Dashboard, related_name='widgets', index=True)\n", (21616, 21663), False, 'import peewee\n'), ((21723, 21766), 'peewee.CharField', 'peewee.CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (21739, 21766), False, 'import peewee\n'), ((21782, 21812), 'peewee.IntegerField', 'peewee.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (21801, 21812), False, 'import peewee\n'), ((22801, 22863), 'peewee.ForeignKeyField', 'peewee.ForeignKeyField', (['User'], {'related_name': '"""events"""', 'null': '(True)'}), "(User, related_name='events', null=True)\n", (22823, 22863), False, 'import peewee\n'), ((22877, 22895), 'peewee.CharField', 'peewee.CharField', ([], {}), '()\n', (22893, 22895), False, 'import peewee\n'), ((22914, 22932), 'peewee.CharField', 'peewee.CharField', ([], {}), '()\n', (22930, 22932), False, 'import peewee\n'), ((22949, 22976), 'peewee.CharField', 'peewee.CharField', ([], {'null': '(True)'}), '(null=True)\n', (22965, 22976), False, 'import peewee\n'), ((23005, 23032), 'peewee.TextField', 'peewee.TextField', ([], {'null': '(True)'}), '(null=True)\n', (23021, 23032), False, 'import peewee\n'), ((23050, 23096), 'playhouse.postgres_ext.DateTimeTZField', 'DateTimeTZField', ([], {'default': 'datetime.datetime.now'}), '(default=datetime.datetime.now)\n', (23065, 23096), False, 'from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase\n'), ((705, 770), 'playhouse.postgres_ext.PostgresqlExtDatabase', 'PostgresqlExtDatabase', (['self.database_name'], {}), '(self.database_name, **self.database_config)\n', (726, 770), False, 'from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase\n'), ((814, 825), 'os.getpid', 'os.getpid', ([], {}), '()\n', (823, 825), False, 'import os\n'), ((1178, 1189), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1187, 1189), False, 'import os\n'), ((2464, 2487), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2485, 2487), False, 'import datetime\n'), ((5713, 5742), 'passlib.apps.custom_app_context.encrypt', 'pwd_context.encrypt', (['password'], {}), '(password)\n', (5732, 5742), True, 'from passlib.apps import custom_app_context as pwd_context\n'), ((7596, 7637), 'redash.query_runner.get_query_runner', 'get_query_runner', (['self.type', 'self.options'], {}), '(self.type, self.options)\n', (7612, 7637), False, 'from redash.query_runner import get_query_runner\n'), ((8825, 8852), 'redash.utils.gen_query_hash', 'utils.gen_query_hash', (['query'], {}), '(query)\n', (8845, 8852), False, 'from redash import utils, settings, redis_connection\n'), ((9853, 9929), 'logging.info', 'logging.info', (['"""Inserted query (%s) data; id=%s"""', 'query_hash', 'query_result.id'], {}), "('Inserted query (%s) data; id=%s', query_hash, query_result.id)\n", (9865, 9929), False, 'import logging\n'), ((10118, 10197), 'logging.info', 'logging.info', (['"""Updated %s queries with result (%s)."""', 'updated_count', 'query_hash'], {}), "('Updated %s queries with result (%s).', updated_count, query_hash)\n", (10130, 10197), False, 'import logging\n'), ((16045, 16077), 'redash.utils.gen_query_hash', 'utils.gen_query_hash', (['self.query'], {}), '(self.query)\n', (16065, 16077), False, 'from redash import utils, settings, redis_connection\n'), ((17604, 17627), 'json.loads', 'json.loads', (['self.layout'], {}), '(self.layout)\n', (17614, 17627), False, 'import json\n'), ((22452, 22485), 'json.loads', 'json.loads', (['self.dashboard.layout'], {}), 
'(self.dashboard.layout)\n', (22462, 22485), False, 'import json\n'), ((22654, 22672), 'json.dumps', 'json.dumps', (['layout'], {}), '(layout)\n', (22664, 22672), False, 'import json\n'), ((23598, 23615), 'json.dumps', 'json.dumps', (['event'], {}), '(event)\n', (23608, 23615), False, 'import json\n'), ((1238, 1333), 'logging.info', 'logging.info', (['"""New pid detected (%d!=%d); resetting database lock."""', 'self.pid', 'current_pid'], {}), "('New pid detected (%d!=%d); resetting database lock.', self.\n pid, current_pid)\n", (1250, 1333), False, 'import logging\n'), ((1352, 1363), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1361, 1363), False, 'import os\n'), ((1403, 1419), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1417, 1419), False, 'import threading\n'), ((5823, 5871), 'passlib.apps.custom_app_context.verify', 'pwd_context.verify', (['password', 'self.password_hash'], {}), '(password, self.password_hash)\n', (5841, 5871), True, 'from passlib.apps import custom_app_context as pwd_context\n'), ((7225, 7250), 'redash.redis_connection.get', 'redis_connection.get', (['key'], {}), '(key)\n', (7245, 7250), False, 'from redash import utils, settings, redis_connection\n'), ((7497, 7514), 'json.loads', 'json.loads', (['cache'], {}), '(cache)\n', (7507, 7514), False, 'import json\n'), ((8253, 8274), 'json.loads', 'json.loads', (['self.data'], {}), '(self.data)\n', (8263, 8274), False, 'import json\n'), ((8494, 8517), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8515, 8517), False, 'import datetime\n'), ((8520, 8546), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (8538, 8546), False, 'import datetime\n'), ((10498, 10529), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'ttl'}), '(seconds=ttl)\n', (10516, 10529), False, 'import datetime\n'), ((15643, 15670), 'peewee.SQL', 'peewee.SQL', (['"""count(0) desc"""'], {}), "('count(0) desc')\n", (15653, 15670), False, 'import peewee\n'), ((15805, 15842), 'redash.utils.gen_query_hash', 'utils.gen_query_hash', (["kwargs['query']"], {}), "(kwargs['query'])\n", (15825, 15842), False, 'from redash import utils, settings, redis_connection\n'), ((19915, 19942), 'peewee.SQL', 'peewee.SQL', (['"""count(0) desc"""'], {}), "('count(0) desc')\n", (19925, 19942), False, 'import peewee\n'), ((20032, 20056), 'redash.utils.slugify', 'utils.slugify', (['self.name'], {}), '(self.name)\n', (20045, 20056), False, 'from redash import utils, settings, redis_connection\n'), ((21012, 21036), 'json.loads', 'json.loads', (['self.options'], {}), '(self.options)\n', (21022, 21036), False, 'import json\n'), ((21980, 22004), 'json.loads', 'json.loads', (['self.options'], {}), '(self.options)\n', (21990, 22004), False, 'import json\n'), ((7442, 7460), 'json.dumps', 'json.dumps', (['schema'], {}), '(schema)\n', (7452, 7460), False, 'import json\n'), ((11107, 11133), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (11125, 11133), False, 'import datetime\n'), ((14135, 14161), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (14159, 14161), False, 'import datetime\n'), ((14177, 14229), 'psycopg2.tz.FixedOffsetTimezone', 'psycopg2.tz.FixedOffsetTimezone', ([], {'offset': '(0)', 'name': 'None'}), '(offset=0, name=None)\n', (14208, 14229), False, 'import psycopg2\n'), ((11182, 11208), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (11200, 11208), False, 'import datetime\n'), ((20196, 20220), 'redash.utils.slugify', 
'utils.slugify', (['self.name'], {}), '(self.name)\n', (20209, 20220), False, 'from redash import utils, settings, redis_connection\n'), ((9208, 9298), 'peewee.SQL', 'peewee.SQL', (['"""retrieved_at + interval \'%s second\' >= now() at time zone \'utc\'"""', 'max_age'], {}), '("retrieved_at + interval \'%s second\' >= now() at time zone \'utc\'",\n max_age)\n', (9218, 9298), False, 'import peewee\n'), ((16713, 16724), 'time.time', 'time.time', ([], {}), '()\n', (16722, 16724), False, 'import time\n'), ((19596, 19631), 'peewee.SQL', 'peewee.SQL', (['"""t2.object_id::integer"""'], {}), "('t2.object_id::integer')\n", (19606, 19631), False, 'import peewee\n'), ((19517, 19547), 'peewee.SQL', 'peewee.SQL', (['"""current_date - 7"""'], {}), "('current_date - 7')\n", (19527, 19547), False, 'import peewee\n'), ((15239, 15274), 'peewee.SQL', 'peewee.SQL', (['"""t2.object_id::integer"""'], {}), "('t2.object_id::integer')\n", (15249, 15274), False, 'import peewee\n'), ((15165, 15195), 'peewee.SQL', 'peewee.SQL', (['"""current_date - 7"""'], {}), "('current_date - 7')\n", (15175, 15195), False, 'import peewee\n')] |
#!/usr/bin/env python3
import ipaddress
import random
import unittest
import ipgroup
class TestGroupIPs(unittest.TestCase):
def setUp(self):
pass
def test_group(self):
IPs = ["127.0.0.1",
"127.0.1.1",
"127.1.1.1",
"127.1.0.1",
"127.2.0.1",
"127.2.1.1",
]
expected_results = {"127.0.0.0/16": 2,
"127.1.0.0/16": 2,
"127.2.0.0/16": 2,
}
a = ipgroup.IPv4Group(IPs, 16)
self.assertEqual(expected_results, a.group)
def test_group2(self):
IPs = ["127.0.0.1",
"127.0.1.1",
"127.1.1.1",
"127.1.0.1",
"127.2.0.1",
"127.2.1.1",
]
expected_results = {"127.0.0.0/24": 1,
"127.0.1.0/24": 1,
"127.1.0.0/24": 1,
"127.1.1.0/24": 1,
"127.2.0.0/24": 1,
"127.2.1.0/24": 1,
}
b = ipgroup.IPv4Group(IPs, 24)
self.assertEqual(expected_results, b.group)
def test_group3(self):
""" 'Random' test """
        # Small netblock so we don't test with more than 2**10 hosts
random_cidr = random.randint(22, 30)
network = ipaddress.IPv4Network(("172.16.58.3/%s" % random_cidr))
        # So our sample size is never bigger than the population of hosts
random_int = random.randint(1, 2**(32 - random_cidr - 1))
IPs = random.sample(set(network.hosts()), random_int)
expected_results = {("172.16.58.3/%s" % random_cidr): random_int}
c = ipgroup.IPv4Group(IPs, random_cidr)
self.assertEqual(expected_results, c.group)
def test_IPv6(self):
""" 'Random' test """
        # Small netblock so we don't test with more than 2**10 hosts
random_cidr = random.randint(118, 126)
network = ipaddress.IPv6Network(("2607:f8b0:4009:803::/%s" %
random_cidr))
        # So our sample size is never bigger than the population of hosts
random_int = random.randint(1, 2**(128 - random_cidr - 1))
IPs = random.sample(set(network.hosts()), random_int)
expected_results = {("2607:f8b0:4009:803::/%s" % random_cidr):
random_int}
d = ipgroup.IPv6Group(IPs, random_cidr)
self.assertEqual(expected_results, d.group)
def test_reGroup(self):
IPs = ["127.0.0.1",
"127.1.0.1",
"127.1.1.1",
]
expected_results1 = {"127.0.0.0/24": 1,
"127.1.0.0/24": 1,
"127.1.1.0/24": 1,
}
g = ipgroup.IPv4Group(IPs, 24)
self.assertEqual(expected_results1, g.group)
expected_results2 = {"127.0.0.0/16": 1,
"127.1.0.0/16": 2,
}
g.reGroup(16)
self.assertEqual(expected_results2, g.group)
class TestTotalAddresses(unittest.TestCase):
"""
    Tests that the totalAddresses function returns the correct number of unique
    addresses in various scenarios.
"""
def setUp(self):
pass
def test_total_address1(self):
self.assertEqual(8, ipgroup.totalAddresses("127.0.0.0/29"))
def test_total_address2(self):
total = ipgroup.totalAddresses(["192.168.1.1/16",
"127.0.0.0/16",
])
self.assertEqual(2**17, total)
def test_total_address3(self):
total = ipgroup.totalAddresses(["192.168.1.1/16",
"127.0.0.0/28"
])
self.assertEqual((2**16 + 2**4), total)
def test_total_address4(self):
total = ipgroup.totalAddresses(["192.168.127.12/24",
"192.168.127.12/30",
])
self.assertEqual(2**8, total)
def test_total_address5(self):
total = ipgroup.totalAddresses(["192.168.127.12/24",
"192.168.127.12/23",
])
self.assertEqual(2**9, total)
def test_total_address_overlapping(self):
""" For the scenario where networks will contain eachother. """
total = ipgroup.totalAddresses(["172.16.58.3/16",
"172.16.58.3/18",
"172.16.58.3/24",
])
self.assertEqual(2**16, total)
def test_total_address_overlapping2(self):
""" For the scenario where networks will contain eachother big networks
to show that the function is fast, no longer enumerating all networks.
"""
total = ipgroup.totalAddresses(["1.0.0.0/8",
"2.0.0.0/8",
"2.0.0.0/16",
"2.1.1.0/24",
"1.0.0.0/16",
"1.1.1.0/24",
"2.0.0.0/8",
])
self.assertEqual((2**24 + 2**24), total)
def test_total_address_overlapping3(self):
""" For the scenario where networks will contain eachother big networks
to show that the function is fast, no longer enumerating all networks.
"""
total = ipgroup.totalAddresses(["1.0.0.0/8",
"1.0.0.0/4",
"2.0.0.0/8",
"2.0.0.0/16",
"2.1.1.0/24",
"1.0.0.0/16",
"1.1.1.0/24",
"2.0.0.0/8",
])
self.assertEqual(2**28, total)
def test_total_address_overlap_IPv6(self):
total = ipgroup.totalAddresses(['2620:008d:8000::/48',
'2620:008d:8000:e693::/64',
])
self.assertEqual(2**80, total)
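
# Editorial sketch (not part of ipgroup): totals over overlapping networks, like
# the ones asserted above, can be computed without enumerating every host by
# first collapsing the networks with the standard library. ipgroup.totalAddresses
# may be implemented differently; this only illustrates the idea the tests rely on.
def _collapsed_total(cidrs):
    """Sum unique addresses across possibly overlapping same-version networks."""
    networks = [ipaddress.ip_network(cidr, strict=False) for cidr in cidrs]
    return sum(net.num_addresses for net in ipaddress.collapse_addresses(networks))
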
if __name__ == "__main__":
unittest.main()
| [
"ipgroup.totalAddresses",
"ipgroup.IPv6Group",
"ipaddress.IPv6Network",
"ipgroup.IPv4Group",
"unittest.main",
"ipaddress.IPv4Network",
"random.randint"
]
| [((6556, 6571), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6569, 6571), False, 'import unittest\n'), ((559, 585), 'ipgroup.IPv4Group', 'ipgroup.IPv4Group', (['IPs', '(16)'], {}), '(IPs, 16)\n', (576, 585), False, 'import ipgroup\n'), ((1178, 1204), 'ipgroup.IPv4Group', 'ipgroup.IPv4Group', (['IPs', '(24)'], {}), '(IPs, 24)\n', (1195, 1204), False, 'import ipgroup\n'), ((1409, 1431), 'random.randint', 'random.randint', (['(22)', '(30)'], {}), '(22, 30)\n', (1423, 1431), False, 'import random\n'), ((1451, 1504), 'ipaddress.IPv4Network', 'ipaddress.IPv4Network', (["('172.16.58.3/%s' % random_cidr)"], {}), "('172.16.58.3/%s' % random_cidr)\n", (1472, 1504), False, 'import ipaddress\n'), ((1603, 1649), 'random.randint', 'random.randint', (['(1)', '(2 ** (32 - random_cidr - 1))'], {}), '(1, 2 ** (32 - random_cidr - 1))\n', (1617, 1649), False, 'import random\n'), ((1799, 1834), 'ipgroup.IPv4Group', 'ipgroup.IPv4Group', (['IPs', 'random_cidr'], {}), '(IPs, random_cidr)\n', (1816, 1834), False, 'import ipgroup\n'), ((2037, 2061), 'random.randint', 'random.randint', (['(118)', '(126)'], {}), '(118, 126)\n', (2051, 2061), False, 'import random\n'), ((2081, 2143), 'ipaddress.IPv6Network', 'ipaddress.IPv6Network', (["('2607:f8b0:4009:803::/%s' % random_cidr)"], {}), "('2607:f8b0:4009:803::/%s' % random_cidr)\n", (2102, 2143), False, 'import ipaddress\n'), ((2282, 2329), 'random.randint', 'random.randint', (['(1)', '(2 ** (128 - random_cidr - 1))'], {}), '(1, 2 ** (128 - random_cidr - 1))\n', (2296, 2329), False, 'import random\n'), ((2516, 2551), 'ipgroup.IPv6Group', 'ipgroup.IPv6Group', (['IPs', 'random_cidr'], {}), '(IPs, random_cidr)\n', (2533, 2551), False, 'import ipgroup\n'), ((2924, 2950), 'ipgroup.IPv4Group', 'ipgroup.IPv4Group', (['IPs', '(24)'], {}), '(IPs, 24)\n', (2941, 2950), False, 'import ipgroup\n'), ((3566, 3624), 'ipgroup.totalAddresses', 'ipgroup.totalAddresses', (["['192.168.1.1/16', '127.0.0.0/16']"], {}), "(['192.168.1.1/16', '127.0.0.0/16'])\n", (3588, 3624), False, 'import ipgroup\n'), ((3799, 3857), 'ipgroup.totalAddresses', 'ipgroup.totalAddresses', (["['192.168.1.1/16', '127.0.0.0/28']"], {}), "(['192.168.1.1/16', '127.0.0.0/28'])\n", (3821, 3857), False, 'import ipgroup\n'), ((4040, 4106), 'ipgroup.totalAddresses', 'ipgroup.totalAddresses', (["['192.168.127.12/24', '192.168.127.12/30']"], {}), "(['192.168.127.12/24', '192.168.127.12/30'])\n", (4062, 4106), False, 'import ipgroup\n'), ((4280, 4346), 'ipgroup.totalAddresses', 'ipgroup.totalAddresses', (["['192.168.127.12/24', '192.168.127.12/23']"], {}), "(['192.168.127.12/24', '192.168.127.12/23'])\n", (4302, 4346), False, 'import ipgroup\n'), ((4603, 4681), 'ipgroup.totalAddresses', 'ipgroup.totalAddresses', (["['172.16.58.3/16', '172.16.58.3/18', '172.16.58.3/24']"], {}), "(['172.16.58.3/16', '172.16.58.3/18', '172.16.58.3/24'])\n", (4625, 4681), False, 'import ipgroup\n'), ((5079, 5202), 'ipgroup.totalAddresses', 'ipgroup.totalAddresses', (["['1.0.0.0/8', '2.0.0.0/8', '2.0.0.0/16', '2.1.1.0/24', '1.0.0.0/16',\n '1.1.1.0/24', '2.0.0.0/8']"], {}), "(['1.0.0.0/8', '2.0.0.0/8', '2.0.0.0/16',\n '2.1.1.0/24', '1.0.0.0/16', '1.1.1.0/24', '2.0.0.0/8'])\n", (5101, 5202), False, 'import ipgroup\n'), ((5766, 5902), 'ipgroup.totalAddresses', 'ipgroup.totalAddresses', (["['1.0.0.0/8', '1.0.0.0/4', '2.0.0.0/8', '2.0.0.0/16', '2.1.1.0/24',\n '1.0.0.0/16', '1.1.1.0/24', '2.0.0.0/8']"], {}), "(['1.0.0.0/8', '1.0.0.0/4', '2.0.0.0/8', '2.0.0.0/16',\n '2.1.1.0/24', '1.0.0.0/16', '1.1.1.0/24', '2.0.0.0/8'])\n", (5788, 5902), 
False, 'import ipgroup\n'), ((6325, 6400), 'ipgroup.totalAddresses', 'ipgroup.totalAddresses', (["['2620:008d:8000::/48', '2620:008d:8000:e693::/64']"], {}), "(['2620:008d:8000::/48', '2620:008d:8000:e693::/64'])\n", (6347, 6400), False, 'import ipgroup\n'), ((3474, 3512), 'ipgroup.totalAddresses', 'ipgroup.totalAddresses', (['"""127.0.0.0/29"""'], {}), "('127.0.0.0/29')\n", (3496, 3512), False, 'import ipgroup\n')] |
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from debtcollector import moves
from neutron.api.v2 import resource_helper
from neutron_lib.api.definitions import constants as api_const
from neutron_lib.api.definitions import firewall_v2
from neutron_lib.api import extensions
from neutron_lib.exceptions import firewall_v2 as f_exc
from neutron_lib.services import base as service_base
from oslo_config import cfg
import six
from neutron_fwaas._i18n import _
from neutron_fwaas.common import fwaas_constants
FirewallGroupNotFound = moves.moved_class(
f_exc.FirewallGroupNotFound, 'FirewallGroupNotFound', __name__)
FirewallGroupInUse = moves.moved_class(
f_exc.FirewallGroupInUse, 'FirewallGroupInUse', __name__)
FirewallGroupInPendingState = moves.moved_class(
f_exc.FirewallGroupInPendingState, 'FirewallGroupInPendingState', __name__)
FirewallGroupPortInvalid = moves.moved_class(
f_exc.FirewallGroupPortInvalid, 'FirewallGroupPortInvalid', __name__)
FirewallGroupPortInvalidProject = moves.moved_class(
f_exc.FirewallGroupPortInvalidProject, 'FirewallGroupPortInvalidProject',
__name__)
FirewallGroupPortInUse = moves.moved_class(
f_exc.FirewallGroupPortInUse, 'FirewallGroupPortInUse', __name__)
FirewallPolicyNotFound = moves.moved_class(
f_exc.FirewallPolicyNotFound, 'FirewallPolicyNotFound', __name__)
FirewallPolicyInUse = moves.moved_class(
f_exc.FirewallPolicyInUse, 'FirewallPolicyInUse', __name__)
FirewallPolicyConflict = moves.moved_class(
f_exc.FirewallPolicyConflict, 'FirewallPolicyConflict', __name__)
FirewallRuleSharingConflict = moves.moved_class(
f_exc.FirewallRuleSharingConflict, 'FirewallRuleSharingConflict',
__name__)
FirewallPolicySharingConflict = moves.moved_class(
f_exc.FirewallPolicySharingConflict, 'FirewallPolicySharingConflict',
__name__)
FirewallRuleNotFound = moves.moved_class(
f_exc.FirewallRuleNotFound, 'FirewallRuleNotFound', __name__)
FirewallRuleInUse = moves.moved_class(
f_exc.FirewallRuleInUse, 'FirewallRuleInUse', __name__)
FirewallRuleNotAssociatedWithPolicy = moves.moved_class(
f_exc.FirewallRuleNotAssociatedWithPolicy,
'FirewallRuleNotAssociatedWithPolicy',
__name__)
FirewallRuleInvalidProtocol = moves.moved_class(
f_exc.FirewallRuleInvalidProtocol, 'FirewallRuleInvalidProtocol',
__name__)
FirewallRuleInvalidAction = moves.moved_class(
f_exc.FirewallRuleInvalidAction, 'FirewallRuleInvalidAction',
__name__)
FirewallRuleInvalidICMPParameter = moves.moved_class(
f_exc.FirewallRuleInvalidICMPParameter,
'FirewallRuleInvalidICMPParameter', __name__)
FirewallRuleWithPortWithoutProtocolInvalid = moves.moved_class(
f_exc.FirewallRuleWithPortWithoutProtocolInvalid,
'FirewallRuleWithPortWithoutProtocolInvalid', __name__)
FirewallRuleInvalidPortValue = moves.moved_class(
f_exc.FirewallRuleInvalidPortValue, 'FirewallRuleInvalidPortValue',
__name__)
FirewallRuleInfoMissing = moves.moved_class(
f_exc.FirewallRuleInfoMissing, 'FirewallRuleInfoMissing', __name__)
FirewallIpAddressConflict = moves.moved_class(
f_exc.FirewallIpAddressConflict, 'FirewallIpAddressConflict', __name__)
FirewallInternalDriverError = moves.moved_class(
f_exc.FirewallInternalDriverError, 'FirewallInternalDriverError', __name__)
FirewallRuleConflict = moves.moved_class(
f_exc.FirewallRuleConflict, 'FirewallRuleConflict', __name__)
FirewallRuleAlreadyAssociated = moves.moved_class(
f_exc.FirewallRuleAlreadyAssociated, 'FirewallRuleAlreadyAssociated',
__name__)
default_fwg_rules_opts = [
cfg.StrOpt('ingress_action',
default=api_const.FWAAS_DENY,
help=_('Firewall group rule action allow or '
'deny or reject for ingress. '
'Default is deny.')),
cfg.StrOpt('ingress_source_ipv4_address',
default=None,
help=_('IPv4 source address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_source_ipv6_address',
default=None,
help=_('IPv6 source address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_source_port',
default=None,
help=_('Source port number or range '
'(min:max) for ingress. '
'Default is None.')),
cfg.StrOpt('ingress_destination_ipv4_address',
default=None,
help=_('IPv4 destination address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_destination_ipv6_address',
default=None,
help=_('IPv6 destination address for ingress '
'(address or address/netmask). '
                        'Default is None.')),
cfg.StrOpt('ingress_destination_port',
default=None,
help=_('Destination port number or range '
'(min:max) for ingress. '
'Default is None.')),
cfg.StrOpt('egress_action',
default=api_const.FWAAS_ALLOW,
help=_('Firewall group rule action allow or '
'deny or reject for egress. '
'Default is allow.')),
cfg.StrOpt('egress_source_ipv4_address',
default=None,
help=_('IPv4 source address for egress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('egress_source_ipv6_address',
default=None,
help=_('IPv6 source address for egress '
'(address or address/netmask). '
                        'Default is None.')),
cfg.StrOpt('egress_source_port',
default=None,
help=_('Source port number or range '
'(min:max) for egress. '
'Default is None.')),
cfg.StrOpt('egress_destination_ipv4_address',
default=None,
help=_('IPv4 destination address for egress '
'(address or address/netmask). '
                        'Default is None.')),
cfg.StrOpt('egress_destination_ipv6_address',
default=None,
help=_('IPv6 destination address for egress '
'(address or address/netmask). '
                        'Default is None.')),
cfg.StrOpt('egress_destination_port',
default=None,
help=_('Destination port number or range '
'(min:max) for egress. '
'Default is None.')),
cfg.BoolOpt('shared',
default=False,
help=_('Firewall group rule shared. '
'Default is False.')),
cfg.StrOpt('protocol',
default=None,
help=_('Network protocols (tcp, udp, ...). '
'Default is None.')),
cfg.BoolOpt('enabled',
default=True,
help=_('Firewall group rule enabled. '
'Default is True.')),
]
firewall_quota_opts = [
cfg.IntOpt('quota_firewall_group',
default=10,
help=_('Number of firewall groups allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_policy',
default=10,
help=_('Number of firewall policies allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_rule',
default=100,
help=_('Number of firewall rules allowed per tenant. '
'A negative value means unlimited.')),
]
cfg.CONF.register_opts(default_fwg_rules_opts, 'default_fwg_rules')
cfg.CONF.register_opts(firewall_quota_opts, 'QUOTAS')
# TODO(Reedip): Remove the convert_to functionality after bug1706061 is fixed.
def convert_to_string(value):
if value is not None:
return str(value)
return None
firewall_v2.RESOURCE_ATTRIBUTE_MAP[api_const.FIREWALL_RULES][
'source_port']['convert_to'] = convert_to_string
firewall_v2.RESOURCE_ATTRIBUTE_MAP[api_const.FIREWALL_RULES][
'destination_port']['convert_to'] = convert_to_string
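
# Editorial note (not from the original module): convert_to_string leaves None
# untouched and stringifies any other value, so integer port specs from the API
# behave like the documented string forms, for example:
#
#     convert_to_string(80)       # -> "80"
#     convert_to_string("80:90")  # -> "80:90" (ranges pass through unchanged)
#     convert_to_string(None)     # -> None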
class Firewall_v2(extensions.APIExtensionDescriptor):
api_definition = firewall_v2
@classmethod
def get_resources(cls):
special_mappings = {'firewall_policies': 'firewall_policy'}
plural_mappings = resource_helper.build_plural_mappings(
special_mappings, firewall_v2.RESOURCE_ATTRIBUTE_MAP)
return resource_helper.build_resource_info(
plural_mappings, firewall_v2.RESOURCE_ATTRIBUTE_MAP,
fwaas_constants.FIREWALL_V2, action_map=firewall_v2.ACTION_MAP,
register_quota=True)
@classmethod
def get_plugin_interface(cls):
return Firewallv2PluginBase
@six.add_metaclass(abc.ABCMeta)
class Firewallv2PluginBase(service_base.ServicePluginBase):
def get_plugin_type(self):
return fwaas_constants.FIREWALL_V2
def get_plugin_description(self):
return 'Firewall Service v2 Plugin'
# Firewall Group
@abc.abstractmethod
def create_firewall_group(self, context, firewall_group):
pass
@abc.abstractmethod
def delete_firewall_group(self, context, id):
pass
@abc.abstractmethod
def get_firewall_group(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_groups(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def update_firewall_group(self, context, id, firewall_group):
pass
# Firewall Policy
@abc.abstractmethod
def create_firewall_policy(self, context, firewall_policy):
pass
@abc.abstractmethod
def delete_firewall_policy(self, context, id):
pass
@abc.abstractmethod
def get_firewall_policy(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_policies(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def update_firewall_policy(self, context, id, firewall_policy):
pass
# Firewall Rule
@abc.abstractmethod
def create_firewall_rule(self, context, firewall_rule):
pass
@abc.abstractmethod
def delete_firewall_rule(self, context, id):
pass
@abc.abstractmethod
def get_firewall_rule(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_rules(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def update_firewall_rule(self, context, id, firewall_rule):
pass
@abc.abstractmethod
def insert_rule(self, context, id, rule_info):
pass
@abc.abstractmethod
def remove_rule(self, context, id, rule_info):
pass
| [
"neutron.api.v2.resource_helper.build_plural_mappings",
"neutron_fwaas._i18n._",
"six.add_metaclass",
"debtcollector.moves.moved_class",
"neutron.api.v2.resource_helper.build_resource_info",
"oslo_config.cfg.CONF.register_opts"
]
| [((1084, 1169), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallGroupNotFound', '"""FirewallGroupNotFound"""', '__name__'], {}), "(f_exc.FirewallGroupNotFound, 'FirewallGroupNotFound',\n __name__)\n", (1101, 1169), False, 'from debtcollector import moves\n'), ((1192, 1267), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallGroupInUse', '"""FirewallGroupInUse"""', '__name__'], {}), "(f_exc.FirewallGroupInUse, 'FirewallGroupInUse', __name__)\n", (1209, 1267), False, 'from debtcollector import moves\n'), ((1303, 1400), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallGroupInPendingState', '"""FirewallGroupInPendingState"""', '__name__'], {}), "(f_exc.FirewallGroupInPendingState,\n 'FirewallGroupInPendingState', __name__)\n", (1320, 1400), False, 'from debtcollector import moves\n'), ((1429, 1520), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallGroupPortInvalid', '"""FirewallGroupPortInvalid"""', '__name__'], {}), "(f_exc.FirewallGroupPortInvalid,\n 'FirewallGroupPortInvalid', __name__)\n", (1446, 1520), False, 'from debtcollector import moves\n'), ((1556, 1661), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallGroupPortInvalidProject', '"""FirewallGroupPortInvalidProject"""', '__name__'], {}), "(f_exc.FirewallGroupPortInvalidProject,\n 'FirewallGroupPortInvalidProject', __name__)\n", (1573, 1661), False, 'from debtcollector import moves\n'), ((1692, 1779), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallGroupPortInUse', '"""FirewallGroupPortInUse"""', '__name__'], {}), "(f_exc.FirewallGroupPortInUse, 'FirewallGroupPortInUse',\n __name__)\n", (1709, 1779), False, 'from debtcollector import moves\n'), ((1806, 1893), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallPolicyNotFound', '"""FirewallPolicyNotFound"""', '__name__'], {}), "(f_exc.FirewallPolicyNotFound, 'FirewallPolicyNotFound',\n __name__)\n", (1823, 1893), False, 'from debtcollector import moves\n'), ((1917, 1994), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallPolicyInUse', '"""FirewallPolicyInUse"""', '__name__'], {}), "(f_exc.FirewallPolicyInUse, 'FirewallPolicyInUse', __name__)\n", (1934, 1994), False, 'from debtcollector import moves\n'), ((2025, 2112), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallPolicyConflict', '"""FirewallPolicyConflict"""', '__name__'], {}), "(f_exc.FirewallPolicyConflict, 'FirewallPolicyConflict',\n __name__)\n", (2042, 2112), False, 'from debtcollector import moves\n'), ((2144, 2241), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallRuleSharingConflict', '"""FirewallRuleSharingConflict"""', '__name__'], {}), "(f_exc.FirewallRuleSharingConflict,\n 'FirewallRuleSharingConflict', __name__)\n", (2161, 2241), False, 'from debtcollector import moves\n'), ((2279, 2380), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallPolicySharingConflict', '"""FirewallPolicySharingConflict"""', '__name__'], {}), "(f_exc.FirewallPolicySharingConflict,\n 'FirewallPolicySharingConflict', __name__)\n", (2296, 2380), False, 'from debtcollector import moves\n'), ((2409, 2488), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallRuleNotFound', '"""FirewallRuleNotFound"""', '__name__'], {}), "(f_exc.FirewallRuleNotFound, 'FirewallRuleNotFound', __name__)\n", (2426, 2488), False, 'from debtcollector import moves\n'), ((2514, 2587), 
'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallRuleInUse', '"""FirewallRuleInUse"""', '__name__'], {}), "(f_exc.FirewallRuleInUse, 'FirewallRuleInUse', __name__)\n", (2531, 2587), False, 'from debtcollector import moves\n'), ((2631, 2744), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallRuleNotAssociatedWithPolicy', '"""FirewallRuleNotAssociatedWithPolicy"""', '__name__'], {}), "(f_exc.FirewallRuleNotAssociatedWithPolicy,\n 'FirewallRuleNotAssociatedWithPolicy', __name__)\n", (2648, 2744), False, 'from debtcollector import moves\n'), ((2784, 2881), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallRuleInvalidProtocol', '"""FirewallRuleInvalidProtocol"""', '__name__'], {}), "(f_exc.FirewallRuleInvalidProtocol,\n 'FirewallRuleInvalidProtocol', __name__)\n", (2801, 2881), False, 'from debtcollector import moves\n'), ((2915, 3008), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallRuleInvalidAction', '"""FirewallRuleInvalidAction"""', '__name__'], {}), "(f_exc.FirewallRuleInvalidAction,\n 'FirewallRuleInvalidAction', __name__)\n", (2932, 3008), False, 'from debtcollector import moves\n'), ((3049, 3156), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallRuleInvalidICMPParameter', '"""FirewallRuleInvalidICMPParameter"""', '__name__'], {}), "(f_exc.FirewallRuleInvalidICMPParameter,\n 'FirewallRuleInvalidICMPParameter', __name__)\n", (3066, 3156), False, 'from debtcollector import moves\n'), ((3207, 3334), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallRuleWithPortWithoutProtocolInvalid', '"""FirewallRuleWithPortWithoutProtocolInvalid"""', '__name__'], {}), "(f_exc.FirewallRuleWithPortWithoutProtocolInvalid,\n 'FirewallRuleWithPortWithoutProtocolInvalid', __name__)\n", (3224, 3334), False, 'from debtcollector import moves\n'), ((3371, 3470), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallRuleInvalidPortValue', '"""FirewallRuleInvalidPortValue"""', '__name__'], {}), "(f_exc.FirewallRuleInvalidPortValue,\n 'FirewallRuleInvalidPortValue', __name__)\n", (3388, 3470), False, 'from debtcollector import moves\n'), ((3502, 3591), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallRuleInfoMissing', '"""FirewallRuleInfoMissing"""', '__name__'], {}), "(f_exc.FirewallRuleInfoMissing, 'FirewallRuleInfoMissing',\n __name__)\n", (3519, 3591), False, 'from debtcollector import moves\n'), ((3621, 3714), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallIpAddressConflict', '"""FirewallIpAddressConflict"""', '__name__'], {}), "(f_exc.FirewallIpAddressConflict,\n 'FirewallIpAddressConflict', __name__)\n", (3638, 3714), False, 'from debtcollector import moves\n'), ((3746, 3843), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallInternalDriverError', '"""FirewallInternalDriverError"""', '__name__'], {}), "(f_exc.FirewallInternalDriverError,\n 'FirewallInternalDriverError', __name__)\n", (3763, 3843), False, 'from debtcollector import moves\n'), ((3868, 3947), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallRuleConflict', '"""FirewallRuleConflict"""', '__name__'], {}), "(f_exc.FirewallRuleConflict, 'FirewallRuleConflict', __name__)\n", (3885, 3947), False, 'from debtcollector import moves\n'), ((3985, 4086), 'debtcollector.moves.moved_class', 'moves.moved_class', (['f_exc.FirewallRuleAlreadyAssociated', '"""FirewallRuleAlreadyAssociated"""', '__name__'], {}), 
"(f_exc.FirewallRuleAlreadyAssociated,\n 'FirewallRuleAlreadyAssociated', __name__)\n", (4002, 4086), False, 'from debtcollector import moves\n'), ((8434, 8501), 'oslo_config.cfg.CONF.register_opts', 'cfg.CONF.register_opts', (['default_fwg_rules_opts', '"""default_fwg_rules"""'], {}), "(default_fwg_rules_opts, 'default_fwg_rules')\n", (8456, 8501), False, 'from oslo_config import cfg\n'), ((8502, 8555), 'oslo_config.cfg.CONF.register_opts', 'cfg.CONF.register_opts', (['firewall_quota_opts', '"""QUOTAS"""'], {}), "(firewall_quota_opts, 'QUOTAS')\n", (8524, 8555), False, 'from oslo_config import cfg\n'), ((9624, 9654), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (9641, 9654), False, 'import six\n'), ((9201, 9297), 'neutron.api.v2.resource_helper.build_plural_mappings', 'resource_helper.build_plural_mappings', (['special_mappings', 'firewall_v2.RESOURCE_ATTRIBUTE_MAP'], {}), '(special_mappings, firewall_v2.\n RESOURCE_ATTRIBUTE_MAP)\n', (9238, 9297), False, 'from neutron.api.v2 import resource_helper\n'), ((9321, 9504), 'neutron.api.v2.resource_helper.build_resource_info', 'resource_helper.build_resource_info', (['plural_mappings', 'firewall_v2.RESOURCE_ATTRIBUTE_MAP', 'fwaas_constants.FIREWALL_V2'], {'action_map': 'firewall_v2.ACTION_MAP', 'register_quota': '(True)'}), '(plural_mappings, firewall_v2.\n RESOURCE_ATTRIBUTE_MAP, fwaas_constants.FIREWALL_V2, action_map=\n firewall_v2.ACTION_MAP, register_quota=True)\n', (9356, 9504), False, 'from neutron.api.v2 import resource_helper\n'), ((4218, 4308), 'neutron_fwaas._i18n._', '_', (['"""Firewall group rule action allow or deny or reject for ingress. Default is deny."""'], {}), "('Firewall group rule action allow or deny or reject for ingress. Default is deny.'\n )\n", (4219, 4308), False, 'from neutron_fwaas._i18n import _\n'), ((4451, 4539), 'neutron_fwaas._i18n._', '_', (['"""IPv4 source address for ingress (address or address/netmask). Default is None."""'], {}), "('IPv4 source address for ingress (address or address/netmask). Default is None.'\n )\n", (4452, 4539), False, 'from neutron_fwaas._i18n import _\n'), ((4682, 4770), 'neutron_fwaas._i18n._', '_', (['"""IPv6 source address for ingress (address or address/netmask). Default is None."""'], {}), "('IPv6 source address for ingress (address or address/netmask). Default is None.'\n )\n", (4683, 4770), False, 'from neutron_fwaas._i18n import _\n'), ((4905, 4977), 'neutron_fwaas._i18n._', '_', (['"""Source port number or range (min:max) for ingress. Default is None."""'], {}), "('Source port number or range (min:max) for ingress. Default is None.')\n", (4906, 4977), False, 'from neutron_fwaas._i18n import _\n'), ((5130, 5223), 'neutron_fwaas._i18n._', '_', (['"""IPv4 destination address for ingress (address or address/netmask). Default is None."""'], {}), "('IPv4 destination address for ingress (address or address/netmask). Default is None.'\n )\n", (5131, 5223), False, 'from neutron_fwaas._i18n import _\n'), ((5371, 5464), 'neutron_fwaas._i18n._', '_', (['"""IPv6 destination address for ingress (address or address/netmask). Default is deny."""'], {}), "('IPv6 destination address for ingress (address or address/netmask). Default is deny.'\n )\n", (5372, 5464), False, 'from neutron_fwaas._i18n import _\n'), ((5604, 5681), 'neutron_fwaas._i18n._', '_', (['"""Destination port number or range (min:max) for ingress. Default is None."""'], {}), "('Destination port number or range (min:max) for ingress. 
Default is None.')\n", (5605, 5681), False, 'from neutron_fwaas._i18n import _\n'), ((5832, 5922), 'neutron_fwaas._i18n._', '_', (['"""Firewall group rule action allow or deny or reject for egress. Default is allow."""'], {}), "('Firewall group rule action allow or deny or reject for egress. Default is allow.'\n )\n", (5833, 5922), False, 'from neutron_fwaas._i18n import _\n'), ((6064, 6151), 'neutron_fwaas._i18n._', '_', (['"""IPv4 source address for egress (address or address/netmask). Default is None."""'], {}), "('IPv4 source address for egress (address or address/netmask). Default is None.'\n )\n", (6065, 6151), False, 'from neutron_fwaas._i18n import _\n'), ((6293, 6380), 'neutron_fwaas._i18n._', '_', (['"""IPv6 source address for egress (address or address/netmask). Default is deny."""'], {}), "('IPv6 source address for egress (address or address/netmask). Default is deny.'\n )\n", (6294, 6380), False, 'from neutron_fwaas._i18n import _\n'), ((6514, 6585), 'neutron_fwaas._i18n._', '_', (['"""Source port number or range (min:max) for egress. Default is None."""'], {}), "('Source port number or range (min:max) for egress. Default is None.')\n", (6515, 6585), False, 'from neutron_fwaas._i18n import _\n'), ((6737, 6829), 'neutron_fwaas._i18n._', '_', (['"""IPv4 destination address for egress (address or address/netmask). Default is deny."""'], {}), "('IPv4 destination address for egress (address or address/netmask). Default is deny.'\n )\n", (6738, 6829), False, 'from neutron_fwaas._i18n import _\n'), ((6976, 7068), 'neutron_fwaas._i18n._', '_', (['"""IPv6 destination address for egress (address or address/netmask). Default is deny."""'], {}), "('IPv6 destination address for egress (address or address/netmask). Default is deny.'\n )\n", (6977, 7068), False, 'from neutron_fwaas._i18n import _\n'), ((7207, 7283), 'neutron_fwaas._i18n._', '_', (['"""Destination port number or range (min:max) for egress. Default is None."""'], {}), "('Destination port number or range (min:max) for egress. Default is None.')\n", (7208, 7283), False, 'from neutron_fwaas._i18n import _\n'), ((7414, 7464), 'neutron_fwaas._i18n._', '_', (['"""Firewall group rule shared. Default is False."""'], {}), "('Firewall group rule shared. Default is False.')\n", (7415, 7464), False, 'from neutron_fwaas._i18n import _\n'), ((7569, 7625), 'neutron_fwaas._i18n._', '_', (['"""Network protocols (tcp, udp, ...). Default is None."""'], {}), "('Network protocols (tcp, udp, ...). Default is None.')\n", (7570, 7625), False, 'from neutron_fwaas._i18n import _\n'), ((7731, 7781), 'neutron_fwaas._i18n._', '_', (['"""Firewall group rule enabled. Default is True."""'], {}), "('Firewall group rule enabled. Default is True.')\n", (7732, 7781), False, 'from neutron_fwaas._i18n import _\n'), ((7922, 8011), 'neutron_fwaas._i18n._', '_', (['"""Number of firewall groups allowed per tenant. A negative value means unlimited."""'], {}), "('Number of firewall groups allowed per tenant. A negative value means unlimited.'\n )\n", (7923, 8011), False, 'from neutron_fwaas._i18n import _\n'), ((8121, 8212), 'neutron_fwaas._i18n._', '_', (['"""Number of firewall policies allowed per tenant. A negative value means unlimited."""'], {}), "('Number of firewall policies allowed per tenant. A negative value means unlimited.'\n )\n", (8122, 8212), False, 'from neutron_fwaas._i18n import _\n'), ((8321, 8409), 'neutron_fwaas._i18n._', '_', (['"""Number of firewall rules allowed per tenant. 
A negative value means unlimited."""'], {}), "('Number of firewall rules allowed per tenant. A negative value means unlimited.'\n )\n", (8322, 8409), False, 'from neutron_fwaas._i18n import _\n')] |
from ctypes import c_uint32, c_void_p, string_at
from rotypes.idldsl import define_winrt_com_method, GUID
from rotypes.inspectable import IInspectable, IUnknown
@GUID('905a0fef-bc53-11df-8c49-001e4fc686da')
class IBufferByteAccess(IUnknown):
pass
@GUID('905A0FE0-BC53-11DF-8C49-001E4FC686DA')
class IBuffer(IInspectable):
def __len__(self):
return self.Length
def __bytes__(self):
byteaccess = self.astype(IBufferByteAccess)
ptr = byteaccess.Buffer()
return string_at(ptr, len(self))
define_winrt_com_method(IBufferByteAccess, 'Buffer', retval=c_void_p)
define_winrt_com_method(IBuffer, 'get_Capacity', propget=c_uint32)
define_winrt_com_method(IBuffer, 'get_Length', propget=c_uint32)
define_winrt_com_method(IBuffer, 'put_Length', propput=c_uint32)
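
# Editorial sketch (not from the original module): given an object that exposes
# a WinRT IBuffer (`winrt_buffer` below is a placeholder name), the wrappers
# above would typically be used as:
#
#     buf = winrt_buffer.astype(IBuffer)
#     print(len(buf))    # reads the Length property (get_Length)
#     data = bytes(buf)  # copies the contents via IBufferByteAccess.Buffer()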
| [
"rotypes.idldsl.define_winrt_com_method",
"rotypes.idldsl.GUID"
]
| [((165, 209), 'rotypes.idldsl.GUID', 'GUID', (['"""905a0fef-bc53-11df-8c49-001e4fc686da"""'], {}), "('905a0fef-bc53-11df-8c49-001e4fc686da')\n", (169, 209), False, 'from rotypes.idldsl import define_winrt_com_method, GUID\n'), ((257, 301), 'rotypes.idldsl.GUID', 'GUID', (['"""905A0FE0-BC53-11DF-8C49-001E4FC686DA"""'], {}), "('905A0FE0-BC53-11DF-8C49-001E4FC686DA')\n", (261, 301), False, 'from rotypes.idldsl import define_winrt_com_method, GUID\n'), ((535, 604), 'rotypes.idldsl.define_winrt_com_method', 'define_winrt_com_method', (['IBufferByteAccess', '"""Buffer"""'], {'retval': 'c_void_p'}), "(IBufferByteAccess, 'Buffer', retval=c_void_p)\n", (558, 604), False, 'from rotypes.idldsl import define_winrt_com_method, GUID\n'), ((606, 672), 'rotypes.idldsl.define_winrt_com_method', 'define_winrt_com_method', (['IBuffer', '"""get_Capacity"""'], {'propget': 'c_uint32'}), "(IBuffer, 'get_Capacity', propget=c_uint32)\n", (629, 672), False, 'from rotypes.idldsl import define_winrt_com_method, GUID\n'), ((673, 737), 'rotypes.idldsl.define_winrt_com_method', 'define_winrt_com_method', (['IBuffer', '"""get_Length"""'], {'propget': 'c_uint32'}), "(IBuffer, 'get_Length', propget=c_uint32)\n", (696, 737), False, 'from rotypes.idldsl import define_winrt_com_method, GUID\n'), ((738, 802), 'rotypes.idldsl.define_winrt_com_method', 'define_winrt_com_method', (['IBuffer', '"""put_Length"""'], {'propput': 'c_uint32'}), "(IBuffer, 'put_Length', propput=c_uint32)\n", (761, 802), False, 'from rotypes.idldsl import define_winrt_com_method, GUID\n')] |
# -*- coding: utf-8 -*-
""" Playground documentation.
Module defining Playground Base Class
"""
import os
from abc import ABC
import yaml
import pymunk
from .utils import PositionAreaSampler
from .utils.definitions import SPACE_DAMPING, CollisionTypes, SceneElementTypes
# pylint: disable=unused-argument
# pylint: disable=line-too-long
class Playground(ABC):
""" Playground is a Base Class that manages the physical simulation.
Playground manages the interactions between Agents and Scene Elements.
Attributes:
size: size of the scene (width, length).
scene_elements: list of SceneElements present in the Playground.
fields: list of fields producing SceneElements in the Playground.
agents: list of Agents present in the Playground.
initial_agent_position: position or PositionAreaSampler,
Starting position of an agent (single agent).
done: bool, True if the playground reached termination.
"""
# pylint: disable=too-many-instance-attributes
scene_entities = []
def __init__(self, size):
# Generate Scene
self.size = size
self._width, self._length = self.size
# Initialization of the pymunk space, modelling all the physics
self.space = self._initialize_space()
# Public attributes for entities in the playground
self.scene_elements = []
self.fields = []
self.agents = []
# Private attributes for managing interactions in playground
self._disappeared_scene_elements = []
self._grasped_scene_elements = {}
self._teleported = []
# Add entities declared in the scene
for scene_entity in self.scene_entities:
self.add_scene_element(scene_entity)
self.done = False
self.initial_agent_position = None
self._handle_interactions()
self.time_limit = None
self.time_limit_reached_reward = None
self.time_test = 0
@staticmethod
def parse_configuration(key):
""" Private method that parses yaml configuration files.
Args:
key: (str) name of the playground configuration.
Returns:
Dictionary of attributes and default values.
"""
fname = 'utils/configs/playground.yml'
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(__location__, fname), 'r') as yaml_file:
default_config = yaml.load(yaml_file, Loader=yaml.SafeLoader)
return default_config[key]
@staticmethod
def _initialize_space():
""" Method to initialize Pymunk empty space for 2D physics.
Returns: Pymunk Space
"""
space = pymunk.Space()
space.gravity = pymunk.Vec2d(0., 0.)
space.damping = SPACE_DAMPING
return space
def update(self, steps):
""" Update the Playground
Update all SceneElements, Fields, Timers and Grasps
Runs the Physics engine for n steps.
Args:
steps: Number of steps
"""
for agent in self.agents:
agent.pre_step()
for _ in range(steps):
self.space.step(1. / steps)
for elem in self.scene_elements:
elem.pre_step()
if elem.follows_waypoints:
self.space.reindex_shapes_for_body(elem.pm_body)
self._fields_produce()
self._check_timers()
self._release_grasps()
self._check_teleports()
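
    # Editorial note (not from the original source): a typical engine loop calls
    # update() once per frame with its physics sub-step count, for example:
    #
    #     while not playground.done:
    #         playground.update(SIMULATION_STEPS)   # SIMULATION_STEPS is a placeholder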
def reset(self):
""" Reset the Playground to its initial state.
"""
# remove entities and filter out entities which are temporary
for entity in self.scene_elements.copy():
self.remove_scene_element(entity)
# reset and replace entities that are not temporary
for entity in self._disappeared_scene_elements.copy():
entity.reset()
self.add_scene_element(entity)
# reset fields
for entity in self.fields:
entity.reset()
# reset agents
for agent in self.agents.copy():
agent.reset()
self.remove_agent(agent)
self.add_agent(agent)
self.done = False
def add_agent(self, new_agent, tries=100):
""" Method to add an Agent to the Playground.
If the Agent has its attribute allow_overlapping set to False,
the playground will try to add it multiple times.
Args:
new_agent: Agent to add to the Playground
tries: Number of times the Playground will try to place the agent
"""
# If already there
        if new_agent in self.agents:
raise ValueError('Agent already in Playground')
# Inform agent of the playground size
new_agent.size_playground = self.size
if new_agent.allow_overlapping:
self._add_agent(new_agent)
else:
success = self._add_agent_without_ovelapping(new_agent, tries = tries)
if not success:
raise ValueError("Agent couldn't be placed without overlapping")
def _add_agent(self, agent):
""" Add an agent to the playground.
Args:
agent: Agent.
"""
self.agents.append(agent)
if agent.initial_position is not None:
pass
elif self.initial_agent_position is not None:
agent.initial_position = self.initial_agent_position
else:
raise ValueError("""Agent initial position should be defined in the playground or passed as an argument)
to the class agent""")
agent.position = agent.initial_position
for body_part in agent.parts:
self.space.add(*body_part.pm_elements)
def _add_agent_without_ovelapping(self, agent, tries=100):
""" Method to add am Agent to the Playground without overlapping.
Useful when an Agent has a random initial position, to avoid overlapping.
Args:
agent: Agent to add to the Playground
            tries: Number of times the Playground will try to place the agent
"""
trial = 0
visible_collide_parts = True
interactive_collide_parts = True
all_shapes = self.space.shapes.copy()
while (interactive_collide_parts or visible_collide_parts) and trial < tries:
self._add_agent(agent)
visible_collide_parts = False
interactive_collide_parts = False
for part in agent.parts:
visible_collide = False
interactive_collide = False
if part.pm_visible_shape is not None:
collisions = [part.pm_visible_shape.shapes_collide(shape) for shape in all_shapes]
visible_collide = any([len(collision.points) != 0 for collision in collisions])
if part.pm_interaction_shape is not None:
collisions = [part.pm_interaction_shape.shapes_collide(shape) for shape in all_shapes]
interactive_collide = any([len(collision.points) != 0 for collision in collisions])
visible_collide_parts = visible_collide or visible_collide_parts
interactive_collide_parts = interactive_collide or interactive_collide_parts
if visible_collide_parts or interactive_collide_parts:
self.remove_agent(agent)
trial += 1
if interactive_collide_parts or visible_collide_parts:
return False
return True
def _add_scene_element(self, new_scene_element, new_position):
""" Method to add a SceneElement to the Playground.
"""
if new_scene_element in self.scene_elements:
raise ValueError('Scene element already in Playground')
new_scene_element.size_playground = self.size
if new_position:
new_scene_element.position = new_scene_element.initial_position
self.space.add(*new_scene_element.pm_elements)
self.scene_elements.append(new_scene_element)
if new_scene_element in self._disappeared_scene_elements:
self._disappeared_scene_elements.remove(new_scene_element)
def _add_scene_element_without_ovelapping(self, scene_element, tries, new_position):
trial = 0
visible_collide = True
interactive_collide = True
all_shapes = self.space.shapes.copy()
while (visible_collide or interactive_collide) and trial < tries:
self._add_scene_element(scene_element, new_position)
visible_collide = False
interactive_collide = False
if scene_element.pm_visible_shape is not None:
collisions = [scene_element.pm_visible_shape.shapes_collide(shape) for shape in all_shapes]
visible_collide = any([len(collision.points) != 0 for collision in collisions])
if scene_element.pm_interaction_shape is not None:
collisions = [scene_element.pm_interaction_shape.shapes_collide(shape) for shape in all_shapes]
interactive_collide = any([len(collision.points) != 0 for collision in collisions])
if visible_collide or interactive_collide:
self.remove_scene_element(scene_element)
trial += 1
if visible_collide or interactive_collide:
return False
return True
def add_scene_element(self, scene_element, tries=100, new_position=True):
""" Method to add a SceneElement to the Playground.
If the Element has its attribute allow_overlapping set to False,
the playground will try to add it multiple times.
Useful when a SceneElement has a random initial position, to avoid overlapping.
Args:
scene_element: Scene Element to add to the Playground
            tries: Number of times the Playground will try to place the scene element
"""
if scene_element.entity_type is SceneElementTypes.FIELD:
# If already there
if scene_element in self.fields:
raise ValueError('Field already in Playground')
self.fields.append(scene_element)
else:
if scene_element in self.scene_elements:
                raise ValueError('Scene element already in Playground')
# Else
scene_element.size_playground = self.size
if scene_element.allow_overlapping:
self._add_scene_element(scene_element, new_position)
else:
success = self._add_scene_element_without_ovelapping(scene_element, tries = tries, new_position=new_position)
if not success:
raise ValueError('Entity could not be placed without overlapping')
def _remove_agents(self):
for agent in self.agents:
self.remove_agent(agent)
def remove_agent(self, agent):
if agent not in self.agents:
return False
for part in agent.parts:
self.space.remove(*part.pm_elements)
part.velocity = [0, 0, 0]
part.grasped = []
agent.initial_position = None
self.agents.remove(agent)
return True
def remove_scene_element(self, scene_element):
if scene_element not in self.scene_elements:
return False
self.space.remove(*scene_element.pm_elements)
self.scene_elements.remove(scene_element)
if not scene_element.is_temporary_entity:
self._disappeared_scene_elements.append(scene_element)
for elem in self.scene_elements:
if elem.entity_type == 'dispenser' and scene_element in elem.produced_entities:
elem.produced_entities.remove(scene_element)
for field in self.fields:
if scene_element in field.produced_entities:
field.produced_entities.remove(scene_element)
if scene_element in self._grasped_scene_elements.keys():
body_part = self._grasped_scene_elements[scene_element]
self.space.remove(*body_part.grasped)
body_part.grasped = []
# self._grasped_scene_elements.pop(scene_element)
return True
def _fields_produce(self):
for field in self.fields:
if field.can_produce():
new_entity = field.produce()
self.add_scene_element(new_entity)
def _check_timers(self):
for entity in self.scene_elements:
if entity.timed and entity.timer == 0:
list_remove, list_add = entity.activate(self)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
def _release_grasps(self):
for agent in self.agents:
for part in agent.parts:
if not part.is_holding and part.can_grasp:
for joint in part.grasped:
self.space.remove(joint)
part.grasped = []
for element_grasped, part in self._grasped_scene_elements.copy().items():
if not part.grasped:
self._grasped_scene_elements.pop(element_grasped)
def _check_teleports(self):
for agent, teleport in self._teleported:
overlaps = self.agent_overlaps_with_element(agent, teleport)
if not overlaps:
self._teleported.remove((agent, teleport))
def agent_overlaps_with_element(self, agent, element):
overlaps = False
for part in agent.parts:
if element.pm_visible_shape is not None:
overlaps = overlaps or part.pm_visible_shape.shapes_collide(element.pm_visible_shape).points != []
if element.pm_interaction_shape is not None:
overlaps = overlaps or part.pm_visible_shape.shapes_collide(element.pm_interaction_shape).points != []
return overlaps
def get_scene_element_from_shape(self, pm_shape):
"""
Returns: Returns the Scene Element associated with the pymunk shape.
"""
entity = next(iter([e for e in self.scene_elements if pm_shape in e.pm_elements]), None)
return entity
def get_agent_from_shape(self, pm_shape):
"""
Returns: Returns the Agent associated with the pymunk shape.
"""
for agent in self.agents:
if agent.owns_shape(pm_shape):
return agent
return None
def get_entity_from_shape(self, pm_shape):
"""
Returns the element associated with the pymunk shape
Args:
            pm_shape: Pymunk shape
        Returns:
            Single entity or None
"""
scene_element = self.get_scene_element_from_shape(pm_shape)
if scene_element is not None: return scene_element
for agent in self.agents:
part = agent.get_bodypart_from_shape(pm_shape)
if part is not None: return part
return None
def _get_closest_agent(self, ent):
dist_list = [(a.position[0] - ent.position[0])**2 + (a.position[1] - ent.position[1])**2 for a in self.agents]
index_min_dist = dist_list.index(min(dist_list))
closest_agent = self.agents[index_min_dist]
return closest_agent
def _agent_touches_entity(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
touched_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if touched_entity is None: return True
agent.reward += touched_entity.reward
list_remove, list_add = touched_entity.activate()
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
if touched_entity.terminate_upon_contact:
self.done = True
return True
def _agent_interacts(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
body_part = agent.get_bodypart_from_shape(arbiter.shapes[0])
interacting_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if interacting_entity is None: return True
if body_part.is_activating:
agent.reward += interacting_entity.reward
list_remove, list_add = interacting_entity.activate(body_part)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
if interacting_entity.terminate_upon_contact:
self.done = True
body_part.is_activating = False
return True
def _agent_grasps(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
body_part = agent.get_bodypart_from_shape(arbiter.shapes[0])
interacting_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if interacting_entity is None: return True
if body_part.is_grasping and not body_part.is_holding:
body_part.is_holding = True
j_1 = pymunk.PinJoint(body_part.pm_body, interacting_entity.pm_body, (0, 5), (0, 0))
j_2 = pymunk.PinJoint(body_part.pm_body, interacting_entity.pm_body, (0, -5), (0, 0))
motor = pymunk.SimpleMotor(body_part.pm_body, interacting_entity.pm_body, 0)
self.space.add(j_1, j_2, motor) # , j_3, j_4, j_5, j_6, j_7, j_8)
body_part.grasped = [j_1, j_2, motor] # , j_3, j_4, j_5, j_6, j_7, j_8]
self._grasped_scene_elements[interacting_entity] = body_part
return True
def _agent_enters_zone(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
zone_reached = self.get_scene_element_from_shape(arbiter.shapes[1])
if zone_reached is None: return True
agent.reward += zone_reached.reward
if zone_reached.terminate_upon_contact:
self.done = True
return True
def _gem_interacts(self, arbiter, space, data):
gem = self.get_scene_element_from_shape(arbiter.shapes[0])
interacting_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if interacting_entity is None or gem is None: return True
agent = self._get_closest_agent(gem)
agent.reward += interacting_entity.reward
list_remove, list_add = interacting_entity.activate(gem)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
if interacting_entity.terminate_upon_contact:
self.done = True
return True
def _agent_eats(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
body_part = agent.get_bodypart_from_shape(arbiter.shapes[0])
edible_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if edible_entity is None: return True
if body_part.is_eating:
agent.reward += edible_entity.get_reward()
self.remove_scene_element(edible_entity)
completely_eaten = edible_entity.eats()
if not completely_eaten:
self.add_scene_element(edible_entity, new_position=False)
body_part.is_eating = False
return True
def _agent_teleports(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
teleport = self.get_scene_element_from_shape(arbiter.shapes[1])
if teleport is None or teleport.target is None or (agent, teleport) in self._teleported:
return True
if teleport.target.traversable:
agent.position = (teleport.target.position[0], teleport.target.position[1],
agent.position[2])
else:
area_shape = teleport.target.physical_shape
if area_shape == 'rectangle':
width = teleport.target.width + agent.base_platform.radius * 2 + 1
length = teleport.target.length + agent.base_platform.radius * 2 + 1
angle = teleport.target.position[-1]
sampler = PositionAreaSampler(
center=[teleport.target.position[0], teleport.target.position[1]],
area_shape=area_shape,
angle=angle,
width_length=[width+2, length+2],
excl_width_length=[width, length],
)
else:
radius = teleport.target.radius + agent.base_platform.radius + 1
sampler = PositionAreaSampler(
center=[teleport.target.position[0], teleport.target.position[1]],
area_shape='circle',
radius=radius,
excl_radius=radius,
)
agent.position = sampler.sample()
if (agent, teleport) not in self._teleported:
self._teleported.append((agent, teleport.target))
return True
def _handle_interactions(self):
# Order is important
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.GRASPABLE, self._agent_grasps)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.CONTACT, self._agent_touches_entity)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.EDIBLE, self._agent_eats)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.INTERACTIVE, self._agent_interacts)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.PASSIVE, self._agent_enters_zone)
self.add_interaction(CollisionTypes.GEM, CollisionTypes.ACTIVATED_BY_GEM, self._gem_interacts)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.TELEPORT, self._agent_teleports)
def add_interaction(self, collision_type_1, collision_type_2, interaction_function):
"""
Args:
collision_type_1: collision type of the first entity
collision_type_2: collision type of the second entity
interaction_function: function that handles the interaction
Returns: None
"""
handler = self.space.add_collision_handler(collision_type_1, collision_type_2)
handler.pre_solve = interaction_function
class PlaygroundRegister:
"""
Class to register Playgrounds.
"""
playgrounds = {}
@classmethod
def register(cls, playground_name):
"""
Registers a playground
"""
def decorator(subclass):
if playground_name in cls.playgrounds:
raise ValueError(playground_name+' already registered')
cls.playgrounds[playground_name] = subclass
return subclass
return decorator
@classmethod
def filter(cls, name):
return [pg for name_pg, pg in cls.playgrounds.items() if name in name_pg]
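# Example (sketch): typical use of the register decorator and the filter helper.
# ``MyEmptyPlayground`` and its base class are hypothetical, for illustration only.
#
#     @PlaygroundRegister.register('basic/my_empty_playground')
#     class MyEmptyPlayground(Playground):
#         ...
#
#     candidates = PlaygroundRegister.filter('my_empty')  # -> [MyEmptyPlayground]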
| [
"pymunk.PinJoint",
"os.path.join",
"yaml.load",
"os.getcwd",
"pymunk.Space",
"os.path.dirname",
"pymunk.Vec2d",
"pymunk.SimpleMotor"
]
| [((2781, 2795), 'pymunk.Space', 'pymunk.Space', ([], {}), '()\n', (2793, 2795), False, 'import pymunk\n'), ((2820, 2842), 'pymunk.Vec2d', 'pymunk.Vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (2832, 2842), False, 'import pymunk\n'), ((2523, 2567), 'yaml.load', 'yaml.load', (['yaml_file'], {'Loader': 'yaml.SafeLoader'}), '(yaml_file, Loader=yaml.SafeLoader)\n', (2532, 2567), False, 'import yaml\n'), ((17527, 17605), 'pymunk.PinJoint', 'pymunk.PinJoint', (['body_part.pm_body', 'interacting_entity.pm_body', '(0, 5)', '(0, 0)'], {}), '(body_part.pm_body, interacting_entity.pm_body, (0, 5), (0, 0))\n', (17542, 17605), False, 'import pymunk\n'), ((17624, 17703), 'pymunk.PinJoint', 'pymunk.PinJoint', (['body_part.pm_body', 'interacting_entity.pm_body', '(0, -5)', '(0, 0)'], {}), '(body_part.pm_body, interacting_entity.pm_body, (0, -5), (0, 0))\n', (17639, 17703), False, 'import pymunk\n'), ((17724, 17792), 'pymunk.SimpleMotor', 'pymunk.SimpleMotor', (['body_part.pm_body', 'interacting_entity.pm_body', '(0)'], {}), '(body_part.pm_body, interacting_entity.pm_body, 0)\n', (17742, 17792), False, 'import pymunk\n'), ((2381, 2392), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2390, 2392), False, 'import os\n'), ((2394, 2419), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2409, 2419), False, 'import os\n'), ((2440, 2473), 'os.path.join', 'os.path.join', (['__location__', 'fname'], {}), '(__location__, fname)\n', (2452, 2473), False, 'import os\n')] |
import csv
import json
import pickle
import logging
import re
import pandas
import gzip
import os
import numpy as np
from random import randint, random
from tqdm import tqdm
from retriever.dense_retriever import DenseRetriever
from models.tokenization import tokenize
from typing import Union, List
class InputExample:
"""
Structure for one input example with texts, the label and a unique id
"""
def __init__(self, guid: str, texts: List[str], label: Union[int, float]):
"""
Creates one InputExample with the given texts, guid and label
str.strip() is called on both texts.
:param guid
id for the example
:param texts
the texts for the example
:param label
the label for the example
"""
self.guid = guid
self.texts = [text.strip() for text in texts]
self.label = label
def get_texts(self):
return self.texts
def get_label(self):
return self.label
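# Example (sketch): a single labelled sentence pair.
#
#     example = InputExample(guid='train-0',
#                            texts=['A man is eating food.', 'Somebody is eating.'],
#                            label=0)
#     example.get_texts()   # -> ['A man is eating food.', 'Somebody is eating.']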
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def get_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['label']
guid = "%s-%d" % (filename, id)
id += 1
if label == 'entailment':
label = 0
elif label == 'contradiction':
label = 1
else:
label = 2
examples.append(InputExample(guid=guid,
texts=[sample['s1'], sample['s2']],
label=label))
if 0 < max_examples <= len(examples):
break
return examples
def get_qa_examples(filename, max_examples=0, dev=False):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['relevant']
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=label))
if not dev:
if label == 1:
for _ in range(13):
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=label))
if 0 < max_examples <= len(examples):
break
return examples
def map_label(label):
labels = {"relevant": 0, "irrelevant": 1}
return labels[label.strip().lower()]
def get_qar_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=1.0))
if 0 < max_examples <= len(examples):
break
return examples
def get_qar_artificial_examples():
examples = []
id = 0
print('Loading passages...')
passages = []
file = open('data/msmarco/collection.tsv', 'r', encoding='utf8')
while True:
line = file.readline()
if not line:
break
line = line.rstrip('\n').split('\t')
passages.append(line[1])
print('Loaded passages')
with open('data/qar/qar_artificial_queries.csv') as f:
for i, line in enumerate(f):
queries = line.rstrip('\n').split('|')
for query in queries:
guid = "%s-%d" % ('', id)
id += 1
examples.append(InputExample(guid=guid,
texts=[query, passages[i]],
label=1.0))
return examples
def get_single_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['text']],
label=1))
if 0 < max_examples <= len(examples):
break
return examples
def get_qnli_examples(filename, max_examples=0, no_contradictions=False, fever_only=False):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['label']
if label == 'contradiction' and no_contradictions:
continue
if sample['evidence'] == '':
continue
if fever_only and sample['source'] != 'fever':
continue
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['statement'].strip(), sample['evidence'].strip()],
label=1.0))
if 0 < max_examples <= len(examples):
break
return examples
def get_retrieval_examples(filename, negative_corpus='data/msmarco/collection.tsv', max_examples=0, no_statements=True,
encoder_model=None, negative_samples_num=4):
examples = []
queries = []
passages = []
negative_passages = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
if 'evidence' in sample and sample['evidence'] == '':
continue
guid = "%s-%d" % (filename, id)
id += 1
if sample['type'] == 'question':
query = sample['question']
passage = sample['answer']
else:
query = sample['statement']
passage = sample['evidence']
query = query.strip()
passage = passage.strip()
if sample['type'] == 'statement' and no_statements:
continue
queries.append(query)
passages.append(passage)
if sample['source'] == 'natural-questions':
negative_passages.append(passage)
if max_examples == len(passages):
break
if encoder_model is not None:
# Load MSMARCO passages
logging.info('Loading MSM passages...')
with open(negative_corpus) as file:
for line in file:
p = line.rstrip('\n').split('\t')[1]
negative_passages.append(p)
logging.info('Building ANN index...')
dense_retriever = DenseRetriever(model=encoder_model, batch_size=1024, use_gpu=True)
dense_retriever.create_index_from_documents(negative_passages)
results = dense_retriever.search(queries=queries, limit=100, probes=256)
negative_samples = [
[negative_passages[p[0]] for p in r if negative_passages[p[0]] != passages[i]][:negative_samples_num]
for i, r in enumerate(results)
]
# print(queries[0])
# print(negative_samples[0][0])
for i in range(len(queries)):
texts = [queries[i], passages[i]] + negative_samples[i]
examples.append(InputExample(guid=guid,
texts=texts,
label=1.0))
else:
for i in range(len(queries)):
texts = [queries[i], passages[i]]
examples.append(InputExample(guid=guid,
texts=texts,
label=1.0))
return examples
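# Example (sketch): mining ANN-based hard negatives for each query.  The file name is
# a placeholder and ``sentence_encoder`` is assumed to be any model accepted by
# ``DenseRetriever``.
#
#     examples = get_retrieval_examples('data/qar/qar_train.json',
#                                        encoder_model=sentence_encoder,
#                                        negative_samples_num=4)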
def get_pair_input(tokenizer, sent1, sent2, max_len=256):
text = "[CLS] {} [SEP] {} [SEP]".format(sent1, sent2)
tokenized_text = tokenizer.tokenize(text)[:max_len]
indexed_tokens = tokenizer.encode(text)[:max_len]
segments_ids = []
sep_flag = False
for i in range(len(tokenized_text)):
if tokenized_text[i] == '[SEP]' and not sep_flag:
segments_ids.append(0)
sep_flag = True
elif sep_flag:
segments_ids.append(1)
else:
segments_ids.append(0)
return indexed_tokens, segments_ids
def build_batch(tokenizer, text_list, max_len=256):
token_id_list = []
segment_list = []
attention_masks = []
longest = -1
for pair in text_list:
sent1, sent2 = pair
ids, segs = get_pair_input(tokenizer, sent1, sent2, max_len=max_len)
if ids is None or segs is None:
continue
token_id_list.append(ids)
segment_list.append(segs)
attention_masks.append([1] * len(ids))
if len(ids) > longest:
longest = len(ids)
if len(token_id_list) == 0:
return None, None, None
# padding
assert (len(token_id_list) == len(segment_list))
for ii in range(len(token_id_list)):
token_id_list[ii] += [0] * (longest - len(token_id_list[ii]))
        attention_masks[ii] += [0] * (longest - len(attention_masks[ii]))  # padded positions get attention mask 0
segment_list[ii] += [1] * (longest - len(segment_list[ii]))
return token_id_list, segment_list, attention_masks
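# Example (sketch): batching two question/answer pairs.  Any tokenizer exposing
# ``tokenize``/``encode`` works; a Hugging Face BERT tokenizer is assumed here.
#
#     from transformers import BertTokenizer
#     tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#     pairs = [('who wrote hamlet', 'Hamlet was written by William Shakespeare.'),
#              ('capital of france', 'Paris is the capital of France.')]
#     token_ids, segment_ids, attention_masks = build_batch(tokenizer, pairs, max_len=256)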
def load_unsupervised_dataset(dataset_file):
print('Loading dataset...')
x = pickle.load(open(dataset_file, "rb"))
print('Done')
return x, len(x[0])
def load_supervised_dataset(dataset_file):
print('Loading dataset...')
d = pickle.load(open(dataset_file, "rb"))
print('Done')
return d[0], d[1]
| [
"json.loads",
"retriever.dense_retriever.DenseRetriever",
"logging.info",
"tqdm.tqdm.write"
]
| [((7515, 7554), 'logging.info', 'logging.info', (['"""Loading MSM passages..."""'], {}), "('Loading MSM passages...')\n", (7527, 7554), False, 'import logging\n'), ((7735, 7772), 'logging.info', 'logging.info', (['"""Building ANN index..."""'], {}), "('Building ANN index...')\n", (7747, 7772), False, 'import logging\n'), ((7799, 7865), 'retriever.dense_retriever.DenseRetriever', 'DenseRetriever', ([], {'model': 'encoder_model', 'batch_size': '(1024)', 'use_gpu': '(True)'}), '(model=encoder_model, batch_size=1024, use_gpu=True)\n', (7813, 7865), False, 'from retriever.dense_retriever import DenseRetriever\n'), ((1222, 1237), 'tqdm.tqdm.write', 'tqdm.write', (['msg'], {}), '(msg)\n', (1232, 1237), False, 'from tqdm import tqdm\n'), ((1605, 1621), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1615, 1621), False, 'import json\n'), ((2412, 2428), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2422, 2428), False, 'import json\n'), ((3481, 3497), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (3491, 3497), False, 'import json\n'), ((4912, 4928), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (4922, 4928), False, 'import json\n'), ((5524, 5540), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (5534, 5540), False, 'import json\n'), ((6616, 6632), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (6626, 6632), False, 'import json\n')] |
"""
PyTorch Profiler With TensorBoard
====================================
This tutorial demonstrates how to use TensorBoard plugin with PyTorch Profiler
to detect performance bottlenecks of the model.
Introduction
------------
PyTorch 1.8 includes an updated profiler API capable of
recording the CPU side operations as well as the CUDA kernel launches on the GPU side.
The profiler can visualize this information
in TensorBoard Plugin and provide analysis of the performance bottlenecks.
In this tutorial, we will use a simple Resnet model to demonstrate how to
use TensorBoard plugin to analyze model performance.
Setup
-----
To install ``torch`` and ``torchvision`` use the following command:
::
pip install torch torchvision
"""
######################################################################
# Steps
# -----
#
# 1. Prepare the data and model
# 2. Use profiler to record execution events
# 3. Run the profiler
# 4. Use TensorBoard to view results and analyze model performance
# 5. Improve performance with the help of profiler
# 6. Analyze performance with other advanced features
#
# 1. Prepare the data and model
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# First, import all necessary libraries:
#
import torch
import torch.nn
import torch.optim
import torch.profiler
import torch.utils.data
import torchvision.datasets
import torchvision.models
import torchvision.transforms as T
######################################################################
# Then prepare the input data. For this tutorial, we use the CIFAR10 dataset.
# Transform it to the desired format and use DataLoader to load each batch.
transform = T.Compose(
[T.Resize(224),
T.ToTensor(),
T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
######################################################################
# Next, create Resnet model, loss function, and optimizer objects.
# To run on GPU, move model and loss to GPU device.
device = torch.device("cuda:0")
model = torchvision.models.resnet18(pretrained=True).cuda(device)
criterion = torch.nn.CrossEntropyLoss().cuda(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
model.train()
######################################################################
# Define the training step for each batch of input data.
def train(data):
inputs, labels = data[0].to(device=device), data[1].to(device=device)
outputs = model(inputs)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
######################################################################
# 2. Use profiler to record execution events
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The profiler is enabled through the context manager and accepts several parameters,
# some of the most useful are:
#
# - ``schedule`` - callable that takes step (int) as a single parameter
# and returns the profiler action to perform at each step.
#
# In this example with ``wait=1, warmup=1, active=3, repeat=2``,
# profiler will skip the first step/iteration,
# start warming up on the second,
# record the following three iterations,
# after which the trace will become available and on_trace_ready (when set) is called.
# In total, the cycle repeats twice. Each cycle is called a "span" in TensorBoard plugin.
#
# During ``wait`` steps, the profiler is disabled.
# During ``warmup`` steps, the profiler starts tracing but the results are discarded.
# This is for reducing the profiling overhead.
#     The overhead at the beginning of profiling is high and can easily skew the profiling results.
# During ``active`` steps, the profiler works and records events.
# - ``on_trace_ready`` - callable that is called at the end of each cycle;
# In this example we use ``torch.profiler.tensorboard_trace_handler`` to generate result files for TensorBoard.
# After profiling, result files will be saved into the ``./log/resnet18`` directory.
# Specify this directory as a ``logdir`` parameter to analyze profile in TensorBoard.
# - ``record_shapes`` - whether to record shapes of the operator inputs.
# - ``profile_memory`` - Track tensor memory allocation/deallocation.
# - ``with_stack`` - Record source information (file and line number) for the ops.
# If the TensorBoard is launched in VSCode (`reference <https://code.visualstudio.com/docs/datascience/pytorch-support#_tensorboard-integration>`_),
# clicking a stack frame will navigate to the specific code line.
with torch.profiler.profile(
schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18'),
record_shapes=True,
with_stack=True
) as prof:
for step, batch_data in enumerate(train_loader):
if step >= (1 + 1 + 3) * 2:
break
train(batch_data)
prof.step() # Need to call this at the end of each step to notify profiler of steps' boundary.
######################################################################
# 3. Run the profiler
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Run the above code. The profiling result will be saved under ``./log/resnet18`` directory.
######################################################################
# 4. Use TensorBoard to view results and analyze model performance
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Install PyTorch Profiler TensorBoard Plugin.
#
# ::
#
# pip install torch_tb_profiler
#
######################################################################
# Launch the TensorBoard.
#
# ::
#
# tensorboard --logdir=./log
#
######################################################################
# Open the TensorBoard profile URL in Google Chrome browser or Microsoft Edge browser.
#
# ::
#
# http://localhost:6006/#pytorch_profiler
#
######################################################################
# You could see Profiler plugin page as shown below.
#
# - Overview
# .. image:: ../../_static/img/profiler_overview1.png
# :scale: 25 %
#
# The overview shows a high-level summary of model performance.
#
# The "GPU Summary" panel shows the GPU configuration and the GPU usage.
# In this example, the GPU Utilization is low.
# The details of these metrics are `here <https://github.com/guyang3532/kineto/blob/readme/tb_plugin/docs/gpu_utilization.md>`_.
#
# The "Step Time Breakdown" shows distribution of time spent in each step over different categories of execution.
# In this example, you can see the ``DataLoader`` overhead is significant.
#
# The bottom "Performance Recommendation" uses the profiling data
# to automatically highlight likely bottlenecks,
# and gives you actionable optimization suggestions.
#
# You can change the view page in the left "Views" dropdown list.
#
# .. image:: ../../_static/img/profiler_views_list.png
# :alt:
#
#
# - Operator view
# The operator view displays the performance of every PyTorch operator
# that is executed either on the host or device.
#
# .. image:: ../../_static/img/profiler_operator_view.png
# :scale: 25 %
# The "Self" duration does not include its child operatorsโ time.
# The "Total" duration includes its child operatorsโ time.
#
# - View call stack
# Click the "View Callstack" of an operator, the operators with same name but different call stacks will be shown.
# Then click a "View Callstack" in this sub-table, the call stack frames will be shown.
#
# .. image:: ../../_static/img/profiler_callstack.png
# :scale: 25 %
#
# If the TensorBoard is launched inside VSCode
# (`Launch Guide <https://devblogs.microsoft.com/python/python-in-visual-studio-code-february-2021-release/#tensorboard-integration>`_),
# clicking a call stack frame will navigate to the specific code line.
#
# .. image:: ../../_static/img/profiler_vscode.png
# :scale: 25 %
#
#
# - Kernel view
# The GPU kernel view shows all kernelsโ time spent on GPU.
#
# .. image:: ../../_static/img/profiler_kernel_view.png
# :scale: 25 %
# Mean Blocks per SM:
# Blocks per SM = Blocks of this kernel / SM number of this GPU.
# If this number is less than 1, it indicates the GPU multiprocessors are not fully utilized.
# "Mean Blocks per SM" is weighted average of all runs of this kernel name, using each runโs duration as weight.
#
# Mean Est. Achieved Occupancy:
# Est. Achieved Occupancy is defined in this columnโs tooltip.
# For most cases such as memory bandwidth bounded kernels, the higher the better.
# "Mean Est. Achieved Occupancy" is weighted average of all runs of this kernel name,
# using each runโs duration as weight.
#
# - Trace view
# The trace view shows timeline of profiled operators and GPU kernels.
# You can select it to see details as below.
#
# .. image:: ../../_static/img/profiler_trace_view1.png
# :scale: 25 %
#
# You can move the graph and zoom in/out with the help of right side toolbar.
# And keyboard can also be used to zoom and move around inside the timeline.
# The โwโ and โsโ keys zoom in centered around the mouse,
# and the โaโ and โdโ keys move the timeline left and right.
# You can hit these keys multiple times until you see a readable representation.
#
# In this example, we can see the event prefixed with ``enumerate(DataLoader)`` costs a lot of time.
# And during most of this period, the GPU is idle.
# Because this function is loading data and transforming data on host side,
# during which the GPU resource is wasted.
######################################################################
# 5. Improve performance with the help of profiler
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# At the bottom of "Overview" page, the suggestion in "Performance Recommendation" hints the bottleneck is DataLoader.
# The PyTorch DataLoader uses single process by default.
# User could enable multi-process data loading by setting the parameter ``num_workers``.
# `Here <https://pytorch.org/docs/stable/data.html#single-and-multi-process-data-loading>`_ is more details.
#
# In this example, we follow the "Performance Recommendation" and set ``num_workers`` as below,
# pass a different name such as ``./log/resnet18_4workers`` to ``tensorboard_trace_handler``, and run it again.
#
# ::
#
# train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)
#
######################################################################
# Then let’s choose the recently profiled run in the left "Runs" dropdown list.
#
# .. image:: ../../_static/img/profiler_overview2.png
# :scale: 25 %
#
# From the above view, we can see that the step time is reduced to about 58ms compared with the previous run's 121ms,
# and that the reduction comes mainly from the faster ``DataLoader``.
#
# .. image:: ../../_static/img/profiler_trace_view2.png
# :scale: 25 %
#
# From the above view, we can see that the runtime of ``enumerate(DataLoader)`` is reduced,
# and the GPU utilization is increased.
######################################################################
# 6. Analyze performance with other advanced features
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# - Memory view
# To profile memory, please add ``profile_memory=True`` in arguments of ``torch.profiler.profile``.
#
# Note: Because of the current non-optimized implementation of PyTorch profiler,
#     enabling ``profile_memory=True`` may take several minutes to finish.
# To save time, you can try our existing examples first by running:
#
# ::
#
# tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/memory_demo
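#
#     As a sketch, the training loop from above can be profiled with memory tracking
#     enabled like this (the log directory name is arbitrary):
#
#     ::
#
#       with torch.profiler.profile(
#               schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
#               on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18_memory'),
#               record_shapes=True,
#               profile_memory=True
#       ) as prof:
#           for step, batch_data in enumerate(train_loader):
#               if step >= (1 + 1 + 3) * 2:
#                   break
#               train(batch_data)
#               prof.step()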
#
# The profiler records all memory allocation/release events during profiling.
# For every specific operator, the plugin aggregates all these memory events inside its life span.
#
# .. image:: ../../_static/img/profiler_memory_view.png
# :scale: 25 %
#
# The memory type could be selected in the "Device" selection box.
# For example, "GPU0" means the following table only shows each operatorโs memory usage on GPU 0, not including CPU or other GPUs.
#
# The "Size Increase" sums up all allocation bytes and minus all the memory release bytes.
#
# The "Allocation Size" sums up all allocation bytes without considering the memory release.
#
# - Distributed view
# The plugin now supports distributed view on profiling DDP with NCCL as backend.
#
# You can try it by using existing example on Azure:
#
# ::
#
# tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/distributed_bert
#
# .. image:: ../../_static/img/profiler_distributed_view.png
# :scale: 25 %
#
# The "Computation/Communication Overview" shows computation/communication ratio and their overlapping degree.
# From this view, User can figure out load balance issue among workers.
# For example, if the computation + overlapping time of one worker is much larger than others,
# there may be a problem of load balance or this worker may be a straggler.
#
# The "Synchronizing/Communication Overview" shows the efficiency of communication.
# "Data Transfer Time" is the time for actual data exchanging.
# "Synchronizing Time" is the time for waiting and synchronizing with other workers.
#
# If one workerโs "Synchronizing Time" is much shorter than that of other workersโ,
# this worker may be a straggler which may have more computation workload than other workersโ.
#
# The "Communication Operations Stats" summarizes the detailed statistics of all communication ops in each worker.
######################################################################
# Learn More
# ----------
#
# Take a look at the following documents to continue your learning,
# and feel free to open an issue `here <https://github.com/pytorch/kineto/issues>`_.
#
# - `Pytorch TensorBoard Profiler github <https://github.com/pytorch/kineto/tree/master/tb_plugin>`_
# - `torch.profiler API <https://pytorch.org/docs/master/profiler.html>`_
| [
"torch.profiler.tensorboard_trace_handler",
"torch.nn.CrossEntropyLoss",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"torch.profiler.schedule",
"torch.device"
]
| [((1888, 1955), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'batch_size': '(32)', 'shuffle': '(True)'}), '(train_set, batch_size=32, shuffle=True)\n', (1915, 1955), False, 'import torch\n'), ((2157, 2179), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2169, 2179), False, 'import torch\n'), ((1682, 1695), 'torchvision.transforms.Resize', 'T.Resize', (['(224)'], {}), '(224)\n', (1690, 1695), True, 'import torchvision.transforms as T\n'), ((1702, 1714), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1712, 1714), True, 'import torchvision.transforms as T\n'), ((1721, 1766), 'torchvision.transforms.Normalize', 'T.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1732, 1766), True, 'import torchvision.transforms as T\n'), ((2258, 2285), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2283, 2285), False, 'import torch\n'), ((4766, 4827), 'torch.profiler.schedule', 'torch.profiler.schedule', ([], {'wait': '(1)', 'warmup': '(1)', 'active': '(3)', 'repeat': '(2)'}), '(wait=1, warmup=1, active=3, repeat=2)\n', (4789, 4827), False, 'import torch\n'), ((4852, 4910), 'torch.profiler.tensorboard_trace_handler', 'torch.profiler.tensorboard_trace_handler', (['"""./log/resnet18"""'], {}), "('./log/resnet18')\n", (4892, 4910), False, 'import torch\n')] |
import typing as t
from gradient_boosting_model.config.core import config
import numpy as np
import pandas as pd
from marshmallow import fields, Schema, ValidationError
class HouseDataInputSchema(Schema):
Alley = fields.Str(allow_none=True)
BedroomAbvGr = fields.Integer()
BldgType = fields.Str()
BsmtCond = fields.Str(allow_none=True)
BsmtExposure = fields.Str(allow_none=True)
BsmtFinSF1 = fields.Float(allow_none=True)
BsmtFinSF2 = fields.Float(allow_none=True)
BsmtFinType1 = fields.Str(allow_none=True)
BsmtFinType2 = fields.Str(allow_none=True)
BsmtFullBath = fields.Float(allow_none=True)
BsmtHalfBath = fields.Float(allow_none=True)
BsmtQual = fields.Str(allow_none=True)
BsmtUnfSF = fields.Float()
CentralAir = fields.Str()
Condition1 = fields.Str()
Condition2 = fields.Str()
Electrical = fields.Str(allow_none=True)
EnclosedPorch = fields.Integer()
ExterCond = fields.Str()
ExterQual = fields.Str()
Exterior1st = fields.Str(allow_none=True)
Exterior2nd = fields.Str(allow_none=True)
Fence = fields.Str(allow_none=True)
FireplaceQu = fields.Str(allow_none=True)
Fireplaces = fields.Integer()
Foundation = fields.Str()
FullBath = fields.Integer()
Functional = fields.Str(allow_none=True)
GarageArea = fields.Float()
GarageCars = fields.Float()
GarageCond = fields.Str(allow_none=True)
GarageFinish = fields.Str(allow_none=True)
GarageQual = fields.Str(allow_none=True)
GarageType = fields.Str(allow_none=True)
GarageYrBlt = fields.Float(allow_none=True)
GrLivArea = fields.Integer()
HalfBath = fields.Integer()
Heating = fields.Str()
HeatingQC = fields.Str()
HouseStyle = fields.Str()
Id = fields.Integer()
KitchenAbvGr = fields.Integer()
KitchenQual = fields.Str(allow_none=True)
LandContour = fields.Str()
LandSlope = fields.Str()
LotArea = fields.Integer()
LotConfig = fields.Str()
LotFrontage = fields.Float(allow_none=True)
LotShape = fields.Str()
LowQualFinSF = fields.Integer()
MSSubClass = fields.Integer()
MSZoning = fields.Str(allow_none=True)
MasVnrArea = fields.Float(allow_none=True)
MasVnrType = fields.Str(allow_none=True)
MiscFeature = fields.Str(allow_none=True)
MiscVal = fields.Integer()
MoSold = fields.Integer()
Neighborhood = fields.Str()
OpenPorchSF = fields.Integer()
OverallCond = fields.Integer()
OverallQual = fields.Integer()
PavedDrive = fields.Str()
PoolArea = fields.Integer()
PoolQC = fields.Str(allow_none=True)
RoofMatl = fields.Str()
RoofStyle = fields.Str()
SaleCondition = fields.Str()
SaleType = fields.Str(allow_none=True)
ScreenPorch = fields.Integer()
Street = fields.Str()
TotRmsAbvGrd = fields.Integer()
TotalBsmtSF = fields.Float()
Utilities = fields.Str(allow_none=True)
WoodDeckSF = fields.Integer()
YearBuilt = fields.Integer()
YearRemodAdd = fields.Integer()
YrSold = fields.Integer()
FirstFlrSF = fields.Integer()
SecondFlrSF = fields.Integer()
ThreeSsnPortch = fields.Integer()
def drop_na_inputs(*, input_data: pd.DataFrame) -> pd.DataFrame:
"""Check model inputs for na values and filter."""
validated_data = input_data.copy()
if input_data[config.model_config.numerical_na_not_allowed].isnull().any().any():
validated_data = validated_data.dropna(
axis=0, subset=config.model_config.numerical_na_not_allowed
)
return validated_data
def validate_inputs(
*, input_data: pd.DataFrame
) -> t.Tuple[pd.DataFrame, t.Optional[dict]]:
"""Check model inputs for unprocessable values."""
    # rename fields whose raw names are not valid Python identifiers (they begin with numbers)
input_data.rename(columns=config.model_config.variables_to_rename, inplace=True)
validated_data = drop_na_inputs(input_data=input_data)
# set many=True to allow passing in a list
schema = HouseDataInputSchema(many=True)
errors = None
try:
# replace numpy nans so that Marshmallow can validate
schema.load(validated_data.replace({np.nan: None}).to_dict(orient="records"))
except ValidationError as exc:
errors = exc.messages
return validated_data, errors
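# Example usage (sketch): ``df`` stands for a DataFrame that already follows the
# schema above (e.g. the raw house-price data read with ``pandas.read_csv``).
#
#     validated_df, errors = validate_inputs(input_data=df)
#     if errors:
#         raise ValueError(f"Unprocessable model inputs: {errors}")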
| [
"marshmallow.fields.Integer",
"marshmallow.fields.Float",
"marshmallow.fields.Str"
]
| [((221, 248), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (231, 248), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((268, 284), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (282, 284), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((300, 312), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (310, 312), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((328, 355), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (338, 355), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((375, 402), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (385, 402), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((420, 449), 'marshmallow.fields.Float', 'fields.Float', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (432, 449), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((467, 496), 'marshmallow.fields.Float', 'fields.Float', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (479, 496), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((516, 543), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (526, 543), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((563, 590), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (573, 590), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((610, 639), 'marshmallow.fields.Float', 'fields.Float', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (622, 639), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((659, 688), 'marshmallow.fields.Float', 'fields.Float', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (671, 688), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((704, 731), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (714, 731), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((748, 762), 'marshmallow.fields.Float', 'fields.Float', ([], {}), '()\n', (760, 762), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((780, 792), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (790, 792), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((810, 822), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (820, 822), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((840, 852), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (850, 852), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((870, 897), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (880, 897), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((918, 934), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (932, 934), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((951, 963), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (961, 963), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((980, 992), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (990, 992), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1011, 1038), 'marshmallow.fields.Str', 'fields.Str', ([], 
{'allow_none': '(True)'}), '(allow_none=True)\n', (1021, 1038), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1057, 1084), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1067, 1084), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1097, 1124), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1107, 1124), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1143, 1170), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1153, 1170), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1188, 1204), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (1202, 1204), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1222, 1234), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (1232, 1234), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1250, 1266), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (1264, 1266), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1284, 1311), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1294, 1311), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1329, 1343), 'marshmallow.fields.Float', 'fields.Float', ([], {}), '()\n', (1341, 1343), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1361, 1375), 'marshmallow.fields.Float', 'fields.Float', ([], {}), '()\n', (1373, 1375), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1393, 1420), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1403, 1420), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1440, 1467), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1450, 1467), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1485, 1512), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1495, 1512), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1530, 1557), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1540, 1557), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1576, 1605), 'marshmallow.fields.Float', 'fields.Float', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1588, 1605), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1622, 1638), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (1636, 1638), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1654, 1670), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (1668, 1670), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1685, 1697), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (1695, 1697), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1714, 1726), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (1724, 1726), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1744, 1756), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (1754, 1756), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1766, 1782), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', 
(1780, 1782), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1802, 1818), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (1816, 1818), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1837, 1864), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1847, 1864), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1883, 1895), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (1893, 1895), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1912, 1924), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (1922, 1924), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1939, 1955), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (1953, 1955), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((1972, 1984), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (1982, 1984), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2003, 2032), 'marshmallow.fields.Float', 'fields.Float', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (2015, 2032), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2048, 2060), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (2058, 2060), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2080, 2096), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (2094, 2096), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2114, 2130), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (2128, 2130), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2146, 2173), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (2156, 2173), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2191, 2220), 'marshmallow.fields.Float', 'fields.Float', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (2203, 2220), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2238, 2265), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (2248, 2265), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2284, 2311), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (2294, 2311), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2326, 2342), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (2340, 2342), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2356, 2372), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (2370, 2372), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2392, 2404), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (2402, 2404), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2423, 2439), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (2437, 2439), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2458, 2474), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (2472, 2474), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2493, 2509), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (2507, 2509), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2527, 2539), 'marshmallow.fields.Str', 
'fields.Str', ([], {}), '()\n', (2537, 2539), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2555, 2571), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (2569, 2571), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2585, 2612), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (2595, 2612), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2628, 2640), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (2638, 2640), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2657, 2669), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (2667, 2669), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2690, 2702), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (2700, 2702), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2718, 2745), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (2728, 2745), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2764, 2780), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (2778, 2780), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2794, 2806), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (2804, 2806), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2826, 2842), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (2840, 2842), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2861, 2875), 'marshmallow.fields.Float', 'fields.Float', ([], {}), '()\n', (2873, 2875), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2892, 2919), 'marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (2902, 2919), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2937, 2953), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (2951, 2953), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((2970, 2986), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (2984, 2986), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((3006, 3022), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (3020, 3022), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((3036, 3052), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (3050, 3052), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((3070, 3086), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (3084, 3086), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((3105, 3121), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (3119, 3121), False, 'from marshmallow import fields, Schema, ValidationError\n'), ((3143, 3159), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (3157, 3159), False, 'from marshmallow import fields, Schema, ValidationError\n')] |
import os
from rlbot.agents.base_agent import BOT_CONFIG_AGENT_HEADER
from rlbot.agents.base_dotnet_agent import BaseDotNetAgent
from rlbot.parsing.custom_config import ConfigHeader, ConfigObject
class DotNetBot(BaseDotNetAgent):
def get_port_file_path(self):
# Look for a port.cfg file in the same directory as THIS python file.
return os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__), 'port.cfg'))
def load_config(self, config_header: ConfigHeader):
self.dotnet_executable_path = config_header.getpath('dotnet_executable_path')
self.logger.info(".NET executable is configured as {}".format(self.dotnet_executable_path))
@staticmethod
def create_agent_configurations(config: ConfigObject):
params = config.get_header(BOT_CONFIG_AGENT_HEADER)
params.add_value('dotnet_executable_path', str, default=None,
description='Relative path to the executable that runs the .NET executable.')
| [
"os.path.dirname",
"os.getcwd"
]
| [((399, 410), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (408, 410), False, 'import os\n'), ((412, 437), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (427, 437), False, 'import os\n')] |
import torch
import pdb
from torch.autograd import gradcheck
import fused_conv_bias_relu
class ConvBiasReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, padding, stride):
outputs = fused_conv_bias_relu.forward([x, weight, bias], padding, stride)
ctx.save_for_backward(x, weight, outputs[0])
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None
class ConvBiasMaskReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, mask, padding, stride):
outputs = fused_conv_bias_relu.forward_mask([x, weight, bias, mask], padding, stride)
ctx.save_for_backward(x, weight, outputs[0])
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None, None
class ConvBias_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, padding, stride):
outputs = fused_conv_bias_relu.forward_no_relu([x, weight, bias], padding, stride)
ctx.save_for_backward(x, weight)
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward_no_relu(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None
ConvBiasReLU = ConvBiasReLU_.apply
ConvBiasMaskReLU = ConvBiasMaskReLU_.apply
ConvBias = ConvBias_.apply
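# Usage sketch: assumes the ``fused_conv_bias_relu`` CUDA extension is built and a GPU
# is available; the half-precision tensor shapes below are illustrative guesses only.
#
#     x = torch.randn(8, 64, 32, 32, dtype=torch.half, device='cuda', requires_grad=True)
#     w = torch.randn(128, 64, 3, 3, dtype=torch.half, device='cuda', requires_grad=True)
#     b = torch.randn(1, 128, 1, 1, dtype=torch.half, device='cuda', requires_grad=True)
#     out = ConvBiasReLU(x, w, b, 1, 1)   # padding=1, stride=1
#     out.float().sum().backward()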
| [
"torch.cuda.amp.custom_fwd",
"fused_conv_bias_relu.backward_no_relu",
"fused_conv_bias_relu.forward_mask",
"fused_conv_bias_relu.forward",
"fused_conv_bias_relu.forward_no_relu",
"fused_conv_bias_relu.backward"
]
| [((160, 209), 'torch.cuda.amp.custom_fwd', 'torch.cuda.amp.custom_fwd', ([], {'cast_inputs': 'torch.half'}), '(cast_inputs=torch.half)\n', (185, 209), False, 'import torch\n'), ((889, 938), 'torch.cuda.amp.custom_fwd', 'torch.cuda.amp.custom_fwd', ([], {'cast_inputs': 'torch.half'}), '(cast_inputs=torch.half)\n', (914, 938), False, 'import torch\n'), ((1633, 1682), 'torch.cuda.amp.custom_fwd', 'torch.cuda.amp.custom_fwd', ([], {'cast_inputs': 'torch.half'}), '(cast_inputs=torch.half)\n', (1658, 1682), False, 'import torch\n'), ((284, 348), 'fused_conv_bias_relu.forward', 'fused_conv_bias_relu.forward', (['[x, weight, bias]', 'padding', 'stride'], {}), '([x, weight, bias], padding, stride)\n', (312, 348), False, 'import fused_conv_bias_relu\n'), ((700, 756), 'fused_conv_bias_relu.backward', 'fused_conv_bias_relu.backward', (['bwd_args', 'padding', 'stride'], {}), '(bwd_args, padding, stride)\n', (729, 756), False, 'import fused_conv_bias_relu\n'), ((1019, 1094), 'fused_conv_bias_relu.forward_mask', 'fused_conv_bias_relu.forward_mask', (['[x, weight, bias, mask]', 'padding', 'stride'], {}), '([x, weight, bias, mask], padding, stride)\n', (1052, 1094), False, 'import fused_conv_bias_relu\n'), ((1446, 1502), 'fused_conv_bias_relu.backward', 'fused_conv_bias_relu.backward', (['bwd_args', 'padding', 'stride'], {}), '(bwd_args, padding, stride)\n', (1475, 1502), False, 'import fused_conv_bias_relu\n'), ((1757, 1829), 'fused_conv_bias_relu.forward_no_relu', 'fused_conv_bias_relu.forward_no_relu', (['[x, weight, bias]', 'padding', 'stride'], {}), '([x, weight, bias], padding, stride)\n', (1793, 1829), False, 'import fused_conv_bias_relu\n'), ((2169, 2233), 'fused_conv_bias_relu.backward_no_relu', 'fused_conv_bias_relu.backward_no_relu', (['bwd_args', 'padding', 'stride'], {}), '(bwd_args, padding, stride)\n', (2206, 2233), False, 'import fused_conv_bias_relu\n')] |
from jogo import desenha_jogo
from random import randint
import sys
def input_cria_usuario():
usuario = dict()
usuario['nome'] = input('Informe o seu nome: ')
usuario['pontos'] = 0
usuario['desafiado'] = False
return usuario
def comeco(j1, j2):
    # Ignores the values passed in and always draws the starting player from {1, 2}
    j1 = 1
    j2 = 2
    escolhido = randint(j1, j2)
    return escolhido
# changed here
def completou(acertos, pala, jogador_adivinhao):  # receives the letters guessed correctly and checks whether the word is complete
    if acertos == len(pala):  # and here
print(f'\t\t\t\t\t \033[37mJogador >> {jogador_adivinhao} << venceu !\033[m')
print("""
\033[35m
_____ ___ ___ ___ _______
/ ___| / | / |/ | | ____|
| | / | / /| /| | | |__
| | _ / /| | / / |__/ | | | __|
| |_| | / ___ | / / | | | |____
\_____//_/ |_| /_/ |_| |_______|
_____ _ _ ______ ______
/ _ \ | | / / | _____| | _ |
| | | | | | / / | |__ | |_| |
| | | | | | / / | __| | _ /
| |_| | | |/ / | |____ | | \ |
\_____/ |___/ |______| |_| \_|\033[m
""")
| [
"random.randint"
]
| [((305, 320), 'random.randint', 'randint', (['j1', 'j2'], {}), '(j1, j2)\n', (312, 320), False, 'from random import randint\n')] |
import numpy as np
def denormalize(x, x_min, x_max):
if x_max is None:
_range = 1
else:
_range = (x_max - x_min)
return x * _range + x_min
def normalize(x, x_min=None, x_max=None, return_bounds=False, estimate_bounds_if_none=True):
# if the bounds should be estimated if none do it for both
if estimate_bounds_if_none and x_min is None:
x_min = np.min(x, axis=0)
if estimate_bounds_if_none and x_max is None:
x_max = np.max(x, axis=0)
    # if they are still None, fall back to defaults (0 and 1) to avoid an exception
    if x_min is None:
        x_min = np.zeros(x.shape[1:])
    if x_max is None:
        x_max = np.ones(x.shape[1:])
# calculate the denominator
denom = x_max - x_min
# we can not divide by zero -> plus small epsilon
denom += 1e-30
# normalize the actual values
N = (x - x_min) / denom
# return with or without bounds
if not return_bounds:
return N
else:
return N, x_min, x_max
def standardize(x, return_bounds=False):
mean = np.mean(x, axis=0)
std = np.std(x, axis=0)
# standardize
val = (x - mean) / std
if not return_bounds:
return val
else:
return val, mean, std
def destandardize(x, mean, std):
return (x * std) + mean
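# A small, self-contained usage example (sketch): normalize with estimated bounds,
# then map the values back with denormalize.
if __name__ == "__main__":
    X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
    N, x_min, x_max = normalize(X, return_bounds=True)
    print(np.allclose(denormalize(N, x_min, x_max), X))  # True (up to the 1e-30 epsilon)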
| [
"numpy.mean",
"numpy.ones",
"numpy.std",
"numpy.max",
"numpy.zeros",
"numpy.min"
]
| [((1035, 1053), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1042, 1053), True, 'import numpy as np\n'), ((1064, 1081), 'numpy.std', 'np.std', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1070, 1081), True, 'import numpy as np\n'), ((396, 413), 'numpy.min', 'np.min', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (402, 413), True, 'import numpy as np\n'), ((480, 497), 'numpy.max', 'np.max', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (486, 497), True, 'import numpy as np\n'), ((605, 615), 'numpy.zeros', 'np.zeros', ([], {}), '()\n', (613, 615), True, 'import numpy as np\n'), ((654, 663), 'numpy.ones', 'np.ones', ([], {}), '()\n', (661, 663), True, 'import numpy as np\n')] |
"""Check new
Revision ID: 92235b77ea53
Revises: 381fdb66ec27
Create Date: 2017-10-14 02:38:51.007307
"""
# revision identifiers, used by Alembic.
revision = '92235b77ea53'
down_revision = '381fdb66ec27'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_ActiveTranslationMessages_category', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_datetime', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_fmt', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_from_developer', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_key', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_namespace', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_position', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_same_tool', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_taken_from_default', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_tool_id', table_name='ActiveTranslationMessages')
op.drop_index('ix_Apps_composer', table_name='Apps')
op.drop_index('ix_Apps_creation_date', table_name='Apps')
op.drop_index('ix_Apps_last_access_date', table_name='Apps')
op.drop_index('ix_Apps_modification_date', table_name='Apps')
op.drop_index('ix_Apps_name', table_name='Apps')
op.drop_index('ix_Apps_owner_id', table_name='Apps')
op.drop_index('ix_Apps_unique_id', table_name='Apps')
op.drop_index('ix_GoLabOAuthUsers_display_name', table_name='GoLabOAuthUsers')
op.drop_index('ix_GoLabOAuthUsers_email', table_name='GoLabOAuthUsers')
op.drop_index('ix_Languages_language', table_name='Languages')
op.drop_index('ix_RepositoryApps_adaptable', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_contents_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_downloaded_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_external_id', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_failing', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_failing_since', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_change', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_check', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_download_change', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_contents_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_downloaded_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_time', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_name', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_repository', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_translatable', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_url', table_name='RepositoryApps')
op.drop_index('ix_TranslatedApps_url', table_name='TranslatedApps')
op.drop_index('ix_TranslationBundles_from_developer', table_name='TranslationBundles')
op.drop_index('ix_TranslationBundles_language', table_name='TranslationBundles')
op.drop_index('ix_TranslationBundles_target', table_name='TranslationBundles')
op.drop_index('ix_TranslationCurrentActiveUsers_last_check', table_name='TranslationCurrentActiveUsers')
op.drop_index('ix_TranslationExternalSuggestions_engine', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_human_key', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_human_key_hash', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_language', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_origin_language', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationKeySuggestions_key', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationKeySuggestions_language', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationKeySuggestions_target', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationMessageHistory_category', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_datetime', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_fmt', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_from_developer', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_key', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_namespace', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_parent_translation_id', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_position', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_same_tool', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_taken_from_default', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_tool_id', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationNotificationRecipients_created', table_name='TranslationNotificationRecipients')
op.drop_index('ix_TranslationNotificationRecipients_email', table_name='TranslationNotificationRecipients')
op.drop_index('ix_TranslationSubscriptions_last_check', table_name='TranslationSubscriptions')
op.drop_index('ix_TranslationSubscriptions_mechanism', table_name='TranslationSubscriptions')
op.drop_index('ix_TranslationSyncLogs_end_datetime', table_name='TranslationSyncLogs')
op.drop_index('ix_TranslationSyncLogs_start_datetime', table_name='TranslationSyncLogs')
op.drop_index('ix_TranslationUrls_automatic', table_name='TranslationUrls')
op.drop_index('ix_TranslationUrls_url', table_name='TranslationUrls')
op.drop_index('ix_TranslationValueSuggestions_human_key', table_name='TranslationValueSuggestions')
op.drop_index('ix_TranslationValueSuggestions_language', table_name='TranslationValueSuggestions')
op.drop_index('ix_TranslationValueSuggestions_target', table_name='TranslationValueSuggestions')
op.drop_index('ix_Users_creation_date', table_name='Users')
op.drop_index('ix_Users_last_access_date', table_name='Users')
op.create_index(op.f('ix_ActiveTranslationMessages_category'), 'ActiveTranslationMessages', ['category'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_datetime'), 'ActiveTranslationMessages', ['datetime'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_fmt'), 'ActiveTranslationMessages', ['fmt'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_from_developer'), 'ActiveTranslationMessages', ['from_developer'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_key'), 'ActiveTranslationMessages', ['key'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_namespace'), 'ActiveTranslationMessages', ['namespace'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_position'), 'ActiveTranslationMessages', ['position'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_same_tool'), 'ActiveTranslationMessages', ['same_tool'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_taken_from_default'), 'ActiveTranslationMessages', ['taken_from_default'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_tool_id'), 'ActiveTranslationMessages', ['tool_id'], unique=False)
op.create_index(op.f('ix_Apps_composer'), 'Apps', ['composer'], unique=False)
op.create_index(op.f('ix_Apps_creation_date'), 'Apps', ['creation_date'], unique=False)
op.create_index(op.f('ix_Apps_last_access_date'), 'Apps', ['last_access_date'], unique=False)
op.create_index(op.f('ix_Apps_modification_date'), 'Apps', ['modification_date'], unique=False)
op.create_index(op.f('ix_Apps_name'), 'Apps', ['name'], unique=False)
op.create_index(op.f('ix_Apps_owner_id'), 'Apps', ['owner_id'], unique=False)
op.create_index(op.f('ix_Apps_unique_id'), 'Apps', ['unique_id'], unique=True)
op.create_index(op.f('ix_GoLabOAuthUsers_display_name'), 'GoLabOAuthUsers', ['display_name'], unique=False)
op.create_index(op.f('ix_GoLabOAuthUsers_email'), 'GoLabOAuthUsers', ['email'], unique=True)
op.create_index(op.f('ix_Languages_language'), 'Languages', ['language'], unique=True)
op.create_index(op.f('ix_RepositoryApps_adaptable'), 'RepositoryApps', ['adaptable'], unique=False)
op.create_index(op.f('ix_RepositoryApps_contents_hash'), 'RepositoryApps', ['contents_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_downloaded_hash'), 'RepositoryApps', ['downloaded_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_external_id'), 'RepositoryApps', ['external_id'], unique=False)
op.create_index(op.f('ix_RepositoryApps_failing_since'), 'RepositoryApps', ['failing_since'], unique=False)
op.create_index(op.f('ix_RepositoryApps_failing'), 'RepositoryApps', ['failing'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_change'), 'RepositoryApps', ['last_change'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_check'), 'RepositoryApps', ['last_check'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_download_change'), 'RepositoryApps', ['last_download_change'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_contents_hash'), 'RepositoryApps', ['last_processed_contents_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_downloaded_hash'), 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_time'), 'RepositoryApps', ['last_processed_time'], unique=False)
op.create_index(op.f('ix_RepositoryApps_name'), 'RepositoryApps', ['name'], unique=False)
op.create_index(op.f('ix_RepositoryApps_repository'), 'RepositoryApps', ['repository'], unique=False)
op.create_index(op.f('ix_RepositoryApps_translatable'), 'RepositoryApps', ['translatable'], unique=False)
op.create_index(op.f('ix_RepositoryApps_url'), 'RepositoryApps', ['url'], unique=False)
op.create_index(op.f('ix_TranslatedApps_url'), 'TranslatedApps', ['url'], unique=True)
op.create_index(op.f('ix_TranslationBundles_from_developer'), 'TranslationBundles', ['from_developer'], unique=False)
op.create_index(op.f('ix_TranslationBundles_language'), 'TranslationBundles', ['language'], unique=False)
op.create_index(op.f('ix_TranslationBundles_target'), 'TranslationBundles', ['target'], unique=False)
op.create_index(op.f('ix_TranslationCurrentActiveUsers_last_check'), 'TranslationCurrentActiveUsers', ['last_check'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_engine'), 'TranslationExternalSuggestions', ['engine'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_human_key_hash'), 'TranslationExternalSuggestions', ['human_key_hash'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_human_key'), 'TranslationExternalSuggestions', ['human_key'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_language'), 'TranslationExternalSuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_origin_language'), 'TranslationExternalSuggestions', ['origin_language'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_key'), 'TranslationKeySuggestions', ['key'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_language'), 'TranslationKeySuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_target'), 'TranslationKeySuggestions', ['target'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_category'), 'TranslationMessageHistory', ['category'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_datetime'), 'TranslationMessageHistory', ['datetime'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_fmt'), 'TranslationMessageHistory', ['fmt'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_from_developer'), 'TranslationMessageHistory', ['from_developer'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_key'), 'TranslationMessageHistory', ['key'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_namespace'), 'TranslationMessageHistory', ['namespace'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_parent_translation_id'), 'TranslationMessageHistory', ['parent_translation_id'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_position'), 'TranslationMessageHistory', ['position'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_same_tool'), 'TranslationMessageHistory', ['same_tool'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_taken_from_default'), 'TranslationMessageHistory', ['taken_from_default'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_tool_id'), 'TranslationMessageHistory', ['tool_id'], unique=False)
op.create_index(op.f('ix_TranslationNotificationRecipients_created'), 'TranslationNotificationRecipients', ['created'], unique=False)
op.create_index(op.f('ix_TranslationNotificationRecipients_email'), 'TranslationNotificationRecipients', ['email'], unique=True)
op.create_index(op.f('ix_TranslationSubscriptions_last_check'), 'TranslationSubscriptions', ['last_check'], unique=False)
op.create_index(op.f('ix_TranslationSubscriptions_mechanism'), 'TranslationSubscriptions', ['mechanism'], unique=False)
op.create_index(op.f('ix_TranslationSyncLogs_end_datetime'), 'TranslationSyncLogs', ['end_datetime'], unique=False)
op.create_index(op.f('ix_TranslationSyncLogs_start_datetime'), 'TranslationSyncLogs', ['start_datetime'], unique=False)
op.create_index(op.f('ix_TranslationUrls_automatic'), 'TranslationUrls', ['automatic'], unique=False)
op.create_index(op.f('ix_TranslationUrls_url'), 'TranslationUrls', ['url'], unique=True)
op.create_index(op.f('ix_TranslationValueSuggestions_human_key'), 'TranslationValueSuggestions', ['human_key'], unique=False)
op.create_index(op.f('ix_TranslationValueSuggestions_language'), 'TranslationValueSuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationValueSuggestions_target'), 'TranslationValueSuggestions', ['target'], unique=False)
op.create_index(op.f('ix_Users_creation_date'), 'Users', ['creation_date'], unique=False)
op.create_index(op.f('ix_Users_last_access_date'), 'Users', ['last_access_date'], unique=False)
# op.create_unique_constraint(None, 'ActiveTranslationMessages', ['bundle_id', 'key'])
# op.create_unique_constraint(None, 'RepositoryApp2languages', ['repository_app_id', 'language_id'])
# op.create_unique_constraint(None, 'TranslationBundles', ['translation_url_id', 'language', 'target'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_Users_last_access_date'), table_name='Users')
op.drop_index(op.f('ix_Users_creation_date'), table_name='Users')
op.drop_index(op.f('ix_TranslationValueSuggestions_target'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationValueSuggestions_language'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationValueSuggestions_human_key'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationUrls_url'), table_name='TranslationUrls')
op.drop_index(op.f('ix_TranslationUrls_automatic'), table_name='TranslationUrls')
op.drop_index(op.f('ix_TranslationSyncLogs_start_datetime'), table_name='TranslationSyncLogs')
op.drop_index(op.f('ix_TranslationSyncLogs_end_datetime'), table_name='TranslationSyncLogs')
op.drop_index(op.f('ix_TranslationSubscriptions_mechanism'), table_name='TranslationSubscriptions')
op.drop_index(op.f('ix_TranslationSubscriptions_last_check'), table_name='TranslationSubscriptions')
op.drop_index(op.f('ix_TranslationNotificationRecipients_email'), table_name='TranslationNotificationRecipients')
op.drop_index(op.f('ix_TranslationNotificationRecipients_created'), table_name='TranslationNotificationRecipients')
op.drop_index(op.f('ix_TranslationMessageHistory_tool_id'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_taken_from_default'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_same_tool'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_position'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_parent_translation_id'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_namespace'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_key'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_from_developer'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_fmt'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_datetime'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_category'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationKeySuggestions_target'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationKeySuggestions_language'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationKeySuggestions_key'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_origin_language'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_language'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_human_key'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_human_key_hash'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_engine'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationBundles_target'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationBundles_language'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationBundles_from_developer'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationCurrentActiveUsers_last_check'), table_name='TranslationCurrentActiveUsers')
# op.drop_constraint(None, 'TranslationBundles', type_='unique')
op.drop_index(op.f('ix_RepositoryApps_url'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_translatable'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_repository'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_name'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_time'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_downloaded_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_contents_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_download_change'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_check'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_change'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_failing'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_failing_since'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_external_id'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_downloaded_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_contents_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_adaptable'), table_name='RepositoryApps')
# op.drop_constraint(None, 'RepositoryApp2languages', type_='unique')
op.drop_index(op.f('ix_TranslatedApps_url'), table_name='TranslatedApps')
op.drop_index(op.f('ix_Languages_language'), table_name='Languages')
op.drop_index(op.f('ix_GoLabOAuthUsers_email'), table_name='GoLabOAuthUsers')
op.drop_index(op.f('ix_GoLabOAuthUsers_display_name'), table_name='GoLabOAuthUsers')
op.drop_index(op.f('ix_Apps_unique_id'), table_name='Apps')
op.drop_index(op.f('ix_Apps_owner_id'), table_name='Apps')
op.drop_index(op.f('ix_Apps_name'), table_name='Apps')
op.drop_index(op.f('ix_Apps_modification_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_last_access_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_creation_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_composer'), table_name='Apps')
# op.drop_constraint(None, 'ActiveTranslationMessages', type_='unique')
op.drop_index(op.f('ix_ActiveTranslationMessages_tool_id'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_taken_from_default'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_same_tool'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_position'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_namespace'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_key'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_from_developer'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_fmt'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_datetime'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_category'), table_name='ActiveTranslationMessages')
op.create_index('ix_Users_last_access_date', 'Users', ['last_access_date'], unique=False)
op.create_index('ix_Users_creation_date', 'Users', ['creation_date'], unique=False)
op.create_index('ix_TranslationValueSuggestions_target', 'TranslationValueSuggestions', ['target'], unique=False)
op.create_index('ix_TranslationValueSuggestions_language', 'TranslationValueSuggestions', ['language'], unique=False)
op.create_index('ix_TranslationValueSuggestions_human_key', 'TranslationValueSuggestions', ['human_key'], unique=False)
op.create_index('ix_TranslationUrls_url', 'TranslationUrls', ['url'], unique=True)
op.create_index('ix_TranslationUrls_automatic', 'TranslationUrls', ['automatic'], unique=False)
op.create_index('ix_TranslationSyncLogs_start_datetime', 'TranslationSyncLogs', ['start_datetime'], unique=False)
op.create_index('ix_TranslationSyncLogs_end_datetime', 'TranslationSyncLogs', ['end_datetime'], unique=False)
op.create_index('ix_TranslationSubscriptions_mechanism', 'TranslationSubscriptions', ['mechanism'], unique=False)
op.create_index('ix_TranslationSubscriptions_last_check', 'TranslationSubscriptions', ['last_check'], unique=False)
op.create_index('ix_TranslationNotificationRecipients_email', 'TranslationNotificationRecipients', ['email'], unique=True)
op.create_index('ix_TranslationNotificationRecipients_created', 'TranslationNotificationRecipients', ['created'], unique=False)
op.create_index('ix_TranslationMessageHistory_tool_id', 'TranslationMessageHistory', ['tool_id'], unique=False)
op.create_index('ix_TranslationMessageHistory_taken_from_default', 'TranslationMessageHistory', ['taken_from_default'], unique=False)
op.create_index('ix_TranslationMessageHistory_same_tool', 'TranslationMessageHistory', ['same_tool'], unique=False)
op.create_index('ix_TranslationMessageHistory_position', 'TranslationMessageHistory', ['position'], unique=False)
op.create_index('ix_TranslationMessageHistory_parent_translation_id', 'TranslationMessageHistory', ['parent_translation_id'], unique=False)
op.create_index('ix_TranslationMessageHistory_namespace', 'TranslationMessageHistory', ['namespace'], unique=False)
op.create_index('ix_TranslationMessageHistory_key', 'TranslationMessageHistory', ['key'], unique=False)
op.create_index('ix_TranslationMessageHistory_from_developer', 'TranslationMessageHistory', ['from_developer'], unique=False)
op.create_index('ix_TranslationMessageHistory_fmt', 'TranslationMessageHistory', ['fmt'], unique=False)
op.create_index('ix_TranslationMessageHistory_datetime', 'TranslationMessageHistory', ['datetime'], unique=False)
op.create_index('ix_TranslationMessageHistory_category', 'TranslationMessageHistory', ['category'], unique=False)
op.create_index('ix_TranslationKeySuggestions_target', 'TranslationKeySuggestions', ['target'], unique=False)
op.create_index('ix_TranslationKeySuggestions_language', 'TranslationKeySuggestions', ['language'], unique=False)
op.create_index('ix_TranslationKeySuggestions_key', 'TranslationKeySuggestions', ['key'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_origin_language', 'TranslationExternalSuggestions', ['origin_language'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_language', 'TranslationExternalSuggestions', ['language'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_human_key_hash', 'TranslationExternalSuggestions', ['human_key_hash'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_human_key', 'TranslationExternalSuggestions', ['human_key'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_engine', 'TranslationExternalSuggestions', ['engine'], unique=False)
op.create_index('ix_TranslationCurrentActiveUsers_last_check', 'TranslationCurrentActiveUsers', ['last_check'], unique=False)
op.create_index('ix_TranslationBundles_target', 'TranslationBundles', ['target'], unique=False)
op.create_index('ix_TranslationBundles_language', 'TranslationBundles', ['language'], unique=False)
op.create_index('ix_TranslationBundles_from_developer', 'TranslationBundles', ['from_developer'], unique=False)
op.create_index('ix_TranslatedApps_url', 'TranslatedApps', ['url'], unique=True)
op.create_index('ix_RepositoryApps_url', 'RepositoryApps', ['url'], unique=False)
op.create_index('ix_RepositoryApps_translatable', 'RepositoryApps', ['translatable'], unique=False)
op.create_index('ix_RepositoryApps_repository', 'RepositoryApps', ['repository'], unique=False)
op.create_index('ix_RepositoryApps_name', 'RepositoryApps', ['name'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_time', 'RepositoryApps', ['last_processed_time'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_downloaded_hash', 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_contents_hash', 'RepositoryApps', ['last_processed_contents_hash'], unique=False)
op.create_index('ix_RepositoryApps_last_download_change', 'RepositoryApps', ['last_download_change'], unique=False)
op.create_index('ix_RepositoryApps_last_check', 'RepositoryApps', ['last_check'], unique=False)
op.create_index('ix_RepositoryApps_last_change', 'RepositoryApps', ['last_change'], unique=False)
op.create_index('ix_RepositoryApps_failing_since', 'RepositoryApps', ['failing_since'], unique=False)
op.create_index('ix_RepositoryApps_failing', 'RepositoryApps', ['failing'], unique=False)
op.create_index('ix_RepositoryApps_external_id', 'RepositoryApps', ['external_id'], unique=False)
op.create_index('ix_RepositoryApps_downloaded_hash', 'RepositoryApps', ['downloaded_hash'], unique=False)
op.create_index('ix_RepositoryApps_contents_hash', 'RepositoryApps', ['contents_hash'], unique=False)
op.create_index('ix_RepositoryApps_adaptable', 'RepositoryApps', ['adaptable'], unique=False)
op.create_index('ix_Languages_language', 'Languages', ['language'], unique=True)
op.create_index('ix_GoLabOAuthUsers_email', 'GoLabOAuthUsers', ['email'], unique=True)
op.create_index('ix_GoLabOAuthUsers_display_name', 'GoLabOAuthUsers', ['display_name'], unique=False)
op.create_index('ix_Apps_unique_id', 'Apps', ['unique_id'], unique=True)
op.create_index('ix_Apps_owner_id', 'Apps', ['owner_id'], unique=False)
op.create_index('ix_Apps_name', 'Apps', ['name'], unique=False)
op.create_index('ix_Apps_modification_date', 'Apps', ['modification_date'], unique=False)
op.create_index('ix_Apps_last_access_date', 'Apps', ['last_access_date'], unique=False)
op.create_index('ix_Apps_creation_date', 'Apps', ['creation_date'], unique=False)
op.create_index('ix_Apps_composer', 'Apps', ['composer'], unique=False)
op.create_index('ix_ActiveTranslationMessages_tool_id', 'ActiveTranslationMessages', ['tool_id'], unique=False)
op.create_index('ix_ActiveTranslationMessages_taken_from_default', 'ActiveTranslationMessages', ['taken_from_default'], unique=False)
op.create_index('ix_ActiveTranslationMessages_same_tool', 'ActiveTranslationMessages', ['same_tool'], unique=False)
op.create_index('ix_ActiveTranslationMessages_position', 'ActiveTranslationMessages', ['position'], unique=False)
op.create_index('ix_ActiveTranslationMessages_namespace', 'ActiveTranslationMessages', ['namespace'], unique=False)
op.create_index('ix_ActiveTranslationMessages_key', 'ActiveTranslationMessages', ['key'], unique=False)
op.create_index('ix_ActiveTranslationMessages_from_developer', 'ActiveTranslationMessages', ['from_developer'], unique=False)
op.create_index('ix_ActiveTranslationMessages_fmt', 'ActiveTranslationMessages', ['fmt'], unique=False)
op.create_index('ix_ActiveTranslationMessages_datetime', 'ActiveTranslationMessages', ['datetime'], unique=False)
op.create_index('ix_ActiveTranslationMessages_category', 'ActiveTranslationMessages', ['category'], unique=False)
# ### end Alembic commands ###
| [
"alembic.op.f",
"alembic.op.create_index",
"alembic.op.drop_index"
]
| [((341, 440), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_ActiveTranslationMessages_category"""'], {'table_name': '"""ActiveTranslationMessages"""'}), "('ix_ActiveTranslationMessages_category', table_name=\n 'ActiveTranslationMessages')\n", (354, 440), False, 'from alembic import op\n'), ((440, 539), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_ActiveTranslationMessages_datetime"""'], {'table_name': '"""ActiveTranslationMessages"""'}), "('ix_ActiveTranslationMessages_datetime', table_name=\n 'ActiveTranslationMessages')\n", (453, 539), False, 'from alembic import op\n'), ((539, 633), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_ActiveTranslationMessages_fmt"""'], {'table_name': '"""ActiveTranslationMessages"""'}), "('ix_ActiveTranslationMessages_fmt', table_name=\n 'ActiveTranslationMessages')\n", (552, 633), False, 'from alembic import op\n'), ((633, 738), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_ActiveTranslationMessages_from_developer"""'], {'table_name': '"""ActiveTranslationMessages"""'}), "('ix_ActiveTranslationMessages_from_developer', table_name=\n 'ActiveTranslationMessages')\n", (646, 738), False, 'from alembic import op\n'), ((738, 832), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_ActiveTranslationMessages_key"""'], {'table_name': '"""ActiveTranslationMessages"""'}), "('ix_ActiveTranslationMessages_key', table_name=\n 'ActiveTranslationMessages')\n", (751, 832), False, 'from alembic import op\n'), ((832, 932), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_ActiveTranslationMessages_namespace"""'], {'table_name': '"""ActiveTranslationMessages"""'}), "('ix_ActiveTranslationMessages_namespace', table_name=\n 'ActiveTranslationMessages')\n", (845, 932), False, 'from alembic import op\n'), ((932, 1031), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_ActiveTranslationMessages_position"""'], {'table_name': '"""ActiveTranslationMessages"""'}), "('ix_ActiveTranslationMessages_position', table_name=\n 'ActiveTranslationMessages')\n", (945, 1031), False, 'from alembic import op\n'), ((1031, 1131), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_ActiveTranslationMessages_same_tool"""'], {'table_name': '"""ActiveTranslationMessages"""'}), "('ix_ActiveTranslationMessages_same_tool', table_name=\n 'ActiveTranslationMessages')\n", (1044, 1131), False, 'from alembic import op\n'), ((1131, 1240), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_ActiveTranslationMessages_taken_from_default"""'], {'table_name': '"""ActiveTranslationMessages"""'}), "('ix_ActiveTranslationMessages_taken_from_default', table_name\n ='ActiveTranslationMessages')\n", (1144, 1240), False, 'from alembic import op\n'), ((1240, 1338), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_ActiveTranslationMessages_tool_id"""'], {'table_name': '"""ActiveTranslationMessages"""'}), "('ix_ActiveTranslationMessages_tool_id', table_name=\n 'ActiveTranslationMessages')\n", (1253, 1338), False, 'from alembic import op\n'), ((1338, 1390), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_Apps_composer"""'], {'table_name': '"""Apps"""'}), "('ix_Apps_composer', table_name='Apps')\n", (1351, 1390), False, 'from alembic import op\n'), ((1395, 1452), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_Apps_creation_date"""'], {'table_name': '"""Apps"""'}), "('ix_Apps_creation_date', table_name='Apps')\n", (1408, 1452), False, 'from alembic import op\n'), ((1457, 1517), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_Apps_last_access_date"""'], {'table_name': '"""Apps"""'}), 
"('ix_Apps_last_access_date', table_name='Apps')\n", (1470, 1517), False, 'from alembic import op\n'), ((1522, 1583), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_Apps_modification_date"""'], {'table_name': '"""Apps"""'}), "('ix_Apps_modification_date', table_name='Apps')\n", (1535, 1583), False, 'from alembic import op\n'), ((1588, 1636), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_Apps_name"""'], {'table_name': '"""Apps"""'}), "('ix_Apps_name', table_name='Apps')\n", (1601, 1636), False, 'from alembic import op\n'), ((1641, 1693), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_Apps_owner_id"""'], {'table_name': '"""Apps"""'}), "('ix_Apps_owner_id', table_name='Apps')\n", (1654, 1693), False, 'from alembic import op\n'), ((1698, 1751), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_Apps_unique_id"""'], {'table_name': '"""Apps"""'}), "('ix_Apps_unique_id', table_name='Apps')\n", (1711, 1751), False, 'from alembic import op\n'), ((1756, 1834), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_GoLabOAuthUsers_display_name"""'], {'table_name': '"""GoLabOAuthUsers"""'}), "('ix_GoLabOAuthUsers_display_name', table_name='GoLabOAuthUsers')\n", (1769, 1834), False, 'from alembic import op\n'), ((1839, 1910), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_GoLabOAuthUsers_email"""'], {'table_name': '"""GoLabOAuthUsers"""'}), "('ix_GoLabOAuthUsers_email', table_name='GoLabOAuthUsers')\n", (1852, 1910), False, 'from alembic import op\n'), ((1915, 1977), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_Languages_language"""'], {'table_name': '"""Languages"""'}), "('ix_Languages_language', table_name='Languages')\n", (1928, 1977), False, 'from alembic import op\n'), ((1982, 2055), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_adaptable"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_adaptable', table_name='RepositoryApps')\n", (1995, 2055), False, 'from alembic import op\n'), ((2060, 2137), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_contents_hash"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_contents_hash', table_name='RepositoryApps')\n", (2073, 2137), False, 'from alembic import op\n'), ((2142, 2221), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_downloaded_hash"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_downloaded_hash', table_name='RepositoryApps')\n", (2155, 2221), False, 'from alembic import op\n'), ((2226, 2301), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_external_id"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_external_id', table_name='RepositoryApps')\n", (2239, 2301), False, 'from alembic import op\n'), ((2306, 2377), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_failing"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_failing', table_name='RepositoryApps')\n", (2319, 2377), False, 'from alembic import op\n'), ((2382, 2459), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_failing_since"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_failing_since', table_name='RepositoryApps')\n", (2395, 2459), False, 'from alembic import op\n'), ((2464, 2539), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_last_change"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_last_change', table_name='RepositoryApps')\n", (2477, 2539), False, 'from alembic import op\n'), ((2544, 2618), 
'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_last_check"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_last_check', table_name='RepositoryApps')\n", (2557, 2618), False, 'from alembic import op\n'), ((2623, 2712), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_last_download_change"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_last_download_change', table_name=\n 'RepositoryApps')\n", (2636, 2712), False, 'from alembic import op\n'), ((2712, 2809), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_last_processed_contents_hash"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_last_processed_contents_hash', table_name=\n 'RepositoryApps')\n", (2725, 2809), False, 'from alembic import op\n'), ((2809, 2907), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_last_processed_downloaded_hash"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_last_processed_downloaded_hash',\n table_name='RepositoryApps')\n", (2822, 2907), False, 'from alembic import op\n'), ((2908, 2996), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_last_processed_time"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_last_processed_time', table_name=\n 'RepositoryApps')\n", (2921, 2996), False, 'from alembic import op\n'), ((2996, 3064), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_name"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_name', table_name='RepositoryApps')\n", (3009, 3064), False, 'from alembic import op\n'), ((3069, 3143), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_repository"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_repository', table_name='RepositoryApps')\n", (3082, 3143), False, 'from alembic import op\n'), ((3148, 3224), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_translatable"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_translatable', table_name='RepositoryApps')\n", (3161, 3224), False, 'from alembic import op\n'), ((3229, 3296), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_RepositoryApps_url"""'], {'table_name': '"""RepositoryApps"""'}), "('ix_RepositoryApps_url', table_name='RepositoryApps')\n", (3242, 3296), False, 'from alembic import op\n'), ((3301, 3368), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslatedApps_url"""'], {'table_name': '"""TranslatedApps"""'}), "('ix_TranslatedApps_url', table_name='TranslatedApps')\n", (3314, 3368), False, 'from alembic import op\n'), ((3373, 3464), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationBundles_from_developer"""'], {'table_name': '"""TranslationBundles"""'}), "('ix_TranslationBundles_from_developer', table_name=\n 'TranslationBundles')\n", (3386, 3464), False, 'from alembic import op\n'), ((3464, 3549), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationBundles_language"""'], {'table_name': '"""TranslationBundles"""'}), "('ix_TranslationBundles_language', table_name='TranslationBundles'\n )\n", (3477, 3549), False, 'from alembic import op\n'), ((3549, 3627), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationBundles_target"""'], {'table_name': '"""TranslationBundles"""'}), "('ix_TranslationBundles_target', table_name='TranslationBundles')\n", (3562, 3627), False, 'from alembic import op\n'), ((3632, 3741), 'alembic.op.drop_index', 'op.drop_index', 
(['"""ix_TranslationCurrentActiveUsers_last_check"""'], {'table_name': '"""TranslationCurrentActiveUsers"""'}), "('ix_TranslationCurrentActiveUsers_last_check', table_name=\n 'TranslationCurrentActiveUsers')\n", (3645, 3741), False, 'from alembic import op\n'), ((3741, 3848), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationExternalSuggestions_engine"""'], {'table_name': '"""TranslationExternalSuggestions"""'}), "('ix_TranslationExternalSuggestions_engine', table_name=\n 'TranslationExternalSuggestions')\n", (3754, 3848), False, 'from alembic import op\n'), ((3848, 3958), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationExternalSuggestions_human_key"""'], {'table_name': '"""TranslationExternalSuggestions"""'}), "('ix_TranslationExternalSuggestions_human_key', table_name=\n 'TranslationExternalSuggestions')\n", (3861, 3958), False, 'from alembic import op\n'), ((3958, 4072), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationExternalSuggestions_human_key_hash"""'], {'table_name': '"""TranslationExternalSuggestions"""'}), "('ix_TranslationExternalSuggestions_human_key_hash',\n table_name='TranslationExternalSuggestions')\n", (3971, 4072), False, 'from alembic import op\n'), ((4073, 4182), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationExternalSuggestions_language"""'], {'table_name': '"""TranslationExternalSuggestions"""'}), "('ix_TranslationExternalSuggestions_language', table_name=\n 'TranslationExternalSuggestions')\n", (4086, 4182), False, 'from alembic import op\n'), ((4182, 4297), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationExternalSuggestions_origin_language"""'], {'table_name': '"""TranslationExternalSuggestions"""'}), "('ix_TranslationExternalSuggestions_origin_language',\n table_name='TranslationExternalSuggestions')\n", (4195, 4297), False, 'from alembic import op\n'), ((4298, 4392), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationKeySuggestions_key"""'], {'table_name': '"""TranslationKeySuggestions"""'}), "('ix_TranslationKeySuggestions_key', table_name=\n 'TranslationKeySuggestions')\n", (4311, 4392), False, 'from alembic import op\n'), ((4392, 4491), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationKeySuggestions_language"""'], {'table_name': '"""TranslationKeySuggestions"""'}), "('ix_TranslationKeySuggestions_language', table_name=\n 'TranslationKeySuggestions')\n", (4405, 4491), False, 'from alembic import op\n'), ((4491, 4588), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationKeySuggestions_target"""'], {'table_name': '"""TranslationKeySuggestions"""'}), "('ix_TranslationKeySuggestions_target', table_name=\n 'TranslationKeySuggestions')\n", (4504, 4588), False, 'from alembic import op\n'), ((4588, 4687), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationMessageHistory_category"""'], {'table_name': '"""TranslationMessageHistory"""'}), "('ix_TranslationMessageHistory_category', table_name=\n 'TranslationMessageHistory')\n", (4601, 4687), False, 'from alembic import op\n'), ((4687, 4786), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationMessageHistory_datetime"""'], {'table_name': '"""TranslationMessageHistory"""'}), "('ix_TranslationMessageHistory_datetime', table_name=\n 'TranslationMessageHistory')\n", (4700, 4786), False, 'from alembic import op\n'), ((4786, 4880), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationMessageHistory_fmt"""'], {'table_name': '"""TranslationMessageHistory"""'}), "('ix_TranslationMessageHistory_fmt', 
table_name=\n 'TranslationMessageHistory')\n", (4799, 4880), False, 'from alembic import op\n'), ((4880, 4985), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationMessageHistory_from_developer"""'], {'table_name': '"""TranslationMessageHistory"""'}), "('ix_TranslationMessageHistory_from_developer', table_name=\n 'TranslationMessageHistory')\n", (4893, 4985), False, 'from alembic import op\n'), ((4985, 5079), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationMessageHistory_key"""'], {'table_name': '"""TranslationMessageHistory"""'}), "('ix_TranslationMessageHistory_key', table_name=\n 'TranslationMessageHistory')\n", (4998, 5079), False, 'from alembic import op\n'), ((5079, 5179), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationMessageHistory_namespace"""'], {'table_name': '"""TranslationMessageHistory"""'}), "('ix_TranslationMessageHistory_namespace', table_name=\n 'TranslationMessageHistory')\n", (5092, 5179), False, 'from alembic import op\n'), ((5179, 5290), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationMessageHistory_parent_translation_id"""'], {'table_name': '"""TranslationMessageHistory"""'}), "('ix_TranslationMessageHistory_parent_translation_id',\n table_name='TranslationMessageHistory')\n", (5192, 5290), False, 'from alembic import op\n'), ((5291, 5390), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationMessageHistory_position"""'], {'table_name': '"""TranslationMessageHistory"""'}), "('ix_TranslationMessageHistory_position', table_name=\n 'TranslationMessageHistory')\n", (5304, 5390), False, 'from alembic import op\n'), ((5390, 5490), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationMessageHistory_same_tool"""'], {'table_name': '"""TranslationMessageHistory"""'}), "('ix_TranslationMessageHistory_same_tool', table_name=\n 'TranslationMessageHistory')\n", (5403, 5490), False, 'from alembic import op\n'), ((5490, 5599), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationMessageHistory_taken_from_default"""'], {'table_name': '"""TranslationMessageHistory"""'}), "('ix_TranslationMessageHistory_taken_from_default', table_name\n ='TranslationMessageHistory')\n", (5503, 5599), False, 'from alembic import op\n'), ((5599, 5697), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationMessageHistory_tool_id"""'], {'table_name': '"""TranslationMessageHistory"""'}), "('ix_TranslationMessageHistory_tool_id', table_name=\n 'TranslationMessageHistory')\n", (5612, 5697), False, 'from alembic import op\n'), ((5697, 5811), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationNotificationRecipients_created"""'], {'table_name': '"""TranslationNotificationRecipients"""'}), "('ix_TranslationNotificationRecipients_created', table_name=\n 'TranslationNotificationRecipients')\n", (5710, 5811), False, 'from alembic import op\n'), ((5811, 5923), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationNotificationRecipients_email"""'], {'table_name': '"""TranslationNotificationRecipients"""'}), "('ix_TranslationNotificationRecipients_email', table_name=\n 'TranslationNotificationRecipients')\n", (5824, 5923), False, 'from alembic import op\n'), ((5923, 6022), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationSubscriptions_last_check"""'], {'table_name': '"""TranslationSubscriptions"""'}), "('ix_TranslationSubscriptions_last_check', table_name=\n 'TranslationSubscriptions')\n", (5936, 6022), False, 'from alembic import op\n'), ((6022, 6120), 'alembic.op.drop_index', 'op.drop_index', 
(['"""ix_TranslationSubscriptions_mechanism"""'], {'table_name': '"""TranslationSubscriptions"""'}), "('ix_TranslationSubscriptions_mechanism', table_name=\n 'TranslationSubscriptions')\n", (6035, 6120), False, 'from alembic import op\n'), ((6120, 6211), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationSyncLogs_end_datetime"""'], {'table_name': '"""TranslationSyncLogs"""'}), "('ix_TranslationSyncLogs_end_datetime', table_name=\n 'TranslationSyncLogs')\n", (6133, 6211), False, 'from alembic import op\n'), ((6211, 6304), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationSyncLogs_start_datetime"""'], {'table_name': '"""TranslationSyncLogs"""'}), "('ix_TranslationSyncLogs_start_datetime', table_name=\n 'TranslationSyncLogs')\n", (6224, 6304), False, 'from alembic import op\n'), ((6304, 6379), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationUrls_automatic"""'], {'table_name': '"""TranslationUrls"""'}), "('ix_TranslationUrls_automatic', table_name='TranslationUrls')\n", (6317, 6379), False, 'from alembic import op\n'), ((6384, 6453), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationUrls_url"""'], {'table_name': '"""TranslationUrls"""'}), "('ix_TranslationUrls_url', table_name='TranslationUrls')\n", (6397, 6453), False, 'from alembic import op\n'), ((6458, 6562), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationValueSuggestions_human_key"""'], {'table_name': '"""TranslationValueSuggestions"""'}), "('ix_TranslationValueSuggestions_human_key', table_name=\n 'TranslationValueSuggestions')\n", (6471, 6562), False, 'from alembic import op\n'), ((6562, 6665), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationValueSuggestions_language"""'], {'table_name': '"""TranslationValueSuggestions"""'}), "('ix_TranslationValueSuggestions_language', table_name=\n 'TranslationValueSuggestions')\n", (6575, 6665), False, 'from alembic import op\n'), ((6665, 6766), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_TranslationValueSuggestions_target"""'], {'table_name': '"""TranslationValueSuggestions"""'}), "('ix_TranslationValueSuggestions_target', table_name=\n 'TranslationValueSuggestions')\n", (6678, 6766), False, 'from alembic import op\n'), ((6766, 6825), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_Users_creation_date"""'], {'table_name': '"""Users"""'}), "('ix_Users_creation_date', table_name='Users')\n", (6779, 6825), False, 'from alembic import op\n'), ((6830, 6892), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_Users_last_access_date"""'], {'table_name': '"""Users"""'}), "('ix_Users_last_access_date', table_name='Users')\n", (6843, 6892), False, 'from alembic import op\n'), ((23071, 23164), 'alembic.op.create_index', 'op.create_index', (['"""ix_Users_last_access_date"""', '"""Users"""', "['last_access_date']"], {'unique': '(False)'}), "('ix_Users_last_access_date', 'Users', ['last_access_date'],\n unique=False)\n", (23086, 23164), False, 'from alembic import op\n'), ((23165, 23252), 'alembic.op.create_index', 'op.create_index', (['"""ix_Users_creation_date"""', '"""Users"""', "['creation_date']"], {'unique': '(False)'}), "('ix_Users_creation_date', 'Users', ['creation_date'],\n unique=False)\n", (23180, 23252), False, 'from alembic import op\n'), ((23253, 23370), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationValueSuggestions_target"""', '"""TranslationValueSuggestions"""', "['target']"], {'unique': '(False)'}), "('ix_TranslationValueSuggestions_target',\n 'TranslationValueSuggestions', ['target'], 
unique=False)\n", (23268, 23370), False, 'from alembic import op\n'), ((23371, 23492), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationValueSuggestions_language"""', '"""TranslationValueSuggestions"""', "['language']"], {'unique': '(False)'}), "('ix_TranslationValueSuggestions_language',\n 'TranslationValueSuggestions', ['language'], unique=False)\n", (23386, 23492), False, 'from alembic import op\n'), ((23493, 23616), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationValueSuggestions_human_key"""', '"""TranslationValueSuggestions"""', "['human_key']"], {'unique': '(False)'}), "('ix_TranslationValueSuggestions_human_key',\n 'TranslationValueSuggestions', ['human_key'], unique=False)\n", (23508, 23616), False, 'from alembic import op\n'), ((23617, 23703), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationUrls_url"""', '"""TranslationUrls"""', "['url']"], {'unique': '(True)'}), "('ix_TranslationUrls_url', 'TranslationUrls', ['url'],\n unique=True)\n", (23632, 23703), False, 'from alembic import op\n'), ((23704, 23804), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationUrls_automatic"""', '"""TranslationUrls"""', "['automatic']"], {'unique': '(False)'}), "('ix_TranslationUrls_automatic', 'TranslationUrls', [\n 'automatic'], unique=False)\n", (23719, 23804), False, 'from alembic import op\n'), ((23805, 23922), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationSyncLogs_start_datetime"""', '"""TranslationSyncLogs"""', "['start_datetime']"], {'unique': '(False)'}), "('ix_TranslationSyncLogs_start_datetime',\n 'TranslationSyncLogs', ['start_datetime'], unique=False)\n", (23820, 23922), False, 'from alembic import op\n'), ((23923, 24036), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationSyncLogs_end_datetime"""', '"""TranslationSyncLogs"""', "['end_datetime']"], {'unique': '(False)'}), "('ix_TranslationSyncLogs_end_datetime',\n 'TranslationSyncLogs', ['end_datetime'], unique=False)\n", (23938, 24036), False, 'from alembic import op\n'), ((24038, 24155), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationSubscriptions_mechanism"""', '"""TranslationSubscriptions"""', "['mechanism']"], {'unique': '(False)'}), "('ix_TranslationSubscriptions_mechanism',\n 'TranslationSubscriptions', ['mechanism'], unique=False)\n", (24053, 24155), False, 'from alembic import op\n'), ((24156, 24275), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationSubscriptions_last_check"""', '"""TranslationSubscriptions"""', "['last_check']"], {'unique': '(False)'}), "('ix_TranslationSubscriptions_last_check',\n 'TranslationSubscriptions', ['last_check'], unique=False)\n", (24171, 24275), False, 'from alembic import op\n'), ((24277, 24403), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationNotificationRecipients_email"""', '"""TranslationNotificationRecipients"""', "['email']"], {'unique': '(True)'}), "('ix_TranslationNotificationRecipients_email',\n 'TranslationNotificationRecipients', ['email'], unique=True)\n", (24292, 24403), False, 'from alembic import op\n'), ((24404, 24535), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationNotificationRecipients_created"""', '"""TranslationNotificationRecipients"""', "['created']"], {'unique': '(False)'}), "('ix_TranslationNotificationRecipients_created',\n 'TranslationNotificationRecipients', ['created'], unique=False)\n", (24419, 24535), False, 'from alembic import op\n'), ((24537, 24652), 'alembic.op.create_index', 
'op.create_index', (['"""ix_TranslationMessageHistory_tool_id"""', '"""TranslationMessageHistory"""', "['tool_id']"], {'unique': '(False)'}), "('ix_TranslationMessageHistory_tool_id',\n 'TranslationMessageHistory', ['tool_id'], unique=False)\n", (24552, 24652), False, 'from alembic import op\n'), ((24653, 24790), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationMessageHistory_taken_from_default"""', '"""TranslationMessageHistory"""', "['taken_from_default']"], {'unique': '(False)'}), "('ix_TranslationMessageHistory_taken_from_default',\n 'TranslationMessageHistory', ['taken_from_default'], unique=False)\n", (24668, 24790), False, 'from alembic import op\n'), ((24791, 24910), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationMessageHistory_same_tool"""', '"""TranslationMessageHistory"""', "['same_tool']"], {'unique': '(False)'}), "('ix_TranslationMessageHistory_same_tool',\n 'TranslationMessageHistory', ['same_tool'], unique=False)\n", (24806, 24910), False, 'from alembic import op\n'), ((24911, 25028), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationMessageHistory_position"""', '"""TranslationMessageHistory"""', "['position']"], {'unique': '(False)'}), "('ix_TranslationMessageHistory_position',\n 'TranslationMessageHistory', ['position'], unique=False)\n", (24926, 25028), False, 'from alembic import op\n'), ((25029, 25172), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationMessageHistory_parent_translation_id"""', '"""TranslationMessageHistory"""', "['parent_translation_id']"], {'unique': '(False)'}), "('ix_TranslationMessageHistory_parent_translation_id',\n 'TranslationMessageHistory', ['parent_translation_id'], unique=False)\n", (25044, 25172), False, 'from alembic import op\n'), ((25173, 25292), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationMessageHistory_namespace"""', '"""TranslationMessageHistory"""', "['namespace']"], {'unique': '(False)'}), "('ix_TranslationMessageHistory_namespace',\n 'TranslationMessageHistory', ['namespace'], unique=False)\n", (25188, 25292), False, 'from alembic import op\n'), ((25293, 25400), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationMessageHistory_key"""', '"""TranslationMessageHistory"""', "['key']"], {'unique': '(False)'}), "('ix_TranslationMessageHistory_key',\n 'TranslationMessageHistory', ['key'], unique=False)\n", (25308, 25400), False, 'from alembic import op\n'), ((25401, 25530), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationMessageHistory_from_developer"""', '"""TranslationMessageHistory"""', "['from_developer']"], {'unique': '(False)'}), "('ix_TranslationMessageHistory_from_developer',\n 'TranslationMessageHistory', ['from_developer'], unique=False)\n", (25416, 25530), False, 'from alembic import op\n'), ((25531, 25638), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationMessageHistory_fmt"""', '"""TranslationMessageHistory"""', "['fmt']"], {'unique': '(False)'}), "('ix_TranslationMessageHistory_fmt',\n 'TranslationMessageHistory', ['fmt'], unique=False)\n", (25546, 25638), False, 'from alembic import op\n'), ((25639, 25756), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationMessageHistory_datetime"""', '"""TranslationMessageHistory"""', "['datetime']"], {'unique': '(False)'}), "('ix_TranslationMessageHistory_datetime',\n 'TranslationMessageHistory', ['datetime'], unique=False)\n", (25654, 25756), False, 'from alembic import op\n'), ((25757, 25874), 'alembic.op.create_index', 
'op.create_index', (['"""ix_TranslationMessageHistory_category"""', '"""TranslationMessageHistory"""', "['category']"], {'unique': '(False)'}), "('ix_TranslationMessageHistory_category',\n 'TranslationMessageHistory', ['category'], unique=False)\n", (25772, 25874), False, 'from alembic import op\n'), ((25876, 25989), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationKeySuggestions_target"""', '"""TranslationKeySuggestions"""', "['target']"], {'unique': '(False)'}), "('ix_TranslationKeySuggestions_target',\n 'TranslationKeySuggestions', ['target'], unique=False)\n", (25891, 25989), False, 'from alembic import op\n'), ((25990, 26107), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationKeySuggestions_language"""', '"""TranslationKeySuggestions"""', "['language']"], {'unique': '(False)'}), "('ix_TranslationKeySuggestions_language',\n 'TranslationKeySuggestions', ['language'], unique=False)\n", (26005, 26107), False, 'from alembic import op\n'), ((26108, 26215), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationKeySuggestions_key"""', '"""TranslationKeySuggestions"""', "['key']"], {'unique': '(False)'}), "('ix_TranslationKeySuggestions_key',\n 'TranslationKeySuggestions', ['key'], unique=False)\n", (26123, 26215), False, 'from alembic import op\n'), ((26217, 26358), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationExternalSuggestions_origin_language"""', '"""TranslationExternalSuggestions"""', "['origin_language']"], {'unique': '(False)'}), "('ix_TranslationExternalSuggestions_origin_language',\n 'TranslationExternalSuggestions', ['origin_language'], unique=False)\n", (26232, 26358), False, 'from alembic import op\n'), ((26359, 26486), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationExternalSuggestions_language"""', '"""TranslationExternalSuggestions"""', "['language']"], {'unique': '(False)'}), "('ix_TranslationExternalSuggestions_language',\n 'TranslationExternalSuggestions', ['language'], unique=False)\n", (26374, 26486), False, 'from alembic import op\n'), ((26487, 26626), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationExternalSuggestions_human_key_hash"""', '"""TranslationExternalSuggestions"""', "['human_key_hash']"], {'unique': '(False)'}), "('ix_TranslationExternalSuggestions_human_key_hash',\n 'TranslationExternalSuggestions', ['human_key_hash'], unique=False)\n", (26502, 26626), False, 'from alembic import op\n'), ((26627, 26756), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationExternalSuggestions_human_key"""', '"""TranslationExternalSuggestions"""', "['human_key']"], {'unique': '(False)'}), "('ix_TranslationExternalSuggestions_human_key',\n 'TranslationExternalSuggestions', ['human_key'], unique=False)\n", (26642, 26756), False, 'from alembic import op\n'), ((26757, 26880), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationExternalSuggestions_engine"""', '"""TranslationExternalSuggestions"""', "['engine']"], {'unique': '(False)'}), "('ix_TranslationExternalSuggestions_engine',\n 'TranslationExternalSuggestions', ['engine'], unique=False)\n", (26772, 26880), False, 'from alembic import op\n'), ((26882, 27011), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationCurrentActiveUsers_last_check"""', '"""TranslationCurrentActiveUsers"""', "['last_check']"], {'unique': '(False)'}), "('ix_TranslationCurrentActiveUsers_last_check',\n 'TranslationCurrentActiveUsers', ['last_check'], unique=False)\n", (26897, 27011), False, 'from alembic import 
op\n'), ((27013, 27113), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationBundles_target"""', '"""TranslationBundles"""', "['target']"], {'unique': '(False)'}), "('ix_TranslationBundles_target', 'TranslationBundles', [\n 'target'], unique=False)\n", (27028, 27113), False, 'from alembic import op\n'), ((27113, 27217), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationBundles_language"""', '"""TranslationBundles"""', "['language']"], {'unique': '(False)'}), "('ix_TranslationBundles_language', 'TranslationBundles', [\n 'language'], unique=False)\n", (27128, 27217), False, 'from alembic import op\n'), ((27217, 27332), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslationBundles_from_developer"""', '"""TranslationBundles"""', "['from_developer']"], {'unique': '(False)'}), "('ix_TranslationBundles_from_developer',\n 'TranslationBundles', ['from_developer'], unique=False)\n", (27232, 27332), False, 'from alembic import op\n'), ((27334, 27419), 'alembic.op.create_index', 'op.create_index', (['"""ix_TranslatedApps_url"""', '"""TranslatedApps"""', "['url']"], {'unique': '(True)'}), "('ix_TranslatedApps_url', 'TranslatedApps', ['url'], unique=True\n )\n", (27349, 27419), False, 'from alembic import op\n'), ((27420, 27506), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_url"""', '"""RepositoryApps"""', "['url']"], {'unique': '(False)'}), "('ix_RepositoryApps_url', 'RepositoryApps', ['url'], unique=\n False)\n", (27435, 27506), False, 'from alembic import op\n'), ((27506, 27610), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_translatable"""', '"""RepositoryApps"""', "['translatable']"], {'unique': '(False)'}), "('ix_RepositoryApps_translatable', 'RepositoryApps', [\n 'translatable'], unique=False)\n", (27521, 27610), False, 'from alembic import op\n'), ((27610, 27710), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_repository"""', '"""RepositoryApps"""', "['repository']"], {'unique': '(False)'}), "('ix_RepositoryApps_repository', 'RepositoryApps', [\n 'repository'], unique=False)\n", (27625, 27710), False, 'from alembic import op\n'), ((27710, 27797), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_name"""', '"""RepositoryApps"""', "['name']"], {'unique': '(False)'}), "('ix_RepositoryApps_name', 'RepositoryApps', ['name'],\n unique=False)\n", (27725, 27797), False, 'from alembic import op\n'), ((27798, 27915), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_last_processed_time"""', '"""RepositoryApps"""', "['last_processed_time']"], {'unique': '(False)'}), "('ix_RepositoryApps_last_processed_time', 'RepositoryApps',\n ['last_processed_time'], unique=False)\n", (27813, 27915), False, 'from alembic import op\n'), ((27916, 28055), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_last_processed_downloaded_hash"""', '"""RepositoryApps"""', "['last_processed_downloaded_hash']"], {'unique': '(False)'}), "('ix_RepositoryApps_last_processed_downloaded_hash',\n 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)\n", (27931, 28055), False, 'from alembic import op\n'), ((28056, 28191), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_last_processed_contents_hash"""', '"""RepositoryApps"""', "['last_processed_contents_hash']"], {'unique': '(False)'}), "('ix_RepositoryApps_last_processed_contents_hash',\n 'RepositoryApps', ['last_processed_contents_hash'], unique=False)\n", (28071, 28191), False, 'from 
alembic import op\n'), ((28192, 28311), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_last_download_change"""', '"""RepositoryApps"""', "['last_download_change']"], {'unique': '(False)'}), "('ix_RepositoryApps_last_download_change', 'RepositoryApps',\n ['last_download_change'], unique=False)\n", (28207, 28311), False, 'from alembic import op\n'), ((28312, 28412), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_last_check"""', '"""RepositoryApps"""', "['last_check']"], {'unique': '(False)'}), "('ix_RepositoryApps_last_check', 'RepositoryApps', [\n 'last_check'], unique=False)\n", (28327, 28412), False, 'from alembic import op\n'), ((28412, 28514), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_last_change"""', '"""RepositoryApps"""', "['last_change']"], {'unique': '(False)'}), "('ix_RepositoryApps_last_change', 'RepositoryApps', [\n 'last_change'], unique=False)\n", (28427, 28514), False, 'from alembic import op\n'), ((28514, 28620), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_failing_since"""', '"""RepositoryApps"""', "['failing_since']"], {'unique': '(False)'}), "('ix_RepositoryApps_failing_since', 'RepositoryApps', [\n 'failing_since'], unique=False)\n", (28529, 28620), False, 'from alembic import op\n'), ((28620, 28713), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_failing"""', '"""RepositoryApps"""', "['failing']"], {'unique': '(False)'}), "('ix_RepositoryApps_failing', 'RepositoryApps', ['failing'],\n unique=False)\n", (28635, 28713), False, 'from alembic import op\n'), ((28714, 28816), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_external_id"""', '"""RepositoryApps"""', "['external_id']"], {'unique': '(False)'}), "('ix_RepositoryApps_external_id', 'RepositoryApps', [\n 'external_id'], unique=False)\n", (28729, 28816), False, 'from alembic import op\n'), ((28816, 28926), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_downloaded_hash"""', '"""RepositoryApps"""', "['downloaded_hash']"], {'unique': '(False)'}), "('ix_RepositoryApps_downloaded_hash', 'RepositoryApps', [\n 'downloaded_hash'], unique=False)\n", (28831, 28926), False, 'from alembic import op\n'), ((28926, 29032), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_contents_hash"""', '"""RepositoryApps"""', "['contents_hash']"], {'unique': '(False)'}), "('ix_RepositoryApps_contents_hash', 'RepositoryApps', [\n 'contents_hash'], unique=False)\n", (28941, 29032), False, 'from alembic import op\n'), ((29032, 29130), 'alembic.op.create_index', 'op.create_index', (['"""ix_RepositoryApps_adaptable"""', '"""RepositoryApps"""', "['adaptable']"], {'unique': '(False)'}), "('ix_RepositoryApps_adaptable', 'RepositoryApps', [\n 'adaptable'], unique=False)\n", (29047, 29130), False, 'from alembic import op\n'), ((29131, 29216), 'alembic.op.create_index', 'op.create_index', (['"""ix_Languages_language"""', '"""Languages"""', "['language']"], {'unique': '(True)'}), "('ix_Languages_language', 'Languages', ['language'], unique=True\n )\n", (29146, 29216), False, 'from alembic import op\n'), ((29217, 29307), 'alembic.op.create_index', 'op.create_index', (['"""ix_GoLabOAuthUsers_email"""', '"""GoLabOAuthUsers"""', "['email']"], {'unique': '(True)'}), "('ix_GoLabOAuthUsers_email', 'GoLabOAuthUsers', ['email'],\n unique=True)\n", (29232, 29307), False, 'from alembic import op\n'), ((29308, 29414), 'alembic.op.create_index', 'op.create_index', 
(['"""ix_GoLabOAuthUsers_display_name"""', '"""GoLabOAuthUsers"""', "['display_name']"], {'unique': '(False)'}), "('ix_GoLabOAuthUsers_display_name', 'GoLabOAuthUsers', [\n 'display_name'], unique=False)\n", (29323, 29414), False, 'from alembic import op\n'), ((29415, 29487), 'alembic.op.create_index', 'op.create_index', (['"""ix_Apps_unique_id"""', '"""Apps"""', "['unique_id']"], {'unique': '(True)'}), "('ix_Apps_unique_id', 'Apps', ['unique_id'], unique=True)\n", (29430, 29487), False, 'from alembic import op\n'), ((29492, 29563), 'alembic.op.create_index', 'op.create_index', (['"""ix_Apps_owner_id"""', '"""Apps"""', "['owner_id']"], {'unique': '(False)'}), "('ix_Apps_owner_id', 'Apps', ['owner_id'], unique=False)\n", (29507, 29563), False, 'from alembic import op\n'), ((29568, 29631), 'alembic.op.create_index', 'op.create_index', (['"""ix_Apps_name"""', '"""Apps"""', "['name']"], {'unique': '(False)'}), "('ix_Apps_name', 'Apps', ['name'], unique=False)\n", (29583, 29631), False, 'from alembic import op\n'), ((29636, 29729), 'alembic.op.create_index', 'op.create_index', (['"""ix_Apps_modification_date"""', '"""Apps"""', "['modification_date']"], {'unique': '(False)'}), "('ix_Apps_modification_date', 'Apps', ['modification_date'],\n unique=False)\n", (29651, 29729), False, 'from alembic import op\n'), ((29730, 29821), 'alembic.op.create_index', 'op.create_index', (['"""ix_Apps_last_access_date"""', '"""Apps"""', "['last_access_date']"], {'unique': '(False)'}), "('ix_Apps_last_access_date', 'Apps', ['last_access_date'],\n unique=False)\n", (29745, 29821), False, 'from alembic import op\n'), ((29822, 29908), 'alembic.op.create_index', 'op.create_index', (['"""ix_Apps_creation_date"""', '"""Apps"""', "['creation_date']"], {'unique': '(False)'}), "('ix_Apps_creation_date', 'Apps', ['creation_date'], unique=\n False)\n", (29837, 29908), False, 'from alembic import op\n'), ((29908, 29979), 'alembic.op.create_index', 'op.create_index', (['"""ix_Apps_composer"""', '"""Apps"""', "['composer']"], {'unique': '(False)'}), "('ix_Apps_composer', 'Apps', ['composer'], unique=False)\n", (29923, 29979), False, 'from alembic import op\n'), ((29985, 30100), 'alembic.op.create_index', 'op.create_index', (['"""ix_ActiveTranslationMessages_tool_id"""', '"""ActiveTranslationMessages"""', "['tool_id']"], {'unique': '(False)'}), "('ix_ActiveTranslationMessages_tool_id',\n 'ActiveTranslationMessages', ['tool_id'], unique=False)\n", (30000, 30100), False, 'from alembic import op\n'), ((30101, 30238), 'alembic.op.create_index', 'op.create_index', (['"""ix_ActiveTranslationMessages_taken_from_default"""', '"""ActiveTranslationMessages"""', "['taken_from_default']"], {'unique': '(False)'}), "('ix_ActiveTranslationMessages_taken_from_default',\n 'ActiveTranslationMessages', ['taken_from_default'], unique=False)\n", (30116, 30238), False, 'from alembic import op\n'), ((30239, 30358), 'alembic.op.create_index', 'op.create_index', (['"""ix_ActiveTranslationMessages_same_tool"""', '"""ActiveTranslationMessages"""', "['same_tool']"], {'unique': '(False)'}), "('ix_ActiveTranslationMessages_same_tool',\n 'ActiveTranslationMessages', ['same_tool'], unique=False)\n", (30254, 30358), False, 'from alembic import op\n'), ((30359, 30476), 'alembic.op.create_index', 'op.create_index', (['"""ix_ActiveTranslationMessages_position"""', '"""ActiveTranslationMessages"""', "['position']"], {'unique': '(False)'}), "('ix_ActiveTranslationMessages_position',\n 'ActiveTranslationMessages', ['position'], unique=False)\n", (30374, 30476), 
False, 'from alembic import op\n'), ((30477, 30596), 'alembic.op.create_index', 'op.create_index', (['"""ix_ActiveTranslationMessages_namespace"""', '"""ActiveTranslationMessages"""', "['namespace']"], {'unique': '(False)'}), "('ix_ActiveTranslationMessages_namespace',\n 'ActiveTranslationMessages', ['namespace'], unique=False)\n", (30492, 30596), False, 'from alembic import op\n'), ((30597, 30704), 'alembic.op.create_index', 'op.create_index', (['"""ix_ActiveTranslationMessages_key"""', '"""ActiveTranslationMessages"""', "['key']"], {'unique': '(False)'}), "('ix_ActiveTranslationMessages_key',\n 'ActiveTranslationMessages', ['key'], unique=False)\n", (30612, 30704), False, 'from alembic import op\n'), ((30705, 30834), 'alembic.op.create_index', 'op.create_index', (['"""ix_ActiveTranslationMessages_from_developer"""', '"""ActiveTranslationMessages"""', "['from_developer']"], {'unique': '(False)'}), "('ix_ActiveTranslationMessages_from_developer',\n 'ActiveTranslationMessages', ['from_developer'], unique=False)\n", (30720, 30834), False, 'from alembic import op\n'), ((30835, 30942), 'alembic.op.create_index', 'op.create_index', (['"""ix_ActiveTranslationMessages_fmt"""', '"""ActiveTranslationMessages"""', "['fmt']"], {'unique': '(False)'}), "('ix_ActiveTranslationMessages_fmt',\n 'ActiveTranslationMessages', ['fmt'], unique=False)\n", (30850, 30942), False, 'from alembic import op\n'), ((30943, 31060), 'alembic.op.create_index', 'op.create_index', (['"""ix_ActiveTranslationMessages_datetime"""', '"""ActiveTranslationMessages"""', "['datetime']"], {'unique': '(False)'}), "('ix_ActiveTranslationMessages_datetime',\n 'ActiveTranslationMessages', ['datetime'], unique=False)\n", (30958, 31060), False, 'from alembic import op\n'), ((31061, 31178), 'alembic.op.create_index', 'op.create_index', (['"""ix_ActiveTranslationMessages_category"""', '"""ActiveTranslationMessages"""', "['category']"], {'unique': '(False)'}), "('ix_ActiveTranslationMessages_category',\n 'ActiveTranslationMessages', ['category'], unique=False)\n", (31076, 31178), False, 'from alembic import op\n'), ((6914, 6959), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_category"""'], {}), "('ix_ActiveTranslationMessages_category')\n", (6918, 6959), False, 'from alembic import op\n'), ((7038, 7083), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_datetime"""'], {}), "('ix_ActiveTranslationMessages_datetime')\n", (7042, 7083), False, 'from alembic import op\n'), ((7162, 7202), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_fmt"""'], {}), "('ix_ActiveTranslationMessages_fmt')\n", (7166, 7202), False, 'from alembic import op\n'), ((7276, 7327), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_from_developer"""'], {}), "('ix_ActiveTranslationMessages_from_developer')\n", (7280, 7327), False, 'from alembic import op\n'), ((7412, 7452), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_key"""'], {}), "('ix_ActiveTranslationMessages_key')\n", (7416, 7452), False, 'from alembic import op\n'), ((7526, 7572), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_namespace"""'], {}), "('ix_ActiveTranslationMessages_namespace')\n", (7530, 7572), False, 'from alembic import op\n'), ((7652, 7697), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_position"""'], {}), "('ix_ActiveTranslationMessages_position')\n", (7656, 7697), False, 'from alembic import op\n'), ((7776, 7822), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_same_tool"""'], {}), 
"('ix_ActiveTranslationMessages_same_tool')\n", (7780, 7822), False, 'from alembic import op\n'), ((7902, 7957), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_taken_from_default"""'], {}), "('ix_ActiveTranslationMessages_taken_from_default')\n", (7906, 7957), False, 'from alembic import op\n'), ((8046, 8090), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_tool_id"""'], {}), "('ix_ActiveTranslationMessages_tool_id')\n", (8050, 8090), False, 'from alembic import op\n'), ((8168, 8192), 'alembic.op.f', 'op.f', (['"""ix_Apps_composer"""'], {}), "('ix_Apps_composer')\n", (8172, 8192), False, 'from alembic import op\n'), ((8250, 8279), 'alembic.op.f', 'op.f', (['"""ix_Apps_creation_date"""'], {}), "('ix_Apps_creation_date')\n", (8254, 8279), False, 'from alembic import op\n'), ((8342, 8374), 'alembic.op.f', 'op.f', (['"""ix_Apps_last_access_date"""'], {}), "('ix_Apps_last_access_date')\n", (8346, 8374), False, 'from alembic import op\n'), ((8440, 8473), 'alembic.op.f', 'op.f', (['"""ix_Apps_modification_date"""'], {}), "('ix_Apps_modification_date')\n", (8444, 8473), False, 'from alembic import op\n'), ((8540, 8560), 'alembic.op.f', 'op.f', (['"""ix_Apps_name"""'], {}), "('ix_Apps_name')\n", (8544, 8560), False, 'from alembic import op\n'), ((8614, 8638), 'alembic.op.f', 'op.f', (['"""ix_Apps_owner_id"""'], {}), "('ix_Apps_owner_id')\n", (8618, 8638), False, 'from alembic import op\n'), ((8696, 8721), 'alembic.op.f', 'op.f', (['"""ix_Apps_unique_id"""'], {}), "('ix_Apps_unique_id')\n", (8700, 8721), False, 'from alembic import op\n'), ((8779, 8818), 'alembic.op.f', 'op.f', (['"""ix_GoLabOAuthUsers_display_name"""'], {}), "('ix_GoLabOAuthUsers_display_name')\n", (8783, 8818), False, 'from alembic import op\n'), ((8891, 8923), 'alembic.op.f', 'op.f', (['"""ix_GoLabOAuthUsers_email"""'], {}), "('ix_GoLabOAuthUsers_email')\n", (8895, 8923), False, 'from alembic import op\n'), ((8988, 9017), 'alembic.op.f', 'op.f', (['"""ix_Languages_language"""'], {}), "('ix_Languages_language')\n", (8992, 9017), False, 'from alembic import op\n'), ((9079, 9114), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_adaptable"""'], {}), "('ix_RepositoryApps_adaptable')\n", (9083, 9114), False, 'from alembic import op\n'), ((9183, 9222), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_contents_hash"""'], {}), "('ix_RepositoryApps_contents_hash')\n", (9187, 9222), False, 'from alembic import op\n'), ((9295, 9336), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_downloaded_hash"""'], {}), "('ix_RepositoryApps_downloaded_hash')\n", (9299, 9336), False, 'from alembic import op\n'), ((9411, 9448), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_external_id"""'], {}), "('ix_RepositoryApps_external_id')\n", (9415, 9448), False, 'from alembic import op\n'), ((9519, 9558), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_failing_since"""'], {}), "('ix_RepositoryApps_failing_since')\n", (9523, 9558), False, 'from alembic import op\n'), ((9631, 9664), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_failing"""'], {}), "('ix_RepositoryApps_failing')\n", (9635, 9664), False, 'from alembic import op\n'), ((9731, 9768), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_last_change"""'], {}), "('ix_RepositoryApps_last_change')\n", (9735, 9768), False, 'from alembic import op\n'), ((9839, 9875), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_last_check"""'], {}), "('ix_RepositoryApps_last_check')\n", (9843, 9875), False, 'from alembic import op\n'), ((9945, 9991), 'alembic.op.f', 'op.f', 
(['"""ix_RepositoryApps_last_download_change"""'], {}), "('ix_RepositoryApps_last_download_change')\n", (9949, 9991), False, 'from alembic import op\n'), ((10071, 10125), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_last_processed_contents_hash"""'], {}), "('ix_RepositoryApps_last_processed_contents_hash')\n", (10075, 10125), False, 'from alembic import op\n'), ((10213, 10269), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_last_processed_downloaded_hash"""'], {}), "('ix_RepositoryApps_last_processed_downloaded_hash')\n", (10217, 10269), False, 'from alembic import op\n'), ((10359, 10404), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_last_processed_time"""'], {}), "('ix_RepositoryApps_last_processed_time')\n", (10363, 10404), False, 'from alembic import op\n'), ((10483, 10513), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_name"""'], {}), "('ix_RepositoryApps_name')\n", (10487, 10513), False, 'from alembic import op\n'), ((10577, 10613), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_repository"""'], {}), "('ix_RepositoryApps_repository')\n", (10581, 10613), False, 'from alembic import op\n'), ((10683, 10721), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_translatable"""'], {}), "('ix_RepositoryApps_translatable')\n", (10687, 10721), False, 'from alembic import op\n'), ((10793, 10822), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_url"""'], {}), "('ix_RepositoryApps_url')\n", (10797, 10822), False, 'from alembic import op\n'), ((10885, 10914), 'alembic.op.f', 'op.f', (['"""ix_TranslatedApps_url"""'], {}), "('ix_TranslatedApps_url')\n", (10889, 10914), False, 'from alembic import op\n'), ((10976, 11020), 'alembic.op.f', 'op.f', (['"""ix_TranslationBundles_from_developer"""'], {}), "('ix_TranslationBundles_from_developer')\n", (10980, 11020), False, 'from alembic import op\n'), ((11098, 11136), 'alembic.op.f', 'op.f', (['"""ix_TranslationBundles_language"""'], {}), "('ix_TranslationBundles_language')\n", (11102, 11136), False, 'from alembic import op\n'), ((11208, 11244), 'alembic.op.f', 'op.f', (['"""ix_TranslationBundles_target"""'], {}), "('ix_TranslationBundles_target')\n", (11212, 11244), False, 'from alembic import op\n'), ((11314, 11365), 'alembic.op.f', 'op.f', (['"""ix_TranslationCurrentActiveUsers_last_check"""'], {}), "('ix_TranslationCurrentActiveUsers_last_check')\n", (11318, 11365), False, 'from alembic import op\n'), ((11450, 11498), 'alembic.op.f', 'op.f', (['"""ix_TranslationExternalSuggestions_engine"""'], {}), "('ix_TranslationExternalSuggestions_engine')\n", (11454, 11498), False, 'from alembic import op\n'), ((11580, 11636), 'alembic.op.f', 'op.f', (['"""ix_TranslationExternalSuggestions_human_key_hash"""'], {}), "('ix_TranslationExternalSuggestions_human_key_hash')\n", (11584, 11636), False, 'from alembic import op\n'), ((11726, 11777), 'alembic.op.f', 'op.f', (['"""ix_TranslationExternalSuggestions_human_key"""'], {}), "('ix_TranslationExternalSuggestions_human_key')\n", (11730, 11777), False, 'from alembic import op\n'), ((11862, 11912), 'alembic.op.f', 'op.f', (['"""ix_TranslationExternalSuggestions_language"""'], {}), "('ix_TranslationExternalSuggestions_language')\n", (11866, 11912), False, 'from alembic import op\n'), ((11996, 12053), 'alembic.op.f', 'op.f', (['"""ix_TranslationExternalSuggestions_origin_language"""'], {}), "('ix_TranslationExternalSuggestions_origin_language')\n", (12000, 12053), False, 'from alembic import op\n'), ((12144, 12184), 'alembic.op.f', 'op.f', (['"""ix_TranslationKeySuggestions_key"""'], {}), 
"('ix_TranslationKeySuggestions_key')\n", (12148, 12184), False, 'from alembic import op\n'), ((12258, 12303), 'alembic.op.f', 'op.f', (['"""ix_TranslationKeySuggestions_language"""'], {}), "('ix_TranslationKeySuggestions_language')\n", (12262, 12303), False, 'from alembic import op\n'), ((12382, 12425), 'alembic.op.f', 'op.f', (['"""ix_TranslationKeySuggestions_target"""'], {}), "('ix_TranslationKeySuggestions_target')\n", (12386, 12425), False, 'from alembic import op\n'), ((12502, 12547), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_category"""'], {}), "('ix_TranslationMessageHistory_category')\n", (12506, 12547), False, 'from alembic import op\n'), ((12626, 12671), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_datetime"""'], {}), "('ix_TranslationMessageHistory_datetime')\n", (12630, 12671), False, 'from alembic import op\n'), ((12750, 12790), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_fmt"""'], {}), "('ix_TranslationMessageHistory_fmt')\n", (12754, 12790), False, 'from alembic import op\n'), ((12864, 12915), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_from_developer"""'], {}), "('ix_TranslationMessageHistory_from_developer')\n", (12868, 12915), False, 'from alembic import op\n'), ((13000, 13040), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_key"""'], {}), "('ix_TranslationMessageHistory_key')\n", (13004, 13040), False, 'from alembic import op\n'), ((13114, 13160), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_namespace"""'], {}), "('ix_TranslationMessageHistory_namespace')\n", (13118, 13160), False, 'from alembic import op\n'), ((13240, 13298), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_parent_translation_id"""'], {}), "('ix_TranslationMessageHistory_parent_translation_id')\n", (13244, 13298), False, 'from alembic import op\n'), ((13390, 13435), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_position"""'], {}), "('ix_TranslationMessageHistory_position')\n", (13394, 13435), False, 'from alembic import op\n'), ((13514, 13560), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_same_tool"""'], {}), "('ix_TranslationMessageHistory_same_tool')\n", (13518, 13560), False, 'from alembic import op\n'), ((13640, 13695), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_taken_from_default"""'], {}), "('ix_TranslationMessageHistory_taken_from_default')\n", (13644, 13695), False, 'from alembic import op\n'), ((13784, 13828), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_tool_id"""'], {}), "('ix_TranslationMessageHistory_tool_id')\n", (13788, 13828), False, 'from alembic import op\n'), ((13906, 13958), 'alembic.op.f', 'op.f', (['"""ix_TranslationNotificationRecipients_created"""'], {}), "('ix_TranslationNotificationRecipients_created')\n", (13910, 13958), False, 'from alembic import op\n'), ((14044, 14094), 'alembic.op.f', 'op.f', (['"""ix_TranslationNotificationRecipients_email"""'], {}), "('ix_TranslationNotificationRecipients_email')\n", (14048, 14094), False, 'from alembic import op\n'), ((14177, 14223), 'alembic.op.f', 'op.f', (['"""ix_TranslationSubscriptions_last_check"""'], {}), "('ix_TranslationSubscriptions_last_check')\n", (14181, 14223), False, 'from alembic import op\n'), ((14303, 14348), 'alembic.op.f', 'op.f', (['"""ix_TranslationSubscriptions_mechanism"""'], {}), "('ix_TranslationSubscriptions_mechanism')\n", (14307, 14348), False, 'from alembic import op\n'), ((14427, 14470), 'alembic.op.f', 'op.f', 
(['"""ix_TranslationSyncLogs_end_datetime"""'], {}), "('ix_TranslationSyncLogs_end_datetime')\n", (14431, 14470), False, 'from alembic import op\n'), ((14547, 14592), 'alembic.op.f', 'op.f', (['"""ix_TranslationSyncLogs_start_datetime"""'], {}), "('ix_TranslationSyncLogs_start_datetime')\n", (14551, 14592), False, 'from alembic import op\n'), ((14671, 14707), 'alembic.op.f', 'op.f', (['"""ix_TranslationUrls_automatic"""'], {}), "('ix_TranslationUrls_automatic')\n", (14675, 14707), False, 'from alembic import op\n'), ((14777, 14807), 'alembic.op.f', 'op.f', (['"""ix_TranslationUrls_url"""'], {}), "('ix_TranslationUrls_url')\n", (14781, 14807), False, 'from alembic import op\n'), ((14870, 14918), 'alembic.op.f', 'op.f', (['"""ix_TranslationValueSuggestions_human_key"""'], {}), "('ix_TranslationValueSuggestions_human_key')\n", (14874, 14918), False, 'from alembic import op\n'), ((15000, 15047), 'alembic.op.f', 'op.f', (['"""ix_TranslationValueSuggestions_language"""'], {}), "('ix_TranslationValueSuggestions_language')\n", (15004, 15047), False, 'from alembic import op\n'), ((15128, 15173), 'alembic.op.f', 'op.f', (['"""ix_TranslationValueSuggestions_target"""'], {}), "('ix_TranslationValueSuggestions_target')\n", (15132, 15173), False, 'from alembic import op\n'), ((15252, 15282), 'alembic.op.f', 'op.f', (['"""ix_Users_creation_date"""'], {}), "('ix_Users_creation_date')\n", (15256, 15282), False, 'from alembic import op\n'), ((15346, 15379), 'alembic.op.f', 'op.f', (['"""ix_Users_last_access_date"""'], {}), "('ix_Users_last_access_date')\n", (15350, 15379), False, 'from alembic import op\n'), ((15870, 15903), 'alembic.op.f', 'op.f', (['"""ix_Users_last_access_date"""'], {}), "('ix_Users_last_access_date')\n", (15874, 15903), False, 'from alembic import op\n'), ((15943, 15973), 'alembic.op.f', 'op.f', (['"""ix_Users_creation_date"""'], {}), "('ix_Users_creation_date')\n", (15947, 15973), False, 'from alembic import op\n'), ((16014, 16059), 'alembic.op.f', 'op.f', (['"""ix_TranslationValueSuggestions_target"""'], {}), "('ix_TranslationValueSuggestions_target')\n", (16018, 16059), False, 'from alembic import op\n'), ((16121, 16168), 'alembic.op.f', 'op.f', (['"""ix_TranslationValueSuggestions_language"""'], {}), "('ix_TranslationValueSuggestions_language')\n", (16125, 16168), False, 'from alembic import op\n'), ((16230, 16278), 'alembic.op.f', 'op.f', (['"""ix_TranslationValueSuggestions_human_key"""'], {}), "('ix_TranslationValueSuggestions_human_key')\n", (16234, 16278), False, 'from alembic import op\n'), ((16340, 16370), 'alembic.op.f', 'op.f', (['"""ix_TranslationUrls_url"""'], {}), "('ix_TranslationUrls_url')\n", (16344, 16370), False, 'from alembic import op\n'), ((16420, 16456), 'alembic.op.f', 'op.f', (['"""ix_TranslationUrls_automatic"""'], {}), "('ix_TranslationUrls_automatic')\n", (16424, 16456), False, 'from alembic import op\n'), ((16506, 16551), 'alembic.op.f', 'op.f', (['"""ix_TranslationSyncLogs_start_datetime"""'], {}), "('ix_TranslationSyncLogs_start_datetime')\n", (16510, 16551), False, 'from alembic import op\n'), ((16605, 16648), 'alembic.op.f', 'op.f', (['"""ix_TranslationSyncLogs_end_datetime"""'], {}), "('ix_TranslationSyncLogs_end_datetime')\n", (16609, 16648), False, 'from alembic import op\n'), ((16702, 16747), 'alembic.op.f', 'op.f', (['"""ix_TranslationSubscriptions_mechanism"""'], {}), "('ix_TranslationSubscriptions_mechanism')\n", (16706, 16747), False, 'from alembic import op\n'), ((16806, 16852), 'alembic.op.f', 'op.f', 
(['"""ix_TranslationSubscriptions_last_check"""'], {}), "('ix_TranslationSubscriptions_last_check')\n", (16810, 16852), False, 'from alembic import op\n'), ((16911, 16961), 'alembic.op.f', 'op.f', (['"""ix_TranslationNotificationRecipients_email"""'], {}), "('ix_TranslationNotificationRecipients_email')\n", (16915, 16961), False, 'from alembic import op\n'), ((17029, 17081), 'alembic.op.f', 'op.f', (['"""ix_TranslationNotificationRecipients_created"""'], {}), "('ix_TranslationNotificationRecipients_created')\n", (17033, 17081), False, 'from alembic import op\n'), ((17149, 17193), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_tool_id"""'], {}), "('ix_TranslationMessageHistory_tool_id')\n", (17153, 17193), False, 'from alembic import op\n'), ((17253, 17308), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_taken_from_default"""'], {}), "('ix_TranslationMessageHistory_taken_from_default')\n", (17257, 17308), False, 'from alembic import op\n'), ((17368, 17414), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_same_tool"""'], {}), "('ix_TranslationMessageHistory_same_tool')\n", (17372, 17414), False, 'from alembic import op\n'), ((17474, 17519), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_position"""'], {}), "('ix_TranslationMessageHistory_position')\n", (17478, 17519), False, 'from alembic import op\n'), ((17579, 17637), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_parent_translation_id"""'], {}), "('ix_TranslationMessageHistory_parent_translation_id')\n", (17583, 17637), False, 'from alembic import op\n'), ((17697, 17743), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_namespace"""'], {}), "('ix_TranslationMessageHistory_namespace')\n", (17701, 17743), False, 'from alembic import op\n'), ((17803, 17843), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_key"""'], {}), "('ix_TranslationMessageHistory_key')\n", (17807, 17843), False, 'from alembic import op\n'), ((17903, 17954), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_from_developer"""'], {}), "('ix_TranslationMessageHistory_from_developer')\n", (17907, 17954), False, 'from alembic import op\n'), ((18014, 18054), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_fmt"""'], {}), "('ix_TranslationMessageHistory_fmt')\n", (18018, 18054), False, 'from alembic import op\n'), ((18114, 18159), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_datetime"""'], {}), "('ix_TranslationMessageHistory_datetime')\n", (18118, 18159), False, 'from alembic import op\n'), ((18219, 18264), 'alembic.op.f', 'op.f', (['"""ix_TranslationMessageHistory_category"""'], {}), "('ix_TranslationMessageHistory_category')\n", (18223, 18264), False, 'from alembic import op\n'), ((18324, 18367), 'alembic.op.f', 'op.f', (['"""ix_TranslationKeySuggestions_target"""'], {}), "('ix_TranslationKeySuggestions_target')\n", (18328, 18367), False, 'from alembic import op\n'), ((18427, 18472), 'alembic.op.f', 'op.f', (['"""ix_TranslationKeySuggestions_language"""'], {}), "('ix_TranslationKeySuggestions_language')\n", (18431, 18472), False, 'from alembic import op\n'), ((18532, 18572), 'alembic.op.f', 'op.f', (['"""ix_TranslationKeySuggestions_key"""'], {}), "('ix_TranslationKeySuggestions_key')\n", (18536, 18572), False, 'from alembic import op\n'), ((18632, 18689), 'alembic.op.f', 'op.f', (['"""ix_TranslationExternalSuggestions_origin_language"""'], {}), "('ix_TranslationExternalSuggestions_origin_language')\n", (18636, 18689), False, 'from alembic import op\n'), ((18754, 
18804), 'alembic.op.f', 'op.f', (['"""ix_TranslationExternalSuggestions_language"""'], {}), "('ix_TranslationExternalSuggestions_language')\n", (18758, 18804), False, 'from alembic import op\n'), ((18869, 18920), 'alembic.op.f', 'op.f', (['"""ix_TranslationExternalSuggestions_human_key"""'], {}), "('ix_TranslationExternalSuggestions_human_key')\n", (18873, 18920), False, 'from alembic import op\n'), ((18985, 19041), 'alembic.op.f', 'op.f', (['"""ix_TranslationExternalSuggestions_human_key_hash"""'], {}), "('ix_TranslationExternalSuggestions_human_key_hash')\n", (18989, 19041), False, 'from alembic import op\n'), ((19106, 19154), 'alembic.op.f', 'op.f', (['"""ix_TranslationExternalSuggestions_engine"""'], {}), "('ix_TranslationExternalSuggestions_engine')\n", (19110, 19154), False, 'from alembic import op\n'), ((19219, 19255), 'alembic.op.f', 'op.f', (['"""ix_TranslationBundles_target"""'], {}), "('ix_TranslationBundles_target')\n", (19223, 19255), False, 'from alembic import op\n'), ((19308, 19346), 'alembic.op.f', 'op.f', (['"""ix_TranslationBundles_language"""'], {}), "('ix_TranslationBundles_language')\n", (19312, 19346), False, 'from alembic import op\n'), ((19399, 19443), 'alembic.op.f', 'op.f', (['"""ix_TranslationBundles_from_developer"""'], {}), "('ix_TranslationBundles_from_developer')\n", (19403, 19443), False, 'from alembic import op\n'), ((19496, 19547), 'alembic.op.f', 'op.f', (['"""ix_TranslationCurrentActiveUsers_last_check"""'], {}), "('ix_TranslationCurrentActiveUsers_last_check')\n", (19500, 19547), False, 'from alembic import op\n'), ((19680, 19709), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_url"""'], {}), "('ix_RepositoryApps_url')\n", (19684, 19709), False, 'from alembic import op\n'), ((19758, 19796), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_translatable"""'], {}), "('ix_RepositoryApps_translatable')\n", (19762, 19796), False, 'from alembic import op\n'), ((19845, 19881), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_repository"""'], {}), "('ix_RepositoryApps_repository')\n", (19849, 19881), False, 'from alembic import op\n'), ((19930, 19960), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_name"""'], {}), "('ix_RepositoryApps_name')\n", (19934, 19960), False, 'from alembic import op\n'), ((20009, 20054), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_last_processed_time"""'], {}), "('ix_RepositoryApps_last_processed_time')\n", (20013, 20054), False, 'from alembic import op\n'), ((20103, 20159), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_last_processed_downloaded_hash"""'], {}), "('ix_RepositoryApps_last_processed_downloaded_hash')\n", (20107, 20159), False, 'from alembic import op\n'), ((20208, 20262), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_last_processed_contents_hash"""'], {}), "('ix_RepositoryApps_last_processed_contents_hash')\n", (20212, 20262), False, 'from alembic import op\n'), ((20311, 20357), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_last_download_change"""'], {}), "('ix_RepositoryApps_last_download_change')\n", (20315, 20357), False, 'from alembic import op\n'), ((20406, 20442), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_last_check"""'], {}), "('ix_RepositoryApps_last_check')\n", (20410, 20442), False, 'from alembic import op\n'), ((20491, 20528), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_last_change"""'], {}), "('ix_RepositoryApps_last_change')\n", (20495, 20528), False, 'from alembic import op\n'), ((20577, 20610), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_failing"""'], {}), 
"('ix_RepositoryApps_failing')\n", (20581, 20610), False, 'from alembic import op\n'), ((20659, 20698), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_failing_since"""'], {}), "('ix_RepositoryApps_failing_since')\n", (20663, 20698), False, 'from alembic import op\n'), ((20747, 20784), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_external_id"""'], {}), "('ix_RepositoryApps_external_id')\n", (20751, 20784), False, 'from alembic import op\n'), ((20833, 20874), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_downloaded_hash"""'], {}), "('ix_RepositoryApps_downloaded_hash')\n", (20837, 20874), False, 'from alembic import op\n'), ((20923, 20962), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_contents_hash"""'], {}), "('ix_RepositoryApps_contents_hash')\n", (20927, 20962), False, 'from alembic import op\n'), ((21011, 21046), 'alembic.op.f', 'op.f', (['"""ix_RepositoryApps_adaptable"""'], {}), "('ix_RepositoryApps_adaptable')\n", (21015, 21046), False, 'from alembic import op\n'), ((21169, 21198), 'alembic.op.f', 'op.f', (['"""ix_TranslatedApps_url"""'], {}), "('ix_TranslatedApps_url')\n", (21173, 21198), False, 'from alembic import op\n'), ((21247, 21276), 'alembic.op.f', 'op.f', (['"""ix_Languages_language"""'], {}), "('ix_Languages_language')\n", (21251, 21276), False, 'from alembic import op\n'), ((21320, 21352), 'alembic.op.f', 'op.f', (['"""ix_GoLabOAuthUsers_email"""'], {}), "('ix_GoLabOAuthUsers_email')\n", (21324, 21352), False, 'from alembic import op\n'), ((21402, 21441), 'alembic.op.f', 'op.f', (['"""ix_GoLabOAuthUsers_display_name"""'], {}), "('ix_GoLabOAuthUsers_display_name')\n", (21406, 21441), False, 'from alembic import op\n'), ((21491, 21516), 'alembic.op.f', 'op.f', (['"""ix_Apps_unique_id"""'], {}), "('ix_Apps_unique_id')\n", (21495, 21516), False, 'from alembic import op\n'), ((21555, 21579), 'alembic.op.f', 'op.f', (['"""ix_Apps_owner_id"""'], {}), "('ix_Apps_owner_id')\n", (21559, 21579), False, 'from alembic import op\n'), ((21618, 21638), 'alembic.op.f', 'op.f', (['"""ix_Apps_name"""'], {}), "('ix_Apps_name')\n", (21622, 21638), False, 'from alembic import op\n'), ((21677, 21710), 'alembic.op.f', 'op.f', (['"""ix_Apps_modification_date"""'], {}), "('ix_Apps_modification_date')\n", (21681, 21710), False, 'from alembic import op\n'), ((21749, 21781), 'alembic.op.f', 'op.f', (['"""ix_Apps_last_access_date"""'], {}), "('ix_Apps_last_access_date')\n", (21753, 21781), False, 'from alembic import op\n'), ((21820, 21849), 'alembic.op.f', 'op.f', (['"""ix_Apps_creation_date"""'], {}), "('ix_Apps_creation_date')\n", (21824, 21849), False, 'from alembic import op\n'), ((21888, 21912), 'alembic.op.f', 'op.f', (['"""ix_Apps_composer"""'], {}), "('ix_Apps_composer')\n", (21892, 21912), False, 'from alembic import op\n'), ((22027, 22071), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_tool_id"""'], {}), "('ix_ActiveTranslationMessages_tool_id')\n", (22031, 22071), False, 'from alembic import op\n'), ((22131, 22186), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_taken_from_default"""'], {}), "('ix_ActiveTranslationMessages_taken_from_default')\n", (22135, 22186), False, 'from alembic import op\n'), ((22246, 22292), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_same_tool"""'], {}), "('ix_ActiveTranslationMessages_same_tool')\n", (22250, 22292), False, 'from alembic import op\n'), ((22352, 22397), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_position"""'], {}), "('ix_ActiveTranslationMessages_position')\n", (22356, 22397), False, 
'from alembic import op\n'), ((22457, 22503), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_namespace"""'], {}), "('ix_ActiveTranslationMessages_namespace')\n", (22461, 22503), False, 'from alembic import op\n'), ((22563, 22603), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_key"""'], {}), "('ix_ActiveTranslationMessages_key')\n", (22567, 22603), False, 'from alembic import op\n'), ((22663, 22714), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_from_developer"""'], {}), "('ix_ActiveTranslationMessages_from_developer')\n", (22667, 22714), False, 'from alembic import op\n'), ((22774, 22814), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_fmt"""'], {}), "('ix_ActiveTranslationMessages_fmt')\n", (22778, 22814), False, 'from alembic import op\n'), ((22874, 22919), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_datetime"""'], {}), "('ix_ActiveTranslationMessages_datetime')\n", (22878, 22919), False, 'from alembic import op\n'), ((22979, 23024), 'alembic.op.f', 'op.f', (['"""ix_ActiveTranslationMessages_category"""'], {}), "('ix_ActiveTranslationMessages_category')\n", (22983, 23024), False, 'from alembic import op\n')] |
import glob
import os
import sys
from tempfile import TemporaryDirectory
import netCDF4
import numpy as np
import numpy.ma as ma
from all_products_fun import Check
from lidar_fun import LidarFun
from cloudnetpy import concat_lib
from cloudnetpy.instruments import ceilo2nc
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(SCRIPT_PATH)
FILES = glob.glob(f"{SCRIPT_PATH}/data/cl61d/*.nc")
FILES.sort()
SITE_META = {
"name": "Hyytiรคlรค",
"altitude": 123,
"calibration_factor": 2.0,
"latitude": 45.0,
"longitude": 22.0,
}
class TestCl61d(Check):
site_meta = SITE_META
date = "2021-08-29"
temp_dir = TemporaryDirectory()
daily_file = temp_dir.name + "/daily.nc"
concat_lib.concatenate_files(FILES, daily_file, concat_dimension="profile")
temp_path = temp_dir.name + "/test.nc"
uuid = ceilo2nc(daily_file, temp_path, site_meta, date=date)
def test_variable_names(self):
keys = {
"beta",
"beta_smooth",
"calibration_factor",
"range",
"height",
"zenith_angle",
"time",
"depolarisation",
"altitude",
"latitude",
"longitude",
"wavelength",
}
assert set(self.nc.variables.keys()) == keys
def test_common_lidar(self):
lidar_fun = LidarFun(self.nc, self.site_meta, self.date, self.uuid)
for name, method in LidarFun.__dict__.items():
if "test_" in name:
getattr(lidar_fun, name)()
def test_variable_values(self):
assert abs(self.nc.variables["wavelength"][:] - 910.55) < 0.001
assert self.nc.variables["zenith_angle"][:] == 3.0
assert ma.max(self.nc.variables["depolarisation"][:]) < 1
assert ma.min(self.nc.variables["depolarisation"][:]) > -0.1
def test_comments(self):
assert "SNR threshold applied: 5" in self.nc.variables["beta"].comment
def test_global_attributes(self):
assert self.nc.source == "Vaisala CL61d"
assert self.nc.title == f'CL61d ceilometer from {self.site_meta["name"]}'
def test_date_argument(tmp_path):
daily_file = str(tmp_path / "daily.nc")
test_file = str(tmp_path / "test.nc")
concat_lib.concatenate_files(FILES, daily_file, concat_dimension="profile")
ceilo2nc(daily_file, test_file, SITE_META, date="2021-08-30")
with netCDF4.Dataset(test_file) as nc:
assert len(nc.variables["time"]) == 12
assert np.all(np.diff(nc.variables["time"][:]) > 0)
assert nc.year == "2021"
assert nc.month == "08"
assert nc.day == "30"
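# --- Added illustrative sketch (not part of the original test module) ---
# Minimal end-to-end use of the same API outside the test harness; the input
# glob pattern and output paths below are hypothetical.
# daily_file = "/tmp/cl61d_daily.nc"
# output_file = "/tmp/cl61d_hyytiala.nc"
# concat_lib.concatenate_files(sorted(glob.glob("/data/cl61d/*.nc")), daily_file,
#                              concat_dimension="profile")
# uuid = ceilo2nc(daily_file, output_file, SITE_META, date="2021-08-29")
# with netCDF4.Dataset(output_file) as nc:
#     beta = nc.variables["beta"][:]            # screened attenuated backscatter
#     depol = nc.variables["depolarisation"][:]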
| [
"tempfile.TemporaryDirectory",
"numpy.ma.max",
"lidar_fun.LidarFun.__dict__.items",
"netCDF4.Dataset",
"numpy.ma.min",
"numpy.diff",
"os.path.realpath",
"cloudnetpy.instruments.ceilo2nc",
"lidar_fun.LidarFun",
"sys.path.append",
"cloudnetpy.concat_lib.concatenate_files",
"glob.glob"
]
| [((334, 362), 'sys.path.append', 'sys.path.append', (['SCRIPT_PATH'], {}), '(SCRIPT_PATH)\n', (349, 362), False, 'import sys\n'), ((372, 415), 'glob.glob', 'glob.glob', (['f"""{SCRIPT_PATH}/data/cl61d/*.nc"""'], {}), "(f'{SCRIPT_PATH}/data/cl61d/*.nc')\n", (381, 415), False, 'import glob\n'), ((306, 332), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (322, 332), False, 'import os\n'), ((658, 678), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (676, 678), False, 'from tempfile import TemporaryDirectory\n'), ((728, 803), 'cloudnetpy.concat_lib.concatenate_files', 'concat_lib.concatenate_files', (['FILES', 'daily_file'], {'concat_dimension': '"""profile"""'}), "(FILES, daily_file, concat_dimension='profile')\n", (756, 803), False, 'from cloudnetpy import concat_lib\n'), ((858, 911), 'cloudnetpy.instruments.ceilo2nc', 'ceilo2nc', (['daily_file', 'temp_path', 'site_meta'], {'date': 'date'}), '(daily_file, temp_path, site_meta, date=date)\n', (866, 911), False, 'from cloudnetpy.instruments import ceilo2nc\n'), ((2277, 2352), 'cloudnetpy.concat_lib.concatenate_files', 'concat_lib.concatenate_files', (['FILES', 'daily_file'], {'concat_dimension': '"""profile"""'}), "(FILES, daily_file, concat_dimension='profile')\n", (2305, 2352), False, 'from cloudnetpy import concat_lib\n'), ((2357, 2418), 'cloudnetpy.instruments.ceilo2nc', 'ceilo2nc', (['daily_file', 'test_file', 'SITE_META'], {'date': '"""2021-08-30"""'}), "(daily_file, test_file, SITE_META, date='2021-08-30')\n", (2365, 2418), False, 'from cloudnetpy.instruments import ceilo2nc\n'), ((1383, 1438), 'lidar_fun.LidarFun', 'LidarFun', (['self.nc', 'self.site_meta', 'self.date', 'self.uuid'], {}), '(self.nc, self.site_meta, self.date, self.uuid)\n', (1391, 1438), False, 'from lidar_fun import LidarFun\n'), ((1467, 1492), 'lidar_fun.LidarFun.__dict__.items', 'LidarFun.__dict__.items', ([], {}), '()\n', (1490, 1492), False, 'from lidar_fun import LidarFun\n'), ((2428, 2454), 'netCDF4.Dataset', 'netCDF4.Dataset', (['test_file'], {}), '(test_file)\n', (2443, 2454), False, 'import netCDF4\n'), ((1752, 1798), 'numpy.ma.max', 'ma.max', (["self.nc.variables['depolarisation'][:]"], {}), "(self.nc.variables['depolarisation'][:])\n", (1758, 1798), True, 'import numpy.ma as ma\n'), ((1818, 1864), 'numpy.ma.min', 'ma.min', (["self.nc.variables['depolarisation'][:]"], {}), "(self.nc.variables['depolarisation'][:])\n", (1824, 1864), True, 'import numpy.ma as ma\n'), ((2531, 2563), 'numpy.diff', 'np.diff', (["nc.variables['time'][:]"], {}), "(nc.variables['time'][:])\n", (2538, 2563), True, 'import numpy as np\n')] |
import click
import requests
from bs4 import BeautifulSoup
from modules.Word.managers.DictionaryManager import DictionaryManager
import re
@click.command()
@click.option('--url', help='URL to fetch from')
@click.pass_context
def search(ctx, url):
dictionary_manager: DictionaryManager = ctx.obj[DictionaryManager]
soup = BeautifulSoup(requests.get(url).text, 'html.parser')
words_list = soup.text.split()
words_found = {}
print("Starting...")
i = 1
percentage = 5
percentage_increments = 5
for word in words_list:
try:
if (i / len(words_list) * 100) > percentage:
print(f'{percentage}% read')
percentage += percentage_increments
i += 1
word = re.sub(' +', ' ', word)
if word in words_found:
words_found[word] += 1
continue
dictionary_manager.is_word(word)
words_found[word] = 1
except Exception as e:
print(f'{str(e)}: {word}')
print("Complete...")
| [
"click.option",
"re.sub",
"click.command",
"requests.get"
]
| [((142, 157), 'click.command', 'click.command', ([], {}), '()\n', (155, 157), False, 'import click\n'), ((159, 206), 'click.option', 'click.option', (['"""--url"""'], {'help': '"""URL to fetch from"""'}), "('--url', help='URL to fetch from')\n", (171, 206), False, 'import click\n'), ((345, 362), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (357, 362), False, 'import requests\n'), ((757, 780), 're.sub', 're.sub', (['""" +"""', '""" """', 'word'], {}), "(' +', ' ', word)\n", (763, 780), False, 'import re\n')] |
# -*- coding: utf-8 -*-
import matplotlib as mpl
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from cell2cell.clustering import compute_linkage
from cell2cell.preprocessing.manipulate_dataframes import check_symmetry
from cell2cell.plotting.aesthetics import map_colors_to_metadata
def clustermap_cci(interaction_space, method='ward', optimal_leaf=True, metadata=None, sample_col='#SampleID',
group_col='Groups', meta_cmap='gist_rainbow', colors=None, excluded_cells=None, title='',
cbar_title='CCI score', cbar_fontsize=18, filename=None, **kwargs):
'''Generates a clustermap (heatmap + dendrograms from a hierarchical
clustering) based on CCI scores of cell-cell pairs.
Parameters
----------
interaction_space : cell2cell.core.interaction_space.InteractionSpace
        Interaction space that contains a distance matrix after running the
        method compute_pairwise_cci_scores. Alternatively, this object
can be a numpy-array or a pandas DataFrame. Also, a
SingleCellInteractions or a BulkInteractions object after running
the method compute_pairwise_cci_scores.
method : str, default='ward'
Clustering method for computing a linkage as in
scipy.cluster.hierarchy.linkage
optimal_leaf : boolean, default=True
        Whether to sort the leaves of the dendrograms so that the distance
        between successive leaves is minimal. For more information, see
scipy.cluster.hierarchy.optimal_leaf_ordering
metadata : pandas.Dataframe, default=None
Metadata associated with the cells, cell types or samples in the
matrix containing CCI scores. If None, cells will not be colored
by major groups.
sample_col : str, default='#SampleID'
Column in the metadata for the cells, cell types or samples
in the matrix containing CCI scores.
group_col : str, default='Groups'
Column in the metadata containing the major groups of cells, cell types
or samples in the matrix with CCI scores.
meta_cmap : str, default='gist_rainbow'
Name of the color palette for coloring the major groups of cells.
colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells. If colors is specified, meta_cmap will be
ignored.
excluded_cells : list, default=None
List containing cell names that are present in the interaction_space
object but that will be excluded from this plot.
title : str, default=''
Title of the clustermap.
cbar_title : str, default='CCI score'
Title for the colorbar, depending on the score employed.
cbar_fontsize : int, default=18
Font size for the colorbar title as well as labels for axes X and Y.
filename : str, default=None
        Path to save the clustermap figure. If None, the figure is not
saved.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
hier : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
'''
if hasattr(interaction_space, 'distance_matrix'):
print('Interaction space detected as an InteractionSpace class')
distance_matrix = interaction_space.distance_matrix
space_type = 'class'
elif (type(interaction_space) is np.ndarray) or (type(interaction_space) is pd.core.frame.DataFrame):
print('Interaction space detected as a distance matrix')
distance_matrix = interaction_space
space_type = 'matrix'
elif hasattr(interaction_space, 'interaction_space'):
print('Interaction space detected as a Interactions class')
if not hasattr(interaction_space.interaction_space, 'distance_matrix'):
raise ValueError('First run the method compute_pairwise_interactions() in your interaction' + \
' object to generate a distance matrix.')
else:
interaction_space = interaction_space.interaction_space
distance_matrix = interaction_space.distance_matrix
space_type = 'class'
else:
raise ValueError('First run the method compute_pairwise_interactions() in your interaction' + \
' object to generate a distance matrix.')
# Drop excluded cells
if excluded_cells is not None:
df = distance_matrix.loc[~distance_matrix.index.isin(excluded_cells),
~distance_matrix.columns.isin(excluded_cells)]
else:
df = distance_matrix
# Check symmetry to get linkage
symmetric = check_symmetry(df)
if (not symmetric) & (type(interaction_space) is pd.core.frame.DataFrame):
assert set(df.index) == set(df.columns), 'The distance matrix does not have the same elements in rows and columns'
# Obtain info for generating plot
linkage = _get_distance_matrix_linkages(df=df,
kwargs=kwargs,
method=method,
optimal_ordering=optimal_leaf,
symmetric=symmetric
)
kwargs_ = kwargs.copy()
# PLOT CCI MATRIX
if space_type == 'class':
df = interaction_space.interaction_elements['cci_matrix']
else:
df = distance_matrix
if excluded_cells is not None:
df = df.loc[~df.index.isin(excluded_cells),
~df.columns.isin(excluded_cells)]
# Colors
if metadata is not None:
col_colors = map_colors_to_metadata(metadata=metadata,
ref_df=df,
colors=colors,
sample_col=sample_col,
group_col=group_col,
cmap=meta_cmap)
if not symmetric:
row_colors = col_colors
else:
row_colors = None
else:
col_colors = None
row_colors = None
# Plot hierarchical clustering (triangular)
hier = _plot_triangular_clustermap(df=df,
symmetric=symmetric,
linkage=linkage,
col_colors=col_colors,
row_colors=row_colors,
title=title,
cbar_title=cbar_title,
cbar_fontsize=cbar_fontsize,
**kwargs_)
    if not symmetric:
hier.ax_heatmap.set_xlabel('Receiver cells', fontsize=cbar_fontsize)
hier.ax_heatmap.set_ylabel('Sender cells', fontsize=cbar_fontsize)
if filename is not None:
plt.savefig(filename, dpi=300,
bbox_inches='tight')
return hier
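# --- Added illustrative usage (not part of the original cell2cell module) ---
# Sketch of a typical call, assuming `interactions` is an Interactions object on
# which the pairwise CCI scores have already been computed and `meta_df` holds the
# sample metadata; the column names and output path below are hypothetical.
def _example_clustermap_cci_usage(interactions, meta_df):
    return clustermap_cci(interactions,
                           method='ward',
                           optimal_leaf=True,
                           metadata=meta_df,
                           sample_col='#SampleID',
                           group_col='Groups',
                           meta_cmap='gist_rainbow',
                           cbar_title='CCI score',
                           filename='./cci_clustermap.png')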
def _get_distance_matrix_linkages(df, kwargs, method='ward', optimal_ordering=True, symmetric=None):
'''Computes linkages for the CCI matrix.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores in a form of distances (that is, smaller
values represent stronger interactions). Diagonal must be filled
by zeros.
kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
method : str, default='ward'
Clustering method for computing a linkage as in
scipy.cluster.hierarchy.linkage
optimal_ordering : boolean, default=True
        Whether to sort the leaves of the dendrograms so that the distance
        between successive leaves is minimal. For more information, see
scipy.cluster.hierarchy.optimal_leaf_ordering
symmetric : boolean, default=None
Whether df is symmetric.
Returns
-------
linkage : ndarray
The hierarchical clustering of cells encoded as a linkage matrix.
'''
if symmetric is None:
symmetric = check_symmetry(df)
if symmetric:
if 'col_cluster' in kwargs.keys():
kwargs['row_cluster'] = kwargs['col_cluster']
if kwargs['col_cluster']:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
elif 'row_cluster' in kwargs.keys():
if kwargs['row_cluster']:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
else:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
return linkage
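# Hypothetical usage sketch (not part of the original module): computing linkages for a
# small, made-up symmetric CCI distance matrix; the cell names and values are illustrative.
def _example_distance_matrix_linkages():
    import numpy as np
    import pandas as pd
    cells = ['CellA', 'CellB', 'CellC']
    toy = pd.DataFrame(np.array([[0.0, 0.4, 0.7],
                               [0.4, 0.0, 0.2],
                               [0.7, 0.2, 0.0]]),
                     index=cells, columns=cells)
    # Empty kwargs means no explicit col_cluster/row_cluster flags are forwarded.
    return _get_distance_matrix_linkages(df=toy, kwargs={}, method='ward',
                                          optimal_ordering=True, symmetric=True)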
def _triangularize_distance_matrix(df, linkage=None, symmetric=None, **kwargs):
'''Generates a mask to plot the upper triangle of the CCI matrix.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores. Must be a symmetric matrix.
linkage : ndarray, default=None
The hierarchical clustering of cells encoded as a linkage matrix.
symmetric : boolean, default=None
Whether df is symmetric.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
mask : ndarray
Mask that contains ones in the places to be hidden in the clustermap.
Only the diagonal and the upper triangle are not masked (contain
zeros).
'''
if symmetric is None:
symmetric = check_symmetry(df)
# Triangular matrix
if symmetric:
order_map = dict()
if linkage is None:
mask = np.ones((df.shape[0], df.shape[1]))
for i in range(mask.shape[0]):
for j in range(i, mask.shape[1]):
mask[i, j] = 0
else:
# Plot hierarchical clustering for getting indexes according to linkage
hier = sns.clustermap(df,
col_linkage=linkage,
row_linkage=linkage,
**kwargs
)
plt.close()
ind_order = hier.dendrogram_col.reordered_ind
mask = np.zeros((df.shape[0], df.shape[1]))
for i, ind in enumerate(ind_order):
order_map[i] = ind
filter_list = [order_map[j] for j in range(i)]
mask[ind, filter_list] = 1
else:
mask = None
return mask
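# Hypothetical usage sketch (not part of the original module): the mask produced for a
# 3x3 symmetric matrix without a linkage keeps the diagonal and upper triangle visible.
def _example_triangular_mask():
    import numpy as np
    import pandas as pd
    toy = pd.DataFrame(np.zeros((3, 3)), index=list('ABC'), columns=list('ABC'))
    mask = _triangularize_distance_matrix(df=toy, linkage=None, symmetric=True)
    # mask[i, j] == 0 (kept) for j >= i, and 1 (hidden) elsewhere
    return mask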
def _plot_triangular_clustermap(df, symmetric=None, linkage=None, mask=None, col_colors=None, row_colors=None,
title='', cbar_title='CCI score', cbar_fontsize=12, **kwargs):
'''Plots a triangular clustermap based on a mask.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores. Must be a symmetric matrix.
linkage : ndarray, default=None
The hierarchical clustering of cells encoded as a linkage matrix.
mask : ndarray, default=None
Mask that contains ones in the places to be hidden in the clustermap.
Only the diagonal and the upper triangle are not masked (contain
zeros). If None, a mask will be computed based on the CCI matrix
symmetry.
col_colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells in the columns.
row_colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells in the rows.
title : str, default=''
Title of the clustermap.
cbar_title : str, default='CCI score'
Title for the colorbar, depending on the score employed.
cbar_fontsize : int, default=12
Font size for the colorbar title as well as labels for axes X and Y.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
hier : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
'''
if symmetric is None:
symmetric = check_symmetry(df)
if mask is None:
mask = _triangularize_distance_matrix(df=df,
linkage=linkage,
symmetric=symmetric,
**kwargs
)
hier = sns.clustermap(df,
col_linkage=linkage,
row_linkage=linkage,
mask=mask,
col_colors=col_colors,
row_colors=row_colors,
**kwargs
)
hier = _move_xticks_triangular_clustermap(clustermap=hier,
symmetric=symmetric
)
# Title
if len(title) > 0:
hier.ax_col_dendrogram.set_title(title, fontsize=16)
# Color bar label
cbar = hier.ax_heatmap.collections[0].colorbar
cbar.ax.set_ylabel(cbar_title, fontsize=cbar_fontsize)
cbar.ax.yaxis.set_label_position("left")
return hier
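# Hypothetical end-to-end sketch (not part of the original module) combining the helpers
# above on a toy symmetric distance matrix; 'Blues' and the values are arbitrary choices.
def _example_plot_triangular_clustermap():
    import numpy as np
    import pandas as pd
    cells = ['CellA', 'CellB', 'CellC']
    toy = pd.DataFrame(np.array([[0.0, 0.4, 0.7],
                               [0.4, 0.0, 0.2],
                               [0.7, 0.2, 0.0]]),
                     index=cells, columns=cells)
    kwargs = dict(cmap='Blues')
    linkage = _get_distance_matrix_linkages(df=toy, kwargs=kwargs, symmetric=True)
    return _plot_triangular_clustermap(df=toy, symmetric=True, linkage=linkage,
                                        title='Toy CCI matrix', **kwargs)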
def _move_xticks_triangular_clustermap(clustermap, symmetric=True):
'''Moves xticks to the diagonal when plotting a symmetric matrix
    in the form of an upper triangle.
Parameters
    ----------
clustermap : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
symmetric : boolean, default=None
Whether the CCI matrix plotted in the clustermap is symmetric.
Returns
-------
clustermap : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance, with the xticks moved to the
diagonal if the CCI matrix was symmetric. If not, the same
input clustermap is returned, but with rotated xtick labels.
'''
if symmetric:
# Apply offset transform to all xticklabels.
clustermap.ax_row_dendrogram.set_visible(False)
clustermap.ax_heatmap.tick_params(bottom=False) # Hide xtick line
x_labels = clustermap.ax_heatmap.xaxis.get_majorticklabels()
dpi_x = clustermap.fig.dpi_scale_trans.to_values()[0]
dpi_y = clustermap.fig.dpi_scale_trans.to_values()[3]
x0 = clustermap.ax_heatmap.transData.transform(x_labels[0].get_position())
x1 = clustermap.ax_heatmap.transData.transform(x_labels[1].get_position())
ylims = clustermap.ax_heatmap.get_ylim()
bottom_points = clustermap.ax_heatmap.transData.transform((1.0, ylims[0]))[1]
for i, xl in enumerate(x_labels):
# Move labels in dx and dy points.
swap_xy = (1.0, xl.get_position()[0] + 0.5)
new_y_points = clustermap.ax_heatmap.transData.transform(swap_xy)[1]
dx = -0.5 * abs(x1[0] - x0[0]) / dpi_x
dy = (new_y_points - bottom_points) / dpi_y
offset = mpl.transforms.ScaledTranslation(dx, dy, clustermap.fig.dpi_scale_trans)
xl.set_transform(xl.get_transform() + offset)
if symmetric:
rot = 45
else:
rot = 90
va = 'center'
clustermap.ax_heatmap.set_xticklabels(clustermap.ax_heatmap.xaxis.get_majorticklabels(),
rotation=rot,
rotation_mode='anchor',
va='bottom',
ha='right') # , fontsize=9.5)
clustermap.ax_heatmap.set_yticklabels(clustermap.ax_heatmap.yaxis.get_majorticklabels(),
rotation=0,
va=va,
ha='left') # , fontsize=9.5)
return clustermap | [
"matplotlib.pyplot.savefig",
"numpy.ones",
"cell2cell.preprocessing.manipulate_dataframes.check_symmetry",
"seaborn.clustermap",
"cell2cell.clustering.compute_linkage",
"matplotlib.pyplot.close",
"numpy.zeros",
"cell2cell.plotting.aesthetics.map_colors_to_metadata",
"matplotlib.transforms.ScaledTranslation"
]
| [((4765, 4783), 'cell2cell.preprocessing.manipulate_dataframes.check_symmetry', 'check_symmetry', (['df'], {}), '(df)\n', (4779, 4783), False, 'from cell2cell.preprocessing.manipulate_dataframes import check_symmetry\n'), ((12729, 12860), 'seaborn.clustermap', 'sns.clustermap', (['df'], {'col_linkage': 'linkage', 'row_linkage': 'linkage', 'mask': 'mask', 'col_colors': 'col_colors', 'row_colors': 'row_colors'}), '(df, col_linkage=linkage, row_linkage=linkage, mask=mask,\n col_colors=col_colors, row_colors=row_colors, **kwargs)\n', (12743, 12860), True, 'import seaborn as sns\n'), ((5773, 5904), 'cell2cell.plotting.aesthetics.map_colors_to_metadata', 'map_colors_to_metadata', ([], {'metadata': 'metadata', 'ref_df': 'df', 'colors': 'colors', 'sample_col': 'sample_col', 'group_col': 'group_col', 'cmap': 'meta_cmap'}), '(metadata=metadata, ref_df=df, colors=colors,\n sample_col=sample_col, group_col=group_col, cmap=meta_cmap)\n', (5795, 5904), False, 'from cell2cell.plotting.aesthetics import map_colors_to_metadata\n'), ((7067, 7118), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(filename, dpi=300, bbox_inches='tight')\n", (7078, 7118), True, 'from matplotlib import pyplot as plt\n'), ((8237, 8255), 'cell2cell.preprocessing.manipulate_dataframes.check_symmetry', 'check_symmetry', (['df'], {}), '(df)\n', (8251, 8255), False, 'from cell2cell.preprocessing.manipulate_dataframes import check_symmetry\n'), ((9761, 9779), 'cell2cell.preprocessing.manipulate_dataframes.check_symmetry', 'check_symmetry', (['df'], {}), '(df)\n', (9775, 9779), False, 'from cell2cell.preprocessing.manipulate_dataframes import check_symmetry\n'), ((12390, 12408), 'cell2cell.preprocessing.manipulate_dataframes.check_symmetry', 'check_symmetry', (['df'], {}), '(df)\n', (12404, 12408), False, 'from cell2cell.preprocessing.manipulate_dataframes import check_symmetry\n'), ((9897, 9932), 'numpy.ones', 'np.ones', (['(df.shape[0], df.shape[1])'], {}), '((df.shape[0], df.shape[1]))\n', (9904, 9932), True, 'import numpy as np\n'), ((10178, 10248), 'seaborn.clustermap', 'sns.clustermap', (['df'], {'col_linkage': 'linkage', 'row_linkage': 'linkage'}), '(df, col_linkage=linkage, row_linkage=linkage, **kwargs)\n', (10192, 10248), True, 'import seaborn as sns\n'), ((10398, 10409), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10407, 10409), True, 'from matplotlib import pyplot as plt\n'), ((10487, 10523), 'numpy.zeros', 'np.zeros', (['(df.shape[0], df.shape[1])'], {}), '((df.shape[0], df.shape[1]))\n', (10495, 10523), True, 'import numpy as np\n'), ((15244, 15316), 'matplotlib.transforms.ScaledTranslation', 'mpl.transforms.ScaledTranslation', (['dx', 'dy', 'clustermap.fig.dpi_scale_trans'], {}), '(dx, dy, clustermap.fig.dpi_scale_trans)\n', (15276, 15316), True, 'import matplotlib as mpl\n'), ((8440, 8509), 'cell2cell.clustering.compute_linkage', 'compute_linkage', (['df'], {'method': 'method', 'optimal_ordering': 'optimal_ordering'}), '(df, method=method, optimal_ordering=optimal_ordering)\n', (8455, 8509), False, 'from cell2cell.clustering import compute_linkage\n'), ((8823, 8892), 'cell2cell.clustering.compute_linkage', 'compute_linkage', (['df'], {'method': 'method', 'optimal_ordering': 'optimal_ordering'}), '(df, method=method, optimal_ordering=optimal_ordering)\n', (8838, 8892), False, 'from cell2cell.clustering import compute_linkage\n'), ((8668, 8737), 'cell2cell.clustering.compute_linkage', 'compute_linkage', (['df'], {'method': 'method', 
'optimal_ordering': 'optimal_ordering'}), '(df, method=method, optimal_ordering=optimal_ordering)\n', (8683, 8737), False, 'from cell2cell.clustering import compute_linkage\n')] |
# -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for spanner operations list."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.api_lib.spanner import backup_operations
from googlecloudsdk.api_lib.spanner import database_operations
from googlecloudsdk.api_lib.spanner import instance_config_operations
from googlecloudsdk.api_lib.spanner import instance_operations
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exceptions
from googlecloudsdk.command_lib.spanner import flags
def _CommonRun(args):
"""Performs run actions common to all List stages."""
is_database_type = (
args.type == 'DATABASE_RESTORE' or args.type == 'DATABASE' or
args.type == 'DATABASE_CREATE' or args.type == 'DATABASE_UPDATE_DDL')
if args.backup or args.type == 'BACKUP':
# Update output table for backup operations.
# pylint:disable=protected-access
args._GetParser().ai.display_info.AddFormat("""
table(
name.basename():label=OPERATION_ID,
done():label=DONE,
metadata.'@type'.split('.').slice(-1:).join(),
metadata.name.split('/').slice(-1:).join():label=BACKUP,
metadata.database.split('/').slice(-1).join():label=SOURCE_DATABASE,
metadata.progress.startTime:label=START_TIME,
metadata.progress.endTime:label=END_TIME
)
""")
if args.type == 'DATABASE_RESTORE':
# Update output table for restore operations.
# pylint:disable=protected-access
args._GetParser().ai.display_info.AddFormat("""
table(
name.basename():label=OPERATION_ID,
done():label=DONE,
metadata.'@type'.split('.').slice(-1:).join(),
metadata.name.split('/').slice(-1:).join():label=RESTORED_DATABASE,
metadata.backupInfo.backup.split('/').slice(-1).join():label=SOURCE_BACKUP,
metadata.progress.startTime:label=START_TIME,
metadata.progress.endTime:label=END_TIME
)
""")
elif is_database_type:
# Update output table for database operations.
# pylint:disable=protected-access
args._GetParser().ai.display_info.AddFormat("""
table(
name.basename():label=OPERATION_ID,
metadata.statements.join(sep="\n"),
done():label=DONE,
metadata.'@type'.split('.').slice(-1:).join(),
database().split('/').slice(-1:).join():label=DATABASE_ID
)
""")
# Checks that user only specified either database or backup flag.
if (args.IsSpecified('database') and args.IsSpecified('backup')):
raise c_exceptions.InvalidArgumentException(
'--database or --backup',
'Must specify either --database or --backup. To search backups for a '
'specific database, use the --database flag with --type=BACKUP')
# Checks that the user did not specify the backup flag with the type filter
# set to a database operation type.
if (args.IsSpecified('backup') and is_database_type):
raise c_exceptions.InvalidArgumentException(
'--backup or --type',
'The backup flag cannot be used with the type flag set to a '
'database operation type.')
if args.type == 'INSTANCE':
if args.IsSpecified('database'):
raise c_exceptions.InvalidArgumentException(
'--database or --type',
'The `--database` flag cannot be used with `--type=INSTANCE`.')
if args.IsSpecified('backup'):
raise c_exceptions.InvalidArgumentException(
'--backup or --type',
'The `--backup` flag cannot be used with `--type=INSTANCE`.')
if args.type == 'BACKUP':
if args.database:
db_filter = backup_operations.BuildDatabaseFilter(args.instance,
args.database)
return backup_operations.List(args.instance, db_filter)
if args.backup:
return backup_operations.ListGeneric(args.instance, args.backup)
return backup_operations.List(args.instance)
if is_database_type:
type_filter = database_operations.BuildDatabaseOperationTypeFilter(
args.type)
return database_operations.ListDatabaseOperations(args.instance,
args.database,
type_filter)
if args.backup:
return backup_operations.ListGeneric(args.instance, args.backup)
if args.database:
return database_operations.List(args.instance, args.database)
return instance_operations.List(args.instance)
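# Illustrative dispatch summary (a reading aid, not executed code), assuming the flag
# validation above has already passed:
#   --type=BACKUP --database=DB   -> backup_operations.List(instance, db_filter)
#   --type=BACKUP --backup=BK     -> backup_operations.ListGeneric(instance, BK)
#   --type=BACKUP                 -> backup_operations.List(instance)
#   --type=DATABASE / DATABASE_CREATE / DATABASE_UPDATE_DDL / DATABASE_RESTORE
#                                 -> database_operations.ListDatabaseOperations(instance, DB, type_filter)
#   --backup=BK (no type)         -> backup_operations.ListGeneric(instance, BK)
#   --database=DB (no type)       -> database_operations.List(instance, DB)
#   otherwise                     -> instance_operations.List(instance)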
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class List(base.ListCommand):
"""List the Cloud Spanner operations on the given instance or database."""
detailed_help = {
'EXAMPLES':
textwrap.dedent("""\
To list Cloud Spanner instance operations for an instance, run:
$ {command} --instance=my-instance-id --type=INSTANCE
To list Cloud Spanner backup operations for an instance, run:
$ {command} --instance=my-instance-id --type=BACKUP
To list Cloud Spanner database operations for an instance, run:
$ {command} --instance=my-instance-id --type=DATABASE
To list Cloud Spanner database operations for a database, run:
$ {command} --instance=my-instance-id --database=my-database-id --type=DATABASE
To list Cloud Spanner backup operations for a database, run:
$ {command} --instance=my-instance-id --database=my-database-id --type=BACKUP
To list Cloud Spanner backup operations for a backup, run:
$ {command} --instance=my-instance-id --backup=my-backup-id --type=BACKUP
"""),
}
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
    Please add arguments in alphabetical order, except for the no- or clear-
    variant of an argument, which can follow the argument itself.
Args:
parser: An argparse parser that you can use to add arguments that go on
the command line after this command. Positional arguments are allowed.
"""
flags.Instance(
positional=False,
text='The ID of the instance the operations are executing on.'
).AddToParser(parser)
flags.AddCommonListArgs(parser)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
return _CommonRun(args)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AlphaList(List):
"""List the Cloud Spanner operations on the given instance or database or instance-config."""
@staticmethod
def Args(parser):
"""See base class."""
mutex_group = parser.add_group(mutex=True, required=True)
mutex_group.add_argument(
'--instance-config',
completer=flags.InstanceConfigCompleter,
help='The ID of the instance config the operation is executing on.')
mutex_group.add_argument(
'--instance',
completer=flags.InstanceCompleter,
help='The ID of the instance the operation is executing on.')
additional_choices = {
'INSTANCE_CONFIG_CREATE':
'Instance config create operations are returned for the given '
'instance config (--instance-config).',
'INSTANCE_CONFIG_UPDATE':
'Instance config update operations are returned for the given '
'instance config (--instance-config).'
}
flags.AddCommonListArgs(parser, additional_choices)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
if args.instance_config:
type_filter = instance_config_operations.BuildInstanceConfigOperationTypeFilter(
args.type)
return instance_config_operations.List(args.instance_config, type_filter)
return _CommonRun(args)
| [
"textwrap.dedent",
"googlecloudsdk.api_lib.spanner.backup_operations.BuildDatabaseFilter",
"googlecloudsdk.calliope.exceptions.InvalidArgumentException",
"googlecloudsdk.api_lib.spanner.database_operations.ListDatabaseOperations",
"googlecloudsdk.api_lib.spanner.instance_operations.List",
"googlecloudsdk.api_lib.spanner.instance_config_operations.BuildInstanceConfigOperationTypeFilter",
"googlecloudsdk.api_lib.spanner.backup_operations.ListGeneric",
"googlecloudsdk.api_lib.spanner.database_operations.BuildDatabaseOperationTypeFilter",
"googlecloudsdk.api_lib.spanner.instance_config_operations.List",
"googlecloudsdk.command_lib.spanner.flags.Instance",
"googlecloudsdk.calliope.base.ReleaseTracks",
"googlecloudsdk.command_lib.spanner.flags.AddCommonListArgs",
"googlecloudsdk.api_lib.spanner.database_operations.List",
"googlecloudsdk.api_lib.spanner.backup_operations.List"
]
| [((5270, 5334), 'googlecloudsdk.calliope.base.ReleaseTracks', 'base.ReleaseTracks', (['base.ReleaseTrack.GA', 'base.ReleaseTrack.BETA'], {}), '(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)\n', (5288, 5334), False, 'from googlecloudsdk.calliope import base\n'), ((7332, 7375), 'googlecloudsdk.calliope.base.ReleaseTracks', 'base.ReleaseTracks', (['base.ReleaseTrack.ALPHA'], {}), '(base.ReleaseTrack.ALPHA)\n', (7350, 7375), False, 'from googlecloudsdk.calliope import base\n'), ((5227, 5266), 'googlecloudsdk.api_lib.spanner.instance_operations.List', 'instance_operations.List', (['args.instance'], {}), '(args.instance)\n', (5251, 5266), False, 'from googlecloudsdk.api_lib.spanner import instance_operations\n'), ((3328, 3533), 'googlecloudsdk.calliope.exceptions.InvalidArgumentException', 'c_exceptions.InvalidArgumentException', (['"""--database or --backup"""', '"""Must specify either --database or --backup. To search backups for a specific database, use the --database flag with --type=BACKUP"""'], {}), "('--database or --backup',\n 'Must specify either --database or --backup. To search backups for a specific database, use the --database flag with --type=BACKUP'\n )\n", (3365, 3533), True, 'from googlecloudsdk.calliope import exceptions as c_exceptions\n'), ((3736, 3891), 'googlecloudsdk.calliope.exceptions.InvalidArgumentException', 'c_exceptions.InvalidArgumentException', (['"""--backup or --type"""', '"""The backup flag cannot be used with the type flag set to a database operation type."""'], {}), "('--backup or --type',\n 'The backup flag cannot be used with the type flag set to a database operation type.'\n )\n", (3773, 3891), True, 'from googlecloudsdk.calliope import exceptions as c_exceptions\n'), ((4685, 4722), 'googlecloudsdk.api_lib.spanner.backup_operations.List', 'backup_operations.List', (['args.instance'], {}), '(args.instance)\n', (4707, 4722), False, 'from googlecloudsdk.api_lib.spanner import backup_operations\n'), ((4765, 4828), 'googlecloudsdk.api_lib.spanner.database_operations.BuildDatabaseOperationTypeFilter', 'database_operations.BuildDatabaseOperationTypeFilter', (['args.type'], {}), '(args.type)\n', (4817, 4828), False, 'from googlecloudsdk.api_lib.spanner import database_operations\n'), ((4849, 4938), 'googlecloudsdk.api_lib.spanner.database_operations.ListDatabaseOperations', 'database_operations.ListDatabaseOperations', (['args.instance', 'args.database', 'type_filter'], {}), '(args.instance, args.database,\n type_filter)\n', (4891, 4938), False, 'from googlecloudsdk.api_lib.spanner import database_operations\n'), ((5073, 5130), 'googlecloudsdk.api_lib.spanner.backup_operations.ListGeneric', 'backup_operations.ListGeneric', (['args.instance', 'args.backup'], {}), '(args.instance, args.backup)\n', (5102, 5130), False, 'from googlecloudsdk.api_lib.spanner import backup_operations\n'), ((5162, 5216), 'googlecloudsdk.api_lib.spanner.database_operations.List', 'database_operations.List', (['args.instance', 'args.database'], {}), '(args.instance, args.database)\n', (5186, 5216), False, 'from googlecloudsdk.api_lib.spanner import database_operations\n'), ((5491, 6416), 'textwrap.dedent', 'textwrap.dedent', (['""" To list Cloud Spanner instance operations for an instance, run:\n\n $ {command} --instance=my-instance-id --type=INSTANCE\n\n To list Cloud Spanner backup operations for an instance, run:\n\n $ {command} --instance=my-instance-id --type=BACKUP\n\n To list Cloud Spanner database operations for an instance, run:\n\n $ {command} --instance=my-instance-id 
--type=DATABASE\n\n To list Cloud Spanner database operations for a database, run:\n\n $ {command} --instance=my-instance-id --database=my-database-id --type=DATABASE\n\n To list Cloud Spanner backup operations for a database, run:\n\n $ {command} --instance=my-instance-id --database=my-database-id --type=BACKUP\n\n To list Cloud Spanner backup operations for a backup, run:\n\n $ {command} --instance=my-instance-id --backup=my-backup-id --type=BACKUP\n """'], {}), '(\n """ To list Cloud Spanner instance operations for an instance, run:\n\n $ {command} --instance=my-instance-id --type=INSTANCE\n\n To list Cloud Spanner backup operations for an instance, run:\n\n $ {command} --instance=my-instance-id --type=BACKUP\n\n To list Cloud Spanner database operations for an instance, run:\n\n $ {command} --instance=my-instance-id --type=DATABASE\n\n To list Cloud Spanner database operations for a database, run:\n\n $ {command} --instance=my-instance-id --database=my-database-id --type=DATABASE\n\n To list Cloud Spanner backup operations for a database, run:\n\n $ {command} --instance=my-instance-id --database=my-database-id --type=BACKUP\n\n To list Cloud Spanner backup operations for a backup, run:\n\n $ {command} --instance=my-instance-id --backup=my-backup-id --type=BACKUP\n """\n )\n', (5506, 6416), False, 'import textwrap\n'), ((6986, 7017), 'googlecloudsdk.command_lib.spanner.flags.AddCommonListArgs', 'flags.AddCommonListArgs', (['parser'], {}), '(parser)\n', (7009, 7017), False, 'from googlecloudsdk.command_lib.spanner import flags\n'), ((8332, 8383), 'googlecloudsdk.command_lib.spanner.flags.AddCommonListArgs', 'flags.AddCommonListArgs', (['parser', 'additional_choices'], {}), '(parser, additional_choices)\n', (8355, 8383), False, 'from googlecloudsdk.command_lib.spanner import flags\n'), ((3991, 4120), 'googlecloudsdk.calliope.exceptions.InvalidArgumentException', 'c_exceptions.InvalidArgumentException', (['"""--database or --type"""', '"""The `--database` flag cannot be used with `--type=INSTANCE`."""'], {}), "('--database or --type',\n 'The `--database` flag cannot be used with `--type=INSTANCE`.')\n", (4028, 4120), True, 'from googlecloudsdk.calliope import exceptions as c_exceptions\n'), ((4185, 4310), 'googlecloudsdk.calliope.exceptions.InvalidArgumentException', 'c_exceptions.InvalidArgumentException', (['"""--backup or --type"""', '"""The `--backup` flag cannot be used with `--type=INSTANCE`."""'], {}), "('--backup or --type',\n 'The `--backup` flag cannot be used with `--type=INSTANCE`.')\n", (4222, 4310), True, 'from googlecloudsdk.calliope import exceptions as c_exceptions\n'), ((4397, 4464), 'googlecloudsdk.api_lib.spanner.backup_operations.BuildDatabaseFilter', 'backup_operations.BuildDatabaseFilter', (['args.instance', 'args.database'], {}), '(args.instance, args.database)\n', (4434, 4464), False, 'from googlecloudsdk.api_lib.spanner import backup_operations\n'), ((4534, 4582), 'googlecloudsdk.api_lib.spanner.backup_operations.List', 'backup_operations.List', (['args.instance', 'db_filter'], {}), '(args.instance, db_filter)\n', (4556, 4582), False, 'from googlecloudsdk.api_lib.spanner import backup_operations\n'), ((4616, 4673), 'googlecloudsdk.api_lib.spanner.backup_operations.ListGeneric', 'backup_operations.ListGeneric', (['args.instance', 'args.backup'], {}), '(args.instance, args.backup)\n', (4645, 4673), False, 'from googlecloudsdk.api_lib.spanner import backup_operations\n'), ((8716, 8792), 
'googlecloudsdk.api_lib.spanner.instance_config_operations.BuildInstanceConfigOperationTypeFilter', 'instance_config_operations.BuildInstanceConfigOperationTypeFilter', (['args.type'], {}), '(args.type)\n', (8781, 8792), False, 'from googlecloudsdk.api_lib.spanner import instance_config_operations\n'), ((8817, 8883), 'googlecloudsdk.api_lib.spanner.instance_config_operations.List', 'instance_config_operations.List', (['args.instance_config', 'type_filter'], {}), '(args.instance_config, type_filter)\n', (8848, 8883), False, 'from googlecloudsdk.api_lib.spanner import instance_config_operations\n'), ((6842, 6943), 'googlecloudsdk.command_lib.spanner.flags.Instance', 'flags.Instance', ([], {'positional': '(False)', 'text': '"""The ID of the instance the operations are executing on."""'}), "(positional=False, text=\n 'The ID of the instance the operations are executing on.')\n", (6856, 6943), False, 'from googlecloudsdk.command_lib.spanner import flags\n')] |
# coding=utf-8
# This script is finished following HF's datasets' template:
# https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
# More examples as references to write a customized dataset can be found here:
# https://github.com/huggingface/datasets/tree/master/datasets
from __future__ import absolute_import, division, print_function
import json
import datasets
_CITATION = """\
"""
_DESCRIPTION = """\
"""
_TRAIN_DOWNLOAD_URL = "data/train.json"
_VAL_DOWNLOAD_URL = "data/val.json"
class Translation(datasets.GeneratorBasedBuilder):
"""customize dataset."""
# VERSION = datasets.Version("1.0.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"source": datasets.Value("string"),
"target": datasets.Value("string"),
}
),
supervised_keys=None,
homepage="#",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
val_path = dl_manager.download_and_extract(_VAL_DOWNLOAD_URL)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}),
]
def _generate_examples(self, filepath):
with open(filepath, encoding='utf-8') as f:
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"source": data["english"],
"target": data["chinese"],
}
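# Hypothetical layout of data/train.json and data/val.json assumed by _generate_examples:
# one JSON object per line with fixed keys, e.g.
#   {"english": "How are you?", "chinese": "<chinese sentence>"}
# (the values above are illustrative placeholders, not real dataset rows)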
| [
"datasets.SplitGenerator",
"json.loads",
"datasets.Value"
]
| [((1268, 1359), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.TRAIN', 'gen_kwargs': "{'filepath': train_path}"}), "(name=datasets.Split.TRAIN, gen_kwargs={'filepath':\n train_path})\n", (1291, 1359), False, 'import datasets\n'), ((1369, 1464), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.VALIDATION', 'gen_kwargs': "{'filepath': val_path}"}), "(name=datasets.Split.VALIDATION, gen_kwargs={\n 'filepath': val_path})\n", (1392, 1464), False, 'import datasets\n'), ((1633, 1648), 'json.loads', 'json.loads', (['row'], {}), '(row)\n', (1643, 1648), False, 'import json\n'), ((831, 855), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (845, 855), False, 'import datasets\n'), ((887, 911), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (901, 911), False, 'import datasets\n')] |
import itertools
from typing import Sequence, Iterator
# Source: https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/core/utils/chat_formatting.py
def error(text: str) -> str:
"""Get text prefixed with an error emoji.
Returns
-------
str
The new message.
"""
return "\N{NO ENTRY SIGN} {}".format(text)
def warning(text: str) -> str:
"""Get text prefixed with a warning emoji.
Returns
-------
str
The new message.
"""
return "\N{WARNING SIGN} {}".format(text)
def info(text: str) -> str:
"""Get text prefixed with an info emoji.
Returns
-------
str
The new message.
"""
return "\N{INFORMATION SOURCE} {}".format(text)
def question(text: str) -> str:
"""Get text prefixed with a question emoji.
Returns
-------
str
The new message.
"""
return "\N{BLACK QUESTION MARK ORNAMENT} {}".format(text)
def bold(text: str) -> str:
"""Get the given text in bold.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "**{}**".format(text)
def box(text: str, lang: str = "") -> str:
"""Get the given text in a code block.
Parameters
----------
text : str
The text to be marked up.
lang : `str`, optional
The syntax highlighting language for the codeblock.
Returns
-------
str
The marked up text.
"""
ret = "```{}\n{}\n```".format(lang, text)
return ret
def inline(text: str) -> str:
"""Get the given text as inline code.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "`{}`".format(text)
def italics(text: str) -> str:
"""Get the given text in italics.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "*{}*".format(text)
def bordered(*columns: Sequence[str], ascii_border: bool = False) -> str:
"""Get two blocks of text in a borders.
Note
----
This will only work with a monospaced font.
Parameters
----------
*columns : `sequence` of `str`
The columns of text, each being a list of lines in that column.
ascii_border : bool
Whether or not the border should be pure ASCII.
Returns
-------
str
The bordered text.
"""
borders = {
"TL": "-" if ascii_border else "โ", # Top-left
"TR": "-" if ascii_border else "โ", # Top-right
"BL": "-" if ascii_border else "โ", # Bottom-left
"BR": "-" if ascii_border else "โ", # Bottom-right
"HZ": "-" if ascii_border else "โ", # Horizontal
"VT": "|" if ascii_border else "โ", # Vertical
}
sep = " " * 4 # Separator between boxes
widths = tuple(
max(len(row) for row in column) + 9 for column in columns
) # width of each col
colsdone = [False] * len(columns) # whether or not each column is done
lines = [sep.join("{TL}" + "{HZ}" * width + "{TR}" for width in widths)]
for line in itertools.zip_longest(*columns):
row = []
for colidx, column in enumerate(line):
width = widths[colidx]
done = colsdone[colidx]
if column is None:
if not done:
# bottom border of column
column = "{HZ}" * width
row.append("{BL}" + column + "{BR}")
colsdone[colidx] = True # mark column as done
else:
# leave empty
row.append(" " * (width + 2))
else:
column += " " * (width - len(column)) # append padded spaces
row.append("{VT}" + column + "{VT}")
lines.append(sep.join(row))
final_row = []
for width, done in zip(widths, colsdone):
if not done:
final_row.append("{BL}" + "{HZ}" * width + "{BR}")
else:
final_row.append(" " * (width + 2))
lines.append(sep.join(final_row))
return "\n".join(lines).format(**borders)
def pagify(
text: str,
delims: Sequence[str] = ["\n"],
*,
priority: bool = False,
escape_mass_mentions: bool = True,
shorten_by: int = 8,
page_length: int = 2000
) -> Iterator[str]:
"""Generate multiple pages from the given text.
Note
----
This does not respect code blocks or inline code.
Parameters
----------
text : str
The content to pagify and send.
delims : `sequence` of `str`, optional
Characters where page breaks will occur. If no delimiters are found
in a page, the page will break after ``page_length`` characters.
By default this only contains the newline.
Other Parameters
----------------
priority : `bool`
Set to :code:`True` to choose the page break delimiter based on the
order of ``delims``. Otherwise, the page will always break at the
last possible delimiter.
escape_mass_mentions : `bool`
If :code:`True`, any mass mentions (here or everyone) will be
silenced.
shorten_by : `int`
How much to shorten each page by. Defaults to 8.
page_length : `int`
The maximum length of each page. Defaults to 2000.
Yields
------
`str`
Pages of the given text.
"""
in_text = text
page_length -= shorten_by
while len(in_text) > page_length:
this_page_len = page_length
if escape_mass_mentions:
this_page_len -= in_text.count("@here", 0, page_length) + in_text.count(
"@everyone", 0, page_length
)
closest_delim = (in_text.rfind(d, 1, this_page_len) for d in delims)
if priority:
closest_delim = next((x for x in closest_delim if x > 0), -1)
else:
closest_delim = max(closest_delim)
closest_delim = closest_delim if closest_delim != -1 else this_page_len
if escape_mass_mentions:
to_send = escape(in_text[:closest_delim], mass_mentions=True)
else:
to_send = in_text[:closest_delim]
        if to_send.strip():
yield to_send
in_text = in_text[closest_delim:]
    if in_text.strip():
if escape_mass_mentions:
yield escape(in_text, mass_mentions=True)
else:
yield in_text
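# Hypothetical usage sketch (not part of the original module): splitting long filler text
# into pages that each fit a single Discord message.
def _example_pagify():
    long_text = "word " * 1000  # roughly 5000 characters
    return list(pagify(long_text, delims=[" "], page_length=2000))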
def strikethrough(text: str) -> str:
"""Get the given text with a strikethrough.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "~~{}~~".format(text)
def underline(text: str) -> str:
"""Get the given text with an underline.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "__{}__".format(text)
def escape(text: str, *, mass_mentions: bool = False, formatting: bool = False) -> str:
"""Get text with all mass mentions or markdown escaped.
Parameters
----------
text : str
The text to be escaped.
mass_mentions : `bool`, optional
Set to :code:`True` to escape mass mentions in the text.
formatting : `bool`, optional
        Set to :code:`True` to escape any markdown formatting in the text.
Returns
-------
str
The escaped text.
"""
if mass_mentions:
text = text.replace("@everyone", "@\u200beveryone")
text = text.replace("@here", "@\u200bhere")
if formatting:
text = (
text.replace("`", "\\`")
.replace("*", "\\*")
.replace("_", "\\_")
.replace("~", "\\~")
)
return text
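# Hypothetical usage sketch (not part of the original module): neutralising mass mentions
# and markdown in untrusted input before echoing it back.
def _example_escape():
    unsafe = "@everyone check *this* out"
    return escape(unsafe, mass_mentions=True, formatting=True)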
| [
"itertools.zip_longest"
]
| [((3243, 3274), 'itertools.zip_longest', 'itertools.zip_longest', (['*columns'], {}), '(*columns)\n', (3264, 3274), False, 'import itertools\n')] |
# @date 2018-12-28
# @author <NAME>, All rights reserved without prejudices.
# @license Copyright (c) 2018 Dream Overflow
# Strategy trade for margin with multiples positions.
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Tuple
if TYPE_CHECKING:
from trader.trader import Trader
from instrument.instrument import Instrument
from strategy.strategytrader import StrategyTrader
from strategy.strategytradercontext import StrategyTraderContextBuilder
from common.signal import Signal
from trader.order import Order
from .strategytrade import StrategyTrade
import logging
logger = logging.getLogger('siis.strategy.margintrade')
class StrategyMarginTrade(StrategyTrade):
"""
Specialization for margin trading.
This type of trade is related to margin trading market, allowing or not hedging, where there is a
position identifier per trade, but generally in the same direction (no hedging).
Works with crypto margin brokers (kraken...).
@todo do we need like with asset trade an exit_trades list to compute the axp and x values, because
if we use cumulative-filled and avg-price we have the same problem here too.
@todo have to check about position_updated qty with direction maybe or take care to have trade signal and
distinct entry from exit
@todo fees and commissions
"""
__slots__ = 'create_ref_oid', 'stop_ref_oid', 'limit_ref_oid', 'create_oid', 'stop_oid', 'limit_oid', \
'position_id', 'leverage', 'stop_order_qty', 'limit_order_qty'
def __init__(self, timeframe: float):
super().__init__(StrategyTrade.TRADE_MARGIN, timeframe)
self.create_ref_oid = None
self.stop_ref_oid = None
self.limit_ref_oid = None
self.create_oid = None # related entry order id
self.stop_oid = None # related stop order id
self.limit_oid = None # related limit order id
self.position_id = None # related informal position id
self.leverage = 1.0
self.stop_order_qty = 0.0 # if stop_oid then this is the qty placed on the stop order
self.limit_order_qty = 0.0 # if limit_oid then this is the qty placed on the limit order
def open(self, trader: Trader, instrument: Instrument, direction: int, order_type: int,
order_price: float, quantity: float, take_profit: float, stop_loss: float,
leverage: float = 1.0, hedging: Optional[bool] = None) -> bool:
"""
Open a position or buy an asset.
"""
if self._entry_state != StrategyTrade.STATE_NEW:
return False
order = Order(trader, instrument.market_id)
order.direction = direction
order.price = order_price
order.order_type = order_type
order.quantity = quantity
order.post_only = False
order.margin_trade = True
order.leverage = leverage
if hedging:
order.hedging = hedging
# generated a reference order id
trader.set_ref_order_id(order)
self.create_ref_oid = order.ref_order_id
self.dir = order.direction
self.op = order.price # retains the order price
self.oq = order.quantity # ordered quantity
self.tp = take_profit
self.sl = stop_loss
self.leverage = leverage
self._stats['entry-order-type'] = order.order_type
if trader.create_order(order, instrument) > 0:
# keep the related create position identifier if available
self.create_oid = order.order_id
self.position_id = order.position_id
if not self.eot and order.created_time:
# only at the first open
self.eot = order.created_time
return True
else:
self._entry_state = StrategyTrade.STATE_REJECTED
return False
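    # Hypothetical usage sketch (illustrative comment only, not part of the original class):
    # a strategy could open a long market entry roughly as follows, where trader, instrument
    # and the price levels are assumed to come from the strategy context:
    #   trade = StrategyMarginTrade(timeframe=4*60*60)
    #   trade.open(trader, instrument, direction=1, order_type=Order.ORDER_MARKET,
    #              order_price=0.0, quantity=0.5, take_profit=110.0, stop_loss=95.0,
    #              leverage=1.0)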
def reopen(self, trader: Trader, instrument: Instrument, quantity: float) -> bool:
if self._entry_state != StrategyTrade.STATE_CANCELED:
return False
# reset
self._entry_state = StrategyTrade.STATE_NEW
self.eot = 0
order = Order(trader, instrument.market_id)
order.direction = self.dir
order.price = self.op
order.order_type = self._stats['entry-order-type']
order.quantity = quantity
order.post_only = False
order.margin_trade = True
order.leverage = self.leverage
# generated a reference order id
trader.set_ref_order_id(order)
self.create_ref_oid = order.ref_order_id
self.oq = order.quantity # ordered quantity
if trader.create_order(order, instrument) > 0:
self.create_oid = order.order_id
self.position_id = order.position_id
if not self.eot and order.created_time:
# only at the first open
self.eot = order.created_time
return True
else:
self._entry_state = StrategyTrade.STATE_REJECTED
return False
def remove(self, trader: Trader, instrument: Instrument) -> int:
"""
Remove the orders, but doesn't close the position.
"""
error = False
if self.create_oid:
# cancel the remaining buy order
if trader.cancel_order(self.create_oid, instrument) > 0:
self.create_ref_oid = None
self.create_oid = None
if self.e <= 0:
# no entry qty processed, entry canceled
self._entry_state = StrategyTrade.STATE_CANCELED
else:
                    # cancelling a partially filled trade means it is then considered fully filled
self._entry_state = StrategyTrade.STATE_FILLED
else:
error = True
if self.stop_oid:
# cancel the stop order
if trader.cancel_order(self.stop_oid, instrument) > 0:
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
if self.e <= 0 and self.x <= 0:
# no exit qty
self._exit_state = StrategyTrade.STATE_CANCELED
elif self.x >= self.e:
self._exit_state = StrategyTrade.STATE_FILLED
else:
self._exit_state = StrategyTrade.STATE_PARTIALLY_FILLED
else:
error = True
if self.limit_oid:
# cancel the limit order
if trader.cancel_order(self.limit_oid, instrument) > 0:
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
if self.e <= 0 and self.x <= 0:
# no exit qty
self._exit_state = StrategyTrade.STATE_CANCELED
elif self.x >= self.e:
self._exit_state = StrategyTrade.STATE_FILLED
else:
self._exit_state = StrategyTrade.STATE_PARTIALLY_FILLED
else:
error = True
return not error
def cancel_open(self, trader: Trader, instrument: Instrument) -> int:
if self.create_oid:
# cancel the buy order
if trader.cancel_order(self.create_oid, instrument) > 0:
self.create_ref_oid = None
self.create_oid = None
if self.e <= 0:
                    # cancelling a just-opened trade means it is canceled
self._entry_state = StrategyTrade.STATE_CANCELED
else:
                    # cancelling a partially filled trade means it is then considered fully filled
self._entry_state = StrategyTrade.STATE_FILLED
return self.ACCEPTED
else:
data = trader.order_info(self.create_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no create order, nothing to do
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_CANCELED
else:
# exists, do nothing need to retry
return self.ERROR
return self.NOTHING_TO_DO
def modify_take_profit(self, trader: Trader, instrument: Instrument, limit_price: float, hard: bool = True) -> int:
if self._closing:
# already closing order
return self.NOTHING_TO_DO
if self._exit_state == StrategyTrade.STATE_FILLED:
# exit already fully filled
return self.NOTHING_TO_DO
if self.limit_oid:
# cancel the limit order and create a new one
if trader.cancel_order(self.limit_oid, instrument) > 0:
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
data = trader.order_info(self.limit_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no limit order
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
return self.ERROR
if self.x >= self.e:
# all entry qty is filled, if lesser something wrong but its ok
return self.NOTHING_TO_DO
if limit_price and hard:
# only if filled entry partially or totally
order = Order(trader, instrument.market_id)
order.direction = -self.direction
order.order_type = Order.ORDER_LIMIT
order.reduce_only = True
order.quantity = self.e - self.x # remaining
order.price = limit_price
order.margin_trade = True
order.leverage = self.leverage
trader.set_ref_order_id(order)
self.limit_ref_oid = order.ref_order_id
self._stats['take-profit-order-type'] = order.order_type
create_order_result = trader.create_order(order, instrument)
if create_order_result > 0:
self.limit_oid = order.order_id
self.limit_order_qty = order.quantity
self.last_tp_ot[0] = order.created_time
self.last_tp_ot[1] += 1
self.tp = limit_price
return self.ACCEPTED
elif create_order_result == Order.REASON_INSUFFICIENT_MARGIN:
# rejected because not enough margin, must stop to retry
self.limit_ref_oid = None
self.limit_order_qty = 0.0
self._exit_state = self.STATE_ERROR
return self.INSUFFICIENT_MARGIN
else:
self.limit_ref_oid = None
self.limit_order_qty = 0.0
return self.REJECTED
elif limit_price:
# soft take-profit
self.tp = limit_price
else:
# remove take-profit
self.tp = 0.0
return self.NOTHING_TO_DO
def modify_stop_loss(self, trader: Trader, instrument: Instrument, stop_price: float, hard: bool = True) -> int:
if self._closing:
# already closing order
return self.NOTHING_TO_DO
if self._exit_state == StrategyTrade.STATE_FILLED:
# exit already fully filled
return self.NOTHING_TO_DO
if self.stop_oid:
# cancel the stop order and create a new one
if trader.cancel_order(self.stop_oid, instrument) > 0:
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
data = trader.order_info(self.stop_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no stop order
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
return self.ERROR
if self.x >= self.e:
# all entry qty is filled, if lesser something wrong but its ok
return self.NOTHING_TO_DO
if stop_price and hard:
# only if filled entry partially or totally
order = Order(trader, instrument.market_id)
order.direction = -self.direction
order.order_type = Order.ORDER_STOP
order.reduce_only = True
order.quantity = self.e - self.x # remaining
order.stop_price = stop_price
order.leverage = self.leverage
order.margin_trade = True
trader.set_ref_order_id(order)
self.stop_ref_oid = order.ref_order_id
self._stats['stop-order-type'] = order.order_type
create_order_result = trader.create_order(order, instrument)
if create_order_result > 0:
self.stop_oid = order.order_id
self.stop_order_qty = order.quantity
self.last_stop_ot[0] = order.created_time
self.last_stop_ot[1] += 1
self.sl = stop_price
return self.ACCEPTED
elif create_order_result == Order.REASON_INSUFFICIENT_MARGIN:
# rejected because not enough margin, must stop to retry
self.stop_ref_oid = None
self.stop_order_qty = 0.0
self._exit_state = self.STATE_ERROR
return self.INSUFFICIENT_MARGIN
else:
self.stop_ref_oid = None
self.stop_order_qty = 0.0
return self.REJECTED
elif stop_price:
# soft stop-loss
self.sl = stop_price
else:
# remove stop-loss
self.sl = 0.0
return self.NOTHING_TO_DO
def close(self, trader: Trader, instrument: Instrument) -> int:
"""
Close the position and cancel the related orders.
"""
if self._closing:
# already closing order
return self.NOTHING_TO_DO
if self.create_oid:
# cancel the remaining buy order
if trader.cancel_order(self.create_oid, instrument) > 0:
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_CANCELED
else:
data = trader.order_info(self.create_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no create order
self.create_ref_oid = None
self.create_oid = None
else:
return self.ERROR
if self.stop_oid:
# cancel the stop order
if trader.cancel_order(self.stop_oid, instrument) > 0:
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
data = trader.order_info(self.stop_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no stop order
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
return self.ERROR
if self.limit_oid:
# cancel the limit order
if trader.cancel_order(self.limit_oid, instrument) > 0:
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
data = trader.order_info(self.limit_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no limit order
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
return self.ERROR
if self.x >= self.e:
# all qty is filled
return self.NOTHING_TO_DO
order = Order(trader, instrument.market_id)
order.direction = -self.dir # neg dir
order.order_type = Order.ORDER_MARKET
order.reduce_only = True
order.quantity = self.e - self.x # remaining qty
order.margin_trade = True
order.leverage = self.leverage
# generated a reference order id
trader.set_ref_order_id(order)
self.stop_ref_oid = order.ref_order_id
self._stats['stop-order-type'] = order.order_type
create_order_result = trader.create_order(order, instrument)
if create_order_result > 0:
self.stop_oid = order.order_id
self.stop_order_qty = order.quantity
# closing order defined
self._closing = True
return self.ACCEPTED
elif create_order_result == Order.REASON_INSUFFICIENT_MARGIN:
# rejected because not enough margin, must stop to retry
self.stop_ref_oid = None
self.stop_order_qty = 0.0
self._exit_state = self.STATE_ERROR
return self.INSUFFICIENT_MARGIN
else:
self.stop_ref_oid = None
self.stop_order_qty = 0.0
return self.REJECTED
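    # Hypothetical lifecycle sketch (illustrative comment only, not part of the original class):
    # close() cancels any remaining entry/stop/limit orders, then issues a reduce-only market
    # order for the still-open quantity, e.g.
    #   result = trade.close(trader, instrument)
    #   if result == trade.ACCEPTED:
    #       pass  # exit market order created; fills arrive later via order_signal()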
def has_stop_order(self) -> bool:
return self.stop_oid is not None and self.stop_oid != ""
def has_limit_order(self) -> bool:
return self.limit_oid is not None and self.limit_oid != ""
def support_both_order(self) -> bool:
return True
@classmethod
def is_margin(cls) -> bool:
return True
@classmethod
def is_spot(cls) -> bool:
return False
#
# signal
#
def order_signal(self, signal_type: int, data: dict, ref_order_id: str, instrument: Instrument):
if signal_type == Signal.SIGNAL_ORDER_OPENED:
# already get at the return of create_order
if ref_order_id == self.create_ref_oid:
self.create_oid = data['id']
# init created timestamp at the create order open
if not self.eot:
self.eot = data['timestamp']
if data.get('stop-loss'):
self.sl = data['stop-loss']
if data.get('take-profit'):
self.tp = data['take-profit']
self._entry_state = StrategyTrade.STATE_OPENED
elif ref_order_id == self.stop_ref_oid:
self.stop_oid = data['id']
if not self.xot:
self.xot = data['timestamp']
elif ref_order_id == self.limit_ref_oid:
self.limit_oid = data['id']
if not self.xot:
self.xot = data['timestamp']
elif signal_type == Signal.SIGNAL_ORDER_DELETED:
# order is no longer active
if data == self.create_oid:
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_DELETED
elif data == self.limit_oid:
self.limit_ref_oid = None
self.limit_oid = None
elif data == self.stop_oid:
self.stop_ref_oid = None
self.stop_oid = None
elif signal_type == Signal.SIGNAL_ORDER_CANCELED:
# order is no longer active
if data == self.create_oid:
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_CANCELED
elif data == self.limit_oid:
self.limit_ref_oid = None
self.limit_oid = None
elif data == self.stop_oid:
self.stop_ref_oid = None
self.stop_oid = None
elif signal_type == Signal.SIGNAL_ORDER_UPDATED:
# order price/qty modified, cannot really be used because the strategy might
# cancel the trade or create another one.
# for the qty we could have a remaining_qty member, then comparing
pass
elif signal_type == Signal.SIGNAL_ORDER_TRADED:
# order fully or partially filled
filled = 0
if data['id'] == self.create_oid:
prev_e = self.e
# a single order for the entry, then its OK and preferred to uses cumulative-filled and avg-price
# because precision comes from the broker
if data.get('cumulative-filled') is not None and data['cumulative-filled'] > 0:
filled = data['cumulative-filled'] - self.e # compute filled qty
elif data.get('filled') is not None and data['filled'] > 0:
filled = data['filled']
else:
filled = 0
if data.get('avg-price') is not None and data['avg-price'] > 0:
# in that case we have avg-price already computed
self.aep = data['avg-price']
elif data.get('exec-price') is not None and data['exec-price'] > 0:
# compute the average price
self.aep = ((self.aep * self.e) + (data['exec-price'] * filled)) / (self.e + filled)
else:
# no have uses order price
self.aep = self.op
# cumulative filled entry qty
if data.get('cumulative-filled') is not None:
self.e = data.get('cumulative-filled')
elif filled > 0:
self.e = instrument.adjust_quantity(self.e + filled)
if filled > 0:
# probably need to update exit orders
self._dirty = True
logger.info("Entry avg-price=%s cum-filled=%s" % (self.aep, self.e))
if self.e >= self.oq:
self._entry_state = StrategyTrade.STATE_FILLED
# if no send of ORDER_DELETED signal, cleanup here
self.create_oid = None
self.create_ref_oid = None
else:
self._entry_state = StrategyTrade.STATE_PARTIALLY_FILLED
# retains the trade timestamp
if not self._stats['first-realized-entry-timestamp']:
self._stats['first-realized-entry-timestamp'] = data.get('timestamp', 0.0)
self._stats['last-realized-entry-timestamp'] = data.get('timestamp', 0.0)
elif data['id'] == self.limit_oid or data['id'] == self.stop_oid:
prev_x = self.x
# either we have 'filled' component (partial qty) or the 'cumulative-filled' or both
if data.get('cumulative-filled') is not None and data['cumulative-filled'] > 0:
filled = data['cumulative-filled'] - self.x # computed filled qty
elif data.get('filled') is not None and data['filled'] > 0:
filled = data['filled']
else:
filled = 0
if data.get('avg-price') is not None and data['avg-price'] > 0:
# recompute profit-loss
if self.dir > 0:
self.pl = (data['avg-price'] - self.aep) / self.aep
elif self.dir < 0:
self.pl = (self.aep - data['avg-price']) / self.aep
# in that case we have avg-price already computed
self.axp = data['avg-price']
elif data.get('exec-price') is not None and data['exec-price'] > 0:
# increase/decrease profit/loss (over entry executed quantity)
if self.dir > 0:
self.pl += ((data['exec-price'] * filled) - (self.aep * filled)) / (self.aep * self.e)
elif self.dir < 0:
self.pl += ((self.aep * filled) - (data['exec-price'] * filled)) / (self.aep * self.e)
# compute the average price
self.axp = ((self.axp * self.x) + (data['exec-price'] * filled)) / (self.x + filled)
# cumulative filled exit qty
if data.get('cumulative-filled') is not None:
self.x = data.get('cumulative-filled')
elif filled > 0:
self.x = instrument.adjust_quantity(self.x + filled)
logger.info("Exit avg-price=%s cum-filled=%s" % (self.axp, self.x))
if self.x >= self.oq:
self._exit_state = StrategyTrade.STATE_FILLED
# if no send of ORDER_DELETED signal, cleanup here
if data['id'] == self.limit_oid:
self.limit_oid = None
self.limit_ref_oid = None
elif data['id'] == self.stop_oid:
self.stop_oid = None
self.stop_ref_oid = None
else:
self._exit_state = StrategyTrade.STATE_PARTIALLY_FILLED
# retains the trade timestamp
if not self._stats['first-realized-exit-timestamp']:
self._stats['first-realized-exit-timestamp'] = data.get('timestamp', 0.0)
self._stats['last-realized-exit-timestamp'] = data.get('timestamp', 0.0)
def position_signal(self, signal_type: int, data: dict, ref_order_id: str, instrument: Instrument):
if signal_type == Signal.SIGNAL_POSITION_OPENED:
self.position_id = data['id']
if data.get('profit-loss'):
self._stats['unrealized-profit-loss'] = data['profit-loss']
if data.get('profit-currency'):
self._stats['profit-loss-currency'] = data['profit-currency']
elif signal_type == Signal.SIGNAL_POSITION_UPDATED:
# update the unrealized profit-loss in currency
if data.get('profit-loss'):
self._stats['unrealized-profit-loss'] = data['profit-loss']
if data.get('profit-currency'):
self._stats['profit-loss-currency'] = data['profit-currency']
elif signal_type == Signal.SIGNAL_POSITION_DELETED:
# no longer related position
self.position_id = None
if data.get('profit-loss'):
self._stats['unrealized-profit-loss'] = data['profit-loss']
if data.get('profit-currency'):
self._stats['profit-loss-currency'] = data['profit-currency']
elif signal_type == Signal.SIGNAL_POSITION_AMENDED:
# might not occurs
pass
def is_target_order(self, order_id: str, ref_order_id: str) -> bool:
if order_id and (order_id == self.create_oid or order_id == self.stop_oid or order_id == self.limit_oid):
return True
if ref_order_id and (ref_order_id == self.create_ref_oid or
ref_order_id == self.stop_ref_oid or
ref_order_id == self.limit_ref_oid):
return True
return False
def is_target_position(self, position_id: str, ref_order_id: str) -> bool:
if position_id and (position_id == self.position_id):
return True
if ref_order_id and (ref_order_id == self.create_ref_oid):
            return True
        return False
#
# persistence
#
def dumps(self) -> dict:
data = super().dumps()
data['create-ref-oid'] = self.create_ref_oid
data['stop-ref-oid'] = self.stop_ref_oid
data['limit-ref-oid'] = self.limit_ref_oid
data['create-oid'] = self.create_oid
data['stop-oid'] = self.stop_oid
data['limit-oid'] = self.limit_oid
data['position-id'] = self.position_id
data['stop-order-qty'] = self.stop_order_qty
data['limit-order-qty'] = self.limit_order_qty
return data
def loads(self, data: dict, strategy_trader: StrategyTrader,
context_builder: Optional[StrategyTraderContextBuilder] = None) -> bool:
if not super().loads(data, strategy_trader, context_builder):
return False
self.create_ref_oid = data.get('create-ref-oid')
self.stop_ref_oid = data.get('stop-ref-oid')
self.limit_ref_oid = data.get('limit-ref-oid')
self.create_oid = data.get('create-oid')
self.stop_oid = data.get('stop-oid')
self.limit_oid = data.get('limit-oid')
self.position_id = data.get('position-id')
self.stop_order_qty = data.get('stop-order-qty', 0.0)
self.limit_order_qty = data.get('limit-order-qty', 0.0)
return True
def check(self, trader: Trader, instrument: Instrument) -> int:
result = 1
#
# entry
#
if self.create_oid:
data = trader.order_info(self.create_oid, instrument)
if data is None:
                # API error, do nothing, a retry is needed
result = -1
# entry order error status
# self._entry_state = StrategyTrade.STATE_ERROR
else:
if data['id'] is None:
# cannot retrieve the order, wrong id
result = 0
# no longer entry order
self.create_oid = None
self.create_ref_oid = None
else:
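                    # a cumulative fill greater than our last known entry
                    # quantity means new executions occurred since the last sync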
if data['cumulative-filled'] > self.e or data['fully-filled']:
self.order_signal(Signal.SIGNAL_ORDER_TRADED, data, data['ref-id'], instrument)
if data['status'] in ('closed', 'deleted'):
self.order_signal(Signal.SIGNAL_ORDER_DELETED, data['id'], data['ref-id'], instrument)
elif data['status'] in ('expired', 'canceled'):
self.order_signal(Signal.SIGNAL_ORDER_CANCELED, data['id'], data['ref-id'], instrument)
#
# exit
#
if self.stop_oid:
data = trader.order_info(self.stop_oid, instrument)
if data is None:
                # API error, do nothing, a retry is needed
result = -1
# exit order error status
# self._exit_state = StrategyTrade.STATE_ERROR
else:
if data['id'] is None:
# cannot retrieve the order, wrong id
result = 0
# no longer stop order
self.stop_oid = None
self.stop_ref_oid = None
else:
if data['cumulative-filled'] > self.x or data['fully-filled']:
self.order_signal(Signal.SIGNAL_ORDER_TRADED, data, data['ref-id'], instrument)
if data['status'] in ('closed', 'deleted'):
self.order_signal(Signal.SIGNAL_ORDER_DELETED, data['id'], data['ref-id'], instrument)
elif data['status'] in ('expired', 'canceled'):
self.order_signal(Signal.SIGNAL_ORDER_CANCELED, data['id'], data['ref-id'], instrument)
if self.limit_oid:
data = trader.order_info(self.limit_oid, instrument)
if data is None:
                # API error, do nothing, a retry is needed
result = -1
# exit order error status
# self._exit_state = StrategyTrade.STATE_ERROR
else:
if data['id'] is None:
# cannot retrieve the order, wrong id
result = 0
                    # no longer limit order
self.limit_oid = None
self.limit_ref_oid = None
else:
if data['cumulative-filled'] > self.x or data['fully-filled']:
self.order_signal(Signal.SIGNAL_ORDER_TRADED, data, data['ref-id'], instrument)
if data['status'] in ('closed', 'deleted'):
self.order_signal(Signal.SIGNAL_ORDER_DELETED, data['id'], data['ref-id'], instrument)
elif data['status'] in ('expired', 'canceled'):
self.order_signal(Signal.SIGNAL_ORDER_CANCELED, data['id'], data['ref-id'], instrument)
return result
def repair(self, trader: Trader, instrument: Instrument) -> bool:
# @todo fix the trade
return False
#
# stats
#
def update_stats(self, instrument: Instrument, timestamp: float):
super().update_stats(instrument, timestamp)
if self.is_active():
            # @todo only supports quantity in asset, not in lots or contracts of a different size
last_price = instrument.close_exec_price(self.direction)
upnl = 0.0 # unrealized PNL
rpnl = 0.0 # realized PNL
# non realized quantity
nrq = self.e - self.x
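            # long: gain when price is above the average entry price; short is
            # the inverse. The realized part compares the average exit price to
            # the average entry price over the already exited quantity.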
if self.dir > 0:
upnl = last_price * nrq - self.aep * nrq
rpnl = self.axp * self.x - self.aep * self.x
elif self.dir < 0:
upnl = self.aep * nrq - last_price * nrq
rpnl = self.aep * self.x - self.axp * self.x
# including fees and realized profit and loss
self._stats['unrealized-profit-loss'] = instrument.adjust_quote(
upnl + rpnl - self._stats['entry-fees'] - self._stats['exit-fees'])
def info_report(self, strategy_trader: StrategyTrader) -> Tuple[str]:
data = list(super().info_report(strategy_trader))
if self.create_oid or self.create_ref_oid:
data.append("Entry order id / ref : %s / %s" % (self.create_oid, self.create_ref_oid))
if self.stop_oid or self.stop_ref_oid:
data.append("Stop order id / ref : %s / %s" % (self.stop_oid, self.stop_ref_oid))
if self.limit_oid or self.limit_ref_oid:
data.append("Limit order id / ref : %s / %s" % (self.limit_oid, self.limit_ref_oid))
if self.position_id:
data.append("Position id : %s" % (self.position_id,))
return tuple(data)
| [
"logging.getLogger",
"trader.order.Order"
]
| [((631, 677), 'logging.getLogger', 'logging.getLogger', (['"""siis.strategy.margintrade"""'], {}), "('siis.strategy.margintrade')\n", (648, 677), False, 'import logging\n'), ((2655, 2690), 'trader.order.Order', 'Order', (['trader', 'instrument.market_id'], {}), '(trader, instrument.market_id)\n', (2660, 2690), False, 'from trader.order import Order\n'), ((4191, 4226), 'trader.order.Order', 'Order', (['trader', 'instrument.market_id'], {}), '(trader, instrument.market_id)\n', (4196, 4226), False, 'from trader.order import Order\n'), ((17178, 17213), 'trader.order.Order', 'Order', (['trader', 'instrument.market_id'], {}), '(trader, instrument.market_id)\n', (17183, 17213), False, 'from trader.order import Order\n'), ((9950, 9985), 'trader.order.Order', 'Order', (['trader', 'instrument.market_id'], {}), '(trader, instrument.market_id)\n', (9955, 9985), False, 'from trader.order import Order\n'), ((12925, 12960), 'trader.order.Order', 'Order', (['trader', 'instrument.market_id'], {}), '(trader, instrument.market_id)\n', (12930, 12960), False, 'from trader.order import Order\n')] |
"""Install exception handler for process crash."""
from selfdrive.swaglog import cloudlog
from selfdrive.version import version
import sentry_sdk
from sentry_sdk.integrations.threading import ThreadingIntegration
def capture_exception(*args, **kwargs) -> None:
cloudlog.error("crash", exc_info=kwargs.get('exc_info', 1))
try:
sentry_sdk.capture_exception(*args, **kwargs)
sentry_sdk.flush() # https://github.com/getsentry/sentry-python/issues/291
except Exception:
cloudlog.exception("sentry exception")
def bind_user(**kwargs) -> None:
sentry_sdk.set_user(kwargs)
def bind_extra(**kwargs) -> None:
for k, v in kwargs.items():
sentry_sdk.set_tag(k, v)
def init() -> None:
sentry_sdk.init("https://[email protected]/5861866",
default_integrations=False, integrations=[ThreadingIntegration(propagate_hub=True)],
release=version)
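# Illustrative usage (not part of this module): call init() once at process
# start, tag reports with bind_user()/bind_extra(), and call
# capture_exception() from an except block so both cloudlog and Sentry record
# the traceback.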
| [
"sentry_sdk.flush",
"sentry_sdk.integrations.threading.ThreadingIntegration",
"sentry_sdk.set_user",
"selfdrive.swaglog.cloudlog.exception",
"sentry_sdk.set_tag",
"sentry_sdk.capture_exception"
]
| [((562, 589), 'sentry_sdk.set_user', 'sentry_sdk.set_user', (['kwargs'], {}), '(kwargs)\n', (581, 589), False, 'import sentry_sdk\n'), ((337, 382), 'sentry_sdk.capture_exception', 'sentry_sdk.capture_exception', (['*args'], {}), '(*args, **kwargs)\n', (365, 382), False, 'import sentry_sdk\n'), ((387, 405), 'sentry_sdk.flush', 'sentry_sdk.flush', ([], {}), '()\n', (403, 405), False, 'import sentry_sdk\n'), ((659, 683), 'sentry_sdk.set_tag', 'sentry_sdk.set_tag', (['k', 'v'], {}), '(k, v)\n', (677, 683), False, 'import sentry_sdk\n'), ((487, 525), 'selfdrive.swaglog.cloudlog.exception', 'cloudlog.exception', (['"""sentry exception"""'], {}), "('sentry exception')\n", (505, 525), False, 'from selfdrive.swaglog import cloudlog\n'), ((860, 900), 'sentry_sdk.integrations.threading.ThreadingIntegration', 'ThreadingIntegration', ([], {'propagate_hub': '(True)'}), '(propagate_hub=True)\n', (880, 900), False, 'from sentry_sdk.integrations.threading import ThreadingIntegration\n')] |
import math
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import grasping.planning.antipodal as gpa
import robot_sim.end_effectors.grippers.yumi_gripper.yumi_gripper as yg
base = wd.World(cam_pos=[1, 1, 1], w=960,
                h=540, lookat_pos=[0, 0, 0])
gm.gen_frame().attach_to(base)
# object
object_tube = cm.CollisionModel("objects/tubebig.stl")
object_tube.set_rgba([.9, .75, .35, 1])
object_tube.attach_to(base)
# gripper (hand) instance
gripper_s = yg.YumiGripper()
grasp_info_list = gpa.plan_grasps(gripper_s, object_tube,
                                  angle_between_contact_normals=math.radians(177),
                                  openning_direction='loc_x',
                                  max_samples=15,
                                  min_dist_between_sampled_contact_points=.005,
                                  contact_offset=.005)
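# antipodal sampling: the two contact normals must be nearly opposed
# (177-degree threshold), the jaw opens along the local x axis, sampled
# contact points are kept at least 5 mm apart, and contacts are offset
# roughly 5 mm from the surface.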
gpa.write_pickle_file('tubebig', grasp_info_list, './', 'yumi_tube_big.pickle')
for grasp_info in grasp_info_list:
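    # visualize every sampled grasp: move the gripper to the jaw-center pose
    # and draw it as a highly transparent red mesh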
jaw_width, jaw_center_pos, jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
gripper_s.grip_at_with_jcpose(jaw_center_pos, jaw_center_rotmat, jaw_width)
gripper_s.gen_meshmodel(rgba=(1,0,0,0.01)).attach_to(base)
base.run() | [
"modeling.collision_model.CollisionModel",
"grasping.planning.antipodal.write_pickle_file",
"math.radians",
"modeling.geometric_model.gen_frame",
"robot_sim.end_effectors.grippers.yumi_gripper.yumi_gripper.YumiGripper",
"visualization.panda.world.World"
]
| [((249, 312), 'visualization.panda.world.World', 'wd.World', ([], {'cam_pos': '[1, 1, 1]', 'w': '(960)', 'h': '(540)', 'lookat_pos': '[0, 0, 0]'}), '(cam_pos=[1, 1, 1], w=960, h=540, lookat_pos=[0, 0, 0])\n', (257, 312), True, 'import visualization.panda.world as wd\n'), ((383, 423), 'modeling.collision_model.CollisionModel', 'cm.CollisionModel', (['"""objects/tubebig.stl"""'], {}), "('objects/tubebig.stl')\n", (400, 423), True, 'import modeling.collision_model as cm\n'), ((513, 529), 'robot_sim.end_effectors.grippers.yumi_gripper.yumi_gripper.YumiGripper', 'yg.YumiGripper', ([], {}), '()\n', (527, 529), True, 'import robot_sim.end_effectors.grippers.yumi_gripper.yumi_gripper as yg\n'), ((884, 963), 'grasping.planning.antipodal.write_pickle_file', 'gpa.write_pickle_file', (['"""tubebig"""', 'grasp_info_list', '"""./"""', '"""yumi_tube_big.pickle"""'], {}), "('tubebig', grasp_info_list, './', 'yumi_tube_big.pickle')\n", (905, 963), True, 'import grasping.planning.antipodal as gpa\n'), ((329, 343), 'modeling.geometric_model.gen_frame', 'gm.gen_frame', ([], {}), '()\n', (341, 343), True, 'import modeling.geometric_model as gm\n'), ((652, 669), 'math.radians', 'math.radians', (['(177)'], {}), '(177)\n', (664, 669), False, 'import math\n')] |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Assignment service."""
import copy
import itertools
from oslo_log import log
from keystone.common import cache
from keystone.common import driver_hints
from keystone.common import manager
from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone import notifications
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
PROVIDERS = provider_api.ProviderAPIs
# This is a general cache region for assignment administration (CRUD
# operations).
MEMOIZE = cache.get_memoization_decorator(group='role')
# This builds a discrete cache region dedicated to role assignments computed
# for a given user + project/domain pair. Any write operation to add or remove
# any role assignment should invalidate this entire cache region.
COMPUTED_ASSIGNMENTS_REGION = cache.create_region(name='computed assignments')
MEMOIZE_COMPUTED_ASSIGNMENTS = cache.get_memoization_decorator(
group='role',
region=COMPUTED_ASSIGNMENTS_REGION)
@notifications.listener
class Manager(manager.Manager):
"""Default pivot point for the Assignment backend.
See :class:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
driver_namespace = 'keystone.assignment'
_provides_api = 'assignment_api'
_SYSTEM_SCOPE_TOKEN = 'system'
_USER_SYSTEM = 'UserSystem'
_GROUP_SYSTEM = 'GroupSystem'
_PROJECT = 'project'
_ROLE_REMOVED_FROM_USER = 'role_removed_from_user'
_INVALIDATION_USER_PROJECT_TOKENS = 'invalidate_user_project_tokens'
def __init__(self):
assignment_driver = CONF.assignment.driver
super(Manager, self).__init__(assignment_driver)
self.event_callbacks = {
notifications.ACTIONS.deleted: {
'domain': [self._delete_domain_assignments],
},
}
def _delete_domain_assignments(self, service, resource_type, operations,
payload):
domain_id = payload['resource_info']
self.driver.delete_domain_assignments(domain_id)
def _get_group_ids_for_user_id(self, user_id):
# TODO(morganfainberg): Implement a way to get only group_ids
# instead of the more expensive to_dict() call for each record.
return [x['id'] for
x in PROVIDERS.identity_api.list_groups_for_user(user_id)]
def list_user_ids_for_project(self, tenant_id):
PROVIDERS.resource_api.get_project(tenant_id)
assignment_list = self.list_role_assignments(
project_id=tenant_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['user_id'] for x in assignment_list]))
def _send_app_cred_notification_for_role_removal(self, role_id):
"""Delete all application credential for a specific role.
:param role_id: role identifier
:type role_id: string
"""
assignments = self.list_role_assignments(role_id=role_id)
for assignment in assignments:
if 'user_id' in assignment and 'project_id' in assignment:
payload = {
'user_id': assignment['user_id'],
'project_id': assignment['project_id']
}
notifications.Audit.internal(
notifications.REMOVE_APP_CREDS_FOR_USER, payload
)
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_user_and_project(self, user_id, tenant_id):
"""Get the roles associated with a user within given project.
This includes roles directly assigned to the user on the
project, as well as those by virtue of group membership or
inheritance.
:returns: a list of role ids.
:raises keystone.exception.ProjectNotFound: If the project doesn't
exist.
"""
PROVIDERS.resource_api.get_project(tenant_id)
assignment_list = self.list_role_assignments(
user_id=user_id, project_id=tenant_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_trustor_and_project(self, trustor_id, project_id):
"""Get the roles associated with a trustor within given project.
This includes roles directly assigned to the trustor on the
project, as well as those by virtue of group membership or
inheritance, but it doesn't include the domain roles.
:returns: a list of role ids.
:raises keystone.exception.ProjectNotFound: If the project doesn't
exist.
"""
PROVIDERS.resource_api.get_project(project_id)
assignment_list = self.list_role_assignments(
user_id=trustor_id, project_id=project_id, effective=True,
strip_domain_roles=False)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_user_and_domain(self, user_id, domain_id):
"""Get the roles associated with a user within given domain.
:returns: a list of role ids.
:raises keystone.exception.DomainNotFound: If the domain doesn't exist.
"""
PROVIDERS.resource_api.get_domain(domain_id)
assignment_list = self.list_role_assignments(
user_id=user_id, domain_id=domain_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
def get_roles_for_groups(self, group_ids, project_id=None, domain_id=None):
"""Get a list of roles for this group on domain and/or project."""
# if no group ids were passed, there are no roles. Without this check,
# all assignments for the project or domain will be fetched,
# which is not what we want.
if not group_ids:
return []
if project_id is not None:
PROVIDERS.resource_api.get_project(project_id)
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, project_id=project_id,
effective=True)
elif domain_id is not None:
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, domain_id=domain_id,
effective=True)
else:
raise AttributeError(_("Must specify either domain or project"))
role_ids = list(set([x['role_id'] for x in assignment_list]))
return PROVIDERS.role_api.list_roles_from_ids(role_ids)
@notifications.role_assignment('created')
def _add_role_to_user_and_project_adapter(self, role_id, user_id=None,
group_id=None, domain_id=None,
project_id=None,
inherited_to_projects=False,
context=None):
# The parameters for this method must match the parameters for
# create_grant so that the notifications.role_assignment decorator
# will work.
PROVIDERS.resource_api.get_project(project_id)
PROVIDERS.role_api.get_role(role_id)
self.driver.add_role_to_user_and_project(user_id, project_id, role_id)
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
self._add_role_to_user_and_project_adapter(
role_id, user_id=user_id, project_id=tenant_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
@MEMOIZE_COMPUTED_ASSIGNMENTS
def list_projects_for_user(self, user_id):
# FIXME(lbragstad): Without the use of caching, listing effective role
# assignments is slow, especially with large data set (lots of users
# with multiple role assignments). This should serve as a marker in
# case we have the opportunity to come back and optimize this code so
# that it can be performant without having a hard dependency on
# caching. Please see https://bugs.launchpad.net/keystone/+bug/1700852
# for more details.
assignment_list = self.list_role_assignments(
user_id=user_id, effective=True)
# Use set() to process the list to remove any duplicates
project_ids = list(set([x['project_id'] for x in assignment_list
if x.get('project_id')]))
return PROVIDERS.resource_api.list_projects_from_ids(project_ids)
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
@MEMOIZE_COMPUTED_ASSIGNMENTS
def list_domains_for_user(self, user_id):
assignment_list = self.list_role_assignments(
user_id=user_id, effective=True)
# Use set() to process the list to remove any duplicates
domain_ids = list(set([x['domain_id'] for x in assignment_list
if x.get('domain_id')]))
return PROVIDERS.resource_api.list_domains_from_ids(domain_ids)
def list_domains_for_groups(self, group_ids):
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, effective=True)
domain_ids = list(set([x['domain_id'] for x in assignment_list
if x.get('domain_id')]))
return PROVIDERS.resource_api.list_domains_from_ids(domain_ids)
def list_projects_for_groups(self, group_ids):
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, effective=True)
project_ids = list(set([x['project_id'] for x in assignment_list
if x.get('project_id')]))
return PROVIDERS.resource_api.list_projects_from_ids(project_ids)
@notifications.role_assignment('deleted')
def _remove_role_from_user_and_project_adapter(self, role_id, user_id=None,
group_id=None,
domain_id=None,
project_id=None,
inherited_to_projects=False,
context=None):
# The parameters for this method must match the parameters for
# delete_grant so that the notifications.role_assignment decorator
# will work.
self.driver.remove_role_from_user_and_project(user_id, project_id,
role_id)
payload = {'user_id': user_id, 'project_id': project_id}
notifications.Audit.internal(
notifications.REMOVE_APP_CREDS_FOR_USER,
payload
)
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
self._remove_role_from_user_and_project_adapter(
role_id, user_id=user_id, project_id=tenant_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
def _invalidate_token_cache(self, role_id, group_id, user_id, project_id,
domain_id):
if group_id:
actor_type = 'group'
actor_id = group_id
elif user_id:
actor_type = 'user'
actor_id = user_id
if domain_id:
target_type = 'domain'
target_id = domain_id
elif project_id:
target_type = 'project'
target_id = project_id
reason = (
'Invalidating the token cache because role %(role_id)s was '
'removed from %(actor_type)s %(actor_id)s on %(target_type)s '
'%(target_id)s.' %
{'role_id': role_id, 'actor_type': actor_type,
'actor_id': actor_id, 'target_type': target_type,
'target_id': target_id}
)
notifications.invalidate_token_cache_notification(reason)
@notifications.role_assignment('created')
def create_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False,
initiator=None):
role = PROVIDERS.role_api.get_role(role_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
project = PROVIDERS.resource_api.get_project(project_id)
# For domain specific roles, the domain of the project
# and role must match
if role['domain_id'] and project['domain_id'] != role['domain_id']:
raise exception.DomainSpecificRoleMismatch(
role_id=role_id,
project_id=project_id)
self.driver.create_grant(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
def get_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
role_ref = PROVIDERS.role_api.get_role(role_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
self.check_grant_role_id(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
return role_ref
def list_grants(self, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
grant_ids = self.list_grant_role_ids(
user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
@notifications.role_assignment('deleted')
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False,
initiator=None):
# check if role exist before any processing
PROVIDERS.role_api.get_role(role_id)
if group_id is None:
# check if role exists on the user before revoke
self.check_grant_role_id(
role_id, user_id=user_id, group_id=None, domain_id=domain_id,
project_id=project_id,
inherited_to_projects=inherited_to_projects
)
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
else:
try:
# check if role exists on the group before revoke
self.check_grant_role_id(
role_id, user_id=None, group_id=group_id,
domain_id=domain_id, project_id=project_id,
inherited_to_projects=inherited_to_projects
)
if CONF.token.revoke_by_id:
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
except exception.GroupNotFound:
LOG.debug('Group %s not found, no tokens to invalidate.',
group_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
self.driver.delete_grant(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# The methods _expand_indirect_assignment, _list_direct_role_assignments
    # and _list_effective_role_assignments below are only used by
    # list_role_assignments, but they are not defined in its scope as nested
    # functions, since that would significantly increase its McCabe complexity.
    # Keeping them separate keeps that metric meaningful for detecting
    # genuinely over-complex code, which is not the case here.
def _expand_indirect_assignment(self, ref, user_id=None, project_id=None,
subtree_ids=None, expand_groups=True):
"""Return a list of expanded role assignments.
        This method is called for each discovered assignment that either needs
a group assignment expanded into individual user assignments, or needs
an inherited assignment to be applied to its children.
In all cases, if either user_id and/or project_id is specified, then we
filter the result on those values.
If project_id is specified and subtree_ids is None, then this
indicates that we are only interested in that one project. If
subtree_ids is not None, then this is an indicator that any
inherited assignments need to be expanded down the tree. The
actual subtree_ids don't need to be used as a filter here, since we
already ensured only those assignments that could affect them
were passed to this method.
If expand_groups is True then we expand groups out to a list of
assignments, one for each member of that group.
"""
def create_group_assignment(base_ref, user_id):
"""Create a group assignment from the provided ref."""
ref = copy.deepcopy(base_ref)
ref['user_id'] = user_id
indirect = ref.setdefault('indirect', {})
indirect['group_id'] = ref.pop('group_id')
return ref
def expand_group_assignment(ref, user_id):
"""Expand group role assignment.
For any group role assignment on a target, it is replaced by a list
of role assignments containing one for each user of that group on
that target.
An example of accepted ref is::
{
'group_id': group_id,
'project_id': project_id,
'role_id': role_id
}
Once expanded, it should be returned as a list of entities like the
            one below, one for each user_id in the provided group_id.
::
{
'user_id': user_id,
'project_id': project_id,
'role_id': role_id,
'indirect' : {
'group_id': group_id
}
}
Returned list will be formatted by the Controller, which will
deduce a role assignment came from group membership if it has both
'user_id' in the main body of the dict and 'group_id' in indirect
subdict.
"""
if user_id:
return [create_group_assignment(ref, user_id=user_id)]
# Note(prashkre): Try to get the users in a group,
            # if the group wasn't found in the backend, users are set
            # to an empty list.
try:
users = PROVIDERS.identity_api.list_users_in_group(
ref['group_id'])
except exception.GroupNotFound:
LOG.warning('Group %(group)s was not found but still has role '
'assignments.', {'group': ref['group_id']})
users = []
return [create_group_assignment(ref, user_id=m['id'])
for m in users]
def expand_inherited_assignment(ref, user_id, project_id, subtree_ids,
expand_groups):
"""Expand inherited role assignments.
If expand_groups is True and this is a group role assignment on a
target, replace it by a list of role assignments containing one for
each user of that group, on every project under that target. If
expand_groups is False, then return a group assignment on an
inherited target.
If this is a user role assignment on a specific target (i.e.
project_id is specified, but subtree_ids is None) then simply
format this as a single assignment (since we are effectively
filtering on project_id). If however, project_id is None or
subtree_ids is not None, then replace this one assignment with a
list of role assignments for that user on every project under
that target.
An example of accepted ref is::
{
'group_id': group_id,
'project_id': parent_id,
'role_id': role_id,
'inherited_to_projects': 'projects'
}
Once expanded, it should be returned as a list of entities like the
            one below, one for each user_id in the provided group_id and
for each subproject_id in the project_id subtree.
::
{
'user_id': user_id,
'project_id': subproject_id,
'role_id': role_id,
'indirect' : {
'group_id': group_id,
'project_id': parent_id
}
}
Returned list will be formatted by the Controller, which will
deduce a role assignment came from group membership if it has both
'user_id' in the main body of the dict and 'group_id' in the
'indirect' subdict, as well as it is possible to deduce if it has
come from inheritance if it contains both a 'project_id' in the
main body of the dict and 'parent_id' in the 'indirect' subdict.
"""
def create_inherited_assignment(base_ref, project_id):
"""Create a project assignment from the provided ref.
base_ref can either be a project or domain inherited
assignment ref.
"""
ref = copy.deepcopy(base_ref)
indirect = ref.setdefault('indirect', {})
if ref.get('project_id'):
indirect['project_id'] = ref.pop('project_id')
else:
indirect['domain_id'] = ref.pop('domain_id')
ref['project_id'] = project_id
ref.pop('inherited_to_projects')
return ref
# Define expanded project list to which to apply this assignment
if project_id:
# Since ref is an inherited assignment and we are filtering by
# project(s), we are only going to apply the assignment to the
# relevant project(s)
project_ids = [project_id]
if subtree_ids:
project_ids += subtree_ids
# If this is a domain inherited assignment, then we know
# that all the project_ids will get this assignment. If
# it's a project inherited assignment, and the assignment
# point is an ancestor of project_id, then we know that
# again all the project_ids will get the assignment. If,
# however, the assignment point is within the subtree,
# then only a partial tree will get the assignment.
resource_api = PROVIDERS.resource_api
if ref.get('project_id'):
if ref['project_id'] in project_ids:
project_ids = (
[x['id'] for x in
resource_api.list_projects_in_subtree(
ref['project_id'])])
elif ref.get('domain_id'):
# A domain inherited assignment, so apply it to all projects
# in this domain
project_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_domain(
ref['domain_id'])])
else:
# It must be a project assignment, so apply it to its subtree
project_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_subtree(
ref['project_id'])])
new_refs = []
if 'group_id' in ref:
if expand_groups:
# Expand role assignment to all group members on any
# inherited target of any of the projects
for ref in expand_group_assignment(ref, user_id):
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
else:
# Just place the group assignment on any inherited target
# of any of the projects
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
else:
# Expand role assignment for all projects
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
return new_refs
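        # dispatch: inherited assignments are expanded down the project tree,
        # plain group assignments are expanded to their members, and anything
        # else is already a direct assignment and passes through unchanged.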
if ref.get('inherited_to_projects') == 'projects':
return expand_inherited_assignment(
ref, user_id, project_id, subtree_ids, expand_groups)
elif 'group_id' in ref and expand_groups:
return expand_group_assignment(ref, user_id)
return [ref]
def add_implied_roles(self, role_refs):
"""Expand out implied roles.
The role_refs passed in have had all inheritance and group assignments
expanded out. We now need to look at the role_id in each ref and see
if it is a prior role for some implied roles. If it is, then we need to
duplicate that ref, one for each implied role. We store the prior role
in the indirect dict that is part of such a duplicated ref, so that a
caller can determine where the assignment came from.
"""
def _make_implied_ref_copy(prior_ref, implied_role_id):
# Create a ref for an implied role from the ref of a prior role,
# setting the new role_id to be the implied role and the indirect
# role_id to be the prior role
implied_ref = copy.deepcopy(prior_ref)
implied_ref['role_id'] = implied_role_id
indirect = implied_ref.setdefault('indirect', {})
indirect['role_id'] = prior_ref['role_id']
return implied_ref
if not CONF.token.infer_roles:
return role_refs
try:
implied_roles_cache = {}
role_refs_to_check = list(role_refs)
ref_results = list(role_refs)
checked_role_refs = list()
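            # iterative walk over the implied-role graph; checked_role_refs
            # prevents revisiting a ref when implication rules form a cycle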
            while role_refs_to_check:
next_ref = role_refs_to_check.pop()
checked_role_refs.append(next_ref)
next_role_id = next_ref['role_id']
if next_role_id in implied_roles_cache:
implied_roles = implied_roles_cache[next_role_id]
else:
implied_roles = (
PROVIDERS.role_api.list_implied_roles(next_role_id))
implied_roles_cache[next_role_id] = implied_roles
for implied_role in implied_roles:
implied_ref = (
_make_implied_ref_copy(
next_ref, implied_role['implied_role_id']))
if implied_ref in checked_role_refs:
# Avoid traversing a cycle
continue
else:
ref_results.append(implied_ref)
role_refs_to_check.append(implied_ref)
except exception.NotImplemented:
LOG.error('Role driver does not support implied roles.')
return ref_results
def _filter_by_role_id(self, role_id, ref_results):
        # if we arrive here, we need to filter by role_id.
filter_results = []
for ref in ref_results:
if ref['role_id'] == role_id:
filter_results.append(ref)
return filter_results
def _strip_domain_roles(self, role_refs):
"""Post process assignment list for domain roles.
Domain roles are only designed to do the job of inferring other roles
and since that has been done before this method is called, we need to
remove any assignments that include a domain role.
"""
def _role_is_global(role_id):
ref = PROVIDERS.role_api.get_role(role_id)
return (ref['domain_id'] is None)
filter_results = []
for ref in role_refs:
if _role_is_global(ref['role_id']):
filter_results.append(ref)
return filter_results
def _list_effective_role_assignments(self, role_id, user_id, group_id,
domain_id, project_id, subtree_ids,
inherited, source_from_group_ids,
strip_domain_roles):
"""List role assignments in effective mode.
When using effective mode, besides the direct assignments, the indirect
ones that come from grouping or inheritance are retrieved and will then
be expanded.
The resulting list of assignments will be filtered by the provided
parameters. If subtree_ids is not None, then we also want to include
all subtree_ids in the filter as well. Since we are in effective mode,
group can never act as a filter (since group assignments are expanded
        into user roles) and domain can only be a filter if we want non-inherited
assignments, since domains can't inherit assignments.
The goal of this method is to only ask the driver for those
        assignments as could affect the result based on the parameter filters
specified, hence avoiding retrieving a huge list.
"""
def list_role_assignments_for_actor(
role_id, inherited, user_id=None, group_ids=None,
project_id=None, subtree_ids=None, domain_id=None):
"""List role assignments for actor on target.
List direct and indirect assignments for an actor, optionally
for a given target (i.e. projects or domain).
:param role_id: List for a specific role, can be None meaning all
roles
:param inherited: Indicates whether inherited assignments or only
direct assignments are required. If None, then
both are required.
:param user_id: If not None, list only assignments that affect this
user.
:param group_ids: A list of groups required. Only one of user_id
and group_ids can be specified
:param project_id: If specified, only include those assignments
that affect at least this project, with
additionally any projects specified in
subtree_ids
:param subtree_ids: The list of projects in the subtree. If
specified, also include those assignments that
affect these projects. These projects are
guaranteed to be in the same domain as the
project specified in project_id. subtree_ids
can only be specified if project_id has also
been specified.
:param domain_id: If specified, only include those assignments
that affect this domain - by definition this will
not include any inherited assignments
:returns: List of assignments matching the criteria. Any inherited
or group assignments that could affect the resulting
response are included.
"""
project_ids_of_interest = None
if project_id:
if subtree_ids:
project_ids_of_interest = subtree_ids + [project_id]
else:
project_ids_of_interest = [project_id]
# List direct project role assignments
non_inherited_refs = []
if inherited is False or inherited is None:
# Get non inherited assignments
non_inherited_refs = self.driver.list_role_assignments(
role_id=role_id, domain_id=domain_id,
project_ids=project_ids_of_interest, user_id=user_id,
group_ids=group_ids, inherited_to_projects=False)
inherited_refs = []
if inherited is True or inherited is None:
# Get inherited assignments
if project_id:
# The project and any subtree are guaranteed to be owned by
# the same domain, so since we are filtering by these
# specific projects, then we can only get inherited
# assignments from their common domain or from any of
# their parents projects.
# List inherited assignments from the project's domain
proj_domain_id = PROVIDERS.resource_api.get_project(
project_id)['domain_id']
inherited_refs += self.driver.list_role_assignments(
role_id=role_id, domain_id=proj_domain_id,
user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
# For inherited assignments from projects, since we know
# they are from the same tree the only places these can
# come from are from parents of the main project or
# inherited assignments on the project or subtree itself.
source_ids = [project['id'] for project in
PROVIDERS.resource_api.list_project_parents(
project_id)]
if subtree_ids:
source_ids += project_ids_of_interest
if source_ids:
inherited_refs += self.driver.list_role_assignments(
role_id=role_id, project_ids=source_ids,
user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
else:
# List inherited assignments without filtering by target
inherited_refs = self.driver.list_role_assignments(
role_id=role_id, user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
return non_inherited_refs + inherited_refs
# If filtering by group or inherited domain assignment the list is
# guaranteed to be empty
if group_id or (domain_id and inherited):
return []
if user_id and source_from_group_ids:
# You can't do both - and since source_from_group_ids is only used
# internally, this must be a coding error by the caller.
msg = _('Cannot list assignments sourced from groups and filtered '
'by user ID.')
raise exception.UnexpectedError(msg)
# If filtering by domain, then only non-inherited assignments are
# relevant, since domains don't inherit assignments
inherited = False if domain_id else inherited
# List user or explicit group assignments.
# Due to the need to expand implied roles, this call will skip
# filtering by role_id and instead return the whole set of roles.
# Matching on the specified role is performed at the end.
direct_refs = list_role_assignments_for_actor(
role_id=None, user_id=user_id, group_ids=source_from_group_ids,
project_id=project_id, subtree_ids=subtree_ids,
domain_id=domain_id, inherited=inherited)
# And those from the user's groups, so long as we are not restricting
# to a set of source groups (in which case we already got those
# assignments in the direct listing above).
group_refs = []
if not source_from_group_ids and user_id:
group_ids = self._get_group_ids_for_user_id(user_id)
if group_ids:
group_refs = list_role_assignments_for_actor(
role_id=None, project_id=project_id,
subtree_ids=subtree_ids, group_ids=group_ids,
domain_id=domain_id, inherited=inherited)
# Expand grouping and inheritance on retrieved role assignments
refs = []
expand_groups = (source_from_group_ids is None)
for ref in (direct_refs + group_refs):
refs += self._expand_indirect_assignment(
ref, user_id, project_id, subtree_ids, expand_groups)
refs = self.add_implied_roles(refs)
if strip_domain_roles:
refs = self._strip_domain_roles(refs)
if role_id:
refs = self._filter_by_role_id(role_id, refs)
return refs
def _list_direct_role_assignments(self, role_id, user_id, group_id, system,
domain_id, project_id, subtree_ids,
inherited):
"""List role assignments without applying expansion.
        Returns a list of direct role assignments whose attributes match
the provided filters. If subtree_ids is not None, then we also want to
include all subtree_ids in the filter as well.
"""
group_ids = [group_id] if group_id else None
project_ids_of_interest = None
if project_id:
if subtree_ids:
project_ids_of_interest = subtree_ids + [project_id]
else:
project_ids_of_interest = [project_id]
project_and_domain_assignments = []
if not system:
project_and_domain_assignments = self.driver.list_role_assignments(
role_id=role_id, user_id=user_id, group_ids=group_ids,
domain_id=domain_id, project_ids=project_ids_of_interest,
inherited_to_projects=inherited)
system_assignments = []
if system or (not project_id and not domain_id and not system):
if user_id:
assignments = self.list_system_grants_for_user(user_id)
for assignment in assignments:
system_assignments.append(
{'system': {'all': True},
'user_id': user_id,
'role_id': assignment['id']}
)
elif group_id:
assignments = self.list_system_grants_for_group(group_id)
for assignment in assignments:
system_assignments.append(
{'system': {'all': True},
'group_id': group_id,
'role_id': assignment['id']}
)
else:
assignments = self.list_all_system_grants()
for assignment in assignments:
a = {}
if assignment['type'] == self._GROUP_SYSTEM:
a['group_id'] = assignment['actor_id']
elif assignment['type'] == self._USER_SYSTEM:
a['user_id'] = assignment['actor_id']
a['role_id'] = assignment['role_id']
a['system'] = {'all': True}
system_assignments.append(a)
        # popping from the list while enumerating it skips elements; rebuild
        # the list instead when filtering by role_id
        if role_id:
            system_assignments = [
                assignment for assignment in system_assignments
                if assignment['role_id'] == role_id
            ]
assignments = []
for assignment in itertools.chain(
project_and_domain_assignments, system_assignments):
assignments.append(assignment)
return assignments
def list_role_assignments(self, role_id=None, user_id=None, group_id=None,
system=None, domain_id=None, project_id=None,
include_subtree=False, inherited=None,
effective=None, include_names=False,
source_from_group_ids=None,
strip_domain_roles=True):
"""List role assignments, honoring effective mode and provided filters.
        Returns a list of role assignments whose attributes match the
provided filters (role_id, user_id, group_id, domain_id, project_id and
inherited). If include_subtree is True, then assignments on all
descendants of the project specified by project_id are also included.
The inherited filter defaults to None, meaning to get both
non-inherited and inherited role assignments.
If effective mode is specified, this means that rather than simply
return the assignments that match the filters, any group or
inheritance assignments will be expanded. Group assignments will
become assignments for all the users in that group, and inherited
assignments will be shown on the projects below the assignment point.
Think of effective mode as being the list of assignments that actually
affect a user, for example the roles that would be placed in a token.
If include_names is set to true the entities' names are returned
        in addition to their IDs.
source_from_group_ids is a list of group IDs and, if specified, then
only those assignments that are derived from membership of these groups
are considered, and any such assignments will not be expanded into
their user membership assignments. This is different to a group filter
of the resulting list, instead being a restriction on which assignments
should be considered before expansion of inheritance. This option is
only used internally (i.e. it is not exposed at the API level) and is
only supported in effective mode (since in regular mode there is no
difference between this and a group filter, other than it is a list of
groups).
In effective mode, any domain specific roles are usually stripped from
the returned assignments (since such roles are not placed in tokens).
This stripping can be disabled by specifying strip_domain_roles=False,
which is useful for internal calls like trusts which need to examine
the full set of roles.
"""
subtree_ids = None
if project_id and include_subtree:
subtree_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_subtree(
project_id)])
if system != 'all':
system = None
if effective:
role_assignments = self._list_effective_role_assignments(
role_id, user_id, group_id, domain_id, project_id,
subtree_ids, inherited, source_from_group_ids,
strip_domain_roles)
else:
role_assignments = self._list_direct_role_assignments(
role_id, user_id, group_id, system, domain_id, project_id,
subtree_ids, inherited)
if include_names:
return self._get_names_from_role_assignments(role_assignments)
return role_assignments
def _get_names_from_role_assignments(self, role_assignments):
role_assign_list = []
for role_asgmt in role_assignments:
new_assign = copy.deepcopy(role_asgmt)
for key, value in role_asgmt.items():
if key == 'domain_id':
_domain = PROVIDERS.resource_api.get_domain(value)
new_assign['domain_name'] = _domain['name']
elif key == 'user_id':
try:
# Note(knikolla): Try to get the user, otherwise
# if the user wasn't found in the backend
# use empty values.
_user = PROVIDERS.identity_api.get_user(value)
except exception.UserNotFound:
msg = ('User %(user)s not found in the'
' backend but still has role assignments.')
LOG.warning(msg, {'user': value})
new_assign['user_name'] = ''
new_assign['user_domain_id'] = ''
new_assign['user_domain_name'] = ''
else:
new_assign['user_name'] = _user['name']
new_assign['user_domain_id'] = _user['domain_id']
new_assign['user_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_user['domain_id'])['name'])
elif key == 'group_id':
try:
# Note(knikolla): Try to get the group, otherwise
# if the group wasn't found in the backend
# use empty values.
_group = PROVIDERS.identity_api.get_group(value)
except exception.GroupNotFound:
msg = ('Group %(group)s not found in the'
' backend but still has role assignments.')
LOG.warning(msg, {'group': value})
new_assign['group_name'] = ''
new_assign['group_domain_id'] = ''
new_assign['group_domain_name'] = ''
else:
new_assign['group_name'] = _group['name']
new_assign['group_domain_id'] = _group['domain_id']
new_assign['group_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_group['domain_id'])['name'])
elif key == 'project_id':
_project = PROVIDERS.resource_api.get_project(value)
new_assign['project_name'] = _project['name']
new_assign['project_domain_id'] = _project['domain_id']
new_assign['project_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_project['domain_id'])['name'])
elif key == 'role_id':
_role = PROVIDERS.role_api.get_role(value)
new_assign['role_name'] = _role['name']
if _role['domain_id'] is not None:
new_assign['role_domain_id'] = _role['domain_id']
new_assign['role_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_role['domain_id'])['name'])
role_assign_list.append(new_assign)
return role_assign_list
def delete_group_assignments(self, group_id):
# FIXME(lbragstad): This should be refactored in the Rocky release so
# that we can pass the group_id to the system assignment backend like
# we do with the project and domain assignment backend. Holding off on
# this because it will require an interface change to the backend,
# making it harder to backport for Queens RC.
self.driver.delete_group_assignments(group_id)
system_assignments = self.list_system_grants_for_group(group_id)
for assignment in system_assignments:
self.delete_system_grant_for_group(group_id, assignment['id'])
def delete_user_assignments(self, user_id):
# FIXME(lbragstad): This should be refactored in the Rocky release so
# that we can pass the user_id to the system assignment backend like we
# do with the project and domain assignment backend. Holding off on
# this because it will require an interface change to the backend,
# making it harder to backport for Queens RC.
self.driver.delete_user_assignments(user_id)
system_assignments = self.list_system_grants_for_user(user_id)
for assignment in system_assignments:
self.delete_system_grant_for_user(user_id, assignment['id'])
def check_system_grant_for_user(self, user_id, role_id):
"""Check if a user has a specific role on the system.
:param user_id: the ID of the user in the assignment
:param role_id: the ID of the system role in the assignment
:raises keystone.exception.RoleAssignmentNotFound: if the user doesn't
have a role assignment matching the role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
return self.driver.check_system_grant(
role_id, user_id, target_id, inherited
)
def list_system_grants_for_user(self, user_id):
"""Return a list of roles the user has on the system.
:param user_id: the ID of the user
:returns: a list of role assignments the user has system-wide
"""
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._USER_SYSTEM
grants = self.driver.list_system_grants(
user_id, target_id, assignment_type
)
grant_ids = []
for grant in grants:
grant_ids.append(grant['role_id'])
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
def create_system_grant_for_user(self, user_id, role_id):
"""Grant a user a role on the system.
:param user_id: the ID of the user
:param role_id: the ID of the role to grant on the system
"""
role = PROVIDERS.role_api.get_role(role_id)
if role.get('domain_id'):
raise exception.ValidationError(
'Role %(role_id)s is a domain-specific role. Unable to use '
'a domain-specific role in a system assignment.' % {
'role_id': role_id
}
)
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._USER_SYSTEM
inherited = False
self.driver.create_system_grant(
role_id, user_id, target_id, assignment_type, inherited
)
def delete_system_grant_for_user(self, user_id, role_id):
"""Remove a system grant from a user.
:param user_id: the ID of the user
:param role_id: the ID of the role to remove from the user on the
system
:raises keystone.exception.RoleAssignmentNotFound: if the user doesn't
have a role assignment with role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
self.driver.delete_system_grant(role_id, user_id, target_id, inherited)
def check_system_grant_for_group(self, group_id, role_id):
"""Check if a group has a specific role on the system.
:param group_id: the ID of the group in the assignment
:param role_id: the ID of the system role in the assignment
:raises keystone.exception.RoleAssignmentNotFound: if the group doesn't
have a role assignment matching the role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
return self.driver.check_system_grant(
role_id, group_id, target_id, inherited
)
def list_system_grants_for_group(self, group_id):
"""Return a list of roles the group has on the system.
:param group_id: the ID of the group
:returns: a list of role assignments the group has system-wide
"""
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._GROUP_SYSTEM
grants = self.driver.list_system_grants(
group_id, target_id, assignment_type
)
grant_ids = []
for grant in grants:
grant_ids.append(grant['role_id'])
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
def create_system_grant_for_group(self, group_id, role_id):
"""Grant a group a role on the system.
:param group_id: the ID of the group
:param role_id: the ID of the role to grant on the system
"""
role = PROVIDERS.role_api.get_role(role_id)
if role.get('domain_id'):
raise exception.ValidationError(
'Role %(role_id)s is a domain-specific role. Unable to use '
'a domain-specific role in a system assignment.' % {
'role_id': role_id
}
)
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._GROUP_SYSTEM
inherited = False
self.driver.create_system_grant(
role_id, group_id, target_id, assignment_type, inherited
)
def delete_system_grant_for_group(self, group_id, role_id):
"""Remove a system grant from a group.
:param group_id: the ID of the group
:param role_id: the ID of the role to remove from the group on the
system
:raises keystone.exception.RoleAssignmentNotFound: if the group doesn't
have a role assignment with role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
self.driver.delete_system_grant(
role_id, group_id, target_id, inherited
)
def list_all_system_grants(self):
"""Return a list of all system grants."""
actor_id = None
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = None
return self.driver.list_system_grants(
actor_id, target_id, assignment_type
)
class RoleManager(manager.Manager):
"""Default pivot point for the Role backend."""
driver_namespace = 'keystone.role'
_provides_api = 'role_api'
_ROLE = 'role'
def __init__(self):
# If there is a specific driver specified for role, then use it.
# Otherwise retrieve the driver type from the assignment driver.
role_driver = CONF.role.driver
if role_driver is None:
# Explicitly load the assignment manager object
assignment_driver = CONF.assignment.driver
assignment_manager_obj = manager.load_driver(
Manager.driver_namespace,
assignment_driver)
role_driver = assignment_manager_obj.default_role_driver()
super(RoleManager, self).__init__(role_driver)
@MEMOIZE
def get_role(self, role_id):
return self.driver.get_role(role_id)
def get_unique_role_by_name(self, role_name, hints=None):
if not hints:
hints = driver_hints.Hints()
hints.add_filter("name", role_name, case_sensitive=True)
found_roles = PROVIDERS.role_api.list_roles(hints)
if not found_roles:
raise exception.RoleNotFound(
_("Role %s is not defined") % role_name
)
elif len(found_roles) == 1:
return {'id': found_roles[0]['id']}
else:
raise exception.AmbiguityError(resource='role',
name=role_name)
def create_role(self, role_id, role, initiator=None):
ret = self.driver.create_role(role_id, role)
notifications.Audit.created(self._ROLE, role_id, initiator)
if MEMOIZE.should_cache(ret):
self.get_role.set(ret, self, role_id)
return ret
@manager.response_truncated
def list_roles(self, hints=None):
return self.driver.list_roles(hints or driver_hints.Hints())
def update_role(self, role_id, role, initiator=None):
original_role = self.driver.get_role(role_id)
if ('domain_id' in role and
role['domain_id'] != original_role['domain_id']):
raise exception.ValidationError(
message=_('Update of `domain_id` is not allowed.'))
ret = self.driver.update_role(role_id, role)
notifications.Audit.updated(self._ROLE, role_id, initiator)
self.get_role.invalidate(self, role_id)
return ret
def delete_role(self, role_id, initiator=None):
PROVIDERS.assignment_api.delete_role_assignments(role_id)
PROVIDERS.assignment_api._send_app_cred_notification_for_role_removal(
role_id
)
self.driver.delete_role(role_id)
notifications.Audit.deleted(self._ROLE, role_id, initiator)
self.get_role.invalidate(self, role_id)
reason = (
'Invalidating the token cache because role %(role_id)s has been '
'removed. Role assignments for users will be recalculated and '
'enforced accordingly the next time they authenticate or validate '
'a token' % {'role_id': role_id}
)
notifications.invalidate_token_cache_notification(reason)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# TODO(ayoung): Add notification
def create_implied_role(self, prior_role_id, implied_role_id):
implied_role = self.driver.get_role(implied_role_id)
prior_role = self.driver.get_role(prior_role_id)
if implied_role['name'] in CONF.assignment.prohibited_implied_role:
raise exception.InvalidImpliedRole(role_id=implied_role_id)
if prior_role['domain_id'] is None and implied_role['domain_id']:
msg = _('Global role cannot imply a domain-specific role')
raise exception.InvalidImpliedRole(msg,
role_id=implied_role_id)
response = self.driver.create_implied_role(
prior_role_id, implied_role_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
return response
def delete_implied_role(self, prior_role_id, implied_role_id):
self.driver.delete_implied_role(prior_role_id, implied_role_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
| [
"itertools.chain",
"keystone.i18n._",
"keystone.common.cache.create_region",
"keystone.common.cache.get_memoization_decorator",
"keystone.common.manager.load_driver",
"copy.deepcopy",
"oslo_log.log.getLogger",
"keystone.notifications.role_assignment",
"keystone.exception.UnexpectedError",
"keystone.exception.DomainSpecificRoleMismatch",
"keystone.exception.AmbiguityError",
"keystone.notifications.invalidate_token_cache_notification",
"keystone.notifications.Audit.deleted",
"keystone.common.driver_hints.Hints",
"keystone.notifications.Audit.updated",
"keystone.exception.InvalidImpliedRole",
"keystone.notifications.Audit.created",
"keystone.exception.ValidationError",
"keystone.notifications.Audit.internal"
]
| [((996, 1019), 'oslo_log.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (1009, 1019), False, 'from oslo_log import log\n'), ((1153, 1198), 'keystone.common.cache.get_memoization_decorator', 'cache.get_memoization_decorator', ([], {'group': '"""role"""'}), "(group='role')\n", (1184, 1198), False, 'from keystone.common import cache\n'), ((1452, 1500), 'keystone.common.cache.create_region', 'cache.create_region', ([], {'name': '"""computed assignments"""'}), "(name='computed assignments')\n", (1471, 1500), False, 'from keystone.common import cache\n'), ((1532, 1618), 'keystone.common.cache.get_memoization_decorator', 'cache.get_memoization_decorator', ([], {'group': '"""role"""', 'region': 'COMPUTED_ASSIGNMENTS_REGION'}), "(group='role', region=\n COMPUTED_ASSIGNMENTS_REGION)\n", (1563, 1618), False, 'from keystone.common import cache\n'), ((7375, 7415), 'keystone.notifications.role_assignment', 'notifications.role_assignment', (['"""created"""'], {}), "('created')\n", (7404, 7415), False, 'from keystone import notifications\n'), ((10684, 10724), 'keystone.notifications.role_assignment', 'notifications.role_assignment', (['"""deleted"""'], {}), "('deleted')\n", (10713, 10724), False, 'from keystone import notifications\n'), ((12922, 12962), 'keystone.notifications.role_assignment', 'notifications.role_assignment', (['"""created"""'], {}), "('created')\n", (12951, 12962), False, 'from keystone import notifications\n'), ((15165, 15205), 'keystone.notifications.role_assignment', 'notifications.role_assignment', (['"""deleted"""'], {}), "('deleted')\n", (15194, 15205), False, 'from keystone import notifications\n'), ((11532, 11610), 'keystone.notifications.Audit.internal', 'notifications.Audit.internal', (['notifications.REMOVE_APP_CREDS_FOR_USER', 'payload'], {}), '(notifications.REMOVE_APP_CREDS_FOR_USER, payload)\n', (11560, 11610), False, 'from keystone import notifications\n'), ((12858, 12915), 'keystone.notifications.invalidate_token_cache_notification', 'notifications.invalidate_token_cache_notification', (['reason'], {}), '(reason)\n', (12907, 12915), False, 'from keystone import notifications\n'), ((41774, 41841), 'itertools.chain', 'itertools.chain', (['project_and_domain_assignments', 'system_assignments'], {}), '(project_and_domain_assignments, system_assignments)\n', (41789, 41841), False, 'import itertools\n'), ((57473, 57532), 'keystone.notifications.Audit.created', 'notifications.Audit.created', (['self._ROLE', 'role_id', 'initiator'], {}), '(self._ROLE, role_id, initiator)\n', (57500, 57532), False, 'from keystone import notifications\n'), ((58170, 58229), 'keystone.notifications.Audit.updated', 'notifications.Audit.updated', (['self._ROLE', 'role_id', 'initiator'], {}), '(self._ROLE, role_id, initiator)\n', (58197, 58229), False, 'from keystone import notifications\n'), ((58574, 58633), 'keystone.notifications.Audit.deleted', 'notifications.Audit.deleted', (['self._ROLE', 'role_id', 'initiator'], {}), '(self._ROLE, role_id, initiator)\n', (58601, 58633), False, 'from keystone import notifications\n'), ((58998, 59055), 'keystone.notifications.invalidate_token_cache_notification', 'notifications.invalidate_token_cache_notification', (['reason'], {}), '(reason)\n', (59047, 59055), False, 'from keystone import notifications\n'), ((18753, 18776), 'copy.deepcopy', 'copy.deepcopy', (['base_ref'], {}), '(base_ref)\n', (18766, 18776), False, 'import copy\n'), ((27784, 27808), 'copy.deepcopy', 'copy.deepcopy', (['prior_ref'], {}), '(prior_ref)\n', (27797, 27808), 
False, 'import copy\n'), ((37028, 37101), 'keystone.i18n._', '_', (['"""Cannot list assignments sourced from groups and filtered by user ID."""'], {}), "('Cannot list assignments sourced from groups and filtered by user ID.')\n", (37029, 37101), False, 'from keystone.i18n import _\n'), ((37143, 37173), 'keystone.exception.UnexpectedError', 'exception.UnexpectedError', (['msg'], {}), '(msg)\n', (37168, 37173), False, 'from keystone import exception\n'), ((45607, 45632), 'copy.deepcopy', 'copy.deepcopy', (['role_asgmt'], {}), '(role_asgmt)\n', (45620, 45632), False, 'import copy\n'), ((51886, 52052), 'keystone.exception.ValidationError', 'exception.ValidationError', (["('Role %(role_id)s is a domain-specific role. Unable to use a domain-specific role in a system assignment.'\n % {'role_id': role_id})"], {}), "(\n 'Role %(role_id)s is a domain-specific role. Unable to use a domain-specific role in a system assignment.'\n % {'role_id': role_id})\n", (51911, 52052), False, 'from keystone import exception\n'), ((54484, 54650), 'keystone.exception.ValidationError', 'exception.ValidationError', (["('Role %(role_id)s is a domain-specific role. Unable to use a domain-specific role in a system assignment.'\n % {'role_id': role_id})"], {}), "(\n 'Role %(role_id)s is a domain-specific role. Unable to use a domain-specific role in a system assignment.'\n % {'role_id': role_id})\n", (54509, 54650), False, 'from keystone import exception\n'), ((56429, 56493), 'keystone.common.manager.load_driver', 'manager.load_driver', (['Manager.driver_namespace', 'assignment_driver'], {}), '(Manager.driver_namespace, assignment_driver)\n', (56448, 56493), False, 'from keystone.common import manager\n'), ((56851, 56871), 'keystone.common.driver_hints.Hints', 'driver_hints.Hints', ([], {}), '()\n', (56869, 56871), False, 'from keystone.common import driver_hints\n'), ((59422, 59475), 'keystone.exception.InvalidImpliedRole', 'exception.InvalidImpliedRole', ([], {'role_id': 'implied_role_id'}), '(role_id=implied_role_id)\n', (59450, 59475), False, 'from keystone import exception\n'), ((59568, 59620), 'keystone.i18n._', '_', (['"""Global role cannot imply a domain-specific role"""'], {}), "('Global role cannot imply a domain-specific role')\n", (59569, 59620), False, 'from keystone.i18n import _\n'), ((59639, 59697), 'keystone.exception.InvalidImpliedRole', 'exception.InvalidImpliedRole', (['msg'], {'role_id': 'implied_role_id'}), '(msg, role_id=implied_role_id)\n', (59667, 59697), False, 'from keystone import exception\n'), ((3929, 4007), 'keystone.notifications.Audit.internal', 'notifications.Audit.internal', (['notifications.REMOVE_APP_CREDS_FOR_USER', 'payload'], {}), '(notifications.REMOVE_APP_CREDS_FOR_USER, payload)\n', (3957, 4007), False, 'from keystone import notifications\n'), ((13597, 13673), 'keystone.exception.DomainSpecificRoleMismatch', 'exception.DomainSpecificRoleMismatch', ([], {'role_id': 'role_id', 'project_id': 'project_id'}), '(role_id=role_id, project_id=project_id)\n', (13633, 13673), False, 'from keystone import exception\n'), ((23307, 23330), 'copy.deepcopy', 'copy.deepcopy', (['base_ref'], {}), '(base_ref)\n', (23320, 23330), False, 'import copy\n'), ((57252, 57309), 'keystone.exception.AmbiguityError', 'exception.AmbiguityError', ([], {'resource': '"""role"""', 'name': 'role_name'}), "(resource='role', name=role_name)\n", (57276, 57309), False, 'from keystone import exception\n'), ((57758, 57778), 'keystone.common.driver_hints.Hints', 'driver_hints.Hints', ([], {}), '()\n', (57776, 57778), 
False, 'from keystone.common import driver_hints\n'), ((7190, 7232), 'keystone.i18n._', '_', (['"""Must specify either domain or project"""'], {}), "('Must specify either domain or project')\n", (7191, 7232), False, 'from keystone.i18n import _\n'), ((57082, 57109), 'keystone.i18n._', '_', (['"""Role %s is not defined"""'], {}), "('Role %s is not defined')\n", (57083, 57109), False, 'from keystone.i18n import _\n'), ((58064, 58106), 'keystone.i18n._', '_', (['"""Update of `domain_id` is not allowed."""'], {}), "('Update of `domain_id` is not allowed.')\n", (58065, 58106), False, 'from keystone.i18n import _\n')] |
"""
sources.chicago
===============
Reads a CSV file in the format (as of April 2017) of data available from:
- https://catalog.data.gov/dataset/crimes-one-year-prior-to-present-e171f
- https://catalog.data.gov/dataset/crimes-2001-to-present-398a4
The default data is loaded from a file "chicago.csv" which should be downloaded
from one of the above links. The format of the data, frustratingly, differs
between the snapshot of last year, and the total.
The data is partly anonymous in that the address within a block is obscured,
while the geocoding seems complicated (work in progress to understand)...
The crime type "HOMICIDE" is reported multiple times in the dataset.
"""
import csv as _csv
import os.path as _path
import datetime
import numpy as _np
from ..data import TimedPoints
_datadir = None
_default_filename = "chicago.csv"
_FEET_IN_METERS = 3937 / 1200
def set_data_directory(datadir):
"""Set the default location for search for the default input file."""
global _datadir
_datadir = datadir
def get_default_filename():
"""Returns the default filename, if available. Otherwise raises
AttributeError.
"""
global _datadir
if _datadir is None:
raise AttributeError("datadir not set; call `set_data_directory()`.")
return _path.join(_datadir, _default_filename)
def _date_from_csv(date_string):
return datetime.datetime.strptime(date_string, "%m/%d/%Y %I:%M:%S %p")
def date_from_iso(iso_string):
"""Convert a datetime string in ISO format into a :class:`datetime`
instance.
:param iso_string: Like "2017-10-23T05:12:39"
:return: A :class:`datetime` instance.
"""
return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S")
def _date_from_other(dt_str):
# Like 4/16/13 5:00
try:
date, time = dt_str.split()
month, day, year = date.split("/")
hour, minutes = time.split(":")
return datetime.datetime(year=int(year)+2000, month=int(month), day=int(day),
hour=int(hour), minute=int(minutes))
except Exception as ex:
raise Exception("Failed to parse {}, cause {}/{}".format(dt_str, type(ex), ex))
_FIELDS = {
"snapshot" : {
"_DESCRIPTION_FIELD" : ' PRIMARY DESCRIPTION',
"_X_FIELD" : 'X COORDINATE',
"_Y_FIELD" : 'Y COORDINATE',
"_TIME_FIELD" : 'DATE OF OCCURRENCE',
"_GEOJSON_LOOKUP" : {"case": 'CASE#',
"address": "BLOCK",
"location": ' LOCATION DESCRIPTION',
"crime": ' PRIMARY DESCRIPTION',
"type": ' SECONDARY DESCRIPTION',
"timestamp": 'DATE OF OCCURRENCE'},
"GEOJSON_COORDS" : ('LONGITUDE', 'LATITUDE'),
"DT_CONVERT" : _date_from_csv
},
"all" : {
"_DESCRIPTION_FIELD" : 'Primary Type',
"_X_FIELD" : 'X Coordinate',
"_Y_FIELD" : 'Y Coordinate',
"_TIME_FIELD" : 'Date',
"_GEOJSON_LOOKUP" : {"case": 'Case Number',
"address": "Block",
"location": 'Location Description',
"crime": 'Primary Type',
"type": 'Description',
"timestamp": 'Date'},
"GEOJSON_COORDS" : ('Longitude', 'Latitude'),
"DT_CONVERT" : _date_from_csv
},
"gen" : {
"_DESCRIPTION_FIELD" : 'CRIME',
"_X_FIELD" : 'X',
"_Y_FIELD" : 'Y',
"_TIME_FIELD" : 'TIMESTAMP',
"_GEOJSON_LOOKUP" : {"case": 'CASE',
"address": "BLOCK",
"location": 'LOCATION',
"crime": 'CRIME',
"type": 'SUB-TYPE',
"timestamp": 'TIMESTAMP'},
"GEOJSON_COORDS" : ('X', 'Y'),
"DT_CONVERT" : _date_from_csv
}
}
_FIELDS["all_other"] = dict(_FIELDS["all"])
_FIELDS["all_other"]["DT_CONVERT"] = _date_from_other
def _convert_header(header, dic):
lookup = dict()
for field in [dic["_DESCRIPTION_FIELD"], dic["_X_FIELD"], dic["_Y_FIELD"], dic["_TIME_FIELD"]]:
if not field in header:
raise Exception("No field '{}' found in header".format(field))
lookup[field] = header.index(field)
return lookup
def default_burglary_data():
"""Load the default data, if available, giving just "THEFT" data.
:return: An instance of :class:`open_cp.data.TimedPoints` or `None`.
"""
try:
return load(get_default_filename(), {"THEFT"})
except Exception:
return None
def _get_dic(type):
try:
return _FIELDS[type]
except KeyError:
raise ValueError("Don't understand type {}".format(type))
def _load_to_list(file, dic, primary_description_names):
reader = _csv.reader(file)
lookup = _convert_header(next(reader), dic)
dt_convert = dic["DT_CONVERT"]
data = []
for row in reader:
description = row[lookup[dic["_DESCRIPTION_FIELD"]]].strip()
if not description in primary_description_names:
continue
x = row[lookup[dic["_X_FIELD"]]].strip()
y = row[lookup[dic["_Y_FIELD"]]].strip()
t = row[lookup[dic["_TIME_FIELD"]]].strip()
if x != "" and y != "":
data.append((dt_convert(t), float(x), float(y)))
return data
def load(file, primary_description_names, to_meters=True, type="snapshot"):
"""Load data from a CSV file in the expected format.
:param file: Name of the CSV file load, or a file-like object.
:param primary_description_names: Set of names to search for in the
"primary description field". E.g. pass `{"THEFT"}` to return only the
"theft" crime type.
:param to_meters: Convert the coordinates to meters; True by default.
:param type: Either "snapshot" or "all" depending on whether the data
        has headers conforming to the data "last year" or "2001 to present".
:return: An instance of :class:`open_cp.data.TimedPoints` or `None`.
"""
dic = _get_dic(type)
if isinstance(file, str):
with open(file) as file:
data = _load_to_list(file, dic, primary_description_names)
else:
data = _load_to_list(file, dic, primary_description_names)
data.sort(key = lambda triple : triple[0])
xcoords = _np.empty(len(data))
ycoords = _np.empty(len(data))
for i, (_, x, y) in enumerate(data):
xcoords[i], ycoords[i] = x, y
times = [t for t, _, _ in data]
if to_meters:
xcoords /= _FEET_IN_METERS
ycoords /= _FEET_IN_METERS
return TimedPoints.from_coords(times, xcoords, ycoords)
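# Illustrative usage (not part of the original module): a minimal sketch that assumes a
# downloaded "chicago.csv" snapshot sits in a data directory of your choosing; the path
# and the crime-type set below are placeholders, not values taken from this file.
#
#     set_data_directory("/path/to/data")
#     points = load(get_default_filename(), {"THEFT", "BURGLARY"}, type="snapshot")
#     # `points` is an open_cp.data.TimedPoints instance with coordinates in metres.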
def _convert_header_for_geojson(header, dic):
try:
column_lookup = {}
for key, col_head in dic["_GEOJSON_LOOKUP"].items():
column_lookup[key] = header.index(col_head)
coord_lookup = [header.index(chead) for chead in dic["GEOJSON_COORDS"]]
return column_lookup, coord_lookup
    except (KeyError, ValueError) as ex:
raise ValueError("Header not in expected format: {} caused by {}/{}".format(
header, type(ex), ex))
def _generate_GeoJSON_Features(file, dic):
dt_convert = dic["DT_CONVERT"]
reader = _csv.reader(file)
column_lookup, coord_lookup = _convert_header_for_geojson(next(reader), dic)
for row in reader:
properties = {key : row[i] for key, i in column_lookup.items()}
properties["timestamp"] = dt_convert(properties["timestamp"]).isoformat()
if row[coord_lookup[0]] == "":
geometry = None
else:
coordinates = [float(row[i]) for i in coord_lookup]
geometry = {"type":"Point", "coordinates":coordinates}
yield {"geometry": geometry, "properties": properties,
"type": "Feature"}
def generate_GeoJSON_Features(file, type="snapshot"):
"""Generate a sequence of GeoJSON "features" from the CSV file.
See :func:`load_to_GeoJSON`.
:param file: Either a filename, or a file object.
"""
dic = _get_dic(type)
if isinstance(file, str):
with open(file) as f:
yield from _generate_GeoJSON_Features(f, dic)
else:
yield from _generate_GeoJSON_Features(file, dic)
def load_to_GeoJSON(filename, type="snapshot"):
"""Load the specified CSV file to a list of GeoJSON (see
http://geojson.org/) features. Events with no location data have `None`
as the geometry. Timestamps are converted to standard ISO string format.
The returned "properties" have these keys:
- "case" for the "CASE#" field
- "crime" for the "PRIMARY DESCRIPTION" field
- "type" for the "SECONDARY DESCRIPTION" field
- "location" for the "LOCATION DESCRIPTION" field
- "timestamp" for the "DATE OF OCCURRENCE" field
- "address" for the "BLOCK" field
:param filename: Filename of the CSV file to process
:param type: Either "snapshot" or "all" depending on whether the data
        has headers conforming to the data "last year" or "2001 to present".
:return: List of Python dictionaries in GeoJSON format.
"""
return list(generate_GeoJSON_Features(filename, type))
try:
import geopandas as gpd
import shapely.geometry as _geometry
except ImportError:
gpd = None
_geometry = None
def convert_null_geometry_to_empty(frame):
"""Utility method. Convert any geometry in the geoDataFrame which is
"null" (`None` or empty) to a Point type geometry which is empty. The
    returned geoDataFrame is suitable for projecting and other geometrical
transformations.
"""
def null_to_point(x):
if x is None or x.is_empty:
return _geometry.Point()
return x
newgeo = frame.geometry.map(null_to_point)
return frame.set_geometry(newgeo)
def convert_null_geometry_to_none(frame):
    """Utility method. Convert any geometry in the geoDataFrame which is
    "null" (`None` or empty) to `None`. The returned geoDataFrame is suitable
for saving.
"""
def null_to_none(x):
if x is None or x.is_empty:
return None
return x
newgeo = frame.geometry.map(null_to_none)
return frame.set_geometry(newgeo)
def load_to_geoDataFrame(filename, datetime_as_string=True,
type="snapshot", empty_geometry="none"):
"""Return the same data as :func:`load_to_GeoJSON` but as a geoPandas
data-frame.
:param filename: Filename of the CSV file to process
:param datetime_as_string: Write the timestamp as an ISO formatted string.
Defaults to True which is best for saving the dataframe as e.g. a shape
file. Set to False to get timestamps as python objects, which is best
for using (geo)pandas to analyse the data.
:param type: Either "snapshot" or "all" depending on whether the data
        has headers conforming to the data "last year" or "2001 to present".
:param empty_geometry: Either "none" to return `None` as the geometry of
crimes which have no location data in the CSV file (this is correct if
you wish to save the data-frame); or "empty" to return an empty `Point`
type (which is correct, for example, if you wish to re-project the
data-frame). Yes, GeoPandas appears to be annoying like this.
"""
geo_data = load_to_GeoJSON(filename, type=type)
if not datetime_as_string:
for feature in geo_data:
            feature["properties"]["timestamp"] = date_from_iso(feature["properties"]["timestamp"])
frame = gpd.GeoDataFrame.from_features(geo_data)
if empty_geometry == "none":
pass
elif empty_geometry == "empty":
frame = convert_null_geometry_to_empty(frame)
else:
raise ValueError("Unknown `empty_geometry` parameter `{}`".format(empty_geometry))
frame.crs = {"init":"epsg:4326"}
return frame
_sides = None
def _load_sides():
global _sides
if _sides is not None:
return
global _datadir
geojson = _path.join(_datadir, "Chicago_Areas.geojson")
frame = gpd.read_file(geojson)
side_mapping = {
"Far North" : [1,2,3,4,9,10,11,12,13,14,76,77],
"Northwest" : [15,16,17,18,19,20],
"North" : [5,6,7,21,22],
"West" : list(range(23, 32)),
"Central" : [8,32,33],
"South" : list(range(34,44)) + [60, 69],
"Southwest" : [56,57,58,59] + list(range(61,69)),
"Far Southwest" : list(range(70,76)),
"Far Southeast" : list(range(44,56))
}
frame["side"] = frame.area_numbe.map(lambda x : next(key
for key, item in side_mapping.items() if int(x) in item) )
_sides = frame.drop(["area", "area_num_1", "comarea", "comarea_id",
"perimeter", "shape_area", "shape_len"], axis=1)
_sides.crs = {"init": "epsg:4326"}
_sides = _sides.to_crs({"init": "epsg:2790"})
def get_side(name):
"""Return a geometry (a polygon, typically) of the outline of the shape
of the given "side" of Chicago, projected to {"init":"epsg:2790"}, which
is Illinois in metres.
Needs the file "Chicago_Areas.geojson" to be in the "datadir". This can
be downloaded from:
https://data.cityofchicago.org/Facilities-Geographic-Boundaries/Boundaries-Community-Areas-current-/cauq-8yn6
:param name: One of "Far North", "Northwest", "North", "West", "Central",
"South", "Southwest", "Far Southwest", "Far Southeast"
"""
_load_sides()
return _sides[_sides.side == name].unary_union
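# Illustrative usage (a sketch, assuming "Chicago_Areas.geojson" has been downloaded into
# the data directory as the docstring above describes; the path is a placeholder):
#
#     set_data_directory("/path/to/data")
#     south = get_side("South")   # a shapely geometry projected to EPSG:2790 (metres)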
| [
"geopandas.GeoDataFrame.from_features",
"geopandas.read_file",
"datetime.datetime.strptime",
"os.path.join",
"shapely.geometry.Point",
"csv.reader"
]
| [((1287, 1326), 'os.path.join', '_path.join', (['_datadir', '_default_filename'], {}), '(_datadir, _default_filename)\n', (1297, 1326), True, 'import os.path as _path\n'), ((1372, 1435), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date_string', '"""%m/%d/%Y %I:%M:%S %p"""'], {}), "(date_string, '%m/%d/%Y %I:%M:%S %p')\n", (1398, 1435), False, 'import datetime\n'), ((1676, 1735), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['iso_string', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(iso_string, '%Y-%m-%dT%H:%M:%S')\n", (1702, 1735), False, 'import datetime\n'), ((4650, 4667), 'csv.reader', '_csv.reader', (['file'], {}), '(file)\n', (4661, 4667), True, 'import csv as _csv\n'), ((7056, 7073), 'csv.reader', '_csv.reader', (['file'], {}), '(file)\n', (7067, 7073), True, 'import csv as _csv\n'), ((11349, 11389), 'geopandas.GeoDataFrame.from_features', 'gpd.GeoDataFrame.from_features', (['geo_data'], {}), '(geo_data)\n', (11379, 11389), True, 'import geopandas as gpd\n'), ((11811, 11856), 'os.path.join', '_path.join', (['_datadir', '"""Chicago_Areas.geojson"""'], {}), "(_datadir, 'Chicago_Areas.geojson')\n", (11821, 11856), True, 'import os.path as _path\n'), ((11869, 11891), 'geopandas.read_file', 'gpd.read_file', (['geojson'], {}), '(geojson)\n', (11882, 11891), True, 'import geopandas as gpd\n'), ((9504, 9521), 'shapely.geometry.Point', '_geometry.Point', ([], {}), '()\n', (9519, 9521), True, 'import shapely.geometry as _geometry\n')] |
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import io
import os
from pathlib import Path
import saneyaml
from packagedcode import models
from packageurl import PackageURL
# TODO: Override get_package_resource so it returns the Resource that the ABOUT file is describing
TRACE = os.environ.get('SCANCODE_DEBUG_PACKAGE', False)
def logger_debug(*args):
pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(
' '.join(isinstance(a, str) and a or repr(a) for a in args)
)
class AboutFileHandler(models.DatafileHandler):
datasource_id = 'about_file'
default_package_type = 'about'
path_patterns = ('*.ABOUT',)
description = 'AboutCode ABOUT file'
documentation_url = 'https://aboutcode-toolkit.readthedocs.io/en/latest/specification.html'
@classmethod
def parse(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""
with io.open(location, encoding='utf-8') as loc:
package_data = saneyaml.load(loc.read())
# About files can contain any purl and also have a namespace
about_type = package_data.get('type')
about_ns = package_data.get('namespace')
purl_type = None
purl_ns = None
purl = package_data.get('purl')
if purl:
purl = PackageURL.from_string(purl)
if purl:
purl_type = purl.type
package_type = about_type or purl_type or cls.default_package_type
package_ns = about_ns or purl_ns
name = package_data.get('name')
version = package_data.get('version')
homepage_url = package_data.get('home_url') or package_data.get('homepage_url')
download_url = package_data.get('download_url')
copyright_statement = package_data.get('copyright')
license_expression = package_data.get('license_expression')
declared_license = license_expression
owner = package_data.get('owner')
if not isinstance(owner, str):
owner = repr(owner)
parties = [models.Party(type=models.party_person, name=owner, role='owner')]
# FIXME: also include notice_file and license_file(s) as file_references
file_references = []
about_resource = package_data.get('about_resource')
if about_resource:
file_references.append(models.FileReference(path=about_resource))
# FIXME: we should put the unprocessed attributes in extra data
yield models.PackageData(
datasource_id=cls.datasource_id,
type=package_type,
namespace=package_ns,
name=name,
version=version,
declared_license=declared_license,
license_expression=license_expression,
copyright=copyright_statement,
parties=parties,
homepage_url=homepage_url,
download_url=download_url,
file_references=file_references,
)
@classmethod
def assemble(cls, package_data, resource, codebase):
"""
Yield a Package. Note that ABOUT files do not carry dependencies.
"""
datafile_path = resource.path
# do we have enough to create a package?
if package_data.purl:
package = models.Package.from_package_data(
package_data=package_data,
datafile_path=datafile_path,
)
package_uid = package.package_uid
# NOTE: we do not attach files to the Package level. Instead we
# update `for_package` in the file
resource.for_packages.append(package_uid)
resource.save(codebase)
if not package.license_expression:
package.license_expression = cls.compute_normalized_license(package)
yield package
if resource.pid is not None and package_data.file_references:
parent_resource = resource.parent(codebase)
if parent_resource and package_data.file_references:
root_path = Path(parent_resource.path)
# FIXME: we should be able to get the path relatively to the
# ABOUT file resource a file ref extends from the root of
# the filesystem
file_references_by_path = {
str(root_path / ref.path): ref
for ref in package.file_references
}
for res in parent_resource.walk(codebase):
ref = file_references_by_path.get(res.path)
if not ref:
continue
# path is found and processed: remove it, so we can
# check if we found all of them
del file_references_by_path[res.path]
res.for_packages.append(package_uid)
res.save(codebase)
yield res
# if we have left over file references, add these to extra data
if file_references_by_path:
missing = sorted(file_references_by_path.values(), key=lambda r: r.path)
package.extra_data['missing_file_references'] = missing
else:
package.extra_data['missing_file_references'] = package_data.file_references[:]
# we yield this as we do not want this further processed
yield resource
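# Illustrative usage (not part of the original handler): a minimal sketch that only
# exercises parse(); the filename is hypothetical and the codebase/assembly plumbing
# that scancode normally supplies is omitted.
#
#     for package_data in AboutFileHandler.parse("example.ABOUT"):
#         print(package_data.name, package_data.version, package_data.license_expression)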
| [
"logging.getLogger",
"logging.basicConfig",
"packagedcode.models.Package.from_package_data",
"pathlib.Path",
"os.environ.get",
"io.open",
"packagedcode.models.PackageData",
"packagedcode.models.FileReference",
"packageurl.PackageURL.from_string",
"packagedcode.models.Party"
]
| [((596, 643), 'os.environ.get', 'os.environ.get', (['"""SCANCODE_DEBUG_PACKAGE"""', '(False)'], {}), "('SCANCODE_DEBUG_PACKAGE', False)\n", (610, 643), False, 'import os\n'), ((740, 767), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (757, 767), False, 'import logging\n'), ((772, 810), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (791, 810), False, 'import logging\n'), ((1497, 1532), 'io.open', 'io.open', (['location'], {'encoding': '"""utf-8"""'}), "(location, encoding='utf-8')\n", (1504, 1532), False, 'import io\n'), ((1883, 1911), 'packageurl.PackageURL.from_string', 'PackageURL.from_string', (['purl'], {}), '(purl)\n', (1905, 1911), False, 'from packageurl import PackageURL\n'), ((2628, 2692), 'packagedcode.models.Party', 'models.Party', ([], {'type': 'models.party_person', 'name': 'owner', 'role': '"""owner"""'}), "(type=models.party_person, name=owner, role='owner')\n", (2640, 2692), False, 'from packagedcode import models\n'), ((3057, 3404), 'packagedcode.models.PackageData', 'models.PackageData', ([], {'datasource_id': 'cls.datasource_id', 'type': 'package_type', 'namespace': 'package_ns', 'name': 'name', 'version': 'version', 'declared_license': 'declared_license', 'license_expression': 'license_expression', 'copyright': 'copyright_statement', 'parties': 'parties', 'homepage_url': 'homepage_url', 'download_url': 'download_url', 'file_references': 'file_references'}), '(datasource_id=cls.datasource_id, type=package_type,\n namespace=package_ns, name=name, version=version, declared_license=\n declared_license, license_expression=license_expression, copyright=\n copyright_statement, parties=parties, homepage_url=homepage_url,\n download_url=download_url, file_references=file_references)\n', (3075, 3404), False, 'from packagedcode import models\n'), ((3854, 3947), 'packagedcode.models.Package.from_package_data', 'models.Package.from_package_data', ([], {'package_data': 'package_data', 'datafile_path': 'datafile_path'}), '(package_data=package_data, datafile_path=\n datafile_path)\n', (3886, 3947), False, 'from packagedcode import models\n'), ((2927, 2968), 'packagedcode.models.FileReference', 'models.FileReference', ([], {'path': 'about_resource'}), '(path=about_resource)\n', (2947, 2968), False, 'from packagedcode import models\n'), ((4646, 4672), 'pathlib.Path', 'Path', (['parent_resource.path'], {}), '(parent_resource.path)\n', (4650, 4672), False, 'from pathlib import Path\n')] |
# -*- coding: utf-8 -*-
#django
from django.contrib import admin
from django.db import transaction
#python
import csv
from decimal import Decimal
#gazepattern
from .models import Experiment, ExperimentPoint, Image, ImageRectangle, ExperimentPointCSV, ExperimentFunction
@transaction.atomic
def procesar(modeladmin, request, queryset):
for query in queryset:
file = query.file
with open(file.path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
rows = [row for row in csv_reader if len(row)]
for row in rows:
experiment_id = int(row[0])
fixation_number = int(row[1])
x = Decimal(row[2])
y = Decimal(row[3])
experiment = Experiment.objects.get(pk=experiment_id)
experiment_point = ExperimentPoint()
experiment_point.experiment = experiment
experiment_point.fixation_number = fixation_number
experiment_point.x = x
experiment_point.y = y
experiment_point.save()
procesar.short_description = "Process CSV to generate experiment points"
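# Expected CSV layout for the admin action above (inferred from the row parsing:
# experiment id, fixation number, x, y per line; a file without a header row is assumed):
#
#     1,1,512.30,384.75
#     1,2,498.10,401.20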
class ExperimentPointCSVAdmin(admin.ModelAdmin):
list_display = ['id', 'file']
ordering = ['id']
actions = [procesar, ]
class ExperimentPointAdmin(admin.ModelAdmin):
list_display = ['id', 'experiment_id', 'fixation_number', 'x', 'y']
ordering = ['id']
search_fields = ["experiment__id"]
class ImageAdmin(admin.ModelAdmin):
list_display = ['id', 'name']
ordering = ['id']
class ExperimentAdmin(admin.ModelAdmin):
list_display = ['id', 'name', 'description']
ordering = ['id']
class ImageRectangleAdmin(admin.ModelAdmin):
list_display = ['id', 'image_id','name']
ordering = ['id']
search_fields = ['image__id']
class ExperimentFunctionAdmin(admin.ModelAdmin):
list_display = ['id', 'experiment_id', 'function']
ordering = ['id']
search_fields = ['experiment__id']
admin.site.register(ExperimentPointCSV, ExperimentPointCSVAdmin)
admin.site.register(ExperimentPoint, ExperimentPointAdmin)
admin.site.register(Image, ImageAdmin)
admin.site.register(Experiment, ExperimentAdmin)
admin.site.register(ImageRectangle, ImageRectangleAdmin)
admin.site.register(ExperimentFunction, ExperimentFunctionAdmin) | [
"django.contrib.admin.site.register",
"csv.reader",
"decimal.Decimal"
]
| [((2026, 2090), 'django.contrib.admin.site.register', 'admin.site.register', (['ExperimentPointCSV', 'ExperimentPointCSVAdmin'], {}), '(ExperimentPointCSV, ExperimentPointCSVAdmin)\n', (2045, 2090), False, 'from django.contrib import admin\n'), ((2091, 2149), 'django.contrib.admin.site.register', 'admin.site.register', (['ExperimentPoint', 'ExperimentPointAdmin'], {}), '(ExperimentPoint, ExperimentPointAdmin)\n', (2110, 2149), False, 'from django.contrib import admin\n'), ((2150, 2188), 'django.contrib.admin.site.register', 'admin.site.register', (['Image', 'ImageAdmin'], {}), '(Image, ImageAdmin)\n', (2169, 2188), False, 'from django.contrib import admin\n'), ((2189, 2237), 'django.contrib.admin.site.register', 'admin.site.register', (['Experiment', 'ExperimentAdmin'], {}), '(Experiment, ExperimentAdmin)\n', (2208, 2237), False, 'from django.contrib import admin\n'), ((2238, 2294), 'django.contrib.admin.site.register', 'admin.site.register', (['ImageRectangle', 'ImageRectangleAdmin'], {}), '(ImageRectangle, ImageRectangleAdmin)\n', (2257, 2294), False, 'from django.contrib import admin\n'), ((2295, 2359), 'django.contrib.admin.site.register', 'admin.site.register', (['ExperimentFunction', 'ExperimentFunctionAdmin'], {}), '(ExperimentFunction, ExperimentFunctionAdmin)\n', (2314, 2359), False, 'from django.contrib import admin\n'), ((457, 492), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (467, 492), False, 'import csv\n'), ((691, 706), 'decimal.Decimal', 'Decimal', (['row[2]'], {}), '(row[2])\n', (698, 706), False, 'from decimal import Decimal\n'), ((727, 742), 'decimal.Decimal', 'Decimal', (['row[3]'], {}), '(row[3])\n', (734, 742), False, 'from decimal import Decimal\n')] |
from random import randint
menor = 100
linha = 0
maior = 0
m = []
for i in range(10):
m.append([])
for j in range(10):
m[i].append(randint(1,99))
for i in range(10):
for j in range(10):
print(f'{m[i][j]:2}',end=' ')
print()
for i in range(10):
for j in range(10):
if m[i][j] > maior:
maior = m[i][j]
linha = i
for i in range(10):
if m[linha][i] < menor:
menor = m[linha][i]
print(f'the minimax is {menor}, with the largest value being {maior} in row {linha+1}.')
| [
"random.randint"
]
| [((148, 162), 'random.randint', 'randint', (['(1)', '(99)'], {}), '(1, 99)\n', (155, 162), False, 'from random import randint\n')] |
from mock import patch
import pytest
from pontoon.base.models import User
from pontoon.pretranslation.pretranslate import get_translations
from pontoon.test.factories import (
EntityFactory,
TranslationMemoryFactory,
)
@patch("pontoon.pretranslation.pretranslate.get_google_translate_data")
@pytest.mark.django_db
def test_get_translations(gt_mock, locale_b, resource_a, google_translate_locale):
entities = [
EntityFactory(resource=resource_a, string=x, order=i)
for i, x in enumerate(["abaa", "abac", "aaab", "abab"])
]
entities[1].string_plural = entities[1].string
entities[3].string_plural = entities[3].string
entities[1].save()
entities[3].save()
google_translate_locale.cldr_plurals = "1, 2"
google_translate_locale.save()
for entity in entities[0:2]:
TranslationMemoryFactory.create(
entity=entity, source=entity.string, target=entity.string, locale=locale_b,
)
TranslationMemoryFactory.create(
entity=entity,
source=entity.string,
target=entity.string,
locale=google_translate_locale,
)
# Mock the return value of get_google_translate_data
gt_mock.return_value = {
"status": True,
"translation": "gt_translation",
}
tm_user = User.objects.get(email="<EMAIL>")
gt_user = User.objects.get(email="<EMAIL>")
# 100% match exists in translation memory.
response_a = get_translations(entities[0], locale_b)
response_b = get_translations(entities[0], google_translate_locale)
assert response_a == [(entities[0].string, None, tm_user)]
assert response_b == [(entities[0].string, None, tm_user)]
# 100% match does not exists and locale.google_translate_code is None.
response = get_translations(entities[2], locale_b)
assert response == []
# 100% match does not exists and locale.google_translate_code is not None.
response = get_translations(entities[2], google_translate_locale)
assert response == [("gt_translation", None, gt_user)]
# Entity.string_plural is not None.
response_a = get_translations(entities[1], google_translate_locale)
response_b = get_translations(entities[3], google_translate_locale)
assert response_a == [
(entities[1].string, 0, tm_user),
(entities[1].string, 1, tm_user),
]
assert response_b == [
("gt_translation", 0, gt_user),
("gt_translation", 1, gt_user),
]
| [
"mock.patch",
"pontoon.base.models.User.objects.get",
"pontoon.test.factories.TranslationMemoryFactory.create",
"pontoon.pretranslation.pretranslate.get_translations",
"pontoon.test.factories.EntityFactory"
]
| [((232, 302), 'mock.patch', 'patch', (['"""pontoon.pretranslation.pretranslate.get_google_translate_data"""'], {}), "('pontoon.pretranslation.pretranslate.get_google_translate_data')\n", (237, 302), False, 'from mock import patch\n'), ((1329, 1362), 'pontoon.base.models.User.objects.get', 'User.objects.get', ([], {'email': '"""<EMAIL>"""'}), "(email='<EMAIL>')\n", (1345, 1362), False, 'from pontoon.base.models import User\n'), ((1377, 1410), 'pontoon.base.models.User.objects.get', 'User.objects.get', ([], {'email': '"""<EMAIL>"""'}), "(email='<EMAIL>')\n", (1393, 1410), False, 'from pontoon.base.models import User\n'), ((1476, 1515), 'pontoon.pretranslation.pretranslate.get_translations', 'get_translations', (['entities[0]', 'locale_b'], {}), '(entities[0], locale_b)\n', (1492, 1515), False, 'from pontoon.pretranslation.pretranslate import get_translations\n'), ((1533, 1587), 'pontoon.pretranslation.pretranslate.get_translations', 'get_translations', (['entities[0]', 'google_translate_locale'], {}), '(entities[0], google_translate_locale)\n', (1549, 1587), False, 'from pontoon.pretranslation.pretranslate import get_translations\n'), ((1805, 1844), 'pontoon.pretranslation.pretranslate.get_translations', 'get_translations', (['entities[2]', 'locale_b'], {}), '(entities[2], locale_b)\n', (1821, 1844), False, 'from pontoon.pretranslation.pretranslate import get_translations\n'), ((1966, 2020), 'pontoon.pretranslation.pretranslate.get_translations', 'get_translations', (['entities[2]', 'google_translate_locale'], {}), '(entities[2], google_translate_locale)\n', (1982, 2020), False, 'from pontoon.pretranslation.pretranslate import get_translations\n'), ((2138, 2192), 'pontoon.pretranslation.pretranslate.get_translations', 'get_translations', (['entities[1]', 'google_translate_locale'], {}), '(entities[1], google_translate_locale)\n', (2154, 2192), False, 'from pontoon.pretranslation.pretranslate import get_translations\n'), ((2210, 2264), 'pontoon.pretranslation.pretranslate.get_translations', 'get_translations', (['entities[3]', 'google_translate_locale'], {}), '(entities[3], google_translate_locale)\n', (2226, 2264), False, 'from pontoon.pretranslation.pretranslate import get_translations\n'), ((434, 487), 'pontoon.test.factories.EntityFactory', 'EntityFactory', ([], {'resource': 'resource_a', 'string': 'x', 'order': 'i'}), '(resource=resource_a, string=x, order=i)\n', (447, 487), False, 'from pontoon.test.factories import EntityFactory, TranslationMemoryFactory\n'), ((835, 947), 'pontoon.test.factories.TranslationMemoryFactory.create', 'TranslationMemoryFactory.create', ([], {'entity': 'entity', 'source': 'entity.string', 'target': 'entity.string', 'locale': 'locale_b'}), '(entity=entity, source=entity.string, target\n =entity.string, locale=locale_b)\n', (866, 947), False, 'from pontoon.test.factories import EntityFactory, TranslationMemoryFactory\n'), ((974, 1101), 'pontoon.test.factories.TranslationMemoryFactory.create', 'TranslationMemoryFactory.create', ([], {'entity': 'entity', 'source': 'entity.string', 'target': 'entity.string', 'locale': 'google_translate_locale'}), '(entity=entity, source=entity.string, target\n =entity.string, locale=google_translate_locale)\n', (1005, 1101), False, 'from pontoon.test.factories import EntityFactory, TranslationMemoryFactory\n')] |
# -*- encoding: utf-8 -*-
"""Utility functions for computing combinations of dimensions and hierarchy
levels"""
from __future__ import absolute_import
import re
import os.path
import json
from collections import OrderedDict
from .errors import ModelInconsistencyError, ArgumentError, ConfigurationError
from . import compat
__all__ = [
"IgnoringDictionary",
"MissingPackage",
"localize_common",
"localize_attributes",
"get_localizable_attributes",
"decamelize",
"to_identifier",
"assert_instance",
"assert_all_instances",
"read_json_file",
"sorted_dependencies",
]
class IgnoringDictionary(OrderedDict):
"""Simple dictionary extension that will ignore any keys of which values
are empty (None/False)"""
def __setitem__(self, key, value):
if value is not None:
super(IgnoringDictionary, self).__setitem__(key, value)
def set(self, key, value):
"""Sets `value` for `key` even if value is null."""
super(IgnoringDictionary, self).__setitem__(key, value)
def __repr__(self):
items = []
for key, value in self.items():
item = '%s: %s' % (repr(key), repr(value))
items.append(item)
return "{%s}" % ", ".join(items)
def assert_instance(obj, class_, label):
    """Raises ModelInconsistencyError when `obj` is not an instance of `class_`"""
if not isinstance(obj, class_):
raise ModelInconsistencyError("%s should be sublcass of %s, "
"provided: %s" % (label,
class_.__name__,
type(obj).__name__))
def assert_all_instances(list_, class_, label="object"):
    """Raises ModelInconsistencyError when objects in `list_` are not
    instances of `class_`"""
for obj in list_ or []:
        assert_instance(obj, class_, label=label)
class MissingPackageError(Exception):
"""Exception raised when encountered a missing package."""
pass
class MissingPackage(object):
"""Bogus class to handle missing optional packages - packages that are not
necessarily required for Cubes, but are needed for certain features."""
def __init__(self, package, feature = None, source = None, comment = None):
self.package = package
self.feature = feature
self.source = source
self.comment = comment
def __call__(self, *args, **kwargs):
self._fail()
def __getattr__(self, name):
self._fail()
def _fail(self):
if self.feature:
use = " to be able to use: %s" % self.feature
else:
use = ""
if self.source:
source = " from %s" % self.source
else:
source = ""
if self.comment:
comment = ". %s" % self.comment
else:
comment = ""
raise MissingPackageError("Optional package '%s' is not installed. "
"Please install the package%s%s%s" %
(self.package, source, use, comment))
def optional_import(name, feature=None, source=None, comment=None):
"""Optionally import package `name`. If package does not exist, import a
placeholder object, that raises an exception with more detailed
description about the missing package."""
try:
return __import__(name)
except ImportError:
return MissingPackage(name, feature, source, comment)
def expand_dictionary(record, separator='.'):
"""Return expanded dictionary: treat keys are paths separated by
`separator`, create sub-dictionaries as necessary"""
result = {}
for key, value in record.items():
current = result
path = key.split(separator)
for part in path[:-1]:
if part not in current:
current[part] = {}
current = current[part]
current[path[-1]] = value
return result
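# Example (a small sketch of expand_dictionary's behaviour):
#
#     expand_dictionary({"address.city": "Prague", "address.zip": 11000, "name": "ACME"})
#     # -> {"address": {"city": "Prague", "zip": 11000}, "name": "ACME"}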
def localize_common(obj, trans):
"""Localize common attributes: label and description"""
if "label" in trans:
obj.label = trans["label"]
if "description" in trans:
obj.description = trans["description"]
def localize_attributes(attribs, translations):
"""Localize list of attributes. `translations` should be a dictionary with
keys as attribute names, values are dictionaries with localizable
attribute metadata, such as ``label`` or ``description``."""
for (name, atrans) in translations.items():
attrib = attribs[name]
localize_common(attrib, atrans)
def get_localizable_attributes(obj):
"""Returns a dictionary with localizable attributes of `obj`."""
# FIXME: use some kind of class attribute to get list of localizable attributes
locale = {}
try:
if obj.label:
locale["label"] = obj.label
except:
pass
try:
if obj.description:
locale["description"] = obj.description
except:
pass
return locale
def decamelize(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1 \2', s1)
def to_identifier(name):
return re.sub(r' ', r'_', name).lower()
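# Examples (sketches of the two helpers above):
#
#     decamelize("CamelCaseName")        # -> "Camel Case Name"
#     to_identifier("Camel Case Name")   # -> "camel_case_name"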
def to_label(name, capitalize=True):
"""Converts `name` into label by replacing underscores by spaces. If
`capitalize` is ``True`` (default) then the first letter of the label is
capitalized."""
label = name.replace("_", " ")
if capitalize:
label = label.capitalize()
return label
def coalesce_option_value(value, value_type, label=None):
"""Convert string into an object value of `value_type`. The type might be:
    `string` (no conversion), `integer`, `float`, `list` – comma separated
list of strings.
"""
value_type = value_type.lower()
try:
if value_type in ('string', 'str'):
return_value = str(value)
elif value_type == 'list':
if isinstance(value, compat.string_type):
return_value = value.split(",")
else:
return_value = list(value)
elif value_type == "float":
return_value = float(value)
elif value_type in ["integer", "int"]:
return_value = int(value)
elif value_type in ["bool", "boolean"]:
if not value:
return_value = False
elif isinstance(value, compat.string_type):
return_value = value.lower() in ["1", "true", "yes", "on"]
else:
return_value = bool(value)
else:
raise ArgumentError("Unknown option value type %s" % value_type)
except ValueError:
if label:
label = "parameter %s " % label
else:
label = ""
raise ArgumentError("Unable to convert %svalue '%s' into type %s" %
                              (label, value, value_type))
return return_value
def coalesce_options(options, types):
"""Coalesce `options` dictionary according to types dictionary. Keys in
`types` refer to keys in `options`, values of `types` are value types:
string, list, float, integer or bool."""
out = {}
for key, value in options.items():
if key in types:
out[key] = coalesce_option_value(value, types[key], key)
else:
out[key] = value
return out
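# Example (a sketch; the option names are invented for illustration):
#
#     coalesce_options({"port": "8080", "debug": "yes", "hosts": "a,b"},
#                      {"port": "integer", "debug": "bool", "hosts": "list"})
#     # -> {"port": 8080, "debug": True, "hosts": ["a", "b"]}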
def read_json_file(path, kind=None):
"""Read a JSON from `path`. This is convenience function that provides
more descriptive exception handling."""
kind = "%s " % str(kind) if kind else ""
if not os.path.exists(path):
raise ConfigurationError("Can not find %sfile '%s'"
% (kind, path))
try:
f = compat.open_unicode(path)
except IOError:
raise ConfigurationError("Can not open %sfile '%s'"
% (kind, path))
try:
content = json.load(f)
except ValueError as e:
raise SyntaxError("Syntax error in %sfile %s: %s"
% (kind, path, str(e)))
finally:
f.close()
return content
def sorted_dependencies(graph):
    """Return keys from `graph` ordered by dependency (topological sort).
    `graph` is a dictionary where keys are strings and values are lists of
    strings; each key is assumed to depend on its values.
Example::
A ---> B -+--> C
|
+--> D --> E
Will be: ``{"A": ["B"], "B": ["C", "D"], "D": ["E"],"E": []}``
"""
graph = dict((key, set(value)) for key, value in graph.items())
    # L ← Empty list that will contain the sorted elements
L = []
    # S ← Set of all nodes with no dependencies (incoming edges)
S = set(parent for parent, req in graph.items() if not req)
while S:
# remove a node n from S
n = S.pop()
# insert n into L
L.append(n)
# for each node m with an edge e from n to m do
# (n that depends on m)
parents = [parent for parent, req in graph.items() if n in req]
for parent in parents:
graph[parent].remove(n)
# remove edge e from the graph
# if m has no other incoming edges then insert m into S
if not graph[parent]:
S.add(parent)
# if graph has edges then -> error
nonempty = [k for k, v in graph.items() if v]
if nonempty:
raise ArgumentError("Cyclic dependency of: %s"
% ", ".join(nonempty))
return L
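# Example (based on the graph from the docstring; note that every node, including "C",
# must appear as a key, and the relative order of independent nodes may vary):
#
#     sorted_dependencies({"A": ["B"], "B": ["C", "D"], "C": [], "D": ["E"], "E": []})
#     # -> e.g. ["C", "E", "D", "B", "A"]  (dependencies always precede their dependants)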
| [
"json.load",
"re.sub"
]
| [((5075, 5118), 're.sub', 're.sub', (['"""(.)([A-Z][a-z]+)"""', '"""\\\\1 \\\\2"""', 'name'], {}), "('(.)([A-Z][a-z]+)', '\\\\1 \\\\2', name)\n", (5081, 5118), False, 'import re\n'), ((5129, 5171), 're.sub', 're.sub', (['"""([a-z0-9])([A-Z])"""', '"""\\\\1 \\\\2"""', 's1'], {}), "('([a-z0-9])([A-Z])', '\\\\1 \\\\2', s1)\n", (5135, 5171), False, 'import re\n'), ((7967, 7979), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7976, 7979), False, 'import json\n'), ((5209, 5231), 're.sub', 're.sub', (['""" """', '"""_"""', 'name'], {}), "(' ', '_', name)\n", (5215, 5231), False, 'import re\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from modules import Conv, ResBlock
class Wavenet_Student(nn.Module):
def __init__(self, num_blocks_student=[1, 1, 1, 1, 1, 1], num_layers=10,
front_channels=32, residual_channels=64, gate_channels=128, skip_channels=64,
kernel_size=3, cin_channels=80, causal=True):
super(Wavenet_Student, self).__init__()
self.num_blocks = num_blocks_student
self.num_flow = len(self.num_blocks)
self.num_layers = num_layers
self.iafs = nn.ModuleList()
for i in range(self.num_flow):
self.iafs.append(Wavenet_Flow(out_channels=2,
num_blocks=self.num_blocks[i], num_layers=self.num_layers,
front_channels=front_channels, residual_channels=residual_channels,
gate_channels=gate_channels, skip_channels=skip_channels,
kernel_size=kernel_size, cin_channels=cin_channels, causal=causal))
def forward(self, z, c):
return self.iaf(z, c)
def iaf(self, z, c_up):
mu_tot, logs_tot = 0., 0.
for i, iaf in enumerate(self.iafs):
mu_logs = iaf(z, c_up)
mu = mu_logs[:, 0:1, :-1]
logs = mu_logs[:, 1:, :-1]
mu_tot = mu_tot * torch.exp(logs) + mu
logs_tot = logs_tot + logs
z = z[:, :, 1:] * torch.exp(logs) + mu
z = F.pad(z, pad=(1, 0), mode='constant', value=0)
return z, mu_tot, logs_tot
def receptive_field(self):
receptive_field = 1
for iaf in self.iafs:
receptive_field += iaf.receptive_field_size() - 1
return receptive_field
def generate(self, z, c_up):
x, _, _ = self.iaf(z, c_up)
return x
def remove_weight_norm(self):
for iaf in self.iafs:
iaf.remove_weight_norm()
class Wavenet_Flow(nn.Module):
def __init__(self, out_channels=1, num_blocks=1, num_layers=10,
front_channels=32, residual_channels=64, gate_channels=32, skip_channels=None,
kernel_size=3, cin_channels=80, causal=True):
        super(Wavenet_Flow, self).__init__()
self.causal = causal
self.num_blocks = num_blocks
self.num_layers = num_layers
self.front_channels = front_channels
self.out_channels = out_channels
self.gate_channels = gate_channels
self.residual_channels = residual_channels
self.skip_channels = skip_channels
self.cin_channels = cin_channels
self.kernel_size = kernel_size
self.front_conv = nn.Sequential(
Conv(1, self.residual_channels, self.front_channels, causal=self.causal),
nn.ReLU()
)
self.res_blocks = nn.ModuleList()
self.res_blocks_fast = nn.ModuleList()
for b in range(self.num_blocks):
for n in range(self.num_layers):
self.res_blocks.append(ResBlock(self.residual_channels, self.gate_channels, self.skip_channels,
self.kernel_size, dilation=2**n,
cin_channels=self.cin_channels, local_conditioning=True,
causal=self.causal, mode='SAME'))
self.final_conv = nn.Sequential(
nn.ReLU(),
Conv(self.skip_channels, self.skip_channels, 1, causal=self.causal),
nn.ReLU(),
Conv(self.skip_channels, self.out_channels, 1, causal=self.causal)
)
def forward(self, x, c):
return self.wavenet(x, c)
def wavenet(self, tensor, c=None):
h = self.front_conv(tensor)
skip = 0
for i, f in enumerate(self.res_blocks):
h, s = f(h, c)
skip += s
out = self.final_conv(skip)
return out
def receptive_field_size(self):
num_dir = 1 if self.causal else 2
dilations = [2 ** (i % self.num_layers) for i in range(self.num_layers * self.num_blocks)]
return num_dir * (self.kernel_size - 1) * sum(dilations) + 1 + (self.front_channels - 1)
def remove_weight_norm(self):
for f in self.res_blocks:
f.remove_weight_norm()
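# Illustrative usage (a sketch; the batch size, sequence length and conditioning shape
# below are assumptions chosen only to match the default cin_channels=80):
#
#     model = Wavenet_Student()
#     z = torch.randn(2, 1, 16000)        # noise input: (batch, 1, time)
#     c_up = torch.randn(2, 80, 16000)    # upsampled local conditioning: (batch, cin_channels, time)
#     audio = model.generate(z, c_up)     # same (batch, 1, time) shape as z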
| [
"torch.nn.ReLU",
"torch.nn.ModuleList",
"modules.ResBlock",
"torch.exp",
"modules.Conv",
"torch.nn.functional.pad"
]
| [((569, 584), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (582, 584), True, 'import torch.nn as nn\n'), ((2892, 2907), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2905, 2907), True, 'import torch.nn as nn\n'), ((2939, 2954), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2952, 2954), True, 'import torch.nn as nn\n'), ((1539, 1585), 'torch.nn.functional.pad', 'F.pad', (['z'], {'pad': '(1, 0)', 'mode': '"""constant"""', 'value': '(0)'}), "(z, pad=(1, 0), mode='constant', value=0)\n", (1544, 1585), True, 'import torch.nn.functional as F\n'), ((2760, 2832), 'modules.Conv', 'Conv', (['(1)', 'self.residual_channels', 'self.front_channels'], {'causal': 'self.causal'}), '(1, self.residual_channels, self.front_channels, causal=self.causal)\n', (2764, 2832), False, 'from modules import Conv, ResBlock\n'), ((2846, 2855), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2853, 2855), True, 'import torch.nn as nn\n'), ((3474, 3483), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3481, 3483), True, 'import torch.nn as nn\n'), ((3497, 3564), 'modules.Conv', 'Conv', (['self.skip_channels', 'self.skip_channels', '(1)'], {'causal': 'self.causal'}), '(self.skip_channels, self.skip_channels, 1, causal=self.causal)\n', (3501, 3564), False, 'from modules import Conv, ResBlock\n'), ((3578, 3587), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3585, 3587), True, 'import torch.nn as nn\n'), ((3601, 3667), 'modules.Conv', 'Conv', (['self.skip_channels', 'self.out_channels', '(1)'], {'causal': 'self.causal'}), '(self.skip_channels, self.out_channels, 1, causal=self.causal)\n', (3605, 3667), False, 'from modules import Conv, ResBlock\n'), ((1412, 1427), 'torch.exp', 'torch.exp', (['logs'], {}), '(logs)\n', (1421, 1427), False, 'import torch\n'), ((1502, 1517), 'torch.exp', 'torch.exp', (['logs'], {}), '(logs)\n', (1511, 1517), False, 'import torch\n'), ((3080, 3285), 'modules.ResBlock', 'ResBlock', (['self.residual_channels', 'self.gate_channels', 'self.skip_channels', 'self.kernel_size'], {'dilation': '(2 ** n)', 'cin_channels': 'self.cin_channels', 'local_conditioning': '(True)', 'causal': 'self.causal', 'mode': '"""SAME"""'}), "(self.residual_channels, self.gate_channels, self.skip_channels,\n self.kernel_size, dilation=2 ** n, cin_channels=self.cin_channels,\n local_conditioning=True, causal=self.causal, mode='SAME')\n", (3088, 3285), False, 'from modules import Conv, ResBlock\n')] |
try:
from gevent import monkey
monkey.patch_all()
except ImportError:
# fine if no gevent is available
pass
import base64
import logging
from unittest.mock import Mock
from flask.app import Flask
from flask_testing import TestCase
from openbrokerapi.api import BrokerCredentials
from openbrokerapi.log_util import basic_config
class BrokerTestCase(TestCase):
auth_header = 'Basic ' + base64.b64encode(b":").decode("ascii")
def create_app(self):
from openbrokerapi.api import get_blueprint
app = Flask(__name__)
self.broker = Mock()
app.register_blueprint(
get_blueprint(self.broker,
BrokerCredentials("", ""),
basic_config(level=logging.WARN)
)
)
return app
| [
"unittest.mock.Mock",
"gevent.monkey.patch_all",
"openbrokerapi.log_util.basic_config",
"base64.b64encode",
"openbrokerapi.api.BrokerCredentials",
"flask.app.Flask"
]
| [((39, 57), 'gevent.monkey.patch_all', 'monkey.patch_all', ([], {}), '()\n', (55, 57), False, 'from gevent import monkey\n'), ((542, 557), 'flask.app.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (547, 557), False, 'from flask.app import Flask\n'), ((580, 586), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (584, 586), False, 'from unittest.mock import Mock\n'), ((409, 431), 'base64.b64encode', 'base64.b64encode', (["b':'"], {}), "(b':')\n", (425, 431), False, 'import base64\n'), ((685, 710), 'openbrokerapi.api.BrokerCredentials', 'BrokerCredentials', (['""""""', '""""""'], {}), "('', '')\n", (702, 710), False, 'from openbrokerapi.api import BrokerCredentials\n'), ((738, 770), 'openbrokerapi.log_util.basic_config', 'basic_config', ([], {'level': 'logging.WARN'}), '(level=logging.WARN)\n', (750, 770), False, 'from openbrokerapi.log_util import basic_config\n')] |
#! /usr/bin/env python
from __future__ import print_function
import pandas as pd
import numpy as np
import argparse
def generate_csv(start_index, fname):
cols = [
str('A' + str(i)) for i in range(start_index, NUM_COLS + start_index)
]
data = []
for i in range(NUM_ROWS):
        vals = [np.random.choice(NUM_DISTINCT_VALS) for j in range(NUM_COLS)]
data.append(vals)
df = pd.DataFrame(data=data, columns=cols)
df.to_csv(fname, index=False, header=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Generate sample tables to test joins.')
parser.add_argument('--num-rows', '-r', type=int, default=100)
parser.add_argument('--num-cols', '-c', type=int, required=True)
parser.add_argument('--num-distinct-vals', '-d', type=int, required=True)
parser.add_argument('--num-cols-overlap', '-o', type=int, default=1)
args = parser.parse_args()
NUM_ROWS = args.num_rows
NUM_COLS = args.num_cols
NUM_DISTINCT_VALS = args.num_distinct_vals
num_overlap = args.num_cols_overlap
if num_overlap > NUM_COLS:
print('--num-cols-overlap cannot be greater than --num-cols')
import sys
sys.exit(1)
generate_csv(0, 'table_a.csv')
generate_csv(NUM_COLS - num_overlap, 'table_b.csv')
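# Example invocation (the script filename is a placeholder; the flags come from the
# argparse definitions above):
#
#     python generate_tables.py --num-rows 200 --num-cols 5 --num-distinct-vals 10 --num-cols-overlap 2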
| [
"pandas.DataFrame",
"numpy.random.choice",
"argparse.ArgumentParser",
"sys.exit"
]
| [((413, 450), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'cols'}), '(data=data, columns=cols)\n', (425, 450), True, 'import pandas as pd\n'), ((540, 616), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate sample tables to test joins."""'}), "(description='Generate sample tables to test joins.')\n", (563, 616), False, 'import argparse\n'), ((1219, 1230), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1227, 1230), False, 'import sys\n'), ((315, 350), 'numpy.random.choice', 'np.random.choice', (['NUM_DISTINCT_VALS'], {}), '(NUM_DISTINCT_VALS)\n', (331, 350), True, 'import numpy as np\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['OpenShiftManagedClusterArgs', 'OpenShiftManagedCluster']
@pulumi.input_type
class OpenShiftManagedClusterArgs:
def __init__(__self__, *,
open_shift_version: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
agent_pool_profiles: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterAgentPoolProfileArgs']]]] = None,
auth_profile: Optional[pulumi.Input['OpenShiftManagedClusterAuthProfileArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
master_pool_profile: Optional[pulumi.Input['OpenShiftManagedClusterMasterPoolProfileArgs']] = None,
monitor_profile: Optional[pulumi.Input['OpenShiftManagedClusterMonitorProfileArgs']] = None,
network_profile: Optional[pulumi.Input['NetworkProfileArgs']] = None,
plan: Optional[pulumi.Input['PurchasePlanArgs']] = None,
refresh_cluster: Optional[pulumi.Input[bool]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
router_profiles: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftRouterProfileArgs']]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a OpenShiftManagedCluster resource.
:param pulumi.Input[str] open_shift_version: Version of OpenShift specified when creating the cluster.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterAgentPoolProfileArgs']]] agent_pool_profiles: Configuration of OpenShift cluster VMs.
:param pulumi.Input['OpenShiftManagedClusterAuthProfileArgs'] auth_profile: Configures OpenShift authentication.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input['OpenShiftManagedClusterMasterPoolProfileArgs'] master_pool_profile: Configuration for OpenShift master VMs.
:param pulumi.Input['OpenShiftManagedClusterMonitorProfileArgs'] monitor_profile: Configures Log Analytics integration.
:param pulumi.Input['NetworkProfileArgs'] network_profile: Configuration for OpenShift networking.
:param pulumi.Input['PurchasePlanArgs'] plan: Define the resource plan as required by ARM for billing purposes
:param pulumi.Input[bool] refresh_cluster: Allows node rotation
:param pulumi.Input[str] resource_name: The name of the OpenShift managed cluster resource.
:param pulumi.Input[Sequence[pulumi.Input['OpenShiftRouterProfileArgs']]] router_profiles: Configuration for OpenShift router(s).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
pulumi.set(__self__, "open_shift_version", open_shift_version)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if agent_pool_profiles is not None:
pulumi.set(__self__, "agent_pool_profiles", agent_pool_profiles)
if auth_profile is not None:
pulumi.set(__self__, "auth_profile", auth_profile)
if location is not None:
pulumi.set(__self__, "location", location)
if master_pool_profile is not None:
pulumi.set(__self__, "master_pool_profile", master_pool_profile)
if monitor_profile is not None:
pulumi.set(__self__, "monitor_profile", monitor_profile)
if network_profile is not None:
pulumi.set(__self__, "network_profile", network_profile)
if plan is not None:
pulumi.set(__self__, "plan", plan)
if refresh_cluster is not None:
pulumi.set(__self__, "refresh_cluster", refresh_cluster)
if resource_name is not None:
pulumi.set(__self__, "resource_name", resource_name)
if router_profiles is not None:
pulumi.set(__self__, "router_profiles", router_profiles)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="openShiftVersion")
def open_shift_version(self) -> pulumi.Input[str]:
"""
Version of OpenShift specified when creating the cluster.
"""
return pulumi.get(self, "open_shift_version")
@open_shift_version.setter
def open_shift_version(self, value: pulumi.Input[str]):
pulumi.set(self, "open_shift_version", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="agentPoolProfiles")
def agent_pool_profiles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterAgentPoolProfileArgs']]]]:
"""
Configuration of OpenShift cluster VMs.
"""
return pulumi.get(self, "agent_pool_profiles")
@agent_pool_profiles.setter
def agent_pool_profiles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterAgentPoolProfileArgs']]]]):
pulumi.set(self, "agent_pool_profiles", value)
@property
@pulumi.getter(name="authProfile")
def auth_profile(self) -> Optional[pulumi.Input['OpenShiftManagedClusterAuthProfileArgs']]:
"""
Configures OpenShift authentication.
"""
return pulumi.get(self, "auth_profile")
@auth_profile.setter
def auth_profile(self, value: Optional[pulumi.Input['OpenShiftManagedClusterAuthProfileArgs']]):
pulumi.set(self, "auth_profile", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="masterPoolProfile")
def master_pool_profile(self) -> Optional[pulumi.Input['OpenShiftManagedClusterMasterPoolProfileArgs']]:
"""
Configuration for OpenShift master VMs.
"""
return pulumi.get(self, "master_pool_profile")
@master_pool_profile.setter
def master_pool_profile(self, value: Optional[pulumi.Input['OpenShiftManagedClusterMasterPoolProfileArgs']]):
pulumi.set(self, "master_pool_profile", value)
@property
@pulumi.getter(name="monitorProfile")
def monitor_profile(self) -> Optional[pulumi.Input['OpenShiftManagedClusterMonitorProfileArgs']]:
"""
Configures Log Analytics integration.
"""
return pulumi.get(self, "monitor_profile")
@monitor_profile.setter
def monitor_profile(self, value: Optional[pulumi.Input['OpenShiftManagedClusterMonitorProfileArgs']]):
pulumi.set(self, "monitor_profile", value)
@property
@pulumi.getter(name="networkProfile")
def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:
"""
Configuration for OpenShift networking.
"""
return pulumi.get(self, "network_profile")
@network_profile.setter
def network_profile(self, value: Optional[pulumi.Input['NetworkProfileArgs']]):
pulumi.set(self, "network_profile", value)
@property
@pulumi.getter
def plan(self) -> Optional[pulumi.Input['PurchasePlanArgs']]:
"""
Define the resource plan as required by ARM for billing purposes
"""
return pulumi.get(self, "plan")
@plan.setter
def plan(self, value: Optional[pulumi.Input['PurchasePlanArgs']]):
pulumi.set(self, "plan", value)
@property
@pulumi.getter(name="refreshCluster")
def refresh_cluster(self) -> Optional[pulumi.Input[bool]]:
"""
Allows node rotation
"""
return pulumi.get(self, "refresh_cluster")
@refresh_cluster.setter
def refresh_cluster(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "refresh_cluster", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the OpenShift managed cluster resource.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="routerProfiles")
def router_profiles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftRouterProfileArgs']]]]:
"""
Configuration for OpenShift router(s).
"""
return pulumi.get(self, "router_profiles")
@router_profiles.setter
def router_profiles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftRouterProfileArgs']]]]):
pulumi.set(self, "router_profiles", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class OpenShiftManagedCluster(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_pool_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAgentPoolProfileArgs']]]]] = None,
auth_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAuthProfileArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
master_pool_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMasterPoolProfileArgs']]] = None,
monitor_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMonitorProfileArgs']]] = None,
network_profile: Optional[pulumi.Input[pulumi.InputType['NetworkProfileArgs']]] = None,
open_shift_version: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['PurchasePlanArgs']]] = None,
refresh_cluster: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
router_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftRouterProfileArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
OpenShift Managed cluster.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAgentPoolProfileArgs']]]] agent_pool_profiles: Configuration of OpenShift cluster VMs.
:param pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAuthProfileArgs']] auth_profile: Configures OpenShift authentication.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMasterPoolProfileArgs']] master_pool_profile: Configuration for OpenShift master VMs.
:param pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMonitorProfileArgs']] monitor_profile: Configures Log Analytics integration.
:param pulumi.Input[pulumi.InputType['NetworkProfileArgs']] network_profile: Configuration for OpenShift networking.
:param pulumi.Input[str] open_shift_version: Version of OpenShift specified when creating the cluster.
:param pulumi.Input[pulumi.InputType['PurchasePlanArgs']] plan: Define the resource plan as required by ARM for billing purposes
:param pulumi.Input[bool] refresh_cluster: Allows node rotation
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_name_: The name of the OpenShift managed cluster resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftRouterProfileArgs']]]] router_profiles: Configuration for OpenShift router(s).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: OpenShiftManagedClusterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
OpenShift Managed cluster.
:param str resource_name: The name of the resource.
:param OpenShiftManagedClusterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
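        # Dispatch between the two overloads: a typed OpenShiftManagedClusterArgs object or plain keyword arguments.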
resource_args, opts = _utilities.get_resource_args_opts(OpenShiftManagedClusterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_pool_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAgentPoolProfileArgs']]]]] = None,
auth_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAuthProfileArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
master_pool_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMasterPoolProfileArgs']]] = None,
monitor_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMonitorProfileArgs']]] = None,
network_profile: Optional[pulumi.Input[pulumi.InputType['NetworkProfileArgs']]] = None,
open_shift_version: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['PurchasePlanArgs']]] = None,
refresh_cluster: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
router_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftRouterProfileArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = OpenShiftManagedClusterArgs.__new__(OpenShiftManagedClusterArgs)
__props__.__dict__["agent_pool_profiles"] = agent_pool_profiles
__props__.__dict__["auth_profile"] = auth_profile
__props__.__dict__["location"] = location
__props__.__dict__["master_pool_profile"] = master_pool_profile
__props__.__dict__["monitor_profile"] = monitor_profile
__props__.__dict__["network_profile"] = network_profile
if open_shift_version is None and not opts.urn:
raise TypeError("Missing required property 'open_shift_version'")
__props__.__dict__["open_shift_version"] = open_shift_version
__props__.__dict__["plan"] = plan
__props__.__dict__["refresh_cluster"] = refresh_cluster
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["router_profiles"] = router_profiles
__props__.__dict__["tags"] = tags
__props__.__dict__["cluster_version"] = None
__props__.__dict__["fqdn"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["public_hostname"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerservice/v20191027preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice/v20180930preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice/v20180930preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice/v20190430:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice/v20190430:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice/v20190930preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice/v20190930preview:OpenShiftManagedCluster")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(OpenShiftManagedCluster, __self__).__init__(
'azure-native:containerservice/v20191027preview:OpenShiftManagedCluster',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'OpenShiftManagedCluster':
"""
Get an existing OpenShiftManagedCluster resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = OpenShiftManagedClusterArgs.__new__(OpenShiftManagedClusterArgs)
__props__.__dict__["agent_pool_profiles"] = None
__props__.__dict__["auth_profile"] = None
__props__.__dict__["cluster_version"] = None
__props__.__dict__["fqdn"] = None
__props__.__dict__["location"] = None
__props__.__dict__["master_pool_profile"] = None
__props__.__dict__["monitor_profile"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_profile"] = None
__props__.__dict__["open_shift_version"] = None
__props__.__dict__["plan"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["public_hostname"] = None
__props__.__dict__["refresh_cluster"] = None
__props__.__dict__["router_profiles"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return OpenShiftManagedCluster(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="agentPoolProfiles")
def agent_pool_profiles(self) -> pulumi.Output[Optional[Sequence['outputs.OpenShiftManagedClusterAgentPoolProfileResponse']]]:
"""
Configuration of OpenShift cluster VMs.
"""
return pulumi.get(self, "agent_pool_profiles")
@property
@pulumi.getter(name="authProfile")
def auth_profile(self) -> pulumi.Output[Optional['outputs.OpenShiftManagedClusterAuthProfileResponse']]:
"""
Configures OpenShift authentication.
"""
return pulumi.get(self, "auth_profile")
@property
@pulumi.getter(name="clusterVersion")
def cluster_version(self) -> pulumi.Output[str]:
"""
Version of OpenShift specified when creating the cluster.
"""
return pulumi.get(self, "cluster_version")
@property
@pulumi.getter
def fqdn(self) -> pulumi.Output[str]:
"""
Service generated FQDN for OpenShift API server loadbalancer internal hostname.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="masterPoolProfile")
def master_pool_profile(self) -> pulumi.Output[Optional['outputs.OpenShiftManagedClusterMasterPoolProfileResponse']]:
"""
Configuration for OpenShift master VMs.
"""
return pulumi.get(self, "master_pool_profile")
@property
@pulumi.getter(name="monitorProfile")
def monitor_profile(self) -> pulumi.Output[Optional['outputs.OpenShiftManagedClusterMonitorProfileResponse']]:
"""
Configures Log Analytics integration.
"""
return pulumi.get(self, "monitor_profile")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkProfile")
def network_profile(self) -> pulumi.Output[Optional['outputs.NetworkProfileResponse']]:
"""
Configuration for OpenShift networking.
"""
return pulumi.get(self, "network_profile")
@property
@pulumi.getter(name="openShiftVersion")
def open_shift_version(self) -> pulumi.Output[str]:
"""
Version of OpenShift specified when creating the cluster.
"""
return pulumi.get(self, "open_shift_version")
@property
@pulumi.getter
def plan(self) -> pulumi.Output[Optional['outputs.PurchasePlanResponse']]:
"""
Define the resource plan as required by ARM for billing purposes
"""
return pulumi.get(self, "plan")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The current deployment or provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicHostname")
def public_hostname(self) -> pulumi.Output[str]:
"""
Service generated FQDN or private IP for OpenShift API server.
"""
return pulumi.get(self, "public_hostname")
@property
@pulumi.getter(name="refreshCluster")
def refresh_cluster(self) -> pulumi.Output[Optional[bool]]:
"""
Allows node rotation
"""
return pulumi.get(self, "refresh_cluster")
@property
@pulumi.getter(name="routerProfiles")
def router_profiles(self) -> pulumi.Output[Optional[Sequence['outputs.OpenShiftRouterProfileResponse']]]:
"""
Configuration for OpenShift router(s).
"""
return pulumi.get(self, "router_profiles")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
| [
"pulumi.get",
"pulumi.Alias",
"pulumi.getter",
"pulumi.set",
"pulumi.ResourceOptions",
"pulumi.ResourceOptions.merge"
]
| [((4505, 4543), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""openShiftVersion"""'}), "(name='openShiftVersion')\n", (4518, 4543), False, 'import pulumi\n'), ((4909, 4948), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceGroupName"""'}), "(name='resourceGroupName')\n", (4922, 4948), False, 'import pulumi\n'), ((5293, 5332), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""agentPoolProfiles"""'}), "(name='agentPoolProfiles')\n", (5306, 5332), False, 'import pulumi\n'), ((5837, 5870), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""authProfile"""'}), "(name='authProfile')\n", (5850, 5870), False, 'import pulumi\n'), ((6588, 6627), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""masterPoolProfile"""'}), "(name='masterPoolProfile')\n", (6601, 6627), False, 'import pulumi\n'), ((7086, 7122), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""monitorProfile"""'}), "(name='monitorProfile')\n", (7099, 7122), False, 'import pulumi\n'), ((7553, 7589), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkProfile"""'}), "(name='networkProfile')\n", (7566, 7589), False, 'import pulumi\n'), ((8342, 8378), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""refreshCluster"""'}), "(name='refreshCluster')\n", (8355, 8378), False, 'import pulumi\n'), ((8714, 8748), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceName"""'}), "(name='resourceName')\n", (8727, 8748), False, 'import pulumi\n'), ((9103, 9139), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""routerProfiles"""'}), "(name='routerProfiles')\n", (9116, 9139), False, 'import pulumi\n'), ((20481, 20520), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""agentPoolProfiles"""'}), "(name='agentPoolProfiles')\n", (20494, 20520), False, 'import pulumi\n'), ((20799, 20832), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""authProfile"""'}), "(name='authProfile')\n", (20812, 20832), False, 'import pulumi\n'), ((21079, 21115), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""clusterVersion"""'}), "(name='clusterVersion')\n", (21092, 21115), False, 'import pulumi\n'), ((21732, 21771), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""masterPoolProfile"""'}), "(name='masterPoolProfile')\n", (21745, 21771), False, 'import pulumi\n'), ((22041, 22077), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""monitorProfile"""'}), "(name='monitorProfile')\n", (22054, 22077), False, 'import pulumi\n'), ((22496, 22532), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkProfile"""'}), "(name='networkProfile')\n", (22509, 22532), False, 'import pulumi\n'), ((22768, 22806), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""openShiftVersion"""'}), "(name='openShiftVersion')\n", (22781, 22806), False, 'import pulumi\n'), ((23277, 23316), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""provisioningState"""'}), "(name='provisioningState')\n", (23290, 23316), False, 'import pulumi\n'), ((23561, 23597), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""publicHostname"""'}), "(name='publicHostname')\n", (23574, 23597), False, 'import pulumi\n'), ((23817, 23853), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""refreshCluster"""'}), "(name='refreshCluster')\n", (23830, 23853), False, 'import pulumi\n'), ((24042, 24078), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""routerProfiles"""'}), "(name='routerProfiles')\n", (24055, 24078), False, 'import pulumi\n'), ((3228, 3290), 'pulumi.set', 'pulumi.set', (['__self__', '"""open_shift_version"""', 'open_shift_version'], {}), "(__self__, 
'open_shift_version', open_shift_version)\n", (3238, 3290), False, 'import pulumi\n'), ((3299, 3363), 'pulumi.set', 'pulumi.set', (['__self__', '"""resource_group_name"""', 'resource_group_name'], {}), "(__self__, 'resource_group_name', resource_group_name)\n", (3309, 3363), False, 'import pulumi\n'), ((4704, 4742), 'pulumi.get', 'pulumi.get', (['self', '"""open_shift_version"""'], {}), "(self, 'open_shift_version')\n", (4714, 4742), False, 'import pulumi\n'), ((4843, 4888), 'pulumi.set', 'pulumi.set', (['self', '"""open_shift_version"""', 'value'], {}), "(self, 'open_shift_version', value)\n", (4853, 4888), False, 'import pulumi\n'), ((5084, 5123), 'pulumi.get', 'pulumi.get', (['self', '"""resource_group_name"""'], {}), "(self, 'resource_group_name')\n", (5094, 5123), False, 'import pulumi\n'), ((5226, 5272), 'pulumi.set', 'pulumi.set', (['self', '"""resource_group_name"""', 'value'], {}), "(self, 'resource_group_name', value)\n", (5236, 5272), False, 'import pulumi\n'), ((5552, 5591), 'pulumi.get', 'pulumi.get', (['self', '"""agent_pool_profiles"""'], {}), "(self, 'agent_pool_profiles')\n", (5562, 5591), False, 'import pulumi\n'), ((5770, 5816), 'pulumi.set', 'pulumi.set', (['self', '"""agent_pool_profiles"""', 'value'], {}), "(self, 'agent_pool_profiles', value)\n", (5780, 5816), False, 'import pulumi\n'), ((6051, 6083), 'pulumi.get', 'pulumi.get', (['self', '"""auth_profile"""'], {}), "(self, 'auth_profile')\n", (6061, 6083), False, 'import pulumi\n'), ((6219, 6258), 'pulumi.set', 'pulumi.set', (['self', '"""auth_profile"""', 'value'], {}), "(self, 'auth_profile', value)\n", (6229, 6258), False, 'import pulumi\n'), ((6413, 6441), 'pulumi.get', 'pulumi.get', (['self', '"""location"""'], {}), "(self, 'location')\n", (6423, 6441), False, 'import pulumi\n'), ((6532, 6567), 'pulumi.set', 'pulumi.set', (['self', '"""location"""', 'value'], {}), "(self, 'location', value)\n", (6542, 6567), False, 'import pulumi\n'), ((6824, 6863), 'pulumi.get', 'pulumi.get', (['self', '"""master_pool_profile"""'], {}), "(self, 'master_pool_profile')\n", (6834, 6863), False, 'import pulumi\n'), ((7019, 7065), 'pulumi.set', 'pulumi.set', (['self', '"""master_pool_profile"""', 'value'], {}), "(self, 'master_pool_profile', value)\n", (7029, 7065), False, 'import pulumi\n'), ((7310, 7345), 'pulumi.get', 'pulumi.get', (['self', '"""monitor_profile"""'], {}), "(self, 'monitor_profile')\n", (7320, 7345), False, 'import pulumi\n'), ((7490, 7532), 'pulumi.set', 'pulumi.set', (['self', '"""monitor_profile"""', 'value'], {}), "(self, 'monitor_profile', value)\n", (7500, 7532), False, 'import pulumi\n'), ((7756, 7791), 'pulumi.get', 'pulumi.get', (['self', '"""network_profile"""'], {}), "(self, 'network_profile')\n", (7766, 7791), False, 'import pulumi\n'), ((7913, 7955), 'pulumi.set', 'pulumi.set', (['self', '"""network_profile"""', 'value'], {}), "(self, 'network_profile', value)\n", (7923, 7955), False, 'import pulumi\n'), ((8168, 8192), 'pulumi.get', 'pulumi.get', (['self', '"""plan"""'], {}), "(self, 'plan')\n", (8178, 8192), False, 'import pulumi\n'), ((8290, 8321), 'pulumi.set', 'pulumi.set', (['self', '"""plan"""', 'value'], {}), "(self, 'plan', value)\n", (8300, 8321), False, 'import pulumi\n'), ((8510, 8545), 'pulumi.get', 'pulumi.get', (['self', '"""refresh_cluster"""'], {}), "(self, 'refresh_cluster')\n", (8520, 8545), False, 'import pulumi\n'), ((8651, 8693), 'pulumi.set', 'pulumi.set', (['self', '"""refresh_cluster"""', 'value'], {}), "(self, 'refresh_cluster', value)\n", (8661, 8693), False, 'import 
pulumi\n'), ((8908, 8941), 'pulumi.get', 'pulumi.get', (['self', '"""resource_name"""'], {}), "(self, 'resource_name')\n", (8918, 8941), False, 'import pulumi\n'), ((9042, 9082), 'pulumi.set', 'pulumi.set', (['self', '"""resource_name"""', 'value'], {}), "(self, 'resource_name', value)\n", (9052, 9082), False, 'import pulumi\n'), ((9337, 9372), 'pulumi.get', 'pulumi.get', (['self', '"""router_profiles"""'], {}), "(self, 'router_profiles')\n", (9347, 9372), False, 'import pulumi\n'), ((9526, 9568), 'pulumi.set', 'pulumi.set', (['self', '"""router_profiles"""', 'value'], {}), "(self, 'router_profiles', value)\n", (9536, 9568), False, 'import pulumi\n'), ((9743, 9767), 'pulumi.get', 'pulumi.get', (['self', '"""tags"""'], {}), "(self, 'tags')\n", (9753, 9767), False, 'import pulumi\n'), ((9878, 9909), 'pulumi.set', 'pulumi.set', (['self', '"""tags"""', 'value'], {}), "(self, 'tags', value)\n", (9888, 9909), False, 'import pulumi\n'), ((18507, 18553), 'pulumi.ResourceOptions.merge', 'pulumi.ResourceOptions.merge', (['opts', 'alias_opts'], {}), '(opts, alias_opts)\n', (18535, 18553), False, 'import pulumi\n'), ((20739, 20778), 'pulumi.get', 'pulumi.get', (['self', '"""agent_pool_profiles"""'], {}), "(self, 'agent_pool_profiles')\n", (20749, 20778), False, 'import pulumi\n'), ((21026, 21058), 'pulumi.get', 'pulumi.get', (['self', '"""auth_profile"""'], {}), "(self, 'auth_profile')\n", (21036, 21058), False, 'import pulumi\n'), ((21274, 21309), 'pulumi.get', 'pulumi.get', (['self', '"""cluster_version"""'], {}), "(self, 'cluster_version')\n", (21284, 21309), False, 'import pulumi\n'), ((21513, 21537), 'pulumi.get', 'pulumi.get', (['self', '"""fqdn"""'], {}), "(self, 'fqdn')\n", (21523, 21537), False, 'import pulumi\n'), ((21683, 21711), 'pulumi.get', 'pulumi.get', (['self', '"""location"""'], {}), "(self, 'location')\n", (21693, 21711), False, 'import pulumi\n'), ((21981, 22020), 'pulumi.get', 'pulumi.get', (['self', '"""master_pool_profile"""'], {}), "(self, 'master_pool_profile')\n", (21991, 22020), False, 'import pulumi\n'), ((22278, 22313), 'pulumi.get', 'pulumi.get', (['self', '"""monitor_profile"""'], {}), "(self, 'monitor_profile')\n", (22288, 22313), False, 'import pulumi\n'), ((22451, 22475), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (22461, 22475), False, 'import pulumi\n'), ((22712, 22747), 'pulumi.get', 'pulumi.get', (['self', '"""network_profile"""'], {}), "(self, 'network_profile')\n", (22722, 22747), False, 'import pulumi\n'), ((22968, 23006), 'pulumi.get', 'pulumi.get', (['self', '"""open_shift_version"""'], {}), "(self, 'open_shift_version')\n", (22978, 23006), False, 'import pulumi\n'), ((23232, 23256), 'pulumi.get', 'pulumi.get', (['self', '"""plan"""'], {}), "(self, 'plan')\n", (23242, 23256), False, 'import pulumi\n'), ((23502, 23540), 'pulumi.get', 'pulumi.get', (['self', '"""provisioning_state"""'], {}), "(self, 'provisioning_state')\n", (23512, 23540), False, 'import pulumi\n'), ((23761, 23796), 'pulumi.get', 'pulumi.get', (['self', '"""public_hostname"""'], {}), "(self, 'public_hostname')\n", (23771, 23796), False, 'import pulumi\n'), ((23986, 24021), 'pulumi.get', 'pulumi.get', (['self', '"""refresh_cluster"""'], {}), "(self, 'refresh_cluster')\n", (23996, 24021), False, 'import pulumi\n'), ((24275, 24310), 'pulumi.get', 'pulumi.get', (['self', '"""router_profiles"""'], {}), "(self, 'router_profiles')\n", (24285, 24310), False, 'import pulumi\n'), ((24472, 24496), 'pulumi.get', 'pulumi.get', (['self', '"""tags"""'], {}), "(self, 
'tags')\n", (24482, 24496), False, 'import pulumi\n'), ((24634, 24658), 'pulumi.get', 'pulumi.get', (['self', '"""type"""'], {}), "(self, 'type')\n", (24644, 24658), False, 'import pulumi\n'), ((3420, 3484), 'pulumi.set', 'pulumi.set', (['__self__', '"""agent_pool_profiles"""', 'agent_pool_profiles'], {}), "(__self__, 'agent_pool_profiles', agent_pool_profiles)\n", (3430, 3484), False, 'import pulumi\n'), ((3534, 3584), 'pulumi.set', 'pulumi.set', (['__self__', '"""auth_profile"""', 'auth_profile'], {}), "(__self__, 'auth_profile', auth_profile)\n", (3544, 3584), False, 'import pulumi\n'), ((3630, 3672), 'pulumi.set', 'pulumi.set', (['__self__', '"""location"""', 'location'], {}), "(__self__, 'location', location)\n", (3640, 3672), False, 'import pulumi\n'), ((3729, 3793), 'pulumi.set', 'pulumi.set', (['__self__', '"""master_pool_profile"""', 'master_pool_profile'], {}), "(__self__, 'master_pool_profile', master_pool_profile)\n", (3739, 3793), False, 'import pulumi\n'), ((3846, 3902), 'pulumi.set', 'pulumi.set', (['__self__', '"""monitor_profile"""', 'monitor_profile'], {}), "(__self__, 'monitor_profile', monitor_profile)\n", (3856, 3902), False, 'import pulumi\n'), ((3955, 4011), 'pulumi.set', 'pulumi.set', (['__self__', '"""network_profile"""', 'network_profile'], {}), "(__self__, 'network_profile', network_profile)\n", (3965, 4011), False, 'import pulumi\n'), ((4053, 4087), 'pulumi.set', 'pulumi.set', (['__self__', '"""plan"""', 'plan'], {}), "(__self__, 'plan', plan)\n", (4063, 4087), False, 'import pulumi\n'), ((4140, 4196), 'pulumi.set', 'pulumi.set', (['__self__', '"""refresh_cluster"""', 'refresh_cluster'], {}), "(__self__, 'refresh_cluster', refresh_cluster)\n", (4150, 4196), False, 'import pulumi\n'), ((4247, 4299), 'pulumi.set', 'pulumi.set', (['__self__', '"""resource_name"""', 'resource_name'], {}), "(__self__, 'resource_name', resource_name)\n", (4257, 4299), False, 'import pulumi\n'), ((4352, 4408), 'pulumi.set', 'pulumi.set', (['__self__', '"""router_profiles"""', 'router_profiles'], {}), "(__self__, 'router_profiles', router_profiles)\n", (4362, 4408), False, 'import pulumi\n'), ((4450, 4484), 'pulumi.set', 'pulumi.set', (['__self__', '"""tags"""', 'tags'], {}), "(__self__, 'tags', tags)\n", (4460, 4484), False, 'import pulumi\n'), ((15642, 15666), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (15664, 15666), False, 'import pulumi\n'), ((19407, 19436), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (19429, 19436), False, 'import pulumi\n'), ((17688, 17786), 'pulumi.Alias', 'pulumi.Alias', ([], {'type_': '"""azure-nextgen:containerservice/v20191027preview:OpenShiftManagedCluster"""'}), "(type_=\n 'azure-nextgen:containerservice/v20191027preview:OpenShiftManagedCluster')\n", (17700, 17786), False, 'import pulumi\n'), ((17783, 17858), 'pulumi.Alias', 'pulumi.Alias', ([], {'type_': '"""azure-native:containerservice:OpenShiftManagedCluster"""'}), "(type_='azure-native:containerservice:OpenShiftManagedCluster')\n", (17795, 17858), False, 'import pulumi\n'), ((17860, 17936), 'pulumi.Alias', 'pulumi.Alias', ([], {'type_': '"""azure-nextgen:containerservice:OpenShiftManagedCluster"""'}), "(type_='azure-nextgen:containerservice:OpenShiftManagedCluster')\n", (17872, 17936), False, 'import pulumi\n'), ((17938, 18035), 'pulumi.Alias', 'pulumi.Alias', ([], {'type_': '"""azure-native:containerservice/v20180930preview:OpenShiftManagedCluster"""'}), "(type_=\n 
'azure-native:containerservice/v20180930preview:OpenShiftManagedCluster')\n", (17950, 18035), False, 'import pulumi\n'), ((18032, 18130), 'pulumi.Alias', 'pulumi.Alias', ([], {'type_': '"""azure-nextgen:containerservice/v20180930preview:OpenShiftManagedCluster"""'}), "(type_=\n 'azure-nextgen:containerservice/v20180930preview:OpenShiftManagedCluster')\n", (18044, 18130), False, 'import pulumi\n'), ((18127, 18217), 'pulumi.Alias', 'pulumi.Alias', ([], {'type_': '"""azure-native:containerservice/v20190430:OpenShiftManagedCluster"""'}), "(type_=\n 'azure-native:containerservice/v20190430:OpenShiftManagedCluster')\n", (18139, 18217), False, 'import pulumi\n'), ((18214, 18305), 'pulumi.Alias', 'pulumi.Alias', ([], {'type_': '"""azure-nextgen:containerservice/v20190430:OpenShiftManagedCluster"""'}), "(type_=\n 'azure-nextgen:containerservice/v20190430:OpenShiftManagedCluster')\n", (18226, 18305), False, 'import pulumi\n'), ((18302, 18399), 'pulumi.Alias', 'pulumi.Alias', ([], {'type_': '"""azure-native:containerservice/v20190930preview:OpenShiftManagedCluster"""'}), "(type_=\n 'azure-native:containerservice/v20190930preview:OpenShiftManagedCluster')\n", (18314, 18399), False, 'import pulumi\n'), ((18396, 18494), 'pulumi.Alias', 'pulumi.Alias', ([], {'type_': '"""azure-nextgen:containerservice/v20190930preview:OpenShiftManagedCluster"""'}), "(type_=\n 'azure-nextgen:containerservice/v20190930preview:OpenShiftManagedCluster')\n", (18408, 18494), False, 'import pulumi\n')] |
#!/usr/bin/env python
import psycopg2
import time
from ..models import User
class StorageManager:
def __init__(self):
self.conn = None
self._connect()
self._create_table()
def _connect(self):
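        # Keep retrying until the database container accepts connections.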
while True:
try:
self.conn = psycopg2.connect(
host='storage',
database='app_storage',
user='admin',
password='<PASSWORD>'
)
except psycopg2.Error:
print('Cannot connect to database, sleeping 3 seconds')
time.sleep(3)
else:
break
def _create_table(self):
while True:
try:
cursor = self.conn.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS users \
(id SERIAL PRIMARY KEY, login VARCHAR(128), \
email VARCHAR(128), hash_password VARCHAR(<PASSWORD>), \
confirmed BOOLEAN)')
except psycopg2.Error:
print('Database error, reconnecting')
self._connect()
else:
break
def insert(self, user):
'''
        If the insert succeeds, the function returns True,
        else it returns False
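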
'''
while True:
try:
if self.select(user.login, category='login') is not None:
return False
cursor = self.conn.cursor()
cursor.execute('INSERT INTO users(login, email, hash_password, confirmed) \
VALUES (%s, %s, %s, %s)', (user.login, user.email, user.hash_password, user.confirmed))
self.conn.commit()
return True
except psycopg2.Error:
print('Database error, reconnecting')
time.sleep(1)
self._connect()
else:
break
def select(self, value, category='login'):
'''
        The function returns None if there is no user with the given value of
        category, else it returns a User instance
'''
while True:
try:
cursor = self.conn.cursor()
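                # The column name is interpolated into the SQL text; only the value is passed as a bound parameter.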
cursor.execute('SELECT * FROM users WHERE %s = %%s' % category, (value,))
self.conn.commit()
fetch = cursor.fetchall()
if len(fetch) == 0:
return None
user = User(fetch[0][1], fetch[0][2])
user.id = fetch[0][0]
user.hash_password = fetch[0][3]
user.confirmed = fetch[0][4]
return user
except psycopg2.Error:
print('Database error, reconnecting')
time.sleep(1)
self._connect()
else:
break
def confirm(self, value, category='login'):
'''
        The function sets the \'confirmed\' field of the user with the given value
        of category to True\n
        If no such user is found, it returns False, else it returns True
'''
while True:
try:
if self.select(value, category=category) is not None:
cursor = self.conn.cursor()
cursor.execute('UPDATE users SET confirmed = TRUE WHERE %s = %%s' % category, (value,))
self.conn.commit()
return True
else:
return False
except psycopg2.Error:
print('Database error, reconnecting')
time.sleep(1)
self._connect()
else:
break
| [
"psycopg2.connect",
"time.sleep"
]
| [((293, 390), 'psycopg2.connect', 'psycopg2.connect', ([], {'host': '"""storage"""', 'database': '"""app_storage"""', 'user': '"""admin"""', 'password': '"""<PASSWORD>"""'}), "(host='storage', database='app_storage', user='admin',\n password='<PASSWORD>')\n", (309, 390), False, 'import psycopg2\n'), ((608, 621), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (618, 621), False, 'import time\n'), ((1878, 1891), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1888, 1891), False, 'import time\n'), ((2793, 2806), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2803, 2806), False, 'import time\n'), ((3619, 3632), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3629, 3632), False, 'import time\n')] |
from django.db import models
from .media import Water
from .media import Electricity
from .media import Gas
from .media import WasteWater
from .media import Telecommunication
from .generic import Attachment
from .generic import Photo
from .generic import Location as EstateLocation
from cigeo.models import GenericNote as EstateNote
class ScientificParkTelecommunication(Telecommunication):
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkWasteWater(WasteWater):
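    # Override inherited fields with None to drop them from this concrete model (allowed for fields from an abstract base).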
diameter = capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkAttachment(Attachment):
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkPhoto(Photo):
green_field = models.ForeignKey(
"ScientificPark",
on_delete=models.CASCADE
)
pass
class ScientificParkTechnologicalWater(Water):
distance = None
diameter = None
capacity = None
well = None
well_capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkElectricity(Electricity):
distance = None
capacity = None
current = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkDrinkWater(Water):
distance = None
diameter = None
capacity = None
well = None
well_capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkGas(Gas):
diameter = pressure = capacity = None
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkLocation(EstateLocation):
green_field = models.OneToOneField(
"ScientificPark",
on_delete=models.CASCADE
)
class ScientificParkGenericNote(EstateNote):
green_field = models.ForeignKey(
"ScientificPark",
on_delete=models.CASCADE
)
| [
"django.db.models.OneToOneField",
"django.db.models.ForeignKey"
]
| [((413, 477), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""ScientificPark"""'], {'on_delete': 'models.CASCADE'}), "('ScientificPark', on_delete=models.CASCADE)\n", (433, 477), False, 'from django.db import models\n'), ((595, 659), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""ScientificPark"""'], {'on_delete': 'models.CASCADE'}), "('ScientificPark', on_delete=models.CASCADE)\n", (615, 659), False, 'from django.db import models\n'), ((746, 810), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""ScientificPark"""'], {'on_delete': 'models.CASCADE'}), "('ScientificPark', on_delete=models.CASCADE)\n", (766, 810), False, 'from django.db import models\n'), ((887, 948), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""ScientificPark"""'], {'on_delete': 'models.CASCADE'}), "('ScientificPark', on_delete=models.CASCADE)\n", (904, 948), False, 'from django.db import models\n'), ((1150, 1214), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""ScientificPark"""'], {'on_delete': 'models.CASCADE'}), "('ScientificPark', on_delete=models.CASCADE)\n", (1170, 1214), False, 'from django.db import models\n'), ((1363, 1427), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""ScientificPark"""'], {'on_delete': 'models.CASCADE'}), "('ScientificPark', on_delete=models.CASCADE)\n", (1383, 1427), False, 'from django.db import models\n'), ((1619, 1683), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""ScientificPark"""'], {'on_delete': 'models.CASCADE'}), "('ScientificPark', on_delete=models.CASCADE)\n", (1639, 1683), False, 'from django.db import models\n'), ((1799, 1863), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""ScientificPark"""'], {'on_delete': 'models.CASCADE'}), "('ScientificPark', on_delete=models.CASCADE)\n", (1819, 1863), False, 'from django.db import models\n'), ((1953, 2017), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""ScientificPark"""'], {'on_delete': 'models.CASCADE'}), "('ScientificPark', on_delete=models.CASCADE)\n", (1973, 2017), False, 'from django.db import models\n'), ((2106, 2167), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""ScientificPark"""'], {'on_delete': 'models.CASCADE'}), "('ScientificPark', on_delete=models.CASCADE)\n", (2123, 2167), False, 'from django.db import models\n')] |
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAdminUser
from goods.models import SPU, SPUSpecification
from meiduo_admin.serializers.spus import SPUSimpleSerializer, SPUSpecSerializer
class SPUSimpleView(ListAPIView):
permission_classes = [IsAdminUser]
queryset = SPU.objects.all()
serializer_class = SPUSimpleSerializer
# GET/meiduo_admin/goods/(?P<pk>\d+)/specs/
class SPUSpecView(ListAPIView):
"""่ทๅSPUๅๅ็่งๆ ผ้้กนๆฐๆฎ"""
permission_classes = [IsAdminUser]
# ๆๅฎ่งๅพ็ฑปๆไฝฟ็จ็ๆฅ่ฏข้
def get_queryset(self):
pk = self.kwargs['pk']
specs = SPUSpecification.objects.filter(spu_id=pk)
return specs
# ๆๅฎ่งๅพ็ฑปๆไฝฟ็จ็ๅบๅๅๅจ็ฑป
serializer_class = SPUSpecSerializer
| [
"goods.models.SPU.objects.all",
"goods.models.SPUSpecification.objects.filter"
]
| [((319, 336), 'goods.models.SPU.objects.all', 'SPU.objects.all', ([], {}), '()\n', (334, 336), False, 'from goods.models import SPU, SPUSpecification\n'), ((617, 659), 'goods.models.SPUSpecification.objects.filter', 'SPUSpecification.objects.filter', ([], {'spu_id': 'pk'}), '(spu_id=pk)\n', (648, 659), False, 'from goods.models import SPU, SPUSpecification\n')] |
import tkinter as tk
from tkinter.messagebox import showerror
from constants.frames import MAIN_FRAME_NAME
from util import add_new_quantity
class AddQuantityFrame(tk.Frame):
def __init__(self, root, controller):
tk.Frame.__init__(self, root)
self.controller = controller
self.main_label = tk.Label(self, text="ะะพะฑะฐะฒะปะตะฝะธะต ะฝะพะฒะพะน ะฒะตะปะธัะธะฝั", font="Helvetica 30 bold")
self.main_label.pack(pady=50)
self.info_label = tk.Label(self, text="ะะฒะตะดะธัะต ะฝะฐะทะฒะฐะฝะธะต ะฒะตะปะธัะธะฝั", font="Helvetica 20")
self.info_label.pack(pady=40)
self.quantity_name_entry = tk.Entry(self, width=24)
self.quantity_name_entry.pack()
self.add_button = tk.Button(self, text="ะะพะฑะฐะฒะธัั ะฒะตะปะธัะธะฝั", width=20, height=3, command=self.__add_quantity)
self.add_button.pack(pady=40)
self.back_button = tk.Button(self, text="ะะฐะทะฐะด", width=20, height=3,
command=lambda: self.controller.show_frame(MAIN_FRAME_NAME))
self.back_button.pack()
def __add_quantity(self):
quantity_name = self.quantity_name_entry.get()
if quantity_name == "":
showerror("ะะฐะทะฒะฐะฝะธะต ะฒะตะปะธัะธะฝั", "ะะฒะตะดะธัะต ะฝะฐะทะฒะฐะฝะธะต ะฒะตะปะธัะธะฝั")
return
if len(quantity_name) > 30:
showerror("ะะปะธะฝะฝะพะต ะฝะฐะทะฒะฐะฝะธะต", "ะะฐะทะฒะฐะฝะธะต ะฒะตะปะธัะธะฝั ะผะพะถะตั ัะพะดะตัะถะฐัั ะฝะต ะฑะพะปะตะต 30 ัะธะผะฒะพะปะพะฒ")
return
add_new_quantity(quantity_name)
self.controller.show_frame(MAIN_FRAME_NAME)
def render(self):
self.clear()
def clear(self):
self.quantity_name_entry.delete(0, tk.END)
| [
"tkinter.messagebox.showerror",
"tkinter.Frame.__init__",
"tkinter.Entry",
"util.add_new_quantity",
"tkinter.Button",
"tkinter.Label"
]
| [((229, 258), 'tkinter.Frame.__init__', 'tk.Frame.__init__', (['self', 'root'], {}), '(self, root)\n', (246, 258), True, 'import tkinter as tk\n'), ((323, 397), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""ะะพะฑะฐะฒะปะตะฝะธะต ะฝะพะฒะพะน ะฒะตะปะธัะธะฝั"""', 'font': '"""Helvetica 30 bold"""'}), "(self, text='ะะพะฑะฐะฒะปะตะฝะธะต ะฝะพะฒะพะน ะฒะตะปะธัะธะฝั', font='Helvetica 30 bold')\n", (331, 397), True, 'import tkinter as tk\n'), ((463, 532), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""ะะฒะตะดะธัะต ะฝะฐะทะฒะฐะฝะธะต ะฒะตะปะธัะธะฝั"""', 'font': '"""Helvetica 20"""'}), "(self, text='ะะฒะตะดะธัะต ะฝะฐะทะฒะฐะฝะธะต ะฒะตะปะธัะธะฝั', font='Helvetica 20')\n", (471, 532), True, 'import tkinter as tk\n'), ((607, 631), 'tkinter.Entry', 'tk.Entry', (['self'], {'width': '(24)'}), '(self, width=24)\n', (615, 631), True, 'import tkinter as tk\n'), ((699, 794), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""ะะพะฑะฐะฒะธัั ะฒะตะปะธัะธะฝั"""', 'width': '(20)', 'height': '(3)', 'command': 'self.__add_quantity'}), "(self, text='ะะพะฑะฐะฒะธัั ะฒะตะปะธัะธะฝั', width=20, height=3, command=self.\n __add_quantity)\n", (708, 794), True, 'import tkinter as tk\n'), ((1410, 1441), 'util.add_new_quantity', 'add_new_quantity', (['quantity_name'], {}), '(quantity_name)\n', (1426, 1441), False, 'from util import add_new_quantity\n'), ((1166, 1225), 'tkinter.messagebox.showerror', 'showerror', (['"""ะะฐะทะฒะฐะฝะธะต ะฒะตะปะธัะธะฝั"""', '"""ะะฒะตะดะธัะต ะฝะฐะทะฒะฐะฝะธะต ะฒะตะปะธัะธะฝั"""'], {}), "('ะะฐะทะฒะฐะฝะธะต ะฒะตะปะธัะธะฝั', 'ะะฒะตะดะธัะต ะฝะฐะทะฒะฐะฝะธะต ะฒะตะปะธัะธะฝั')\n", (1175, 1225), False, 'from tkinter.messagebox import showerror\n'), ((1294, 1385), 'tkinter.messagebox.showerror', 'showerror', (['"""ะะปะธะฝะฝะพะต ะฝะฐะทะฒะฐะฝะธะต"""', '"""ะะฐะทะฒะฐะฝะธะต ะฒะตะปะธัะธะฝั ะผะพะถะตั ัะพะดะตัะถะฐัั ะฝะต ะฑะพะปะตะต 30 ัะธะผะฒะพะปะพะฒ"""'], {}), "('ะะปะธะฝะฝะพะต ะฝะฐะทะฒะฐะฝะธะต',\n 'ะะฐะทะฒะฐะฝะธะต ะฒะตะปะธัะธะฝั ะผะพะถะตั ัะพะดะตัะถะฐัั ะฝะต ะฑะพะปะตะต 30 ัะธะผะฒะพะปะพะฒ')\n", (1303, 1385), False, 'from tkinter.messagebox import showerror\n')] |
from setuptools import setup
import versioneer
setup(name='gym_pysc2',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
      install_requires=['gym'] # And any other dependencies gym_pysc2 needs
) | [
"versioneer.get_cmdclass",
"versioneer.get_version"
]
| [((86, 110), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (108, 110), False, 'import versioneer\n'), ((127, 152), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (150, 152), False, 'import versioneer\n')] |
"""
Neighborhood Components Analysis (NCA)
Ported to Python from https://github.com/vomjom/nca
"""
from __future__ import absolute_import
import numpy as np
from six.moves import xrange
from sklearn.utils.validation import check_X_y
from .base_metric import BaseMetricLearner
EPS = np.finfo(float).eps
class NCA(BaseMetricLearner):
def __init__(self, num_dims=None, max_iter=100, learning_rate=0.01):
self.num_dims = num_dims
self.max_iter = max_iter
self.learning_rate = learning_rate
def transformer(self):
return self.A_
def fit(self, X, y):
"""
X: data matrix, (n x d)
y: scalar labels, (n)
"""
X, labels = check_X_y(X, y)
n, d = X.shape
num_dims = self.num_dims
if num_dims is None:
num_dims = d
# Initialize A to a scaling matrix
A = np.zeros((num_dims, d))
np.fill_diagonal(A, 1./(np.maximum(X.max(axis=0)-X.min(axis=0), EPS)))
# Run NCA
dX = X[:,None] - X[None] # shape (n, n, d)
tmp = np.einsum('...i,...j->...ij', dX, dX) # shape (n, n, d, d)
masks = labels[:,None] == labels[None]
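    # Gradient ascent on the NCA objective: p_ij is a softmax over negative squared distances in the
    # projected space, and A is nudged so that same-class neighbours of each point gain probability mass.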
for it in xrange(self.max_iter):
for i, label in enumerate(labels):
mask = masks[i]
Ax = A.dot(X.T).T # shape (n, num_dims)
softmax = np.exp(-((Ax[i] - Ax)**2).sum(axis=1)) # shape (n)
softmax[i] = 0
softmax /= softmax.sum()
t = softmax[:, None, None] * tmp[i] # shape (n, d, d)
d = softmax[mask].sum() * t.sum(axis=0) - t[mask].sum(axis=0)
A += self.learning_rate * A.dot(d)
self.X_ = X
self.A_ = A
self.n_iter_ = it
return self
| [
"numpy.zeros",
"six.moves.xrange",
"numpy.einsum",
"numpy.finfo",
"sklearn.utils.validation.check_X_y"
]
| [((285, 300), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (293, 300), True, 'import numpy as np\n'), ((660, 675), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (669, 675), False, 'from sklearn.utils.validation import check_X_y\n'), ((817, 840), 'numpy.zeros', 'np.zeros', (['(num_dims, d)'], {}), '((num_dims, d))\n', (825, 840), True, 'import numpy as np\n'), ((989, 1026), 'numpy.einsum', 'np.einsum', (['"""...i,...j->...ij"""', 'dX', 'dX'], {}), "('...i,...j->...ij', dX, dX)\n", (998, 1026), True, 'import numpy as np\n'), ((1106, 1127), 'six.moves.xrange', 'xrange', (['self.max_iter'], {}), '(self.max_iter)\n', (1112, 1127), False, 'from six.moves import xrange\n')] |
import os
import youtube_dl
os.system("setup.bat")
playlist = input("Paste the Youtube Playlist URL Here.")
track = 1
print("""THIS TOOL WILL ATTEMPT TO DOWNLOAD THE FIRST 1000 SONGS IN THE QUEUE.\n
PLEASE DO NOT INTERRUPT THE TOOL.
YOU MAY CLOSE THE TOOL WHEN IT DISPLAYS "DONE!".
ALL DOWNLOADED SONGS WILL BE IN THE SAME DIRECTORY THIS FILE IS IN.
TO EXTRACT THEM, FILTER BY MP3.""")
for x in range(1000):
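    # Rewrite Downloader.bat for this playlist index and download that single track as MP3.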
file = open("Downloader.bat","w")
file.write("youtube-dl -x --playlist-start {} --audio-format mp3 --playlist-end {} {}".format(str(track),str(track),playlist))
    file.close()
os.system("Downloader.bat")
track = track + 1
print("DONE! You may now close this window.")
| [
"os.system"
]
| [((30, 52), 'os.system', 'os.system', (['"""setup.bat"""'], {}), "('setup.bat')\n", (39, 52), False, 'import os\n'), ((640, 667), 'os.system', 'os.system', (['"""Downloader.bat"""'], {}), "('Downloader.bat')\n", (649, 667), False, 'import os\n')] |
# coding: utf-8
"""
manage.py
~~~~~~~~~
"""
import os
import sys
import shutil
import platform
from app import app
from gen import Gen
from flask_script import Manager
"""็ผ็ ่ฎพ็ฝฎ"""
if (platform.python_version().split('.')[0] == '2'):
# reload(sys) is evil :)
reload(sys)
sys.setdefaultencoding('utf-8')
"""Git้
็ฝฎ"""
git_url = app.config['GIT_URL']
git_branch = app.config['BRANCH']
manager = Manager(app)
def first_upload():
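    # First publish: create the target branch inside the .harbor working copy and push the generated site.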
if not git_url:
raise
else:
harbor_folder = os.path.join(os.getcwd(), '.harbor')
os.chdir(harbor_folder)
os.popen('git checkout -b %s' % git_branch)
os.popen('git pull %s %s' % (git_url, git_branch))
os.popen('git add .')
os.popen('git commit -m "railgun site update...โ
"')
os.popen('git push -u %s %s' % (git_url, git_branch))
def other_upload():
if not git_url:
raise
else:
harbor_folder = os.path.join(os.getcwd(), '.harbor')
os.chdir(harbor_folder)
os.popen('git checkout %s' % git_branch)
os.popen('git add .')
os.popen('git commit -m "railgun site update...โ
"')
os.popen('git push -u %s %s' % (git_url, git_branch))
def update_static_res():
static_folder = os.path.join(os.getcwd(), 'app/static')
static_build_folder = os.path.join(os.getcwd(), 'app/build/static')
if os.path.isdir(static_build_folder):
shutil.rmtree(static_build_folder)
shutil.copytree(static_folder, static_build_folder)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'build':
_gen = Gen(app)
_gen.gen()
# update static resources
update_static_res()
elif len(sys.argv) > 1 and sys.argv[1] == 'first_upload':
first_upload()
elif len(sys.argv) > 1 and sys.argv[1] == 'other_upload':
other_upload()
else:
manager.run()
| [
"sys.setdefaultencoding",
"flask_script.Manager",
"os.getcwd",
"shutil.copytree",
"os.chdir",
"gen.Gen",
"os.path.isdir",
"os.popen",
"shutil.rmtree",
"platform.python_version"
]
| [((417, 429), 'flask_script.Manager', 'Manager', (['app'], {}), '(app)\n', (424, 429), False, 'from flask_script import Manager\n'), ((293, 324), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (315, 324), False, 'import sys\n'), ((1380, 1414), 'os.path.isdir', 'os.path.isdir', (['static_build_folder'], {}), '(static_build_folder)\n', (1393, 1414), False, 'import os\n'), ((1463, 1514), 'shutil.copytree', 'shutil.copytree', (['static_folder', 'static_build_folder'], {}), '(static_folder, static_build_folder)\n', (1478, 1514), False, 'import shutil\n'), ((565, 588), 'os.chdir', 'os.chdir', (['harbor_folder'], {}), '(harbor_folder)\n', (573, 588), False, 'import os\n'), ((597, 640), 'os.popen', 'os.popen', (["('git checkout -b %s' % git_branch)"], {}), "('git checkout -b %s' % git_branch)\n", (605, 640), False, 'import os\n'), ((649, 699), 'os.popen', 'os.popen', (["('git pull %s %s' % (git_url, git_branch))"], {}), "('git pull %s %s' % (git_url, git_branch))\n", (657, 699), False, 'import os\n'), ((708, 729), 'os.popen', 'os.popen', (['"""git add ."""'], {}), "('git add .')\n", (716, 729), False, 'import os\n'), ((738, 790), 'os.popen', 'os.popen', (['"""git commit -m "railgun site update...โ
\\""""'], {}), '(\'git commit -m "railgun site update...โ
"\')\n', (746, 790), False, 'import os\n'), ((799, 852), 'os.popen', 'os.popen', (["('git push -u %s %s' % (git_url, git_branch))"], {}), "('git push -u %s %s' % (git_url, git_branch))\n", (807, 852), False, 'import os\n'), ((988, 1011), 'os.chdir', 'os.chdir', (['harbor_folder'], {}), '(harbor_folder)\n', (996, 1011), False, 'import os\n'), ((1020, 1060), 'os.popen', 'os.popen', (["('git checkout %s' % git_branch)"], {}), "('git checkout %s' % git_branch)\n", (1028, 1060), False, 'import os\n'), ((1069, 1090), 'os.popen', 'os.popen', (['"""git add ."""'], {}), "('git add .')\n", (1077, 1090), False, 'import os\n'), ((1099, 1151), 'os.popen', 'os.popen', (['"""git commit -m "railgun site update...โ
\\""""'], {}), '(\'git commit -m "railgun site update...โ
"\')\n', (1107, 1151), False, 'import os\n'), ((1160, 1213), 'os.popen', 'os.popen', (["('git push -u %s %s' % (git_url, git_branch))"], {}), "('git push -u %s %s' % (git_url, git_branch))\n", (1168, 1213), False, 'import os\n'), ((1274, 1285), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1283, 1285), False, 'import os\n'), ((1340, 1351), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1349, 1351), False, 'import os\n'), ((1424, 1458), 'shutil.rmtree', 'shutil.rmtree', (['static_build_folder'], {}), '(static_build_folder)\n', (1437, 1458), False, 'import shutil\n'), ((1611, 1619), 'gen.Gen', 'Gen', (['app'], {}), '(app)\n', (1614, 1619), False, 'from gen import Gen\n'), ((533, 544), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (542, 544), False, 'import os\n'), ((956, 967), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (965, 967), False, 'import os\n'), ((195, 220), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (218, 220), False, 'import platform\n')] |
from typing import Any
import torch
import torch.nn as nn
from cvmodels.segmentation import unet, deeplab as dl
def output(model: nn.Module, input_batch: torch.Tensor) -> Any:
model.eval()
with torch.no_grad():
return model(input_batch)
def numel(m: torch.nn.Module, only_trainable: bool = True) -> int:
parameters = m.parameters()
if only_trainable:
parameters = list(p for p in parameters if p.requires_grad)
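    # Deduplicate parameters that share the same storage (e.g. tied weights) via their data pointer.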
unique = dict((p.data_ptr(), p) for p in parameters).values()
return sum(p.numel() for p in unique)
def test_unet_out_transpose(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
model = unet.UNet(bilinear=False, outputs=1)
assert numel(model) > 31_000_000
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_unet_out_bilinear(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
model = unet.UNet(bilinear=True, outputs=1)
assert numel(model) < 30_000_000
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3_out(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3(variant=variant)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3_pretrain_backbone(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3(variant=variant, pretrained=True)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3_custom():
batch = torch.rand((2, 4, 480, 480))
batches, _, height, width = batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3(in_channels=4, out_channels=2, in_dimension=480, variant=variant, pretrained=True)
out = output(model, batch)
assert out.shape == (batches, 2, height, width)
def test_deeplabv3plus_out(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3Plus(variant=variant)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3plus_pretrain_backbone(random_seg_batch: torch.Tensor):
batches, _, height, width = random_seg_batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3Plus(variant=variant, pretrained=True)
out = output(model, random_seg_batch)
assert out.shape == (batches, 1, height, width)
def test_deeplabv3plus_custom():
batch = torch.rand((2, 4, 480, 480))
batches, _, height, width = batch.shape
for variant in dl.DeepLabVariants:
model = dl.DeepLabV3Plus(in_channels=4, out_channels=2, in_dimension=480, variant=variant, pretrained=True)
out = output(model, batch)
assert out.shape == (batches, 2, height, width)
| [
"cvmodels.segmentation.unet.UNet",
"cvmodels.segmentation.deeplab.DeepLabV3Plus",
"torch.no_grad",
"cvmodels.segmentation.deeplab.DeepLabV3",
"torch.rand"
]
| [((686, 722), 'cvmodels.segmentation.unet.UNet', 'unet.UNet', ([], {'bilinear': '(False)', 'outputs': '(1)'}), '(bilinear=False, outputs=1)\n', (695, 722), False, 'from cvmodels.segmentation import unet, deeplab as dl\n'), ((983, 1018), 'cvmodels.segmentation.unet.UNet', 'unet.UNet', ([], {'bilinear': '(True)', 'outputs': '(1)'}), '(bilinear=True, outputs=1)\n', (992, 1018), False, 'from cvmodels.segmentation import unet, deeplab as dl\n'), ((1824, 1852), 'torch.rand', 'torch.rand', (['(2, 4, 480, 480)'], {}), '((2, 4, 480, 480))\n', (1834, 1852), False, 'import torch\n'), ((2833, 2861), 'torch.rand', 'torch.rand', (['(2, 4, 480, 480)'], {}), '((2, 4, 480, 480))\n', (2843, 2861), False, 'import torch\n'), ((205, 220), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (218, 220), False, 'import torch\n'), ((1318, 1347), 'cvmodels.segmentation.deeplab.DeepLabV3', 'dl.DeepLabV3', ([], {'variant': 'variant'}), '(variant=variant)\n', (1330, 1347), True, 'from cvmodels.segmentation import unet, deeplab as dl\n'), ((1632, 1678), 'cvmodels.segmentation.deeplab.DeepLabV3', 'dl.DeepLabV3', ([], {'variant': 'variant', 'pretrained': '(True)'}), '(variant=variant, pretrained=True)\n', (1644, 1678), True, 'from cvmodels.segmentation import unet, deeplab as dl\n'), ((1952, 2052), 'cvmodels.segmentation.deeplab.DeepLabV3', 'dl.DeepLabV3', ([], {'in_channels': '(4)', 'out_channels': '(2)', 'in_dimension': '(480)', 'variant': 'variant', 'pretrained': '(True)'}), '(in_channels=4, out_channels=2, in_dimension=480, variant=\n variant, pretrained=True)\n', (1964, 2052), True, 'from cvmodels.segmentation import unet, deeplab as dl\n'), ((2311, 2344), 'cvmodels.segmentation.deeplab.DeepLabV3Plus', 'dl.DeepLabV3Plus', ([], {'variant': 'variant'}), '(variant=variant)\n', (2327, 2344), True, 'from cvmodels.segmentation import unet, deeplab as dl\n'), ((2633, 2683), 'cvmodels.segmentation.deeplab.DeepLabV3Plus', 'dl.DeepLabV3Plus', ([], {'variant': 'variant', 'pretrained': '(True)'}), '(variant=variant, pretrained=True)\n', (2649, 2683), True, 'from cvmodels.segmentation import unet, deeplab as dl\n'), ((2961, 3065), 'cvmodels.segmentation.deeplab.DeepLabV3Plus', 'dl.DeepLabV3Plus', ([], {'in_channels': '(4)', 'out_channels': '(2)', 'in_dimension': '(480)', 'variant': 'variant', 'pretrained': '(True)'}), '(in_channels=4, out_channels=2, in_dimension=480, variant=\n variant, pretrained=True)\n', (2977, 3065), True, 'from cvmodels.segmentation import unet, deeplab as dl\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
from aif360.datasets import BinaryLabelDataset
from aif360.metrics import ClassificationMetric
def test_generalized_entropy_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
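    # Flip labels on rows 3 and 9 (1 -> 0) and rows 4 and 5 (0 -> 1) to build an imperfect prediction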
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.2
pred = data.copy()
pred[:, -1] = np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.3
def test_theil_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.theil_index() == 4*np.log(2)/10
def test_between_all_groups():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
b = np.array([1, 1, 1.25, 1.25, 1.25, 1.25, 0.75, 0.75, 0.75, 0.75])
assert cm.between_all_groups_generalized_entropy_index() == 1/20*np.sum(b**2 - 1)
def test_between_group():
data = np.array([[0, 0, 1],
[0, 1, 0],
[1, 1, 0],
[1, 1, 1],
[1, 0, 0],
[1, 0, 0]])
pred = data.copy()
pred[[0, 3], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'feat2', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'feat2', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
cm = ClassificationMetric(bld, bld2, unprivileged_groups=[{'feat': 0}],
privileged_groups=[{'feat': 1}])
b = np.array([0.5, 0.5, 1.25, 1.25, 1.25, 1.25])
assert cm.between_group_generalized_entropy_index() == 1/12*np.sum(b**2 - 1)
| [
"numpy.log",
"numpy.array",
"numpy.sum",
"pandas.DataFrame",
"aif360.datasets.BinaryLabelDataset",
"aif360.metrics.ClassificationMetric"
]
| [((336, 431), 'numpy.array', 'np.array', (['[[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1], [2, 0], [2, 1], [2, 1]\n ]'], {}), '([[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1], [2, 0], [\n 2, 1], [2, 1]])\n', (344, 431), True, 'import numpy as np\n'), ((698, 743), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['feat', 'label']"}), "(data, columns=['feat', 'label'])\n", (710, 743), True, 'import pandas as pd\n'), ((754, 799), 'pandas.DataFrame', 'pd.DataFrame', (['pred'], {'columns': "['feat', 'label']"}), "(pred, columns=['feat', 'label'])\n", (766, 799), True, 'import pandas as pd\n'), ((810, 899), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df, label_names=['label'], protected_attribute_names=\n ['feat'])\n", (828, 899), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((914, 1004), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df2', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df2, label_names=['label'], protected_attribute_names\n =['feat'])\n", (932, 1004), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((1017, 1048), 'aif360.metrics.ClassificationMetric', 'ClassificationMetric', (['bld', 'bld2'], {}), '(bld, bld2)\n', (1037, 1048), False, 'from aif360.metrics import ClassificationMetric\n'), ((1141, 1181), 'numpy.array', 'np.array', (['[0, 1, 1, 0, 0, 0, 0, 1, 1, 1]'], {}), '([0, 1, 1, 0, 0, 0, 0, 1, 1, 1])\n', (1149, 1181), True, 'import numpy as np\n'), ((1192, 1237), 'pandas.DataFrame', 'pd.DataFrame', (['pred'], {'columns': "['feat', 'label']"}), "(pred, columns=['feat', 'label'])\n", (1204, 1237), True, 'import pandas as pd\n'), ((1249, 1339), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df2', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df2, label_names=['label'], protected_attribute_names\n =['feat'])\n", (1267, 1339), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((1352, 1383), 'aif360.metrics.ClassificationMetric', 'ClassificationMetric', (['bld', 'bld2'], {}), '(bld, bld2)\n', (1372, 1383), False, 'from aif360.metrics import ClassificationMetric\n'), ((1470, 1565), 'numpy.array', 'np.array', (['[[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1], [2, 0], [2, 1], [2, 1]\n ]'], {}), '([[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1], [2, 0], [\n 2, 1], [2, 1]])\n', (1478, 1565), True, 'import numpy as np\n'), ((1832, 1877), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['feat', 'label']"}), "(data, columns=['feat', 'label'])\n", (1844, 1877), True, 'import pandas as pd\n'), ((1888, 1933), 'pandas.DataFrame', 'pd.DataFrame', (['pred'], {'columns': "['feat', 'label']"}), "(pred, columns=['feat', 'label'])\n", (1900, 1933), True, 'import pandas as pd\n'), ((1944, 2033), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df, label_names=['label'], protected_attribute_names=\n ['feat'])\n", (1962, 2033), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((2048, 2138), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df2', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df2, label_names=['label'], protected_attribute_names\n =['feat'])\n", (2066, 2138), False, 'from aif360.datasets import 
BinaryLabelDataset\n'), ((2151, 2182), 'aif360.metrics.ClassificationMetric', 'ClassificationMetric', (['bld', 'bld2'], {}), '(bld, bld2)\n', (2171, 2182), False, 'from aif360.metrics import ClassificationMetric\n'), ((2273, 2368), 'numpy.array', 'np.array', (['[[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1], [2, 0], [2, 1], [2, 1]\n ]'], {}), '([[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1], [2, 0], [\n 2, 1], [2, 1]])\n', (2281, 2368), True, 'import numpy as np\n'), ((2635, 2680), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['feat', 'label']"}), "(data, columns=['feat', 'label'])\n", (2647, 2680), True, 'import pandas as pd\n'), ((2691, 2736), 'pandas.DataFrame', 'pd.DataFrame', (['pred'], {'columns': "['feat', 'label']"}), "(pred, columns=['feat', 'label'])\n", (2703, 2736), True, 'import pandas as pd\n'), ((2747, 2836), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df, label_names=['label'], protected_attribute_names=\n ['feat'])\n", (2765, 2836), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((2851, 2941), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df2', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df2, label_names=['label'], protected_attribute_names\n =['feat'])\n", (2869, 2941), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((2954, 2985), 'aif360.metrics.ClassificationMetric', 'ClassificationMetric', (['bld', 'bld2'], {}), '(bld, bld2)\n', (2974, 2985), False, 'from aif360.metrics import ClassificationMetric\n'), ((2995, 3059), 'numpy.array', 'np.array', (['[1, 1, 1.25, 1.25, 1.25, 1.25, 0.75, 0.75, 0.75, 0.75]'], {}), '([1, 1, 1.25, 1.25, 1.25, 1.25, 0.75, 0.75, 0.75, 0.75])\n', (3003, 3059), True, 'import numpy as np\n'), ((3184, 3260), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 1, 0], [1, 1, 0], [1, 1, 1], [1, 0, 0], [1, 0, 0]]'], {}), '([[0, 0, 1], [0, 1, 0], [1, 1, 0], [1, 1, 1], [1, 0, 0], [1, 0, 0]])\n', (3192, 3260), True, 'import numpy as np\n'), ((3448, 3502), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['feat', 'feat2', 'label']"}), "(data, columns=['feat', 'feat2', 'label'])\n", (3460, 3502), True, 'import pandas as pd\n'), ((3513, 3567), 'pandas.DataFrame', 'pd.DataFrame', (['pred'], {'columns': "['feat', 'feat2', 'label']"}), "(pred, columns=['feat', 'feat2', 'label'])\n", (3525, 3567), True, 'import pandas as pd\n'), ((3578, 3676), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df', 'label_names': "['label']", 'protected_attribute_names': "['feat', 'feat2']"}), "(df=df, label_names=['label'], protected_attribute_names=\n ['feat', 'feat2'])\n", (3596, 3676), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((3691, 3790), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df2', 'label_names': "['label']", 'protected_attribute_names': "['feat', 'feat2']"}), "(df=df2, label_names=['label'], protected_attribute_names\n =['feat', 'feat2'])\n", (3709, 3790), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((3803, 3906), 'aif360.metrics.ClassificationMetric', 'ClassificationMetric', (['bld', 'bld2'], {'unprivileged_groups': "[{'feat': 0}]", 'privileged_groups': "[{'feat': 1}]"}), "(bld, bld2, unprivileged_groups=[{'feat': 0}],\n privileged_groups=[{'feat': 1}])\n", (3823, 3906), False, 'from aif360.metrics import ClassificationMetric\n'), ((3920, 3964), 
'numpy.array', 'np.array', (['[0.5, 0.5, 1.25, 1.25, 1.25, 1.25]'], {}), '([0.5, 0.5, 1.25, 1.25, 1.25, 1.25])\n', (3928, 3964), True, 'import numpy as np\n'), ((3129, 3147), 'numpy.sum', 'np.sum', (['(b ** 2 - 1)'], {}), '(b ** 2 - 1)\n', (3135, 3147), True, 'import numpy as np\n'), ((4029, 4047), 'numpy.sum', 'np.sum', (['(b ** 2 - 1)'], {}), '(b ** 2 - 1)\n', (4035, 4047), True, 'import numpy as np\n'), ((2217, 2226), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2223, 2226), True, 'import numpy as np\n')] |
import openmoc
import openmoc.log as log
import openmoc.plotter as plotter
import openmoc.materialize as materialize
log.set_log_level('NORMAL')
###############################################################################
########################### Creating Materials ############################
###############################################################################
log.py_printf('NORMAL', 'Importing materials data from HDF5...')
materials = openmoc.materialize.load_from_hdf5('c5g7-mgxs.h5', '../')
###############################################################################
########################### Creating Surfaces #############################
###############################################################################
log.py_printf('NORMAL', 'Creating surfaces...')
xmin = openmoc.XPlane(x=-5.0, name='xmin')
xmax = openmoc.XPlane(x= 5.0, name='xmax')
ymin = openmoc.YPlane(y=-5.0, name='ymin')
ymax = openmoc.YPlane(y= 5.0, name='ymax')
zmin = openmoc.ZPlane(z=-5.0, name='zmin')
zmax = openmoc.ZPlane(z= 5.0, name='zmax')
xmin.setBoundaryType(openmoc.REFLECTIVE)
xmax.setBoundaryType(openmoc.REFLECTIVE)
ymin.setBoundaryType(openmoc.REFLECTIVE)
ymax.setBoundaryType(openmoc.REFLECTIVE)
zmin.setBoundaryType(openmoc.REFLECTIVE)
zmax.setBoundaryType(openmoc.REFLECTIVE)
###############################################################################
############################# Creating Cells ##############################
###############################################################################
log.py_printf('NORMAL', 'Creating cells...')
fuel = openmoc.Cell(name='fuel')
fuel.setFill(materials['UO2'])
moderator = openmoc.Cell(name='moderator')
moderator.setFill(materials['UO2'])
root_cell = openmoc.Cell(name='root cell')
root_cell.addSurface(halfspace=+1, surface=xmin)
root_cell.addSurface(halfspace=-1, surface=xmax)
root_cell.addSurface(halfspace=+1, surface=ymin)
root_cell.addSurface(halfspace=-1, surface=ymax)
root_cell.addSurface(halfspace=+1, surface=zmin)
root_cell.addSurface(halfspace=-1, surface=zmax)
###############################################################################
########################### Creating Universes ############################
###############################################################################
log.py_printf('NORMAL', 'Creating universes...')
fue_univ = openmoc.Universe(name='homogeneous fue cell')
fue_univ.addCell(fuel)
mod_univ = openmoc.Universe(name='homogeneous mod cell')
mod_univ.addCell(moderator)
root_universe = openmoc.Universe(name='root universe')
root_universe.addCell(root_cell)
###############################################################################
########################### Creating Lattices #############################
###############################################################################
log.py_printf('NORMAL', 'Creating simple 10 x 10 lattice...')
f = fue_univ
lattice = openmoc.Lattice(name='10x10 lattice')
lattice.setWidth(width_x=1.0, width_y=1.0, width_z=1.0)
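# Fill every position of the 10 x 10 x 10 lattice with the homogeneous fuel universe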
lattice.setUniverses([[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]]])
root_cell.setFill(lattice)
###############################################################################
########################## Creating the Geometry ##########################
###############################################################################
log.py_printf('NORMAL', 'Creating geometry...')
geometry = openmoc.Geometry()
geometry.setRootUniverse(root_universe)
geometry.initializeFlatSourceRegions()
| [
"openmoc.Universe",
"openmoc.Lattice",
"openmoc.materialize.load_from_hdf5",
"openmoc.Cell",
"openmoc.log.set_log_level",
"openmoc.Geometry",
"openmoc.XPlane",
"openmoc.log.py_printf",
"openmoc.ZPlane",
"openmoc.YPlane"
]
| [((118, 145), 'openmoc.log.set_log_level', 'log.set_log_level', (['"""NORMAL"""'], {}), "('NORMAL')\n", (135, 145), True, 'import openmoc.log as log\n'), ((388, 452), 'openmoc.log.py_printf', 'log.py_printf', (['"""NORMAL"""', '"""Importing materials data from HDF5..."""'], {}), "('NORMAL', 'Importing materials data from HDF5...')\n", (401, 452), True, 'import openmoc.log as log\n'), ((466, 523), 'openmoc.materialize.load_from_hdf5', 'openmoc.materialize.load_from_hdf5', (['"""c5g7-mgxs.h5"""', '"""../"""'], {}), "('c5g7-mgxs.h5', '../')\n", (500, 523), False, 'import openmoc\n'), ((767, 814), 'openmoc.log.py_printf', 'log.py_printf', (['"""NORMAL"""', '"""Creating surfaces..."""'], {}), "('NORMAL', 'Creating surfaces...')\n", (780, 814), True, 'import openmoc.log as log\n'), ((823, 858), 'openmoc.XPlane', 'openmoc.XPlane', ([], {'x': '(-5.0)', 'name': '"""xmin"""'}), "(x=-5.0, name='xmin')\n", (837, 858), False, 'import openmoc\n'), ((866, 900), 'openmoc.XPlane', 'openmoc.XPlane', ([], {'x': '(5.0)', 'name': '"""xmax"""'}), "(x=5.0, name='xmax')\n", (880, 900), False, 'import openmoc\n'), ((909, 944), 'openmoc.YPlane', 'openmoc.YPlane', ([], {'y': '(-5.0)', 'name': '"""ymin"""'}), "(y=-5.0, name='ymin')\n", (923, 944), False, 'import openmoc\n'), ((952, 986), 'openmoc.YPlane', 'openmoc.YPlane', ([], {'y': '(5.0)', 'name': '"""ymax"""'}), "(y=5.0, name='ymax')\n", (966, 986), False, 'import openmoc\n'), ((995, 1030), 'openmoc.ZPlane', 'openmoc.ZPlane', ([], {'z': '(-5.0)', 'name': '"""zmin"""'}), "(z=-5.0, name='zmin')\n", (1009, 1030), False, 'import openmoc\n'), ((1038, 1072), 'openmoc.ZPlane', 'openmoc.ZPlane', ([], {'z': '(5.0)', 'name': '"""zmax"""'}), "(z=5.0, name='zmax')\n", (1052, 1072), False, 'import openmoc\n'), ((1563, 1607), 'openmoc.log.py_printf', 'log.py_printf', (['"""NORMAL"""', '"""Creating cells..."""'], {}), "('NORMAL', 'Creating cells...')\n", (1576, 1607), True, 'import openmoc.log as log\n'), ((1616, 1641), 'openmoc.Cell', 'openmoc.Cell', ([], {'name': '"""fuel"""'}), "(name='fuel')\n", (1628, 1641), False, 'import openmoc\n'), ((1686, 1716), 'openmoc.Cell', 'openmoc.Cell', ([], {'name': '"""moderator"""'}), "(name='moderator')\n", (1698, 1716), False, 'import openmoc\n'), ((1766, 1796), 'openmoc.Cell', 'openmoc.Cell', ([], {'name': '"""root cell"""'}), "(name='root cell')\n", (1778, 1796), False, 'import openmoc\n'), ((2334, 2382), 'openmoc.log.py_printf', 'log.py_printf', (['"""NORMAL"""', '"""Creating universes..."""'], {}), "('NORMAL', 'Creating universes...')\n", (2347, 2382), True, 'import openmoc.log as log\n'), ((2395, 2440), 'openmoc.Universe', 'openmoc.Universe', ([], {'name': '"""homogeneous fue cell"""'}), "(name='homogeneous fue cell')\n", (2411, 2440), False, 'import openmoc\n'), ((2476, 2521), 'openmoc.Universe', 'openmoc.Universe', ([], {'name': '"""homogeneous mod cell"""'}), "(name='homogeneous mod cell')\n", (2492, 2521), False, 'import openmoc\n'), ((2567, 2605), 'openmoc.Universe', 'openmoc.Universe', ([], {'name': '"""root universe"""'}), "(name='root universe')\n", (2583, 2605), False, 'import openmoc\n'), ((2882, 2943), 'openmoc.log.py_printf', 'log.py_printf', (['"""NORMAL"""', '"""Creating simple 10 x 10 lattice..."""'], {}), "('NORMAL', 'Creating simple 10 x 10 lattice...')\n", (2895, 2943), True, 'import openmoc.log as log\n'), ((2969, 3006), 'openmoc.Lattice', 'openmoc.Lattice', ([], {'name': '"""10x10 lattice"""'}), "(name='10x10 lattice')\n", (2984, 3006), False, 'import openmoc\n'), ((9043, 9090), 'openmoc.log.py_printf', 
'log.py_printf', (['"""NORMAL"""', '"""Creating geometry..."""'], {}), "('NORMAL', 'Creating geometry...')\n", (9056, 9090), True, 'import openmoc.log as log\n'), ((9103, 9121), 'openmoc.Geometry', 'openmoc.Geometry', ([], {}), '()\n', (9119, 9121), False, 'import openmoc\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import sys
from google.ads.google_ads import util
if sys.version_info < (3, 6):
raise ImportError("This module requires Python 3.6 or later.")
_lazy_name_to_package_map = {
"account_budget_proposal_service_client": "google.ads.google_ads.v5.services",
"account_budget_service_client": "google.ads.google_ads.v5.services",
"account_link_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_asset_view_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_service_client": "google.ads.google_ads.v5.services",
"ad_group_audience_view_service_client": "google.ads.google_ads.v5.services",
"ad_group_bid_modifier_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_simulation_service_client": "google.ads.google_ads.v5.services",
"ad_group_extension_setting_service_client": "google.ads.google_ads.v5.services",
"ad_group_feed_service_client": "google.ads.google_ads.v5.services",
"ad_group_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_service_client": "google.ads.google_ads.v5.services",
"ad_group_simulation_service_client": "google.ads.google_ads.v5.services",
"ad_parameter_service_client": "google.ads.google_ads.v5.services",
"ad_schedule_view_service_client": "google.ads.google_ads.v5.services",
"ad_service_client": "google.ads.google_ads.v5.services",
"age_range_view_service_client": "google.ads.google_ads.v5.services",
"asset_service_client": "google.ads.google_ads.v5.services",
"batch_job_service_client": "google.ads.google_ads.v5.services",
"bidding_strategy_service_client": "google.ads.google_ads.v5.services",
"billing_setup_service_client": "google.ads.google_ads.v5.services",
"campaign_asset_service_client": "google.ads.google_ads.v5.services",
"campaign_audience_view_service_client": "google.ads.google_ads.v5.services",
"campaign_bid_modifier_service_client": "google.ads.google_ads.v5.services",
"campaign_budget_service_client": "google.ads.google_ads.v5.services",
"campaign_criterion_service_client": "google.ads.google_ads.v5.services",
"campaign_criterion_simulation_service_client": "google.ads.google_ads.v5.services",
"campaign_draft_service_client": "google.ads.google_ads.v5.services",
"campaign_experiment_service_client": "google.ads.google_ads.v5.services",
"campaign_extension_setting_service_client": "google.ads.google_ads.v5.services",
"campaign_feed_service_client": "google.ads.google_ads.v5.services",
"campaign_label_service_client": "google.ads.google_ads.v5.services",
"campaign_service_client": "google.ads.google_ads.v5.services",
"campaign_shared_set_service_client": "google.ads.google_ads.v5.services",
"carrier_constant_service_client": "google.ads.google_ads.v5.services",
"change_status_service_client": "google.ads.google_ads.v5.services",
"click_view_service_client": "google.ads.google_ads.v5.services",
"conversion_action_service_client": "google.ads.google_ads.v5.services",
"conversion_adjustment_upload_service_client": "google.ads.google_ads.v5.services",
"conversion_upload_service_client": "google.ads.google_ads.v5.services",
"currency_constant_service_client": "google.ads.google_ads.v5.services",
"custom_interest_service_client": "google.ads.google_ads.v5.services",
"customer_client_link_service_client": "google.ads.google_ads.v5.services",
"customer_client_service_client": "google.ads.google_ads.v5.services",
"customer_extension_setting_service_client": "google.ads.google_ads.v5.services",
"customer_feed_service_client": "google.ads.google_ads.v5.services",
"customer_label_service_client": "google.ads.google_ads.v5.services",
"customer_manager_link_service_client": "google.ads.google_ads.v5.services",
"customer_negative_criterion_service_client": "google.ads.google_ads.v5.services",
"customer_service_client": "google.ads.google_ads.v5.services",
"detail_placement_view_service_client": "google.ads.google_ads.v5.services",
"display_keyword_view_service_client": "google.ads.google_ads.v5.services",
"distance_view_service_client": "google.ads.google_ads.v5.services",
"domain_category_service_client": "google.ads.google_ads.v5.services",
"dynamic_search_ads_search_term_view_service_client": "google.ads.google_ads.v5.services",
"expanded_landing_page_view_service_client": "google.ads.google_ads.v5.services",
"extension_feed_item_service_client": "google.ads.google_ads.v5.services",
"feed_item_service_client": "google.ads.google_ads.v5.services",
"feed_item_target_service_client": "google.ads.google_ads.v5.services",
"feed_mapping_service_client": "google.ads.google_ads.v5.services",
"feed_placeholder_view_service_client": "google.ads.google_ads.v5.services",
"feed_service_client": "google.ads.google_ads.v5.services",
"gender_view_service_client": "google.ads.google_ads.v5.services",
"geo_target_constant_service_client": "google.ads.google_ads.v5.services",
"geographic_view_service_client": "google.ads.google_ads.v5.services",
"google_ads_field_service_client": "google.ads.google_ads.v5.services",
"google_ads_service_client": "google.ads.google_ads.v5.services",
"group_placement_view_service_client": "google.ads.google_ads.v5.services",
"hotel_group_view_service_client": "google.ads.google_ads.v5.services",
"hotel_performance_view_service_client": "google.ads.google_ads.v5.services",
"income_range_view_service_client": "google.ads.google_ads.v5.services",
"invoice_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_ad_group_keyword_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_ad_group_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_campaign_keyword_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_campaign_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_idea_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_service_client": "google.ads.google_ads.v5.services",
"keyword_view_service_client": "google.ads.google_ads.v5.services",
"label_service_client": "google.ads.google_ads.v5.services",
"landing_page_view_service_client": "google.ads.google_ads.v5.services",
"language_constant_service_client": "google.ads.google_ads.v5.services",
"location_view_service_client": "google.ads.google_ads.v5.services",
"managed_placement_view_service_client": "google.ads.google_ads.v5.services",
"media_file_service_client": "google.ads.google_ads.v5.services",
"merchant_center_link_service_client": "google.ads.google_ads.v5.services",
"mobile_app_category_constant_service_client": "google.ads.google_ads.v5.services",
"mobile_device_constant_service_client": "google.ads.google_ads.v5.services",
"offline_user_data_job_service_client": "google.ads.google_ads.v5.services",
"operating_system_version_constant_service_client": "google.ads.google_ads.v5.services",
"paid_organic_search_term_view_service_client": "google.ads.google_ads.v5.services",
"parental_status_view_service_client": "google.ads.google_ads.v5.services",
"payments_account_service_client": "google.ads.google_ads.v5.services",
"product_bidding_category_constant_service_client": "google.ads.google_ads.v5.services",
"product_group_view_service_client": "google.ads.google_ads.v5.services",
"reach_plan_service_client": "google.ads.google_ads.v5.services",
"recommendation_service_client": "google.ads.google_ads.v5.services",
"remarketing_action_service_client": "google.ads.google_ads.v5.services",
"search_term_view_service_client": "google.ads.google_ads.v5.services",
"shared_criterion_service_client": "google.ads.google_ads.v5.services",
"shared_set_service_client": "google.ads.google_ads.v5.services",
"shopping_performance_view_service_client": "google.ads.google_ads.v5.services",
"third_party_app_analytics_link_service_client": "google.ads.google_ads.v5.services",
"topic_constant_service_client": "google.ads.google_ads.v5.services",
"topic_view_service_client": "google.ads.google_ads.v5.services",
"user_data_service_client": "google.ads.google_ads.v5.services",
"user_interest_service_client": "google.ads.google_ads.v5.services",
"user_list_service_client": "google.ads.google_ads.v5.services",
"user_location_view_service_client": "google.ads.google_ads.v5.services",
"video_service_client": "google.ads.google_ads.v5.services",
"account_budget_proposal_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"account_budget_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"account_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_asset_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_audience_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_bid_modifier_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_parameter_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_schedule_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"age_range_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"asset_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"batch_job_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"bidding_strategy_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"billing_setup_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_asset_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_audience_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_bid_modifier_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_budget_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_criterion_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_draft_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_experiment_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_shared_set_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"carrier_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"change_status_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"click_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_action_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_adjustment_upload_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_upload_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"currency_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"custom_interest_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_client_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_client_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_manager_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_negative_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"detail_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"display_keyword_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"distance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"domain_category_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"dynamic_search_ads_search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"expanded_landing_page_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"extension_feed_item_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_item_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_item_target_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_mapping_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_placeholder_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"gender_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"geo_target_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"geographic_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"google_ads_field_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"google_ads_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"group_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"hotel_group_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"hotel_performance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"income_range_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"invoice_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_ad_group_keyword_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_ad_group_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_campaign_keyword_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_campaign_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_idea_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"landing_page_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"language_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"location_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"managed_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"media_file_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"merchant_center_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"mobile_app_category_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"mobile_device_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"offline_user_data_job_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"operating_system_version_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"paid_organic_search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"parental_status_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"payments_account_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"product_bidding_category_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"product_group_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"reach_plan_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"recommendation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"remarketing_action_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shared_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shared_set_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shopping_performance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"third_party_app_analytics_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"topic_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"topic_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_data_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_interest_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_list_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_location_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"video_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
}
# Background on how this behaves: https://www.python.org/dev/peps/pep-0562/
def __getattr__(name): # Requires Python >= 3.7
"""Lazily perform imports and class definitions on first demand."""
if name == "__all__":
converted = (
util.convert_snake_case_to_upper_case(key)
for key in _lazy_name_to_package_map
)
all_names = sorted(converted)
globals()["__all__"] = all_names
return all_names
elif name.endswith("Transport"):
module = __getattr__(util.convert_upper_case_to_snake_case(name))
sub_mod_class = getattr(module, name)
klass = type(name, (sub_mod_class,), {"__doc__": sub_mod_class.__doc__})
globals()[name] = klass
return klass
elif name.endswith("ServiceClient"):
module = __getattr__(util.convert_upper_case_to_snake_case(name))
enums = __getattr__("enums")
sub_mod_class = getattr(module, name)
klass = type(
name,
(sub_mod_class,),
{"__doc__": sub_mod_class.__doc__, "enums": enums},
)
globals()[name] = klass
return klass
elif name == "enums":
path = "google.ads.google_ads.v5.services.enums"
module = importlib.import_module(path)
globals()[name] = module
return module
elif name == "types":
path = "google.ads.google_ads.v5.types"
module = importlib.import_module(path)
globals()[name] = module
return module
elif name in _lazy_name_to_package_map:
module = importlib.import_module(
f"{_lazy_name_to_package_map[name]}.{name}"
)
globals()[name] = module
return module
else:
raise AttributeError(f"unknown sub-module {name!r}.")
def __dir__():
return globals().get("__all__") or __getattr__("__all__")
if not sys.version_info >= (3, 7):
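    # Python 3.6 lacks module-level __getattr__/__dir__ (PEP 562), so fall back to the pep562 backport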
from pep562 import Pep562
Pep562(__name__)
| [
"google.ads.google_ads.util.convert_upper_case_to_snake_case",
"pep562.Pep562",
"importlib.import_module",
"google.ads.google_ads.util.convert_snake_case_to_upper_case"
]
| [((22297, 22313), 'pep562.Pep562', 'Pep562', (['__name__'], {}), '(__name__)\n', (22303, 22313), False, 'from pep562 import Pep562\n'), ((20614, 20656), 'google.ads.google_ads.util.convert_snake_case_to_upper_case', 'util.convert_snake_case_to_upper_case', (['key'], {}), '(key)\n', (20651, 20656), False, 'from google.ads.google_ads import util\n'), ((20886, 20929), 'google.ads.google_ads.util.convert_upper_case_to_snake_case', 'util.convert_upper_case_to_snake_case', (['name'], {}), '(name)\n', (20923, 20929), False, 'from google.ads.google_ads import util\n'), ((21181, 21224), 'google.ads.google_ads.util.convert_upper_case_to_snake_case', 'util.convert_upper_case_to_snake_case', (['name'], {}), '(name)\n', (21218, 21224), False, 'from google.ads.google_ads import util\n'), ((21606, 21635), 'importlib.import_module', 'importlib.import_module', (['path'], {}), '(path)\n', (21629, 21635), False, 'import importlib\n'), ((21782, 21811), 'importlib.import_module', 'importlib.import_module', (['path'], {}), '(path)\n', (21805, 21811), False, 'import importlib\n'), ((21928, 21996), 'importlib.import_module', 'importlib.import_module', (['f"""{_lazy_name_to_package_map[name]}.{name}"""'], {}), "(f'{_lazy_name_to_package_map[name]}.{name}')\n", (21951, 21996), False, 'import importlib\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = '''
Crawl comment from nicovideo.jp
Usage:
crawl_comments.py --url <url> --mail <mail> --pass <pass> [--sqlite <sqlite>] [--csv <csv>]
Options:
--url <url>
--mail <mail>
--pass <pass>
--sqlite <sqlite> (optional) path of comment DB [default: comments.sqlite3]
--csv <csv> (optional) path of csv file contains urls of videos [default: crawled.csv]
'''
from docopt import docopt
from nicocrawler.nicocrawler import NicoCrawler
if __name__ == '__main__':
    # Get the command-line arguments
args = docopt(__doc__)
url_channel_toppage = args['--url']
login_mail = args['--mail']
login_pass = args['--pass']
path_sqlite = args['--sqlite']
path_csv = args['--csv']
ncrawler = NicoCrawler(login_mail, login_pass)
ncrawler.connect_sqlite(path_sqlite)
df = ncrawler.get_all_video_url_of_season(url_channel_toppage)
ncrawler.initialize_csv_from_db(path_csv)
    # # Get the videos ranked 1-300 in the daily ranking
# url = 'http://www.nicovideo.jp/ranking/fav/daily/all'
# ncrawler.initialize_csv_from_url(url, path_csv, max_page=3)
# ncrawler.get_all_comments_of_csv(path_csv, max_n_iter=1)
| [
"nicocrawler.nicocrawler.NicoCrawler",
"docopt.docopt"
]
| [((590, 605), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (596, 605), False, 'from docopt import docopt\n'), ((790, 825), 'nicocrawler.nicocrawler.NicoCrawler', 'NicoCrawler', (['login_mail', 'login_pass'], {}), '(login_mail, login_pass)\n', (801, 825), False, 'from nicocrawler.nicocrawler import NicoCrawler\n')] |
# Functions to do the greedy similarity maximisation for article:node assignments
# All code is original
import random
def computeSimSum(G, similarityMatrix, asgn):
""" Compute the total similarity sum for the current node:article assignment """
S = sum([similarityMatrix[asgn[j], asgn[i]]
for j in range(len(G)) for i in list(G[j])])
return S
def greedySimilarityMax(G, similarityMatrix, nrounds=5):
pairList = [(a,b) for a in range(len(G)) for b in range(a)]
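    # every unordered node pair (a, b) with b < a is a candidate swap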
maxSimSums = []
asgns = []
for i in range(nrounds):
# get random indices for initial node:article assignment
init_ids = list(range(len(G)))
random.shuffle(init_ids)
# assign articles to nodes and compute initial similarity sum
curAsgn = dict((key, init_ids[key]) for key in range(len(G)))
curSimSum = computeSimSum(G, similarityMatrix, curAsgn)
# maximisation loop - repeats until S can't increase
while True:
# for each node pair, swap the nodes recompute similarity sum
simSums = []
for edge in pairList:
tempAsgn = dict(curAsgn)
tempAsgn[edge[0]] = curAsgn[edge[1]]
tempAsgn[edge[1]] = curAsgn[edge[0]]
# Recompute similarity sum
tempSimSum = computeSimSum(G, similarityMatrix, tempAsgn)
simSums.append(tempSimSum)
# find the max possible new similarity score
# then update curAsgn if the new max score > old score
maxNewSimSum = max(simSums)
if maxNewSimSum > curSimSum:
nodesToSwap = pairList[simSums.index(maxNewSimSum)]
oldAsgn = dict(curAsgn)
curAsgn[nodesToSwap[0]] = oldAsgn[nodesToSwap[1]]
curAsgn[nodesToSwap[1]] = oldAsgn[nodesToSwap[0]]
curSimSum = maxNewSimSum # no need to recompute, know the value already
else:
break
maxSimSums.append(curSimSum)
asgns.append(curAsgn)
bestRound = maxSimSums.index(max(maxSimSums))
bestAsgn = asgns[bestRound]
print('Best S = ' + str(maxSimSums[bestRound]))
return bestAsgn
| [
"random.shuffle"
]
| [((663, 687), 'random.shuffle', 'random.shuffle', (['init_ids'], {}), '(init_ids)\n', (677, 687), False, 'import random\n')] |
import sys
import numpy as np
import shutil
import time
import itertools as it
import collections
import ctypes as ct
import os
import copy
sys.path.append(os.path.dirname(__file__))
from ThreadStoppable import ThreadStoppable
class Idq801(object):
def __init__(
self,
deviceId=-1,
timestamp_buffer_size=int(1e6),
integration_time_ms=0.5 * 1e3,
coincidence_window_bins=1000,
max_retry=3,
delay_retry_sec=0.01,
clean_data_directory=False,
data_directory="Idq801Data",
processing="external",
):
self._max_retry = max_retry
self._set_check_delay = delay_retry_sec # Delay in seconds between setting and
# checking that a parameter was set.
self._data_directory = data_directory
self._wait_for_settings = 1
self._processing_dict = {"i": "internal", "e": "external"}
processing = processing.lower()
        assert processing in self._processing_dict.values()
self._processing = processing
if not os.path.isdir(data_directory):
os.mkdir(data_directory)
if clean_data_directory:
self.clean_data_directory()
module_path = os.path.dirname(__file__) + "/"
if sys.platform == "linux":
self.idq801Lib = ct.CDLL(module_path + "libtdcbase.so")
elif sys.platform == "win32":
self.idq801Lib = ct.CDLL(module_path + "./tdcbase.dll")
else:
raise OSError("Invalid operating system")
if self.idq801Lib.TDC_init(deviceId):
raise RuntimeError("Could not connect to the ID801 counter.")
# Initial parameters.
self.unset_channel(-1)
self.set_timestamp_buffer_size(timestamp_buffer_size)
self.integration_time_ms = integration_time_ms
if self._processing == self._processing_dict["i"]:
self.set_integration_time(integration_time_ms)
else:
self.set_integration_time(1.0e-3) # 1us integration time.
        self.set_coincidence_window_bins(coincidence_window_bins)
self._time_last_get_timestamps = time.time()
self.channel_delays = {
"1": 0,
"2": 0,
"3": 0,
"4": 0,
"5": 0,
"6": 0,
"7": 0,
"8": 0,
}
self.set_channel_delays_ns(self.channel_delays)
self.accidental_delay = 0
def __del__(self):
self.idq801Lib.TDC_deInit()
def _set_value(self, set_value, setter, getter):
"""Sets a value and makes sure it was set."""
attempt = 0
is_set = False
while not is_set and attempt < self._max_retry:
attempt += 1
setter(set_value)
time.sleep(self._set_check_delay)
try:
if list(set_value) == list(getter()):
is_set = True
except TypeError:
if set_value == getter():
is_set = True
if not is_set:
raise RuntimeError(
"Unable to set the value using %s to %s after %i attempts."
% (setter.__name__, str(set_value), self._max_retry)
)
def _get_device_params(self):
cm = ct.c_int32()
cw = ct.c_int32()
ew = ct.c_int32()
self.idq801Lib.TDC_getDeviceParams(ct.byref(cm), ct.byref(cw), ct.byref(ew))
return (cm, cw, ew)
def _set_processing(self, processing):
processing = processing.lower()
assert processing in self._processing_dict.values()
self._processing = processing
if processing == self._processing_dict["i"]:
self.set_integration_time(self.integration_time_ms)
return self._processing
def set_processing_internal(self):
return self._set_processing("internal")
def set_processing_external(self):
return self._set_processing("external")
def clean_data_directory(self):
"""
Deletes all data in the `Idq801Data` directory.
"""
shutil.rmtree(self._data_directory, ignore_errors=True)
os.mkdir(self._data_directory)
def get_timebase(self):
self.idq801Lib.TDC_getTimebase.restype = ct.c_double
tb = self.idq801Lib.TDC_getTimebase()
return tb
def get_mask_channels(self):
cm, _, _ = self._get_device_params()
return cm.value
def get_status_channels(self):
cm, cw, ew = self._get_device_params()
channels_enabled = [bool(int(c)) for c in bin(cm.value)[2:]][::-1]
padLength = 8 - len(channels_enabled)
channels_enabled.extend([False] * padLength)
return tuple(channels_enabled)
def get_enabled_channels(self):
channels_status = self.get_status_channels()
channels_enabled = tuple(
i + 1 for i, v in enumerate(channels_status) if v == True
)
return channels_enabled
def get_disabled_channels(self):
channels_status = self.get_status_channels()
channels_disabled = tuple(
i + 1 for i, v in enumerate(channels_status) if v == False
)
return channels_disabled
def is_channel_enabled(self, channel):
assert 1 <= channel <= 8, "Invalid choice channel range."
channel -= 1
channel_status = self.get_status_channels()[channel]
return channel_status
def _get_channel_mask(self, channel, set_unset):
def channel_mask_from_channel_list(channels_enabled):
channel_mask = 0
for b in channels_enabled[::-1]:
                channel_mask |= 1 << (b - 1)  # set the bit for this channel
return channel_mask
set_unset = set_unset.lower()
assert set_unset in ("set", "unset"), (
"Invalid `set_unset` choice %s." % set_unset
)
if isinstance(channel, str):
channel = channel.lower()
if channel == "all" or channel == -1:
channel_mask = 0xFF
elif channel in range(1, 9):
            channel_mask = 1 << (channel - 1)  # bit 0 corresponds to channel 1
elif isinstance(channel, collections.Iterable):
channel_mask = channel_mask_from_channel_list(channel)
else:
raise TypeError("Invalid `channel` choice.")
if set_unset == "unset":
channel_mask ^= 0xFF
return channel_mask
def _set_unset_channel(self, channel, set_unset):
self._channel_mask = self._get_channel_mask(channel, set_unset)
self._set_value(
self._channel_mask,
self.idq801Lib.TDC_enableChannels,
self.get_mask_channels,
)
return self._channel_mask
def set_channel(self, channel):
"""Choose which channels to enable.
Options include:
* -1 or 'all' for (all channels).
* A single number for channel to be enabled.
* An iterable containing the channels
to be enables. e.g. (1,4,5)
* Default is no channels are enabled.
"""
return self._set_unset_channel(channel, "set")
def unset_channel(self, channel):
"""Choose which channels to disable.
Options include:
* -1 or 'all' for (all channels).
* A single number for channel to be disabled.
* An iterable containing the channels
to be disables. e.g. (1,4,5)
* Default is no channels are disabled.
"""
return self._set_unset_channel(channel, "unset")
def get_coincidence_window_bins(self):
cm, cw, ew = self._get_device_params()
return cw.value
def get_coincidence_window_ns(self):
bin = self.get_timebase()
return bin * self.get_coincidence_window_bins() * 1e9
def set_coincidence_window_bins(self, coincidence_window_bins):
coincidence_window_bins = int(coincidence_window_bins)
if not 0 < coincidence_window_bins <= 65535:
raise ValueError(
"The chosen number of coincidence \
window bins is not in the range (0,65535]."
)
self._set_value(
coincidence_window_bins,
self.idq801Lib.TDC_setCoincidenceWindow,
self.get_coincidence_window_bins,
)
def set_coincidence_window_ns(self, coincidence_window_ns):
bin = self.get_timebase()
coincidence_window_bins = int(coincidence_window_ns * 1e-9 / bin)
return self.set_coincidence_window_bins(coincidence_window_bins)
def get_integration_time(self):
cm, cw, ew = self._get_device_params()
return ew.value
def freeze_buffers(self):
self.idq801Lib.TDC_freezeBuffers(True)
def unfreeze_buffers(self):
self.idq801Lib.TDC_freezeBuffers(False)
def set_integration_time(self, window_time_ms):
window_time_ms = round(window_time_ms)
if self._processing == self._processing_dict["i"]:
if not 0 < window_time_ms <= 65535:
raise ValueError(
"The chosen exposure window is not \
in the range (0,65535]. Can't do more than 65.5s \
integration time internally."
)
self._set_value(
                window_time_ms,
self.idq801Lib.TDC_setExposureTime,
self.get_integration_time,
)
def get_data_lost_status(self):
"""Returns true if data is being lost, and false
if data is not being lost.
"""
# Get the status of the lost latch.
lost = ct.c_int32()
self.idq801Lib.TDC_getDataLost(ct.byref(lost))
latch = lost.value
# Calls the function again to clear the lost latch.
self.idq801Lib.TDC_getDataLost(ct.byref(lost))
return latch
def get_timestamp_buffer_size(self):
size = ct.c_int32()
self.idq801Lib.TDC_getTimestampBufferSize(ct.byref(size))
return size.value
def set_timestamp_buffer_size(self, size):
"""`size` is the amount of timestamps that the
the counter will store. Range is 1->1000000
"""
self._set_value(
size,
self.idq801Lib.TDC_setTimestampBufferSize,
self.get_timestamp_buffer_size,
)
def get_timestamps(self, clear_retrieved_timestamps=True, trim_time_s=None):
"""
Gets all the time stamps in the buffer and returns
a dictionary corresponding to the timestamps in each
channel.
args:
clear_retrieved_timestamps(bool): Clears the timestamp
buffer of the IDQ801 after reading.
            trim_time_s(float, None): The span of timestamps, in
                seconds, measured from the first timestamp, to keep.
If `None`, all timestamps are returned. Multiple
channels are all trimmed starting from the lowest
timestamps of all the channels combined.
returns:
dict: A dictionary containing numpy arrays with the
timestamps of each channel. The time from the
last calling of this function is also returned
in the dictionary.
"""
if self.get_timestamp_buffer_size() == 0:
raise RuntimeError(
"The timestamp buffer size is 0. \
Can't get timestamps. Need to set the timestamp \
buffer."
)
r = ct.c_int32(clear_retrieved_timestamps)
ts = (ct.c_int64 * self.get_timestamp_buffer_size())()
c = (ct.c_int8 * self.get_timestamp_buffer_size())()
v = ct.c_int32()
self.idq801Lib.TDC_getLastTimestamps(r, ts, c, ct.byref(v))
time_read = time.time()
time_diff = time_read - self._time_last_get_timestamps
self._time_last_get_timestamps = time_read
channel = np.frombuffer(c, dtype=np.int8)
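        # Split the shared timestamp buffer into one array per enabled channel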
channel_masks = [
channel == i for i in range(4) if self._channel_mask & (1 << i)
]
timestamps = np.frombuffer(ts, dtype=np.int64)
timestamps_masked = {
str(c + 1): timestamps[c_m] for c, c_m in enumerate(channel_masks)
}
timestamps_masked.update((k, v[v > 0]) for k, v in timestamps_masked.items())
last_counts = []
if trim_time_s:
for timestamps in timestamps_masked.values():
if timestamps.size:
first_count = timestamps[0]
last_counts.append(
first_count + int(trim_time_s / self.get_timebase() + 0.5)
)
if len(last_counts):
last_count = np.min(last_counts)
for channel, timestamps in timestamps_masked.items():
if timestamps.size:
last_idx = np.searchsorted(timestamps, last_count, "right")
timestamps_masked[channel] = timestamps[: last_idx - 1]
timestamps_masked["time_diff"] = time_diff
return timestamps_masked
def _get_coins(self, timestamps_1, timestamps_2, method="2"):
t2 = np.array(timestamps_2, dtype=np.int64)
assert method in ("1", "2"), "Invalid method chosen."
if method == "1":
t1 = np.empty(len(timestamps_1) + 2, dtype=np.int64)
t1[0] = 0
t1[-1] = np.iinfo(np.int64).max
t1[1:-1] = timestamps_1
t2_pos = np.searchsorted(t1, t2)
t1_pos_forw = t2_pos
t1_pos_back = t2_pos - 1
t1_pos_back[t1_pos_back == -1] = 0
dt_forw = np.abs(t1[t1_pos_forw] - t2) <= self.get_coincidence_window_bins()
dt_back = np.abs(t1[t1_pos_back] - t2) <= self.get_coincidence_window_bins()
coin_forw_args = dt_forw.nonzero()[0]
coin_back_args = dt_back.nonzero()[0]
coins_forw = np.c_[t1_pos_forw[coin_forw_args] - 1, coin_forw_args]
coins_back = np.c_[t1_pos_back[coin_back_args] - 1, coin_back_args]
coins = np.vstack((coins_back, coins_forw))
elif method == "2":
t1 = np.array(timestamps_1, dtype=np.int64)
l = np.searchsorted(t1, t2 - self.get_coincidence_window_bins() / 2)
r = np.searchsorted(t1, t2 + self.get_coincidence_window_bins() / 2)
args = np.where(l != r)[0]
coins = np.c_[r[args], args]
return coins
def get_coin_counts(
self, coin_channels, accidentals_delay_ns=None, trim_time_s=None
):
bin = self.get_timebase()
timestamps = self.get_timestamps(
clear_retrieved_timestamps=True, trim_time_s=trim_time_s
)
time_diff = timestamps["time_diff"]
timestamps.pop("time_diff", None)
coin_counts = {}
acc_counts = {}
# Get singles counts
for c in coin_channels:
if str(c) in timestamps:
coin_counts[str(c)] = len(timestamps[str(c)])
else:
coin_counts[str(c)] = 0
coin_combinations = list(it.combinations(coin_channels, 2))
for c in coin_combinations:
# Get coincidence counts
if str(c[0]) in timestamps and str(c[1]) in timestamps:
coin_counts[str(c[0]) + "/" + str(c[1])] = len(
self._get_coins(timestamps[str(c[0])], timestamps[str(c[1])])
)
else:
coin_counts[str(c[0]) + "/" + str(c[1])] = 0
if accidentals_delay_ns != None:
accidentals_delay_bin = int(accidentals_delay_ns * 1e-9 / bin)
for c in coin_combinations:
# Get accidental counts
if str(c[0]) in timestamps and str(c[1]) in timestamps:
acc_counts[str(c[0]) + "/" + str(c[1])] = len(
self._get_coins(
timestamps[str(c[0])],
timestamps[str(c[1])] + accidentals_delay_bin,
)
)
else:
acc_counts[str(c[0]) + "/" + str(c[1])] = 0
return coin_counts, acc_counts, timestamps
def scan_channel_delay(
self, coin_channels, scan_channel, scan_range_ns, integration_time=1.0
):
"""
Scans channel delay electronically - integrates once then applies delays to the timestamps to find coins
Args:
coin_channels: channels to look at coins
scan_channel: channel to scan
scan_range_ns: +/- range of delay in ns
integration_time: initial integration time
Returns: max coin reading, delay in ns of the max, all coin counts, delay range
"""
current_delays_bins = self.get_channel_delays_bins()
self.set_channel_delays_ns({str(coin_channels[0]): 0, str(coin_channels[1]): 0})
bin = self.get_timebase()
self.get_timestamps()
time.sleep(integration_time)
original_timestamps = self.get_timestamps()
delay_range = range(-scan_range_ns, scan_range_ns + 1)
coin_counts = np.zeros(len(delay_range))
timestamps = copy.deepcopy(original_timestamps)
for idd, d in enumerate(delay_range):
timestamps[str(scan_channel)] = copy.deepcopy(
original_timestamps[str(scan_channel)]
) + int(d * 1e-9 / bin)
coin_counts[idd] = len(
self._get_coins(
timestamps[str(coin_channels[0])], timestamps[str(coin_channels[1])]
)
)
print(
"delay channel = %s, delay = %s ns, coin counts = %s"
% (scan_channel, d, int(coin_counts[idd]))
)
max_coin = np.max(coin_counts)
max_coin_delay = delay_range[np.argmax(coin_counts)]
self.set_channel_delays_bins(current_delays_bins)
return max_coin, max_coin_delay, coin_counts, delay_range
def get_timestamps_continuous(self, seconds=-1):
"""Runs `gets_timestamps` continuously in a separate
thread for `seconds` amount of seconds in a loop.
If seconds == -1, it doesn't timeout. Returns a
thread object that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
clear_retrieved_timestamps = True
t = ThreadStoppable(
self.get_timestamps, seconds, True, args=(clear_retrieved_timestamps,)
)
return t
def write_timestamps_to_file(self):
"""Writes the timestamps in the buffer to a
file.
"""
timestamp_dir = "Timestamps"
if not os.path.isdir(self._data_directory + "/" + timestamp_dir):
os.mkdir(self._data_directory + "/" + timestamp_dir)
filename_prefix = (
self._data_directory + "/" + timestamp_dir + "/" + "timestamp_channel_"
)
filenames = [filename_prefix + str(i) + ".dat" for i in range(1, 9)]
for fn in filenames:
if not os.path.exists(fn):
open(fn, "w").close()
ts = self.get_timestamps(clear_retrieved_timestamps=True)
for i, fn in enumerate(filenames):
with open(fn, "a") as fs:
try:
for t in ts[str(i + 1)]:
fs.write(str(t) + "\n")
except KeyError:
pass
def write_timestamps_to_file_continuous(self, seconds=-1):
"""Runs `write_timestamps_to_file` continuously in a separate
thread for `seconds` amount of seconds in a loop. If
seconds == -1, it doesn't timeout. Returns a thread object
that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(self.write_timestamps_to_file, seconds)
return t
def get_counters(self):
"""Returns a list of the most recent value of
of the counters.
"""
counters = (ct.c_int32 * 19)()
self.idq801Lib.TDC_getCoincCounters(counters, None)
return list(counters)
def get_counters_continuous(self, seconds=-1):
"""Runs `get_counters` continuously in a separate thread for
`seconds` amount of seconds in a loop. If seconds == -1,
it doesn't timeout. Returns a thread object that can be
stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(self.get_counters, seconds, True)
return t
def write_counters_to_file(self, filename="counters.dat"):
"""Writes the most recent values of the internal
counters and coincidence counters to a file
named `filename`.
"""
fn = self._data_directory + "/" + filename
if not os.path.exists(fn):
with open(fn, "w") as fs:
header = (
"1,2,3,4,5,6,7,8,1/2,1/3,1/4,2/3,2/4,3/4,"
"1/2/3,1/2/4,1/3/4,2/3/4,1/2/3/4"
)
fs.write("#" + header + "\n")
counters = self.get_counters()
counters_str = ",".join([str(c) for c in counters])
with open(fn, "a") as fs:
fs.write(counters_str + "\n")
def write_counters_to_file_continuous(self, seconds=-1, filename="counters.dat"):
"""Runs `write_counters_to_file` continuously in a separate
thread for `seconds` amount of seconds in a loop. If
seconds == -1, it doesn't timeout. Returns a thread
object that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(
self.write_counters_to_file, seconds, False, args=(filename,)
)
return t
def _get_channel_delays(self):
channels = range(8)
channels = (ct.c_int32 * len(channels))(*channels)
self.idq801Lib.TDC_getChannelDelays(channels)
return channels
def get_channel_delays_bins(self):
return list(self._get_channel_delays())
def get_channel_delays_ns(self):
bin = self.get_timebase()
delays_bins = list(self._get_channel_delays())
return [d * 1e9 * bin for d in delays_bins]
def set_channel_delays_bins(self, delays_bins):
delays = (ct.c_int * len(delays_bins))(*delays_bins)
return self._set_value(
delays, self.idq801Lib.TDC_setChannelDelays, self._get_channel_delays
)
def set_channel_delays_ns(self, delays_ns_dict):
"""
Set channel delays in ns. The delays are in a dictionary.
Args:
            delays_ns_dict: a mapping from channel number to the desired delay in ns
        Returns:
            The result of applying the converted delays via `set_channel_delays_bins`.
"""
delays_ns = self.get_channel_delays_ns()
for channel in delays_ns_dict.keys():
self.channel_delays[str(channel)] = delays_ns[int(channel) - 1]
delays_ns[int(channel) - 1] = delays_ns_dict[str(channel)]
bin = self.get_timebase()
delays_bins = [int(d * 1e-9 / bin) for d in delays_ns]
return self.set_channel_delays_bins(delays_bins)
def main():
idq801 = Idq801()
idq801.clean_data_directory()
idq801.set_channel((1, 2))
# t1 = idq801.write_counters_to_file_continuous(2)
# t2 = idq801.write_timestamps_to_file_continuous(2)
#
if __name__ == "__main__":
main()
| [
"ctypes.c_int32",
"numpy.iinfo",
"time.sleep",
"numpy.array",
"copy.deepcopy",
"ctypes.CDLL",
"ThreadStoppable.ThreadStoppable",
"os.path.exists",
"numpy.searchsorted",
"numpy.where",
"numpy.max",
"os.path.isdir",
"os.mkdir",
"numpy.vstack",
"numpy.min",
"numpy.frombuffer",
"numpy.abs",
"numpy.argmax",
"os.path.dirname",
"time.time",
"ctypes.byref",
"itertools.combinations",
"shutil.rmtree"
]
| [((157, 182), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (172, 182), False, 'import os\n'), ((2118, 2129), 'time.time', 'time.time', ([], {}), '()\n', (2127, 2129), False, 'import time\n'), ((3264, 3276), 'ctypes.c_int32', 'ct.c_int32', ([], {}), '()\n', (3274, 3276), True, 'import ctypes as ct\n'), ((3290, 3302), 'ctypes.c_int32', 'ct.c_int32', ([], {}), '()\n', (3300, 3302), True, 'import ctypes as ct\n'), ((3316, 3328), 'ctypes.c_int32', 'ct.c_int32', ([], {}), '()\n', (3326, 3328), True, 'import ctypes as ct\n'), ((4074, 4129), 'shutil.rmtree', 'shutil.rmtree', (['self._data_directory'], {'ignore_errors': '(True)'}), '(self._data_directory, ignore_errors=True)\n', (4087, 4129), False, 'import shutil\n'), ((4138, 4168), 'os.mkdir', 'os.mkdir', (['self._data_directory'], {}), '(self._data_directory)\n', (4146, 4168), False, 'import os\n'), ((9637, 9649), 'ctypes.c_int32', 'ct.c_int32', ([], {}), '()\n', (9647, 9649), True, 'import ctypes as ct\n'), ((9927, 9939), 'ctypes.c_int32', 'ct.c_int32', ([], {}), '()\n', (9937, 9939), True, 'import ctypes as ct\n'), ((11556, 11594), 'ctypes.c_int32', 'ct.c_int32', (['clear_retrieved_timestamps'], {}), '(clear_retrieved_timestamps)\n', (11566, 11594), True, 'import ctypes as ct\n'), ((11731, 11743), 'ctypes.c_int32', 'ct.c_int32', ([], {}), '()\n', (11741, 11743), True, 'import ctypes as ct\n'), ((11832, 11843), 'time.time', 'time.time', ([], {}), '()\n', (11841, 11843), False, 'import time\n'), ((11977, 12008), 'numpy.frombuffer', 'np.frombuffer', (['c'], {'dtype': 'np.int8'}), '(c, dtype=np.int8)\n', (11990, 12008), True, 'import numpy as np\n'), ((12142, 12175), 'numpy.frombuffer', 'np.frombuffer', (['ts'], {'dtype': 'np.int64'}), '(ts, dtype=np.int64)\n', (12155, 12175), True, 'import numpy as np\n'), ((13241, 13279), 'numpy.array', 'np.array', (['timestamps_2'], {'dtype': 'np.int64'}), '(timestamps_2, dtype=np.int64)\n', (13249, 13279), True, 'import numpy as np\n'), ((17086, 17114), 'time.sleep', 'time.sleep', (['integration_time'], {}), '(integration_time)\n', (17096, 17114), False, 'import time\n'), ((17301, 17335), 'copy.deepcopy', 'copy.deepcopy', (['original_timestamps'], {}), '(original_timestamps)\n', (17314, 17335), False, 'import copy\n'), ((17906, 17925), 'numpy.max', 'np.max', (['coin_counts'], {}), '(coin_counts)\n', (17912, 17925), True, 'import numpy as np\n'), ((18418, 18453), 'time.sleep', 'time.sleep', (['self._wait_for_settings'], {}), '(self._wait_for_settings)\n', (18428, 18453), False, 'import time\n'), ((18508, 18600), 'ThreadStoppable.ThreadStoppable', 'ThreadStoppable', (['self.get_timestamps', 'seconds', '(True)'], {'args': '(clear_retrieved_timestamps,)'}), '(self.get_timestamps, seconds, True, args=(\n clear_retrieved_timestamps,))\n', (18523, 18600), False, 'from ThreadStoppable import ThreadStoppable\n'), ((19883, 19918), 'time.sleep', 'time.sleep', (['self._wait_for_settings'], {}), '(self._wait_for_settings)\n', (19893, 19918), False, 'import time\n'), ((19931, 19986), 'ThreadStoppable.ThreadStoppable', 'ThreadStoppable', (['self.write_timestamps_to_file', 'seconds'], {}), '(self.write_timestamps_to_file, seconds)\n', (19946, 19986), False, 'from ThreadStoppable import ThreadStoppable\n'), ((20554, 20589), 'time.sleep', 'time.sleep', (['self._wait_for_settings'], {}), '(self._wait_for_settings)\n', (20564, 20589), False, 'import time\n'), ((20602, 20651), 'ThreadStoppable.ThreadStoppable', 'ThreadStoppable', (['self.get_counters', 'seconds', '(True)'], {}), '(self.get_counters, 
seconds, True)\n', (20617, 20651), False, 'from ThreadStoppable import ThreadStoppable\n'), ((21735, 21770), 'time.sleep', 'time.sleep', (['self._wait_for_settings'], {}), '(self._wait_for_settings)\n', (21745, 21770), False, 'import time\n'), ((21783, 21861), 'ThreadStoppable.ThreadStoppable', 'ThreadStoppable', (['self.write_counters_to_file', 'seconds', '(False)'], {'args': '(filename,)'}), '(self.write_counters_to_file, seconds, False, args=(filename,))\n', (21798, 21861), False, 'from ThreadStoppable import ThreadStoppable\n'), ((1052, 1081), 'os.path.isdir', 'os.path.isdir', (['data_directory'], {}), '(data_directory)\n', (1065, 1081), False, 'import os\n'), ((1095, 1119), 'os.mkdir', 'os.mkdir', (['data_directory'], {}), '(data_directory)\n', (1103, 1119), False, 'import os\n'), ((1217, 1242), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1232, 1242), False, 'import os\n'), ((1314, 1352), 'ctypes.CDLL', 'ct.CDLL', (["(module_path + 'libtdcbase.so')"], {}), "(module_path + 'libtdcbase.so')\n", (1321, 1352), True, 'import ctypes as ct\n'), ((2756, 2789), 'time.sleep', 'time.sleep', (['self._set_check_delay'], {}), '(self._set_check_delay)\n', (2766, 2789), False, 'import time\n'), ((3372, 3384), 'ctypes.byref', 'ct.byref', (['cm'], {}), '(cm)\n', (3380, 3384), True, 'import ctypes as ct\n'), ((3386, 3398), 'ctypes.byref', 'ct.byref', (['cw'], {}), '(cw)\n', (3394, 3398), True, 'import ctypes as ct\n'), ((3400, 3412), 'ctypes.byref', 'ct.byref', (['ew'], {}), '(ew)\n', (3408, 3412), True, 'import ctypes as ct\n'), ((9689, 9703), 'ctypes.byref', 'ct.byref', (['lost'], {}), '(lost)\n', (9697, 9703), True, 'import ctypes as ct\n'), ((9832, 9846), 'ctypes.byref', 'ct.byref', (['lost'], {}), '(lost)\n', (9840, 9846), True, 'import ctypes as ct\n'), ((9990, 10004), 'ctypes.byref', 'ct.byref', (['size'], {}), '(size)\n', (9998, 10004), True, 'import ctypes as ct\n'), ((11799, 11810), 'ctypes.byref', 'ct.byref', (['v'], {}), '(v)\n', (11807, 11810), True, 'import ctypes as ct\n'), ((13558, 13581), 'numpy.searchsorted', 'np.searchsorted', (['t1', 't2'], {}), '(t1, t2)\n', (13573, 13581), True, 'import numpy as np\n'), ((14160, 14195), 'numpy.vstack', 'np.vstack', (['(coins_back, coins_forw)'], {}), '((coins_back, coins_forw))\n', (14169, 14195), True, 'import numpy as np\n'), ((15195, 15228), 'itertools.combinations', 'it.combinations', (['coin_channels', '(2)'], {}), '(coin_channels, 2)\n', (15210, 15228), True, 'import itertools as it\n'), ((17963, 17985), 'numpy.argmax', 'np.argmax', (['coin_counts'], {}), '(coin_counts)\n', (17972, 17985), True, 'import numpy as np\n'), ((18806, 18863), 'os.path.isdir', 'os.path.isdir', (["(self._data_directory + '/' + timestamp_dir)"], {}), "(self._data_directory + '/' + timestamp_dir)\n", (18819, 18863), False, 'import os\n'), ((18877, 18929), 'os.mkdir', 'os.mkdir', (["(self._data_directory + '/' + timestamp_dir)"], {}), "(self._data_directory + '/' + timestamp_dir)\n", (18885, 18929), False, 'import os\n'), ((20946, 20964), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (20960, 20964), False, 'import os\n'), ((1420, 1458), 'ctypes.CDLL', 'ct.CDLL', (["(module_path + './tdcbase.dll')"], {}), "(module_path + './tdcbase.dll')\n", (1427, 1458), True, 'import ctypes as ct\n'), ((12780, 12799), 'numpy.min', 'np.min', (['last_counts'], {}), '(last_counts)\n', (12786, 12799), True, 'import numpy as np\n'), ((13477, 13495), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (13485, 13495), True, 'import numpy 
as np\n'), ((13722, 13750), 'numpy.abs', 'np.abs', (['(t1[t1_pos_forw] - t2)'], {}), '(t1[t1_pos_forw] - t2)\n', (13728, 13750), True, 'import numpy as np\n'), ((13811, 13839), 'numpy.abs', 'np.abs', (['(t1[t1_pos_back] - t2)'], {}), '(t1[t1_pos_back] - t2)\n', (13817, 13839), True, 'import numpy as np\n'), ((14241, 14279), 'numpy.array', 'np.array', (['timestamps_1'], {'dtype': 'np.int64'}), '(timestamps_1, dtype=np.int64)\n', (14249, 14279), True, 'import numpy as np\n'), ((19179, 19197), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (19193, 19197), False, 'import os\n'), ((14462, 14478), 'numpy.where', 'np.where', (['(l != r)'], {}), '(l != r)\n', (14470, 14478), True, 'import numpy as np\n'), ((12946, 12994), 'numpy.searchsorted', 'np.searchsorted', (['timestamps', 'last_count', '"""right"""'], {}), "(timestamps, last_count, 'right')\n", (12961, 12994), True, 'import numpy as np\n')] |
# This program reads a file representing web server logs in common log format and streams them into a PubSub topic
# with lag characteristics as determined by command-line arguments
import argparse
from google.cloud import pubsub_v1
import time
from datetime import datetime, timezone
import random
from anytree.importer import DictImporter
import json
from multiprocessing import Process
parser = argparse.ArgumentParser(__file__, description="event_generator")
parser.add_argument("--taxonomy", "-x", dest="taxonomy_fp",
help="A .json file representing a taxonomy of web resources",
default="taxonomy.json")
parser.add_argument("--users_fp", "-u", dest="users_fp",
help="A .csv file of users",
default="users.csv")
parser.add_argument("--off_to_on", "-off", dest="off_to_on_prob", type=float,
help="A float representing the probability that a user who is offline will come online",
default=.25)
parser.add_argument("--on_to_off", "-on", dest="on_to_off_prob", type=float,
help="A float representing the probability that a user who is online will go offline",
default=.1)
parser.add_argument("--max_lag_millis", '-l', dest="max_lag_millis", type=int,
help="An integer representing the maximum amount of lag in millisecond", default=250)
parser.add_argument("--project_id", "-p", type=str, dest="project_id", help="A GCP Project ID", required=True)
parser.add_argument("--topic_name", "-t", dest="topic_name", type=str,
help="The name of the topic where the messages to be published", required=True)
avg_secs_between_events = 5
args = parser.parse_args()
taxonomy_fp = args.taxonomy_fp
users_fp = args.users_fp
online_to_offline_probability = args.on_to_off_prob
offline_to_online_probability = args.off_to_on_prob
max_lag_millis = args.max_lag_millis
project_id = args.project_id
topic_name = args.topic_name
min_file_size_bytes = 100
max_file_size_bytes = 500
verbs = ["GET"]
responses = [200]
log_fields = ["ip", "user_id", "lat", "lng", "timestamp", "http_request",
"http_response", "num_bytes", "user_agent"]
def extract_resources(taxonomy_filepath):
"""
Reads a .json representing a taxonomy and returns
a data structure representing their hierarchical relationship
:param taxonomy_file: a string representing a path to a .json file
:return: Node representing root of taxonomic tree
"""
try:
with open(taxonomy_filepath, 'r') as fp:
json_str = fp.read()
json_data = json.loads(json_str)
root = DictImporter().import_(json_data)
finally:
fp.close()
return root
def read_users(users_fp):
"""
    Reads a .csv from @users_fp representing users into a list of dictionaries,
    each element of which represents a user
    :param users_fp: a .csv file where each line represents a user
:return: a list of dictionaries
"""
users = []
with open(users_fp, 'r') as fp:
fields = fp.readline().rstrip().split(",")
for line in fp:
user = dict(zip(fields, line.rstrip().split(",")))
users.append(user)
return users
def sleep_then_publish_burst(burst, publisher, topic_path):
"""
:param burst: a list of dictionaries, each representing an event
:param publisher: a PubSub publisher
:param topic_path: a topic path for PubSub
:return:
"""
sleep_secs = random.uniform(0, max_lag_millis/1000)
time.sleep(sleep_secs)
publish_burst(burst, publisher, topic_path)
def publish_burst(burst, publisher, topic_path):
"""
Publishes and prints each event
:param burst: a list of dictionaries, each representing an event
:param publisher: a PubSub publisher
:param topic_path: a topic path for PubSub
:return:
"""
for event_dict in burst:
json_str = json.dumps(event_dict)
data = json_str.encode('utf-8')
publisher.publish(topic_path, data=data, timestamp=event_dict['timestamp'])
def create_user_process(user, root):
"""
Code for continuously-running process representing a user publishing
events to pubsub
:param user: a dictionary representing characteristics of the user
:param root: an instance of AnyNode representing the home page of a website
:return:
"""
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_name)
user['page'] = root
user['is_online'] = True
user['offline_events'] = []
while True:
time_between_events = random.uniform(0, avg_secs_between_events * 2)
time.sleep(time_between_events)
prob = random.random()
event = generate_event(user)
if user['is_online']:
if prob < online_to_offline_probability:
user['is_online'] = False
user['offline_events'] = [event]
else:
sleep_then_publish_burst([event], publisher, topic_path)
else:
user['offline_events'].append(event)
if prob < offline_to_online_probability:
user['is_online'] = True
sleep_then_publish_burst(user['offline_events'], publisher, topic_path)
user['offline_events'] = []
def generate_event(user):
"""
Returns a dictionary representing an event
:param user:
:return:
"""
user['page'] = get_next_page(user)
uri = str(user['page'].name)
event_time = datetime.now(tz=timezone.utc)
current_time_str = event_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
file_size_bytes = random.choice(range(min_file_size_bytes, max_file_size_bytes))
http_request = "\"{} {} HTTP/1.0\"".format(random.choice(verbs), uri)
http_response = random.choice(responses)
event_values = [user['ip'], user['id'], float(user['lat']), float(user['lng']), current_time_str, http_request,
http_response, file_size_bytes, user['user_agent']]
return dict(zip(log_fields, event_values))
def get_next_page(user):
"""
Consults the user's representation of the web site taxonomy to determine the next page that they visit
:param user:
:return:
"""
possible_next_pages = [user['page']]
if not user['page'].is_leaf:
possible_next_pages += list(user['page'].children)
if (user['page'].parent != None):
possible_next_pages += [user['page'].parent]
next_page = random.choice(possible_next_pages)
return next_page
if __name__ == '__main__':
users = read_users(users_fp)
root = extract_resources(taxonomy_fp)
processes = [Process(target=create_user_process, args=(user, root))
for user in users]
[process.start() for process in processes]
while True:
time.sleep(1) | [
"random.uniform",
"random.choice",
"json.loads",
"argparse.ArgumentParser",
"multiprocessing.Process",
"json.dumps",
"time.sleep",
"datetime.datetime.now",
"google.cloud.pubsub_v1.PublisherClient",
"random.random",
"anytree.importer.DictImporter"
]
| [((412, 476), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['__file__'], {'description': '"""event_generator"""'}), "(__file__, description='event_generator')\n", (435, 476), False, 'import argparse\n'), ((3758, 3798), 'random.uniform', 'random.uniform', (['(0)', '(max_lag_millis / 1000)'], {}), '(0, max_lag_millis / 1000)\n', (3772, 3798), False, 'import random\n'), ((3802, 3824), 'time.sleep', 'time.sleep', (['sleep_secs'], {}), '(sleep_secs)\n', (3812, 3824), False, 'import time\n'), ((4929, 4956), 'google.cloud.pubsub_v1.PublisherClient', 'pubsub_v1.PublisherClient', ([], {}), '()\n', (4954, 4956), False, 'from google.cloud import pubsub_v1\n'), ((6103, 6132), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'timezone.utc'}), '(tz=timezone.utc)\n', (6115, 6132), False, 'from datetime import datetime, timezone\n'), ((6384, 6408), 'random.choice', 'random.choice', (['responses'], {}), '(responses)\n', (6397, 6408), False, 'import random\n'), ((7081, 7115), 'random.choice', 'random.choice', (['possible_next_pages'], {}), '(possible_next_pages)\n', (7094, 7115), False, 'import random\n'), ((4327, 4349), 'json.dumps', 'json.dumps', (['event_dict'], {}), '(event_dict)\n', (4337, 4349), False, 'import json\n'), ((5160, 5206), 'random.uniform', 'random.uniform', (['(0)', '(avg_secs_between_events * 2)'], {}), '(0, avg_secs_between_events * 2)\n', (5174, 5206), False, 'import random\n'), ((5216, 5247), 'time.sleep', 'time.sleep', (['time_between_events'], {}), '(time_between_events)\n', (5226, 5247), False, 'import time\n'), ((5264, 5279), 'random.random', 'random.random', ([], {}), '()\n', (5277, 5279), False, 'import random\n'), ((6336, 6356), 'random.choice', 'random.choice', (['verbs'], {}), '(verbs)\n', (6349, 6356), False, 'import random\n'), ((7265, 7319), 'multiprocessing.Process', 'Process', ([], {'target': 'create_user_process', 'args': '(user, root)'}), '(target=create_user_process, args=(user, root))\n', (7272, 7319), False, 'from multiprocessing import Process\n'), ((7431, 7444), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7441, 7444), False, 'import time\n'), ((2715, 2735), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (2725, 2735), False, 'import json\n'), ((2756, 2770), 'anytree.importer.DictImporter', 'DictImporter', ([], {}), '()\n', (2768, 2770), False, 'from anytree.importer import DictImporter\n')] |
"""Database setup"""
# Third party library
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# initialization of the database and migration
database = SQLAlchemy()
migrate = Migrate()
| [
"flask_sqlalchemy.SQLAlchemy",
"flask_migrate.Migrate"
]
| [((177, 189), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (187, 189), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((200, 209), 'flask_migrate.Migrate', 'Migrate', ([], {}), '()\n', (207, 209), False, 'from flask_migrate import Migrate\n')] |
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of the ClientData abstract base class."""
import collections
import os.path
from typing import Callable, Mapping
import tensorflow as tf
from tensorflow_federated.python import core as tff
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.simulation import client_data
from tensorflow_federated.python.tensorflow_libs import tensor_utils
class FilePerUserClientData(client_data.ClientData):
"""A `tf.simulation.ClientData` that maps a set of files to a dataset.
This mapping is restricted to one file per user.
"""
def __init__(self, client_ids_to_files: Mapping[str, str],
dataset_fn: Callable[[str], tf.data.Dataset]):
"""Constructs a `tf.simulation.ClientData` object.
Args:
client_ids_to_files: A mapping from string client IDs to filepaths
containing the user's data.
dataset_fn: A factory function that takes a filepath (must accept
both strings and tensors) and returns a `tf.data.Dataset` corresponding
to this path.
"""
py_typecheck.check_type(client_ids_to_files, collections.abc.Mapping)
if not client_ids_to_files:
raise ValueError('`client_ids` must have at least one client ID')
py_typecheck.check_callable(dataset_fn)
self._client_ids = sorted(client_ids_to_files.keys())
def create_dataset_for_filename_fn(client_id):
return dataset_fn(client_ids_to_files[client_id])
@tff.tf_computation(tf.string)
def dataset_computation(client_id):
client_ids_to_path = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(
list(client_ids_to_files.keys()),
list(client_ids_to_files.values())), '')
client_path = client_ids_to_path.lookup(client_id)
return dataset_fn(client_path)
self._create_tf_dataset_fn = create_dataset_for_filename_fn
self._dataset_computation = dataset_computation
g = tf.Graph()
with g.as_default():
tf_dataset = self._create_tf_dataset_fn(self._client_ids[0])
self._element_type_structure = tf_dataset.element_spec
@property
def client_ids(self):
return self._client_ids
def create_tf_dataset_for_client(self, client_id):
tf_dataset = self._create_tf_dataset_fn(client_id)
tensor_utils.check_nested_equal(tf_dataset.element_spec,
self._element_type_structure)
return tf_dataset
@property
def element_type_structure(self):
return self._element_type_structure
@classmethod
def create_from_dir(cls, path, create_tf_dataset_fn=tf.data.TFRecordDataset):
"""Builds a `tff.simulation.FilePerUserClientData`.
Iterates over all files in `path`, using the filename as the client ID. Does
not recursively search `path`.
Args:
path: A directory path to search for per-client files.
      create_tf_dataset_fn: A callable that creates a `tf.data.Dataset` object
for a given file in the directory specified in `path`.
Returns:
A `tff.simulation.FilePerUserClientData` object.
"""
client_ids_to_paths_dict = {
filename: os.path.join(path, filename)
for filename in tf.io.gfile.listdir(path)
}
return FilePerUserClientData(client_ids_to_paths_dict, create_tf_dataset_fn)
@property
def dataset_computation(self):
return self._dataset_computation
| [
"tensorflow.Graph",
"tensorflow_federated.python.common_libs.py_typecheck.check_type",
"tensorflow_federated.python.core.tf_computation",
"tensorflow_federated.python.common_libs.py_typecheck.check_callable",
"tensorflow_federated.python.tensorflow_libs.tensor_utils.check_nested_equal",
"tensorflow.io.gfile.listdir"
]
| [((1678, 1747), 'tensorflow_federated.python.common_libs.py_typecheck.check_type', 'py_typecheck.check_type', (['client_ids_to_files', 'collections.abc.Mapping'], {}), '(client_ids_to_files, collections.abc.Mapping)\n', (1701, 1747), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((1856, 1895), 'tensorflow_federated.python.common_libs.py_typecheck.check_callable', 'py_typecheck.check_callable', (['dataset_fn'], {}), '(dataset_fn)\n', (1883, 1895), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((2068, 2097), 'tensorflow_federated.python.core.tf_computation', 'tff.tf_computation', (['tf.string'], {}), '(tf.string)\n', (2086, 2097), True, 'from tensorflow_federated.python import core as tff\n'), ((2562, 2572), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2570, 2572), True, 'import tensorflow as tf\n'), ((2904, 2995), 'tensorflow_federated.python.tensorflow_libs.tensor_utils.check_nested_equal', 'tensor_utils.check_nested_equal', (['tf_dataset.element_spec', 'self._element_type_structure'], {}), '(tf_dataset.element_spec, self.\n _element_type_structure)\n', (2935, 2995), False, 'from tensorflow_federated.python.tensorflow_libs import tensor_utils\n'), ((3803, 3828), 'tensorflow.io.gfile.listdir', 'tf.io.gfile.listdir', (['path'], {}), '(path)\n', (3822, 3828), True, 'import tensorflow as tf\n')] |
from django.contrib import admin
from blog.models import Post, Comment
# Register your models here.
admin.site.register(Post)
admin.site.register(Comment)
| [
"django.contrib.admin.site.register"
]
| [((105, 130), 'django.contrib.admin.site.register', 'admin.site.register', (['Post'], {}), '(Post)\n', (124, 130), False, 'from django.contrib import admin\n'), ((132, 160), 'django.contrib.admin.site.register', 'admin.site.register', (['Comment'], {}), '(Comment)\n', (151, 160), False, 'from django.contrib import admin\n')] |
# imports
from telegram.ext import (
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
)
from handler_functions.start import start
from handler_functions.bio import bio
from handler_functions.gender import gender
from handler_functions.photo import photo, skip_photo
from handler_functions.location import location, skip_location
from handler_functions.cancel import cancel
from conversation_handlers.stage_constants import *
# Adds conversation handler with the states GENDER, PHOTO, LOCATION and BIO for stage 1 of the sign up
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
GENDER: [MessageHandler(Filters.regex('^(Gentleman|Lady|I am a unicorn.)$'), gender)],
PHOTO: [MessageHandler(Filters.photo, photo), CommandHandler('skip', skip_photo)],
LOCATION: [
MessageHandler(Filters.location, location),
CommandHandler('skip', skip_location),
],
BIO: [MessageHandler(Filters.text & ~Filters.command, bio)],
},
fallbacks=[CommandHandler('cancel', cancel)],
) | [
"telegram.ext.MessageHandler",
"telegram.ext.Filters.regex",
"telegram.ext.CommandHandler"
]
| [((612, 642), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""start"""', 'start'], {}), "('start', start)\n", (626, 642), False, 'from telegram.ext import CommandHandler, MessageHandler, Filters, ConversationHandler\n'), ((1073, 1105), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""cancel"""', 'cancel'], {}), "('cancel', cancel)\n", (1087, 1105), False, 'from telegram.ext import CommandHandler, MessageHandler, Filters, ConversationHandler\n'), ((769, 805), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.photo', 'photo'], {}), '(Filters.photo, photo)\n', (783, 805), False, 'from telegram.ext import CommandHandler, MessageHandler, Filters, ConversationHandler\n'), ((807, 841), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""skip"""', 'skip_photo'], {}), "('skip', skip_photo)\n", (821, 841), False, 'from telegram.ext import CommandHandler, MessageHandler, Filters, ConversationHandler\n'), ((876, 918), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.location', 'location'], {}), '(Filters.location, location)\n', (890, 918), False, 'from telegram.ext import CommandHandler, MessageHandler, Filters, ConversationHandler\n'), ((932, 969), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""skip"""', 'skip_location'], {}), "('skip', skip_location)\n", (946, 969), False, 'from telegram.ext import CommandHandler, MessageHandler, Filters, ConversationHandler\n'), ((996, 1048), 'telegram.ext.MessageHandler', 'MessageHandler', (['(Filters.text & ~Filters.command)', 'bio'], {}), '(Filters.text & ~Filters.command, bio)\n', (1010, 1048), False, 'from telegram.ext import CommandHandler, MessageHandler, Filters, ConversationHandler\n'), ((690, 741), 'telegram.ext.Filters.regex', 'Filters.regex', (['"""^(Gentleman|Lady|I am a unicorn.)$"""'], {}), "('^(Gentleman|Lady|I am a unicorn.)$')\n", (703, 741), False, 'from telegram.ext import CommandHandler, MessageHandler, Filters, ConversationHandler\n')] |
from ocean_lib.models.data_token import DataToken
from ocean_lib.models.dtfactory import DTFactory
from ocean_lib.ocean.util import to_base_18
def test1(network, alice_wallet, dtfactory_address):
dtfactory = DTFactory(dtfactory_address)
dt_address = dtfactory.createToken('foo_blob', 'DT1', 'DT1', to_base_18(1000), from_wallet=alice_wallet)
dt = DataToken(dtfactory.get_token_address(dt_address))
assert isinstance(dt, DataToken)
assert dt.blob() == 'foo_blob'
| [
"ocean_lib.ocean.util.to_base_18",
"ocean_lib.models.dtfactory.DTFactory"
]
| [((214, 242), 'ocean_lib.models.dtfactory.DTFactory', 'DTFactory', (['dtfactory_address'], {}), '(dtfactory_address)\n', (223, 242), False, 'from ocean_lib.models.dtfactory import DTFactory\n'), ((309, 325), 'ocean_lib.ocean.util.to_base_18', 'to_base_18', (['(1000)'], {}), '(1000)\n', (319, 325), False, 'from ocean_lib.ocean.util import to_base_18\n')] |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2018"
__credits__ = ["<NAME>"]
__license__ = "Apache"
__version__ = "v1.0.0"
__maintainer__ = "<NAME>"
__email = "<EMAIL>"
__status__ = "Development"
from data_model.load_data import create_connection, select_all_tasks
from tools.data_frames import dframe_t_db
def main():
database = "/Cython_Code/database/heart.db"
# create a database connection
conn = create_connection(database)
with conn:
print("2. Query all tasks")
rows, name = select_all_tasks(conn, 'heart_table')
return dframe_t_db(rows, name)
if __name__ == '__main__':
df = main()
print(df)
| [
"tools.data_frames.dframe_t_db",
"data_model.load_data.create_connection",
"data_model.load_data.select_all_tasks"
]
| [((468, 495), 'data_model.load_data.create_connection', 'create_connection', (['database'], {}), '(database)\n', (485, 495), False, 'from data_model.load_data import create_connection, select_all_tasks\n'), ((617, 640), 'tools.data_frames.dframe_t_db', 'dframe_t_db', (['rows', 'name'], {}), '(rows, name)\n', (628, 640), False, 'from tools.data_frames import dframe_t_db\n'), ((568, 605), 'data_model.load_data.select_all_tasks', 'select_all_tasks', (['conn', '"""heart_table"""'], {}), "(conn, 'heart_table')\n", (584, 605), False, 'from data_model.load_data import create_connection, select_all_tasks\n')] |
import numpy as np
import matplotlib.pyplot as plt
TOTAL = 200
STEP = 0.25
EPS = 0.1
INITIAL_THETA = [9, 14]
def func(x):
return 0.2 * x + 3
def generate_sample(total=TOTAL):
x = 0
while x < total * STEP:
yield func(x) + np.random.uniform(-1, 1) * np.random.uniform(2, 8)
x += STEP
def cost_function(A, Y, theta):
return (Y - A@theta).T@(Y - A@theta)
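# For the quadratic cost J(theta) = (Y - A@theta).T @ (Y - A@theta), the partial
# derivative is dJ/dtheta_j = -2 * sum_i (Y_i - A_i@theta) * A_ij.  The descent
# loops below accumulate sum_i (Y_i - A_i@theta) * A_ij and step with
# theta_j += speed * derivative, i.e. against the gradient, with the constant
# factor 2 absorbed into the learning rate `speed`.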
def batch_descent(A, Y, speed=0.001):
theta = np.array(INITIAL_THETA.copy(), dtype=np.float32)
theta.reshape((len(theta), 1))
previous_cost = 10 ** 6
current_cost = cost_function(A, Y, theta)
while np.abs(previous_cost - current_cost) > EPS:
previous_cost = current_cost
derivatives = [0] * len(theta)
# ---------------------------------------------
for j in range(len(theta)):
summ = 0
for i in range(len(Y)):
summ += (Y[i] - A[i]@theta) * A[i][j]
derivatives[j] = summ
        # Update both components simultaneously (both use derivatives from the same old theta)
theta[0] += speed * derivatives[0]
theta[1] += speed * derivatives[1]
# ---------------------------------------------
current_cost = cost_function(A, Y, theta)
print("Batch cost:", current_cost)
plt.plot(theta[0], theta[1], 'ro')
return theta
def stochastic_descent(A, Y, speed=0.1):
theta = np.array(INITIAL_THETA.copy(), dtype=np.float32)
previous_cost = 10 ** 6
current_cost = cost_function(A, Y, theta)
while np.abs(previous_cost - current_cost) > EPS:
previous_cost = current_cost
# --------------------------------------
# for i in range(len(Y)):
i = np.random.randint(0, len(Y))
derivatives = [0] * len(theta)
for j in range(len(theta)):
derivatives[j] = (Y[i] - A[i]@theta) * A[i][j]
theta[0] += speed * derivatives[0]
theta[1] += speed * derivatives[1]
# --------------------------------------
current_cost = cost_function(A, Y, theta)
print("Stochastic cost:", current_cost)
plt.plot(theta[0], theta[1], 'ro')
return theta
X = np.arange(0, TOTAL * STEP, STEP)
Y = np.array([y for y in generate_sample(TOTAL)])
# Normalization enabled so that the paraboloid looks nice
X = (X - X.min()) / (X.max() - X.min())
A = np.empty((TOTAL, 2))
A[:, 0] = 1
A[:, 1] = X
theta = np.linalg.pinv(A).dot(Y)
print(theta, cost_function(A, Y, theta))
import time
start = time.clock()
theta_stochastic = stochastic_descent(A, Y, 0.1)
print("St:", time.clock() - start, theta_stochastic)
start = time.clock()
theta_batch = batch_descent(A, Y, 0.001)
print("Btch:", time.clock() - start, theta_batch)
| [
"numpy.abs",
"numpy.linalg.pinv",
"time.clock",
"matplotlib.pyplot.plot",
"numpy.empty",
"numpy.random.uniform",
"numpy.arange"
]
| [((2136, 2168), 'numpy.arange', 'np.arange', (['(0)', '(TOTAL * STEP)', 'STEP'], {}), '(0, TOTAL * STEP, STEP)\n', (2145, 2168), True, 'import numpy as np\n'), ((2319, 2339), 'numpy.empty', 'np.empty', (['(TOTAL, 2)'], {}), '((TOTAL, 2))\n', (2327, 2339), True, 'import numpy as np\n'), ((2460, 2472), 'time.clock', 'time.clock', ([], {}), '()\n', (2470, 2472), False, 'import time\n'), ((2584, 2596), 'time.clock', 'time.clock', ([], {}), '()\n', (2594, 2596), False, 'import time\n'), ((610, 646), 'numpy.abs', 'np.abs', (['(previous_cost - current_cost)'], {}), '(previous_cost - current_cost)\n', (616, 646), True, 'import numpy as np\n'), ((1259, 1293), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[0]', 'theta[1]', '"""ro"""'], {}), "(theta[0], theta[1], 'ro')\n", (1267, 1293), True, 'import matplotlib.pyplot as plt\n'), ((1499, 1535), 'numpy.abs', 'np.abs', (['(previous_cost - current_cost)'], {}), '(previous_cost - current_cost)\n', (1505, 1535), True, 'import numpy as np\n'), ((2079, 2113), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[0]', 'theta[1]', '"""ro"""'], {}), "(theta[0], theta[1], 'ro')\n", (2087, 2113), True, 'import matplotlib.pyplot as plt\n'), ((2373, 2390), 'numpy.linalg.pinv', 'np.linalg.pinv', (['A'], {}), '(A)\n', (2387, 2390), True, 'import numpy as np\n'), ((2535, 2547), 'time.clock', 'time.clock', ([], {}), '()\n', (2545, 2547), False, 'import time\n'), ((2653, 2665), 'time.clock', 'time.clock', ([], {}), '()\n', (2663, 2665), False, 'import time\n'), ((246, 270), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (263, 270), True, 'import numpy as np\n'), ((273, 296), 'numpy.random.uniform', 'np.random.uniform', (['(2)', '(8)'], {}), '(2, 8)\n', (290, 296), True, 'import numpy as np\n')] |
import castle
from typing import Tuple
def select_player_types() -> Tuple[castle.PlayerType, castle.PlayerType]:
player1, player2 = None, None
while True:
print(f'1) Play a person')
print(f'2) Play the computer')
print(f'3) Play the computer against itself')
choice_str = input(f'Select an option: ')
try:
choice = int(choice_str)
if choice not in [1, 2, 3]:
raise ValueError
except ValueError:
print('Invalid option.\n')
continue
if choice == 1:
player1 = castle.PlayerType.HUMAN
player2 = castle.PlayerType.HUMAN
elif choice == 2:
player1 = castle.PlayerType.HUMAN
player2 = castle.PlayerType.COMPUTER
elif choice == 3:
player1 = castle.PlayerType.COMPUTER
player2 = castle.PlayerType.COMPUTER
break
return player1, player2
def play_constructed_game(g: castle.Game):
g.board.pretty_print()
while not g.finished:
print(f'white short {g.can_castle(castle.Color.WHITE, True)}')
print(f'white long {g.can_castle(castle.Color.WHITE, False)}')
print(f'black short {g.can_castle(castle.Color.BLACK, True)}')
print(f'black long {g.can_castle(castle.Color.BLACK, False)}')
g.play_turn()
winning_prefix = f'Game over by '
if g.winner == castle.Winner.DRAW:
winning_prefix += 'stalemate'
else:
winning_prefix += 'checkmate'
winning_text = f'{winning_prefix}. Winner: {g.winner.name.title()}'
print(winning_text)
def play_game():
player1, player2 = select_player_types()
g = castle.Game(player1, player2)
play_constructed_game(g)
def test_perft():
g = castle.Game(castle.PlayerType.HUMAN, castle.PlayerType.HUMAN)
g.board.pretty_print()
for i in range(10):
print(f'perft({i}) = {g.perft(i)}')
def test_perft2():
g = castle.Game(castle.PlayerType.HUMAN, castle.PlayerType.HUMAN)
g.board.clear()
# https://sites.google.com/site/numptychess/perft/position-3
g.board.place_piece(castle.Piece(castle.PieceType.ROOK, castle.Color.BLACK), 'a8')
g.board.place_piece(castle.Piece(castle.PieceType.KING, castle.Color.BLACK), 'e8')
g.board.place_piece(castle.Piece(castle.PieceType.ROOK, castle.Color.BLACK), 'h8')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'a7')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'h7')
g.board.place_piece(castle.Piece(castle.PieceType.BISHOP, castle.Color.WHITE), 'a5')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'b4')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'c4')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.BLACK), 'e4')
g.board.place_piece(castle.Piece(castle.PieceType.BISHOP, castle.Color.BLACK), 'd3')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.WHITE), 'a2')
g.board.place_piece(castle.Piece(castle.PieceType.PAWN, castle.Color.WHITE), 'h2')
g.board.place_piece(castle.Piece(castle.PieceType.ROOK, castle.Color.WHITE), 'a1')
g.board.place_piece(castle.Piece(castle.PieceType.KING, castle.Color.WHITE), 'e1')
g.board.place_piece(castle.Piece(castle.PieceType.ROOK, castle.Color.WHITE), 'h1')
g.board.pretty_print()
for i in range(2):
print(f'perft({i}) = {g.perft(i)}')
def fen():
# f = castle.FenGameConstructor('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1')
game = 'rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1'
game = 'r3k2r/p6p/8/B7/1pp1p3/3b4/P6P/R3K2R w KQkq - 0 1'
game = '8/5p2/8/2k3P1/p3K3/8/1P6/8 b - - 0 1'
f = castle.FenGameConstructor(game)
return f.game
def main():
    print(f'Welcome to castle, a little chess engine.\n')
# test_perft()
g = fen()
print('returned')
g.print_perft(5)
play_constructed_game(g)
# play_game()
if __name__ == '__main__':
main()
| [
"castle.Piece",
"castle.Game",
"castle.FenGameConstructor"
]
| [((1694, 1723), 'castle.Game', 'castle.Game', (['player1', 'player2'], {}), '(player1, player2)\n', (1705, 1723), False, 'import castle\n'), ((1781, 1842), 'castle.Game', 'castle.Game', (['castle.PlayerType.HUMAN', 'castle.PlayerType.HUMAN'], {}), '(castle.PlayerType.HUMAN, castle.PlayerType.HUMAN)\n', (1792, 1842), False, 'import castle\n'), ((1967, 2028), 'castle.Game', 'castle.Game', (['castle.PlayerType.HUMAN', 'castle.PlayerType.HUMAN'], {}), '(castle.PlayerType.HUMAN, castle.PlayerType.HUMAN)\n', (1978, 2028), False, 'import castle\n'), ((3819, 3850), 'castle.FenGameConstructor', 'castle.FenGameConstructor', (['game'], {}), '(game)\n', (3844, 3850), False, 'import castle\n'), ((2138, 2193), 'castle.Piece', 'castle.Piece', (['castle.PieceType.ROOK', 'castle.Color.BLACK'], {}), '(castle.PieceType.ROOK, castle.Color.BLACK)\n', (2150, 2193), False, 'import castle\n'), ((2225, 2280), 'castle.Piece', 'castle.Piece', (['castle.PieceType.KING', 'castle.Color.BLACK'], {}), '(castle.PieceType.KING, castle.Color.BLACK)\n', (2237, 2280), False, 'import castle\n'), ((2312, 2367), 'castle.Piece', 'castle.Piece', (['castle.PieceType.ROOK', 'castle.Color.BLACK'], {}), '(castle.PieceType.ROOK, castle.Color.BLACK)\n', (2324, 2367), False, 'import castle\n'), ((2399, 2454), 'castle.Piece', 'castle.Piece', (['castle.PieceType.PAWN', 'castle.Color.BLACK'], {}), '(castle.PieceType.PAWN, castle.Color.BLACK)\n', (2411, 2454), False, 'import castle\n'), ((2486, 2541), 'castle.Piece', 'castle.Piece', (['castle.PieceType.PAWN', 'castle.Color.BLACK'], {}), '(castle.PieceType.PAWN, castle.Color.BLACK)\n', (2498, 2541), False, 'import castle\n'), ((2573, 2630), 'castle.Piece', 'castle.Piece', (['castle.PieceType.BISHOP', 'castle.Color.WHITE'], {}), '(castle.PieceType.BISHOP, castle.Color.WHITE)\n', (2585, 2630), False, 'import castle\n'), ((2662, 2717), 'castle.Piece', 'castle.Piece', (['castle.PieceType.PAWN', 'castle.Color.BLACK'], {}), '(castle.PieceType.PAWN, castle.Color.BLACK)\n', (2674, 2717), False, 'import castle\n'), ((2749, 2804), 'castle.Piece', 'castle.Piece', (['castle.PieceType.PAWN', 'castle.Color.BLACK'], {}), '(castle.PieceType.PAWN, castle.Color.BLACK)\n', (2761, 2804), False, 'import castle\n'), ((2836, 2891), 'castle.Piece', 'castle.Piece', (['castle.PieceType.PAWN', 'castle.Color.BLACK'], {}), '(castle.PieceType.PAWN, castle.Color.BLACK)\n', (2848, 2891), False, 'import castle\n'), ((2923, 2980), 'castle.Piece', 'castle.Piece', (['castle.PieceType.BISHOP', 'castle.Color.BLACK'], {}), '(castle.PieceType.BISHOP, castle.Color.BLACK)\n', (2935, 2980), False, 'import castle\n'), ((3012, 3067), 'castle.Piece', 'castle.Piece', (['castle.PieceType.PAWN', 'castle.Color.WHITE'], {}), '(castle.PieceType.PAWN, castle.Color.WHITE)\n', (3024, 3067), False, 'import castle\n'), ((3099, 3154), 'castle.Piece', 'castle.Piece', (['castle.PieceType.PAWN', 'castle.Color.WHITE'], {}), '(castle.PieceType.PAWN, castle.Color.WHITE)\n', (3111, 3154), False, 'import castle\n'), ((3186, 3241), 'castle.Piece', 'castle.Piece', (['castle.PieceType.ROOK', 'castle.Color.WHITE'], {}), '(castle.PieceType.ROOK, castle.Color.WHITE)\n', (3198, 3241), False, 'import castle\n'), ((3273, 3328), 'castle.Piece', 'castle.Piece', (['castle.PieceType.KING', 'castle.Color.WHITE'], {}), '(castle.PieceType.KING, castle.Color.WHITE)\n', (3285, 3328), False, 'import castle\n'), ((3360, 3415), 'castle.Piece', 'castle.Piece', (['castle.PieceType.ROOK', 'castle.Color.WHITE'], {}), '(castle.PieceType.ROOK, castle.Color.WHITE)\n', (3372, 3415), 
False, 'import castle\n')] |
import random
import numpy as np
import itertools
import re
from collections import defaultdict
import os
def get_tags(s, open_delim='<', close_delim='/>'):
"""Iterator to spit out the xml style disfluency tags in a given string.
Keyword arguments:
s -- input string
"""
while True:
# Search for the next two delimiters in the source text
start = s.find(open_delim)
end = s.find(close_delim)
# We found a non-empty match
if -1 < start < end:
# Skip the length of the open delimiter
start += len(open_delim)
# Spit out the tag
yield open_delim + s[start:end].strip() + close_delim
# Truncate string to start from last match
s = s[end+len(close_delim):]
else:
return
def remove_uttseg_tag(tag):
tags = get_tags(tag)
final_tag = ""
for t in tags:
m = re.search(r'<[ct]*/>', t)
if m:
continue
final_tag += t
return final_tag
def convert_to_simple_label(tag, rep="disf1_uttseg"):
"""Takes the complex tag set and gives back the simple,
    smaller version with ten tags (enumerated in `convert_to_simple_idx` below):
"""
disftag = "<f/>"
if "<rm-" in tag:
disftag = "<rm-0/>"
elif "<e" in tag:
disftag = "<e/>"
if "uttseg" in rep: # if combined task with TTO
m = re.search(r'<[ct]*/>', tag)
if m:
return disftag + m.group(0)
else:
print("WARNING NO TAG", +tag)
return ""
return disftag # if not TT0
def convert_to_simple_idx(tag, rep='1_trp'):
tag = convert_to_simple_label(tag, rep)
simple_tags = """<e/><cc/>
<e/><ct/>
<e/><tc/>
<e/><tt/>
<f/><cc/>
<f/><ct/>
<f/><tc/>
<f/><tt/>
<rm-0/><cc/>
<rm-0/><ct/>""".split("\n")
simple_tag_dict = {}
for s in range(0, len(simple_tags)):
simple_tag_dict[simple_tags[s].strip()] = s
return simple_tag_dict[tag]
def convert_from_full_tag_set_to_idx(tag, rep, idx_to_label):
"""Maps from the full tag set of trp repairs to the new dictionary"""
if "simple" in rep:
tag = convert_to_simple_label(tag)
for k, v in idx_to_label.items():
if v in tag: # a substring relation
return k
def add_word_continuation_tags(tags):
"""In place, add a continutation tag to each word:
<cc/> -word continues current dialogue act and the next word will also
continue it
<ct/> -word continues current dialogue act and is the last word of it
<tc/> -word starts this dialogue act tag and the next word continues it
<tt/> -word starts and ends dialogue act (single word dialogue act)
"""
tags = list(tags)
for i in range(0, len(tags)):
if i == 0:
tags[i] = tags[i] + "<t"
else:
tags[i] = tags[i] + "<c"
if i == len(tags)-1:
tags[i] = tags[i] + "t/>"
else:
tags[i] = tags[i] + "c/>"
return tags
def verify_disfluency_tags(tags, normalize_ID=False):
"""Check that the repair tags sequence is valid.
Keyword arguments:
normalize_ID -- boolean, whether to convert the repair ID
numbers to be derivable from their unique RPS position in the utterance.
"""
id_map = dict() # map between old ID and new ID
# in first pass get old and new IDs
for i in range(0, len(tags)):
rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[i])
if rps:
id_map[rps[0][rps[0].find("=")+2:-3]] = str(i)
# key: old repair ID, value, list [reparandum,interregnum,repair]
# all True when repair is all there
repairs = defaultdict(list)
for r in id_map.keys():
repairs[r] = [None, None, None] # three valued None<False<True
# print(repairs)
# second pass verify the validity of the tags
# and (optionally) modify the IDs
for i in range(0, len(tags)): # iterate over all tag strings
new_tags = []
if tags[i] == "":
assert(all([repairs[ID][2] or
repairs[ID] == [None, None, None]
for ID in repairs.keys()])),\
"Unresolved repairs at fluent tag\n\t" + str(repairs)
for tag in get_tags(tags[i]): # iterate over all tags
# print(i)
# print(tag)
if tag == "<e/>":
new_tags.append(tag)
continue
ID = tag[tag.find("=")+2:-3]
if "<rms" in tag:
assert repairs[ID][0] == None,\
"reparandum started parsed more than once " + ID
assert repairs[ID][1] == None,\
"reparandum start again during interregnum phase " + ID
assert repairs[ID][2] == None,\
"reparandum start again during repair phase " + ID
repairs[ID][0] = False # set in progress
elif "<rm " in tag:
assert repairs[ID][0] != None,\
"mid reparandum tag before reparandum start " + ID
                assert repairs[ID][1] == None,\
                    "mid reparandum tag in an interregnum phase or beyond " + ID
assert repairs[ID][2] == None,\
"mid reparandum tag in a repair phase or beyond " + ID
elif "<i" in tag:
assert repairs[ID][0] != None,\
"interregnum start before reparandum start " + ID
assert repairs[ID][2] == None,\
"interregnum in a repair phase " + ID
if repairs[ID][1] == None: # interregnum not reached yet
repairs[ID][0] = True # reparandum completed
repairs[ID][1] = False # interregnum in progress
elif "<rps" in tag:
assert repairs[ID][0] != None,\
"repair start before reparandum start " + ID
assert repairs[ID][1] != True,\
"interregnum over before repair start " + ID
assert repairs[ID][2] == None,\
"repair start parsed twice " + ID
                repairs[ID][0] = True # reparandum complete
repairs[ID][1] = True # interregnum complete
repairs[ID][2] = False # repair in progress
elif "<rp " in tag:
assert repairs[ID][0] == True,\
"mid repair word start before reparandum end " + ID
assert repairs[ID][1] == True,\
"mid repair word start before interregnum end " + ID
assert repairs[ID][2] == False,\
"mid repair tag before repair start tag " + ID
elif "<rpn" in tag:
# make sure the rps is order in tag string is before
assert repairs[ID][0] == True,\
"repair end before reparandum end " + ID
assert repairs[ID][1] == True,\
"repair end before interregnum end " + ID
assert repairs[ID][2] == False,\
"repair end before repair start " + ID
repairs[ID][2] = True
# do the replacement of the tag's ID after checking
new_tags.append(tag.replace(ID, id_map[ID]))
if normalize_ID:
tags[i] = "".join(new_tags)
assert all([repairs[ID][2] for ID in repairs.keys()]),\
"Unresolved repairs:\n\t" + str(repairs)
def shuffle(lol, seed):
"""Shuffle inplace each list in the same order.
lol :: list of list as input
seed :: seed the shuffling
"""
for l in lol:
random.seed(seed)
random.shuffle(l)
def minibatch(l, bs):
"""Returns a list of minibatches of indexes
    whose size is at most bs
border cases are treated as follow:
eg: [0,1,2,3] and bs = 3
will output:
[[0],[0,1],[0,1,2],[1,2,3]]
l :: list of word idxs
"""
out = [l[:i] for i in xrange(1, min(bs, len(l)+1))]
out += [l[i-bs:i] for i in xrange(bs, len(l)+1)]
assert len(l) == len(out)
return out
def indices_from_length(sentence_length, bs, start_index=0):
"""Return a list of indexes pairs (start/stop) for each word
max difference between start and stop equal to bs
border cases are treated as follow:
eg: sentenceLength=4 and bs = 3
will output:
[[0,0],[0,1],[0,2],[1,3]]
"""
l = map(lambda x: start_index+x, xrange(sentence_length))
out = []
for i in xrange(0, min(bs, len(l))):
out.append([l[0], l[i]])
for i in xrange(bs+1, len(l)+1):
out.append([l[i-bs], l[i-1]])
assert len(l) == sentence_length
return out
def context_win(l, win):
"""Return a list of list of indexes corresponding
to context windows surrounding each word in the sentence
given a list of indexes composing a sentence.
win :: int corresponding to the size of the window
"""
assert (win % 2) == 1
assert win >= 1
l = list(l)
lpadded = win/2 * [-1] + l + win/2 * [-1]
out = [lpadded[i:i+win] for i in range(len(l))]
assert len(out) == len(l)
return out
def context_win_backwards(l, win):
    '''Same as context_win except only backwards context
(i.e. like an n-gram model)
'''
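    # Illustrative behaviour (with -1 as the padding index):
    #   context_win_backwards([3, 4], 3)  ->  [[-1, -1, 3], [-1, 3, 4]]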
assert win >= 1
l = list(l)
lpadded = (win-1) * [-1] + l
out = [lpadded[i: i+win] for i in range(len(l))]
assert len(out) == len(l)
return out
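# Illustrative example (not part of the original source; the input indices are
# arbitrary): context_win_backwards([7, 8, 9], 2) pads only on the left, giving
# [[-1, 7], [7, 8], [8, 9]].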
def corpus_to_indexed_matrix(my_array_list, win, bs, sentence=False):
"""Returns a matrix of contextwins for a list of utterances of
dimensions win * n_words_in_corpus
(i.e. total length of all arrays in my_array_list)
and corresponding matrix of indexes (of just start/stop for each one)
so 2 * n_words_in_corpus
of where to access these, using bs (backprop distance)
as the limiting history size
"""
sentences = [] # a list (of arrays, or lists?), returned as matrix
indices = [] # a list of index pairs (arrays?), returned as matrix
totalSize = 0
if sentence:
for sent in my_array_list:
mysent = np.asarray([-1] * (bs-1) + list(sent)) # padding with eos
# get list of context windows
mywords = context_win_backwards(mysent, win)
# just one per utterance for now..
cindices = [[totalSize, totalSize+len(mywords)-1]]
cwords = []
for i in range(bs, len(mywords)+1):
words = list(itertools.chain(*mywords[(i-bs):i]))
cwords.append(words) # always (bs * n) words long
# print cwords
sentences.extend(cwords)
indices.extend(cindices)
totalSize += len(cwords)
else:
for sentence in my_array_list:
# get list of context windows
cwords = context_win_backwards(sentence, win)
cindices = indices_from_length(len(cwords), bs, totalSize)
indices.extend(cindices)
sentences.extend(cwords)
totalSize += len(cwords)
for s in sentences:
if any([x is None for x in s]):
print(s)
return np.matrix(sentences, dtype='int32'), indices
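# Illustrative example (not part of the original source; the word indices are
# arbitrary): corpus_to_indexed_matrix([[4, 5, 6]], win=2, bs=3) returns the matrix
# built from context_win_backwards([4, 5, 6], 2) == [[-1, 4], [4, 5], [5, 6]]
# together with the start/stop pairs indices_from_length(3, 3, 0)
# == [[0, 0], [0, 1], [0, 2]].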
def convert_from_eval_tags_to_inc_disfluency_tags(tags, words,
representation="disf1",
limit=8):
"""Conversion from disfluency tagged corpus with xml-style tags
as from STIR (https://bitbucket.org/julianhough/stir)
to the strictly left-to-right schemas as
described by Hough and Schlangen 2015 Interspeech paper,
which are used by RNN architectures at runtime.
Keyword arguments:
tags -- the STIR eval style disfluency tags
words -- the words in the utterance
representation -- the number corresponding to the type of tagging system
1=standard, 2=rm-N values where N does not count intervening edit terms
3=same as 2 but with a 'c' tag after edit terms have ended.
limit -- the limit on the distance back from the repair start
"""
repair_dict = defaultdict(list)
new_tags = []
# print("tags")
# print(tags)
# print('words')
# print(words)
for t in range(0, len(tags)):
if "uttseg" in representation:
m = re.search(r'<[ct]*/>', tags[t])
if m:
TTO_tag = m.group(0)
tags[t] = tags[t].replace(TTO_tag, "")
if "dact" in representation:
m = re.search(r'<diact type="[^\s]*"/>', tags[t])
if m:
dact_tag = m.group(0)
tags[t] = tags[t].replace(dact_tag, "")
if "laugh" in representation:
m = re.search(r'<speechLaugh/>|<laughter/>', tags[t])
if m:
laughter_tag = m.group(0)
else:
laughter_tag = "<nolaughter/>"
tags[t] = tags[t].replace(laughter_tag, "")
current_tag = ""
if "<e/>" in tags[t] or "<i" in tags[t]:
current_tag = "<e/>" # TODO may make this an interregnum
if "<rms" in tags[t]:
rms = re.findall("<rms id\=\"[0-9]+\"\/>", tags[t], re.S)
for r in rms:
repairID = r[r.find("=")+2:-3]
repair_dict[repairID] = [t, 0]
if "<rps" in tags[t]:
rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[t], re.S)
for r in rps:
repairID = r[r.find("=")+2:-3]
# print('repairID')
# print(repairID)
# print(repair_dict.get(repairID))
# print(str(repairID)+str(tags)+str(words))
assert repair_dict.get(repairID), str(repairID)+str(tags)+str(words)
repair_dict[repairID][1] = t
dist = min(t-repair_dict[repairID][0], limit)
# adjust in case the reparandum is shortened due to the limit
repair_dict[repairID][0] = t-dist
current_tag += "<rm-{}/>".format(dist) + "<rpMid/>"
if "<rpn" in tags[t]:
rpns = re.findall("<rpnrep id\=\"[0-9]+\"\/>", tags[t], re.S) +\
re.findall("<rpnsub id\=\"[0-9]+\"\/>", tags[t], re.S)
rpns_del = re.findall("<rpndel id\=\"[0-9]+\"\/>", tags[t], re.S)
# slight simplifying assumption is to take the repair with
# the longest reparandum as the end category
repair_type = ""
longestlength = 0
for r in rpns:
repairID = r[r.find("=")+2:-3]
l = repair_dict[repairID]
if l[1]-l[0] > longestlength:
longestlength = l[1]-l[0]
repair_type = "Sub"
for r in rpns_del:
repairID = r[r.find("=")+2:-3]
l = repair_dict[repairID]
if l[1]-l[0] > longestlength:
longestlength = l[1]-l[0]
repair_type = "Del"
if repair_type == "":
raise Exception("Repair not passed \
correctly."+str(words)+str(tags))
current_tag += "<rpEnd"+repair_type+"/>"
current_tag = current_tag.replace("<rpMid/>", "")
if current_tag == "":
current_tag = "<f/>"
if "uttseg" in representation:
current_tag += TTO_tag
if "dact" in representation:
current_tag += dact_tag
if "laugh" in representation:
current_tag += laughter_tag
new_tags.append(current_tag)
return new_tags
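# Illustrative example (not part of the original source), using the utterance from
# the __main__ block at the bottom of this file: for representation="disf1" the
# eval-style tags of "i like uh love to uh love alot" convert to
# ['<f/>', '<f/>', '<e/>', '<rm-2/><rpEndSub/>', '<f/>', '<e/>', '<f/>', '<f/>'],
# i.e. the repair onset "love" carries <rm-2/>, pointing two words back to the
# reparandum start "like", while the edit term "uh" keeps its <e/> tag.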
def convert_from_inc_disfluency_tags_to_eval_tags(
tags, words,
start=0,
representation="disf1_uttseg"):
"""Converts the incremental style output tags of the RNN to the standard
STIR eval output tags.
The exact inverse of convertFromEvalTagsToIncrementalDisfluencyTags.
Keyword arguments:
tags -- the RNN style disfluency tags
words -- the words in the utterance
start -- position from where to begin changing the tags from
representation -- the number corresponding to the type of tagging system,
1=standard, 2=rm-N values where N does not count intervening edit terms
3=same as 2 but with a 'c' tag after edit terms have ended.
"""
# maps from the repair ID to a list of
# [reparandumStart,repairStart,repairOver]
repair_dict = defaultdict(list)
new_tags = []
if start > 0:
# assuming the tags up to this point are already converted
new_tags = tags[:start]
if "mid" not in representation:
rps_s = re.findall("<rps id\=\"[0-9]+\"\/>", tags[start-1])
rpmid = re.findall("<rp id\=\"[0-9]+\"\/>", tags[start-1])
if rps_s:
for r in rps_s:
repairID = r[r.find("=")+2:-3]
resolved_repair = re.findall(
"<rpn[repsubdl]+ id\=\"{}\"\/>"
.format(repairID), tags[start-1])
if not resolved_repair:
if not rpmid:
rpmid = []
rpmid.append(r.replace("rps ", "rp "))
if rpmid:
newstart = start-1
for rp in rpmid:
rps = rp.replace("rp ", "rps ")
repairID = rp[rp.find("=")+2:-3]
# go back and find the repair
for b in range(newstart, -1, -1):
if rps in tags[b]:
repair_dict[repairID] = [b, b, False]
break
for t in range(start, len(tags)):
current_tag = ""
if "uttseg" in representation:
m = re.search(r'<[ct]*/>', tags[t])
if m:
TTO_tag = m.group(0)
if "<e/>" in tags[t] or "<i/>" in tags[t]:
current_tag = "<e/>"
if "<rm-" in tags[t]:
rps = re.findall("<rm-[0-9]+\/>", tags[t], re.S)
for r in rps: # should only be one
current_tag += '<rps id="{}"/>'.format(t)
# print t-dist
if "simple" in representation:
# simply tagging the rps
pass
else:
dist = int(r[r.find("-")+1:-2])
repair_dict[str(t)] = [max([0, t-dist]), t, False]
# backwards looking search if full set
# print new_tags, t, dist, t-dist, max([0, t-dist])
# print tags[:t+1]
rms_start_idx = max([0, t-dist])
new_tags[rms_start_idx] = '<rms id="{}"/>'\
.format(t) + new_tags[rms_start_idx]\
.replace("<f/>", "")
reparandum = False # interregnum if edit term
for b in range(t-1, max([0, t-dist]), -1):
if "<e" not in new_tags[b]:
reparandum = True
new_tags[b] = '<rm id="{}"/>'.format(t) +\
new_tags[b].replace("<f/>", "")
if reparandum is False and "<e" in new_tags[b]:
new_tags[b] = '<i id="{}"/>'.\
format(t) + new_tags[b]
# repair ends
if "<rpEnd" in tags[t]:
rpns = re.findall("<rpEndSub/>", tags[t], re.S)
rpns_del = re.findall("<rpEndDel/>", tags[t], re.S)
rpnAll = rpns + rpns_del
if rpnAll:
for k, v in repair_dict.items():
if t >= int(k) and v[2] is False:
repair_dict[k][2] = True
# classify the repair
if rpns_del: # a delete
current_tag += '<rpndel id="{}"/>'.format(k)
rpns_del.pop(0)
continue
reparandum = [words[i] for i in range(0, len(new_tags))
if '<rms id="{}"/>'.
format(k) in new_tags[i] or
'<rm id="{}"/>'.
format(k) in new_tags[i]]
repair = [words[i] for i in range(0, len(new_tags))
if '<rps id="{}"/>'.format(k)
in new_tags[i] or '<rp id="{}"/>'.format(k)
in new_tags[i]] + [words[t]]
if reparandum == repair:
current_tag += '<rpnrep id="{}"/>'.format(k)
else:
current_tag += '<rpnsub id="{}"/>'.format(k)
# mid repair phases still in progress
for k, v in repair_dict.items():
if t > int(k) and v[2] is False:
current_tag += '<rp id="{}"/>'.format(k)
if current_tag == "":
current_tag = "<f/>"
if "uttseg" in representation:
current_tag += TTO_tag
new_tags.append(current_tag)
return new_tags
def verify_dialogue_data_matrix(dialogue_data_matrix, word_dict=None,
pos_dict=None, tag_dict=None, n_lm=0,
n_acoustic=0):
"""Boolean check of whether dialogue data consistent
with args. Checks all idxs are valid and number of features is correct.
Standard form of each row of the matrix should be:
utt_index, word_idx, pos_idx, word_duration,
acoustic_feats.., lm_feats....,label
"""
l = 3 + n_acoustic + n_lm + 1 # row length
try:
for i, row in enumerate(dialogue_data_matrix):
assert len(row) == l,\
"row {} wrong length {}, should be {}".format(i, len(row), l)
assert word_dict[row[1]] is not None,\
"row[1][{}] {} not in word dict".format(i, row[1])
assert pos_dict[row[2]] is not None,\
"row[2][{}] {} not in POS dict".format(i, row[2])
assert tag_dict[row[-1]] is not None,\
"row[-1][{}] {} not in tag dict".format(i, row[-1])
except AssertionError as a:
print(a)
return False
return True
def verify_dialogue_data_matrices_from_folder(matrices_folder_filepath,
word_dict=None,
pos_dict=None,
tag_dict=None,
n_lm=0,
n_acoustic=0):
"""A boolean check that the dialogue matrices make sense for the
particular configuration in args and tag2idx dicts.
"""
for dialogue_file in os.listdir(matrices_folder_filepath):
v = np.load(matrices_folder_filepath + "/" + dialogue_file,allow_pickle=True)
if not verify_dialogue_data_matrix(v,
word_dict=word_dict,
pos_dict=pos_dict,
tag_dict=tag_dict,
n_lm=n_lm,
n_acoustic=n_acoustic):
# print"{} failed test".format(dialogue_file)
return False
return True
def dialogue_data_and_indices_from_matrix(d_matrix,
n_extra,
pre_seg=False,
window_size=2,
bs=9,
tag_rep="disf1_uttseg",
tag_to_idx_map=None,
in_utterances=False):
"""Transforming from input format of row:
utt_index, word_idx, pos_idx, word_duration,
acoustic_feats.., lm_feats....,label
to 5-tuple of:
word_idx, pos_idx, extra, labels, indices
where :word_idx: and :pos_idx: have the correct window context
according to @window_size
and :indices: is the start and stop points for consumption by the
net in training for each label in :labels:. :extra: is the matrix
of extra features.
"""
if len(d_matrix)==0:
return
utt_indices = d_matrix[:, 0]
words = d_matrix[:, 1]
pos = d_matrix[:, 2]
extra = None if n_extra == 0 else d_matrix[:, 3: -1]
labels = d_matrix[:, -1]
word_idx = []
pos_idx = []
current = []
indices = []
previous_idx = -1
for i, a_tuple in enumerate(zip(utt_indices, words, pos, labels)):
utt_idx, w, p, l = a_tuple
# print(w)
current.append((w, p, l))
if pre_seg:
if previous_idx != utt_idx or i == len(labels)-1:
if in_utterances:
start = 0 if indices == [] else indices[-1][1]+1
indices.append([start, start + (len(current)-1)])
else:
indices.extend(indices_from_length(len(current), bs,
start_index=len(indices)))
word_idx.extend(context_win_backwards([x[0] for x in current],
window_size))
pos_idx.extend(context_win_backwards([x[1] for x in current],
window_size))
current = []
# print('final')
# print(w)
# print(word_idx)
elif i == len(labels)-1:
# indices = indices_from_length(len(current), bs)
# currently a simple window of same size
indices = [[j, j + bs] for j in range(0, len(current))]
padding = [[-1, -1]] * (bs - window_size)
word_idx = padding + context_win_backwards([x[0] for x in current],
window_size)
pos_idx = padding + context_win_backwards([x[1] for x in current],
window_size)
previous_idx = utt_idx
# print(pos_idx)
# print(word_idx)
# print(extra)
# print(labels)
# print(indices)
# return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx,
# dtype=np.int32),\
# labels,\
# np.asarray(indices, dtype=np.int32)
return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx,
dtype=np.int32),\
extra,\
labels,\
np.asarray(indices, dtype=np.int32)
if __name__ == '__main__':
tags = '<f/>,<rms id="3"/>,<i id="3"/><e/>,<rps id="3"/>' +\
'<rpnsub id="3"/>,<f/>,<e/>,<f/>,' + \
'<f/>'
tags = tags.split(",")
words = "i,like,uh,love,to,uh,love,alot".split(",")
# print(tags)
# print(len(tags))
# print(len(words))
new_tags = convert_from_eval_tags_to_inc_disfluency_tags(
tags,
words,
representation="disf1")
# print(new_tags)
old_tags = convert_from_inc_disfluency_tags_to_eval_tags(
new_tags,
words,
representation="disf1")
assert old_tags == tags, "\n " + str(old_tags) + "\n" + str(tags)
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
# print(context_win_backwards(x, 2))
# print "indices", indices_from_length(11, 9)
| [
"itertools.chain",
"os.listdir",
"random.shuffle",
"numpy.asarray",
"random.seed",
"numpy.load",
"collections.defaultdict",
"re.findall",
"numpy.matrix",
"re.search"
]
| [((3680, 3697), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3691, 3697), False, 'from collections import defaultdict\n'), ((12142, 12159), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12153, 12159), False, 'from collections import defaultdict\n'), ((16551, 16568), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (16562, 16568), False, 'from collections import defaultdict\n'), ((23086, 23122), 'os.listdir', 'os.listdir', (['matrices_folder_filepath'], {}), '(matrices_folder_filepath)\n', (23096, 23122), False, 'import os\n'), ((925, 949), 're.search', 're.search', (['"""<[ct]*/>"""', 't'], {}), "('<[ct]*/>', t)\n", (934, 949), False, 'import re\n'), ((1372, 1398), 're.search', 're.search', (['"""<[ct]*/>"""', 'tag'], {}), "('<[ct]*/>', tag)\n", (1381, 1398), False, 'import re\n'), ((3435, 3480), 're.findall', 're.findall', (['"""<rps id\\\\="[0-9]+"\\\\/>"""', 'tags[i]'], {}), '(\'<rps id\\\\="[0-9]+"\\\\/>\', tags[i])\n', (3445, 3480), False, 'import re\n'), ((7678, 7695), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7689, 7695), False, 'import random\n'), ((7704, 7721), 'random.shuffle', 'random.shuffle', (['l'], {}), '(l)\n', (7718, 7721), False, 'import random\n'), ((11193, 11228), 'numpy.matrix', 'np.matrix', (['sentences'], {'dtype': '"""int32"""'}), "(sentences, dtype='int32')\n", (11202, 11228), True, 'import numpy as np\n'), ((23136, 23210), 'numpy.load', 'np.load', (["(matrices_folder_filepath + '/' + dialogue_file)"], {'allow_pickle': '(True)'}), "(matrices_folder_filepath + '/' + dialogue_file, allow_pickle=True)\n", (23143, 23210), True, 'import numpy as np\n'), ((26927, 26963), 'numpy.asarray', 'np.asarray', (['word_idx'], {'dtype': 'np.int32'}), '(word_idx, dtype=np.int32)\n', (26937, 26963), True, 'import numpy as np\n'), ((26965, 27000), 'numpy.asarray', 'np.asarray', (['pos_idx'], {'dtype': 'np.int32'}), '(pos_idx, dtype=np.int32)\n', (26975, 27000), True, 'import numpy as np\n'), ((27240, 27275), 'numpy.asarray', 'np.asarray', (['indices'], {'dtype': 'np.int32'}), '(indices, dtype=np.int32)\n', (27250, 27275), True, 'import numpy as np\n'), ((12345, 12375), 're.search', 're.search', (['"""<[ct]*/>"""', 'tags[t]'], {}), "('<[ct]*/>', tags[t])\n", (12354, 12375), False, 'import re\n'), ((12536, 12581), 're.search', 're.search', (['"""<diact type="[^\\\\s]*"/>"""', 'tags[t]'], {}), '(\'<diact type="[^\\\\s]*"/>\', tags[t])\n', (12545, 12581), False, 'import re\n'), ((12748, 12796), 're.search', 're.search', (['"""<speechLaugh/>|<laughter/>"""', 'tags[t]'], {}), "('<speechLaugh/>|<laughter/>', tags[t])\n", (12757, 12796), False, 'import re\n'), ((13171, 13222), 're.findall', 're.findall', (['"""<rms id\\\\="[0-9]+"\\\\/>"""', 'tags[t]', 're.S'], {}), '(\'<rms id\\\\="[0-9]+"\\\\/>\', tags[t], re.S)\n', (13181, 13222), False, 'import re\n'), ((13391, 13442), 're.findall', 're.findall', (['"""<rps id\\\\="[0-9]+"\\\\/>"""', 'tags[t]', 're.S'], {}), '(\'<rps id\\\\="[0-9]+"\\\\/>\', tags[t], re.S)\n', (13401, 13442), False, 'import re\n'), ((14283, 14337), 're.findall', 're.findall', (['"""<rpndel id\\\\="[0-9]+"\\\\/>"""', 'tags[t]', 're.S'], {}), '(\'<rpndel id\\\\="[0-9]+"\\\\/>\', tags[t], re.S)\n', (14293, 14337), False, 'import re\n'), ((16764, 16817), 're.findall', 're.findall', (['"""<rps id\\\\="[0-9]+"\\\\/>"""', 'tags[start - 1]'], {}), '(\'<rps id\\\\="[0-9]+"\\\\/>\', tags[start - 1])\n', (16774, 16817), False, 'import re\n'), ((16836, 16888), 're.findall', 're.findall', 
(['"""<rp id\\\\="[0-9]+"\\\\/>"""', 'tags[start - 1]'], {}), '(\'<rp id\\\\="[0-9]+"\\\\/>\', tags[start - 1])\n', (16846, 16888), False, 'import re\n'), ((17940, 17970), 're.search', 're.search', (['"""<[ct]*/>"""', 'tags[t]'], {}), "('<[ct]*/>', tags[t])\n", (17949, 17970), False, 'import re\n'), ((18159, 18202), 're.findall', 're.findall', (['"""<rm-[0-9]+\\\\/>"""', 'tags[t]', 're.S'], {}), "('<rm-[0-9]+\\\\/>', tags[t], re.S)\n", (18169, 18202), False, 'import re\n'), ((19630, 19670), 're.findall', 're.findall', (['"""<rpEndSub/>"""', 'tags[t]', 're.S'], {}), "('<rpEndSub/>', tags[t], re.S)\n", (19640, 19670), False, 'import re\n'), ((19694, 19734), 're.findall', 're.findall', (['"""<rpEndDel/>"""', 'tags[t]', 're.S'], {}), "('<rpEndDel/>', tags[t], re.S)\n", (19704, 19734), False, 'import re\n'), ((14134, 14188), 're.findall', 're.findall', (['"""<rpnrep id\\\\="[0-9]+"\\\\/>"""', 'tags[t]', 're.S'], {}), '(\'<rpnrep id\\\\="[0-9]+"\\\\/>\', tags[t], re.S)\n', (14144, 14188), False, 'import re\n'), ((14205, 14259), 're.findall', 're.findall', (['"""<rpnsub id\\\\="[0-9]+"\\\\/>"""', 'tags[t]', 're.S'], {}), '(\'<rpnsub id\\\\="[0-9]+"\\\\/>\', tags[t], re.S)\n', (14215, 14259), False, 'import re\n'), ((10524, 10559), 'itertools.chain', 'itertools.chain', (['*mywords[i - bs:i]'], {}), '(*mywords[i - bs:i])\n', (10539, 10559), False, 'import itertools\n')] |
# Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import re
try:
import cPickle
except ImportError:
import pickle as cPickle
class NoSuchJobException( Exception ):
def __init__( self, jobStoreID ):
super( NoSuchJobException, self ).__init__( "The job '%s' does not exist" % jobStoreID )
class ConcurrentFileModificationException( Exception ):
def __init__( self, jobStoreFileID ):
super( ConcurrentFileModificationException, self ).__init__(
'Concurrent update to file %s detected.' % jobStoreFileID )
class NoSuchFileException( Exception ):
def __init__( self, fileJobStoreID ):
super( NoSuchFileException, self ).__init__( "The file '%s' does not exist" % fileJobStoreID )
class JobStoreCreationException( Exception ):
def __init__( self, message ):
super( JobStoreCreationException, self ).__init__( message )
class AbstractJobStore( object ):
"""
Represents the physical storage for the jobs and associated files in a toil.
"""
__metaclass__ = ABCMeta
def __init__( self, config=None ):
"""
:param config: If config is not None then the
given configuration object will be written to the shared file "config.pickle" which can
later be retrieved using the readSharedFileStream. See writeConfigToStore.
If this file already exists it will be overwritten. If config is None,
the shared file "config.pickle" is assumed to exist and is retrieved. See loadConfigFromStore.
"""
#Now get on with reading or writing the config
if config is None:
with self.readSharedFileStream( "config.pickle", isProtected=False ) as fileHandle:
self.__config = cPickle.load(fileHandle)
else:
self.__config = config
self.writeConfigToStore()
def writeConfigToStore(self):
"""
Re-writes the config attribute to the jobStore, so that its values can be retrieved
if the jobStore is reloaded.
"""
with self.writeSharedFileStream( "config.pickle", isProtected=False ) as fileHandle:
cPickle.dump(self.__config, fileHandle, cPickle.HIGHEST_PROTOCOL)
@property
def config( self ):
return self.__config
@staticmethod
def _checkJobStoreCreation(create, exists, jobStoreString):
"""
Consistency checks which will result in exceptions if we attempt to overwrite an existing
jobStore.
:type create: boolean
:type exists: boolean
:raise JobStoreCreationException: Thrown if create=True and exists=True or create=False
and exists=False
"""
if create and exists:
raise JobStoreCreationException("The job store '%s' already exists. "
"Use --restart or 'toil restart' to resume this jobStore, "
"else remove it to start from scratch" % jobStoreString)
if not create and not exists:
raise JobStoreCreationException("The job store '%s' does not exist, so there "
"is nothing to restart." % jobStoreString)
@abstractmethod
def deleteJobStore( self ):
"""
Removes the jobStore from the disk/store. Careful!
"""
raise NotImplementedError( )
##Cleanup functions
def clean(self):
"""
Function to cleanup the state of a jobStore after a restart.
Fixes jobs that might have been partially updated.
Resets the try counts.
"""
#Collate any jobs that were in the process of being created/deleted
jobsToDelete = set()
for job in self.jobs():
for updateID in job.jobsToDelete:
jobsToDelete.add(updateID)
#Delete the jobs that should be deleted
if len(jobsToDelete) > 0:
for job in self.jobs():
if job.updateID in jobsToDelete:
self.delete(job.jobStoreID)
#Cleanup the state of each job
for job in self.jobs():
changed = False #Flag to indicate if we need to update the job
#on disk
if len(job.jobsToDelete) != 0:
job.jobsToDelete = set()
changed = True
#While jobs at the end of the stack are already deleted remove
#those jobs from the stack (this cleans up the case that the job
#had successors to run, but had not been updated to reflect this)
while len(job.stack) > 0:
jobs = [ command for command in job.stack[-1] if self.exists(command[0]) ]
if len(jobs) < len(job.stack[-1]):
changed = True
if len(jobs) > 0:
job.stack[-1] = jobs
break
else:
job.stack.pop()
else:
break
#Reset the retry count of the job
if job.remainingRetryCount < self._defaultTryCount():
job.remainingRetryCount = self._defaultTryCount()
changed = True
#This cleans the old log file which may
#have been left if the job is being retried after a job failure.
if job.logJobStoreFileID != None:
job.clearLogFile(self)
changed = True
if changed: #Update, but only if a change has occurred
self.update(job)
#Remove any crufty stats/logging files from the previous run
self.readStatsAndLogging(lambda x : None)
##########################################
#The following methods deal with creating/loading/updating/writing/checking for the
#existence of jobs
##########################################
@abstractmethod
def create( self, command, memory, cores, disk, updateID=None,
predecessorNumber=0 ):
"""
Creates a job, adding it to the store.
Command, memory, cores, updateID, predecessorNumber
are all arguments to the job's constructor.
:rtype : toil.jobWrapper.JobWrapper
"""
raise NotImplementedError( )
@abstractmethod
def exists( self, jobStoreID ):
"""
Returns true if the job is in the store, else false.
:rtype : bool
"""
raise NotImplementedError( )
@abstractmethod
def getPublicUrl( self, FileName):
"""
Returns a publicly accessible URL to the given file in the job store.
The returned URL starts with 'http:', 'https:' or 'file:'.
        The returned URL may expire as early as 1h after it has been returned.
        Throw an exception if the file does not exist.
        :param FileName:
:return:
"""
raise NotImplementedError()
@abstractmethod
def getSharedPublicUrl( self, jobStoreFileID):
"""
Returns a publicly accessible URL to the given file in the job store.
The returned URL starts with 'http:', 'https:' or 'file:'.
        The returned URL may expire as early as 1h after it has been returned.
Throw an exception if the file does not exist.
:param jobStoreFileID:
:return:
"""
raise NotImplementedError()
@abstractmethod
def load( self, jobStoreID ):
"""
Loads a job for the given jobStoreID and returns it.
:rtype: toil.jobWrapper.JobWrapper
:raises: NoSuchJobException if there is no job with the given jobStoreID
"""
raise NotImplementedError( )
@abstractmethod
def update( self, job ):
"""
Persists the job in this store atomically.
"""
raise NotImplementedError( )
@abstractmethod
def delete( self, jobStoreID ):
"""
Removes from store atomically, can not then subsequently call load(), write(), update(),
etc. with the job.
This operation is idempotent, i.e. deleting a job twice or deleting a non-existent job
will succeed silently.
"""
raise NotImplementedError( )
def jobs(self):
"""
Returns iterator on the jobs in the store.
:rtype : iterator
"""
raise NotImplementedError( )
##########################################
#The following provide an way of creating/reading/writing/updating files
#associated with a given job.
##########################################
@abstractmethod
def writeFile( self, localFilePath, jobStoreID=None ):
"""
Takes a file (as a path) and places it in this job store. Returns an ID that can be used
to retrieve the file at a later time.
jobStoreID is the id of a job, or None. If specified, when delete(job)
is called all files written with the given job.jobStoreID will be
removed from the jobStore.
"""
raise NotImplementedError( )
@abstractmethod
@contextmanager
def writeFileStream( self, jobStoreID=None ):
"""
Similar to writeFile, but returns a context manager yielding a tuple of
1) a file handle which can be written to and 2) the ID of the resulting
file in the job store. The yielded file handle does not need to and
should not be closed explicitly.
"""
raise NotImplementedError( )
@abstractmethod
def getEmptyFileStoreID( self, jobStoreID=None ):
"""
:rtype : string, the ID of a new, empty file.
jobStoreID is the id of a job, or None. If specified, when delete(job)
is called all files written with the given job.jobStoreID will be
removed from the jobStore.
Call to fileExists(getEmptyFileStoreID(jobStoreID)) will return True.
"""
raise NotImplementedError( )
@abstractmethod
def readFile( self, jobStoreFileID, localFilePath ):
"""
Copies the file referenced by jobStoreFileID to the given local file path. The version
will be consistent with the last copy of the file written/updated.
"""
raise NotImplementedError( )
@abstractmethod
@contextmanager
def readFileStream( self, jobStoreFileID ):
"""
Similar to readFile, but returns a context manager yielding a file handle which can be
read from. The yielded file handle does not need to and should not be closed explicitly.
"""
raise NotImplementedError( )
@abstractmethod
def deleteFile( self, jobStoreFileID ):
"""
Deletes the file with the given ID from this job store.
This operation is idempotent, i.e. deleting a file twice or deleting a non-existent file
will succeed silently.
"""
raise NotImplementedError( )
@abstractmethod
def fileExists(self, jobStoreFileID ):
"""
:rtype : True if the jobStoreFileID exists in the jobStore, else False
"""
raise NotImplementedError()
@abstractmethod
def updateFile( self, jobStoreFileID, localFilePath ):
"""
Replaces the existing version of a file in the jobStore. Throws an exception if the file
does not exist.
:raises ConcurrentFileModificationException: if the file was modified concurrently during
an invocation of this method
"""
raise NotImplementedError( )
##########################################
#The following methods deal with shared files, i.e. files not associated
#with specific jobs.
##########################################
sharedFileNameRegex = re.compile( r'^[a-zA-Z0-9._-]+$' )
# FIXME: Rename to updateSharedFileStream
@abstractmethod
@contextmanager
def writeSharedFileStream( self, sharedFileName, isProtected=True ):
"""
Returns a context manager yielding a writable file handle to the global file referenced
by the given name.
:param sharedFileName: A file name matching AbstractJobStore.fileNameRegex, unique within
the physical storage represented by this job store
:raises ConcurrentFileModificationException: if the file was modified concurrently during
an invocation of this method
"""
raise NotImplementedError( )
@abstractmethod
@contextmanager
def readSharedFileStream( self, sharedFileName, isProtected=True ):
"""
Returns a context manager yielding a readable file handle to the global file referenced
by the given name.
"""
raise NotImplementedError( )
@abstractmethod
def writeStatsAndLogging( self, statsAndLoggingString ):
"""
Adds the given statistics/logging string to the store of statistics info.
"""
raise NotImplementedError( )
@abstractmethod
def readStatsAndLogging( self, statsAndLoggingCallBackFn):
"""
Reads stats/logging strings accumulated by "writeStatsAndLogging" function.
For each stats/logging file calls the statsAndLoggingCallBackFn with
an open, readable file-handle that can be used to parse the stats.
Returns the number of stat/logging strings processed.
Stats/logging files are only read once and are removed from the
file store after being written to the given file handle.
"""
raise NotImplementedError( )
## Helper methods for subclasses
def _defaultTryCount( self ):
return int( self.config.retryCount+1 )
@classmethod
def _validateSharedFileName( cls, sharedFileName ):
return bool( cls.sharedFileNameRegex.match( sharedFileName ) )
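# Illustrative example (not part of Toil): _validateSharedFileName is a classmethod,
# so it can be exercised without a concrete job store subclass, e.g.
#   AbstractJobStore._validateSharedFileName("config.pickle")  # -> True
#   AbstractJobStore._validateSharedFileName("bad name!")      # -> False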
| [
"pickle.load",
"pickle.dump",
"re.compile"
]
| [((12664, 12695), 're.compile', 're.compile', (['"""^[a-zA-Z0-9._-]+$"""'], {}), "('^[a-zA-Z0-9._-]+$')\n", (12674, 12695), False, 'import re\n'), ((2822, 2887), 'pickle.dump', 'cPickle.dump', (['self.__config', 'fileHandle', 'cPickle.HIGHEST_PROTOCOL'], {}), '(self.__config, fileHandle, cPickle.HIGHEST_PROTOCOL)\n', (2834, 2887), True, 'import pickle as cPickle\n'), ((2404, 2428), 'pickle.load', 'cPickle.load', (['fileHandle'], {}), '(fileHandle)\n', (2416, 2428), True, 'import pickle as cPickle\n')] |
from datetime import date
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.express as px
from dash.dependencies import Input, Output
test_data = pd.read_csv("data/world_data.csv")
today = date.today()
external_stylesheets = [dbc.themes.BOOTSTRAP]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.title = "COVID Dashboard - UK Edition"
app.layout = html.Div([
html.Nav(className="navbar navbar-dark fixed-top bg-dark flex-md-nowrap p-0 shadow", children=[
html.A(className="navbar-brand col-sm-3 col-md-2 mr-0", children="COVID-19"),
# dcc.DatePickerRange(className="date-and-location",
# id="month-picker",
# min_date_allowed=date(2020, 1, 30),
# max_date_allowed=date(today.year, today.month, today.day),
# start_date=date(2020, 3, 1),
# end_date=date(today.year, today.month, today.day),
# style={"height": "50%"}
# ),
]),
html.Div(className="container-fluid", children=[
html.Div(className="row", children=[
html.Nav(className="col-md-2 d-none d-md-block bg-light sidebar", children=[
html.Div(className="sidebar-sticky", children=[
html.H6(className="sidebar-heading d-flex px-3 mt-4 mb-1 text-muted", children=[
html.Span("Custom Search"),
]),
html.Ul(className="nav flex-column", children=[
html.Li(className="nav-item", children=[
dcc.Link("User Search", href="/home"),
])]),
html.H6(className="sidebar-heading d-flex px-3 mt-4 mb-1 text-muted", children=[
html.Span("Preset Search"),
]),
dcc.Location(id="url", refresh=False),
html.Ul(className="nav flex-column", children=[
html.Li(className="nav-item", children=[
dcc.Link("Africa", href="/africa"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Asia", href="/asia"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Europe", href="/europe"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("North America", href="/northamerica"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("South America", href="/southamerica"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Oceania", href="/oceania"),
html.Span(className="sr-only"),
]),
]),
html.Div(id='page-content'),
html.Ul(className="nav flex-column mb-2")
]),
]),
html.Main(role="main", className="col-md-9 ml-sm-auto col-lg-10 px-4", children=[
html.Div(className="chartjs-size-monitor", style={"position": "absolute", "left": "0px", "top": "0px", "right": "0px", "bottom": "0px", "overflow": "hidden", "pointer-events": "none", "visibility": "hidden", "z-index": "-1"}),
html.Div(className="box-shadow", children=[
]),
dbc.Row(
[
dbc.Col(children=[
html.H1(children="Deaths"),
html.Hr(className="lead"),
html.Div(id="death-stats", children="######"),
]),
dbc.Col(children=[
html.H1(children="Cases"),
html.Hr(className="lead"),
html.Div(id="cases-stats", children="######"),
]),
dbc.Col(children=[
html.H1(children="Vaccines"),
html.Hr(className="lead"),
html.Div(id="vaccines-stats", children="######"),
]),
]
),
html.Div(className="graphs", children=[
dcc.Graph(
id="cases-graph"
),
dcc.Graph(
id="deaths-graph",
),
]),
])])])])
def dropdown(location, user_enabled, display):
return dcc.Dropdown(
id="location",
options=[
{"label": location, "value": location} for location in test_data["location"].unique()
],
value=location,
searchable=False,
disabled=user_enabled,
style={"display": display}
    )
@app.callback(dash.dependencies.Output('page-content', 'children'),
[dash.dependencies.Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/africa':
return dropdown("Africa", True, "none")
elif pathname == '/asia':
return dropdown("Asia", True, "none")
elif pathname == '/europe':
return dropdown("Europe", True, "none")
elif pathname == '/northamerica':
return dropdown("North America", True, "none")
elif pathname == '/southamerica':
return dropdown("South America", True, "none")
elif pathname == '/oceania':
return dropdown("Oceania", True, "none")
else:
return dropdown("United Kingdom", False, "block")
@app.callback(
[
Output("cases-graph", "figure"), Output("deaths-graph", "figure"),
Output("death-stats", "children"), Output("cases-stats", "children"),
Output("vaccines-stats", "children")
],
[
# Input('month-picker', "start_date"),
# Input("month-picker", "end_date"),
Input("location", "value"),
],
)
def update_personal_ouput(value):
# start_date, end_date, ):
filtered_data_cases = test_data.loc[(test_data["location"] == value)]
# //& (test_data["date"] >= start_date) & (test_data["date"] <= end_date)]
fig_deaths = px.bar(filtered_data_cases, x="date", y=["new_deaths_smoothed"], color_discrete_sequence=["mediumaquamarine"], title=f"COVID Deaths - {value}", labels={"value": "Number of Deaths", "date": "Date", "variable": "Legend"})
fig_deaths.update_layout(title_x=0.5, legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
fig_deaths.add_scatter(x=filtered_data_cases["date"], y=filtered_data_cases["new_deaths_smoothed"].rolling(window=7, min_periods=7, center=True).mean().round(), name="Rolling Average")
fig_cases = px.bar(filtered_data_cases, x="date", y=["new_cases_smoothed"], color_discrete_sequence=["mediumaquamarine"], title=f"COVID Cases - {value}", labels={"value": "Number of Cases", "date": "Date", "variable": "Legend"})
fig_cases.update_layout(title_x=0.5, legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
fig_cases.add_scatter(x=filtered_data_cases["date"], y=filtered_data_cases["new_cases_smoothed"].rolling(window=7, min_periods=7, center=True).mean().round(), name="Rolling Average")
latest_deaths = f'{filtered_data_cases["new_deaths"].iloc[-1]:.0f} today'
latest_cases = f'{filtered_data_cases["new_cases"].iloc[-1]:.0f} today'
latest_vaccines = f'{filtered_data_cases["new_vaccinations"].iloc[-2]:.0f} today'
return fig_deaths, fig_cases, latest_deaths, latest_cases, latest_vaccines
if __name__ == "__main__":
app.run_server(debug=True, dev_tools_ui=False)
| [
"dash_html_components.Ul",
"dash_html_components.Hr",
"pandas.read_csv",
"plotly.express.bar",
"dash_core_components.Link",
"dash.dependencies.Output",
"dash_core_components.Location",
"dash_html_components.H1",
"dash_html_components.Span",
"dash.dependencies.Input",
"dash_html_components.Div",
"dash_core_components.Graph",
"datetime.date.today",
"dash.Dash",
"dash_html_components.A"
]
| [((254, 288), 'pandas.read_csv', 'pd.read_csv', (['"""data/world_data.csv"""'], {}), "('data/world_data.csv')\n", (265, 288), True, 'import pandas as pd\n'), ((298, 310), 'datetime.date.today', 'date.today', ([], {}), '()\n', (308, 310), False, 'from datetime import date\n'), ((365, 427), 'dash.Dash', 'dash.Dash', (['__name__'], {'external_stylesheets': 'external_stylesheets'}), '(__name__, external_stylesheets=external_stylesheets)\n', (374, 427), False, 'import dash\n'), ((7046, 7098), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""page-content"""', '"""children"""'], {}), "('page-content', 'children')\n", (7070, 7098), False, 'import dash\n'), ((8380, 8612), 'plotly.express.bar', 'px.bar', (['filtered_data_cases'], {'x': '"""date"""', 'y': "['new_deaths_smoothed']", 'color_discrete_sequence': "['mediumaquamarine']", 'title': 'f"""COVID Deaths - {value}"""', 'labels': "{'value': 'Number of Deaths', 'date': 'Date', 'variable': 'Legend'}"}), "(filtered_data_cases, x='date', y=['new_deaths_smoothed'],\n color_discrete_sequence=['mediumaquamarine'], title=\n f'COVID Deaths - {value}', labels={'value': 'Number of Deaths', 'date':\n 'Date', 'variable': 'Legend'})\n", (8386, 8612), True, 'import plotly.express as px\n'), ((8908, 9137), 'plotly.express.bar', 'px.bar', (['filtered_data_cases'], {'x': '"""date"""', 'y': "['new_cases_smoothed']", 'color_discrete_sequence': "['mediumaquamarine']", 'title': 'f"""COVID Cases - {value}"""', 'labels': "{'value': 'Number of Cases', 'date': 'Date', 'variable': 'Legend'}"}), "(filtered_data_cases, x='date', y=['new_cases_smoothed'],\n color_discrete_sequence=['mediumaquamarine'], title=\n f'COVID Cases - {value}', labels={'value': 'Number of Cases', 'date':\n 'Date', 'variable': 'Legend'})\n", (8914, 9137), True, 'import plotly.express as px\n'), ((7115, 7157), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""url"""', '"""pathname"""'], {}), "('url', 'pathname')\n", (7138, 7157), False, 'import dash\n'), ((7802, 7833), 'dash.dependencies.Output', 'Output', (['"""cases-graph"""', '"""figure"""'], {}), "('cases-graph', 'figure')\n", (7808, 7833), False, 'from dash.dependencies import Input, Output\n'), ((7835, 7867), 'dash.dependencies.Output', 'Output', (['"""deaths-graph"""', '"""figure"""'], {}), "('deaths-graph', 'figure')\n", (7841, 7867), False, 'from dash.dependencies import Input, Output\n'), ((7877, 7910), 'dash.dependencies.Output', 'Output', (['"""death-stats"""', '"""children"""'], {}), "('death-stats', 'children')\n", (7883, 7910), False, 'from dash.dependencies import Input, Output\n'), ((7912, 7945), 'dash.dependencies.Output', 'Output', (['"""cases-stats"""', '"""children"""'], {}), "('cases-stats', 'children')\n", (7918, 7945), False, 'from dash.dependencies import Input, Output\n'), ((7955, 7991), 'dash.dependencies.Output', 'Output', (['"""vaccines-stats"""', '"""children"""'], {}), "('vaccines-stats', 'children')\n", (7961, 7991), False, 'from dash.dependencies import Input, Output\n'), ((8105, 8131), 'dash.dependencies.Input', 'Input', (['"""location"""', '"""value"""'], {}), "('location', 'value')\n", (8110, 8131), False, 'from dash.dependencies import Input, Output\n'), ((620, 696), 'dash_html_components.A', 'html.A', ([], {'className': '"""navbar-brand col-sm-3 col-md-2 mr-0"""', 'children': '"""COVID-19"""'}), "(className='navbar-brand col-sm-3 col-md-2 mr-0', children='COVID-19')\n", (626, 696), True, 'import dash_html_components as html\n'), ((4472, 4709), 'dash_html_components.Div', 'html.Div', ([], 
{'className': '"""chartjs-size-monitor"""', 'style': "{'position': 'absolute', 'left': '0px', 'top': '0px', 'right': '0px',\n 'bottom': '0px', 'overflow': 'hidden', 'pointer-events': 'none',\n 'visibility': 'hidden', 'z-index': '-1'}"}), "(className='chartjs-size-monitor', style={'position': 'absolute',\n 'left': '0px', 'top': '0px', 'right': '0px', 'bottom': '0px',\n 'overflow': 'hidden', 'pointer-events': 'none', 'visibility': 'hidden',\n 'z-index': '-1'})\n", (4480, 4709), True, 'import dash_html_components as html\n'), ((4748, 4793), 'dash_html_components.Div', 'html.Div', ([], {'className': '"""box-shadow"""', 'children': '[]'}), "(className='box-shadow', children=[])\n", (4756, 4793), True, 'import dash_html_components as html\n'), ((2344, 2381), 'dash_core_components.Location', 'dcc.Location', ([], {'id': '"""url"""', 'refresh': '(False)'}), "(id='url', refresh=False)\n", (2356, 2381), True, 'import dash_core_components as dcc\n'), ((4168, 4195), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""page-content"""'}), "(id='page-content')\n", (4176, 4195), True, 'import dash_html_components as html\n'), ((4252, 4293), 'dash_html_components.Ul', 'html.Ul', ([], {'className': '"""nav flex-column mb-2"""'}), "(className='nav flex-column mb-2')\n", (4259, 4293), True, 'import dash_html_components as html\n'), ((6141, 6168), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""cases-graph"""'}), "(id='cases-graph')\n", (6150, 6168), True, 'import dash_core_components as dcc\n'), ((6292, 6320), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""deaths-graph"""'}), "(id='deaths-graph')\n", (6301, 6320), True, 'import dash_core_components as dcc\n'), ((5019, 5045), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""Deaths"""'}), "(children='Deaths')\n", (5026, 5045), True, 'import dash_html_components as html\n'), ((5091, 5116), 'dash_html_components.Hr', 'html.Hr', ([], {'className': '"""lead"""'}), "(className='lead')\n", (5098, 5116), True, 'import dash_html_components as html\n'), ((5162, 5207), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""death-stats"""', 'children': '"""######"""'}), "(id='death-stats', children='######')\n", (5170, 5207), True, 'import dash_html_components as html\n'), ((5360, 5385), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""Cases"""'}), "(children='Cases')\n", (5367, 5385), True, 'import dash_html_components as html\n'), ((5431, 5456), 'dash_html_components.Hr', 'html.Hr', ([], {'className': '"""lead"""'}), "(className='lead')\n", (5438, 5456), True, 'import dash_html_components as html\n'), ((5502, 5547), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""cases-stats"""', 'children': '"""######"""'}), "(id='cases-stats', children='######')\n", (5510, 5547), True, 'import dash_html_components as html\n'), ((5700, 5728), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""Vaccines"""'}), "(children='Vaccines')\n", (5707, 5728), True, 'import dash_html_components as html\n'), ((5774, 5799), 'dash_html_components.Hr', 'html.Hr', ([], {'className': '"""lead"""'}), "(className='lead')\n", (5781, 5799), True, 'import dash_html_components as html\n'), ((5845, 5893), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""vaccines-stats"""', 'children': '"""######"""'}), "(id='vaccines-stats', children='######')\n", (5853, 5893), True, 'import dash_html_components as html\n'), ((1793, 1819), 'dash_html_components.Span', 'html.Span', (['"""Custom Search"""'], {}), "('Custom Search')\n", (1802, 1819), True, 
'import dash_html_components as html\n'), ((2256, 2282), 'dash_html_components.Span', 'html.Span', (['"""Preset Search"""'], {}), "('Preset Search')\n", (2265, 2282), True, 'import dash_html_components as html\n'), ((2038, 2075), 'dash_core_components.Link', 'dcc.Link', (['"""User Search"""'], {'href': '"""/home"""'}), "('User Search', href='/home')\n", (2046, 2075), True, 'import dash_core_components as dcc\n'), ((2593, 2627), 'dash_core_components.Link', 'dcc.Link', (['"""Africa"""'], {'href': '"""/africa"""'}), "('Africa', href='/africa')\n", (2601, 2627), True, 'import dash_core_components as dcc\n'), ((2669, 2699), 'dash_html_components.Span', 'html.Span', ([], {'className': '"""sr-only"""'}), "(className='sr-only')\n", (2678, 2699), True, 'import dash_html_components as html\n'), ((2880, 2910), 'dash_core_components.Link', 'dcc.Link', (['"""Asia"""'], {'href': '"""/asia"""'}), "('Asia', href='/asia')\n", (2888, 2910), True, 'import dash_core_components as dcc\n'), ((2952, 2982), 'dash_html_components.Span', 'html.Span', ([], {'className': '"""sr-only"""'}), "(className='sr-only')\n", (2961, 2982), True, 'import dash_html_components as html\n'), ((3137, 3171), 'dash_core_components.Link', 'dcc.Link', (['"""Europe"""'], {'href': '"""/europe"""'}), "('Europe', href='/europe')\n", (3145, 3171), True, 'import dash_core_components as dcc\n'), ((3213, 3243), 'dash_html_components.Span', 'html.Span', ([], {'className': '"""sr-only"""'}), "(className='sr-only')\n", (3222, 3243), True, 'import dash_html_components as html\n'), ((3398, 3445), 'dash_core_components.Link', 'dcc.Link', (['"""North America"""'], {'href': '"""/northamerica"""'}), "('North America', href='/northamerica')\n", (3406, 3445), True, 'import dash_core_components as dcc\n'), ((3487, 3517), 'dash_html_components.Span', 'html.Span', ([], {'className': '"""sr-only"""'}), "(className='sr-only')\n", (3496, 3517), True, 'import dash_html_components as html\n'), ((3672, 3719), 'dash_core_components.Link', 'dcc.Link', (['"""South America"""'], {'href': '"""/southamerica"""'}), "('South America', href='/southamerica')\n", (3680, 3719), True, 'import dash_core_components as dcc\n'), ((3761, 3791), 'dash_html_components.Span', 'html.Span', ([], {'className': '"""sr-only"""'}), "(className='sr-only')\n", (3770, 3791), True, 'import dash_html_components as html\n'), ((3946, 3982), 'dash_core_components.Link', 'dcc.Link', (['"""Oceania"""'], {'href': '"""/oceania"""'}), "('Oceania', href='/oceania')\n", (3954, 3982), True, 'import dash_core_components as dcc\n'), ((4024, 4054), 'dash_html_components.Span', 'html.Span', ([], {'className': '"""sr-only"""'}), "(className='sr-only')\n", (4033, 4054), True, 'import dash_html_components as html\n')] |
from collections import OrderedDict
import skimage.io as io
from config import get_config
config = get_config()
class LRUCache:
def __init__(self, capacity: int):
self._ordered_dict = OrderedDict()
self._capacity = capacity
def get(self, key):
self._move_to_end_if_exist(key)
return self._ordered_dict.get(key)
def put(self, key, value):
self._move_to_end_if_exist(key)
self._ordered_dict[key] = value
if len(self._ordered_dict) > self._capacity:
key, value = self._ordered_dict.popitem(last=False)
del key
del value
def _move_to_end_if_exist(self, key):
if key in self._ordered_dict:
self._ordered_dict.move_to_end(key)
_cache = LRUCache(config["data_queue_len"])
def get_image(path):
# image = _cache.get(path)
image = None
if image is None:
image = io.imread(path)
# _cache.put(path, image)
return image
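# Illustrative demo (not part of the original module; the keys and values are
# arbitrary): exercises the LRU eviction policy.
if __name__ == "__main__":
    demo_cache = LRUCache(capacity=2)
    demo_cache.put("a", 1)
    demo_cache.put("b", 2)
    demo_cache.put("c", 3)  # capacity exceeded: "a" is evicted as least recently used
    assert demo_cache.get("a") is None
    assert demo_cache.get("b") == 2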
| [
"config.get_config",
"collections.OrderedDict",
"skimage.io.imread"
]
| [((102, 114), 'config.get_config', 'get_config', ([], {}), '()\n', (112, 114), False, 'from config import get_config\n'), ((201, 214), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (212, 214), False, 'from collections import OrderedDict\n'), ((912, 927), 'skimage.io.imread', 'io.imread', (['path'], {}), '(path)\n', (921, 927), True, 'import skimage.io as io\n')] |
import logging
import time
from qupy.framing.slip import Slip
from qupy.interface.serial import SerialPort
from qupy.interface.errors import InterfaceTimeoutError, InterfaceIOError, InterfaceError
from qupy.comm.client import CommClient
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
s = SerialPort()
f = Slip()
c = CommClient(s, f)
connect = True
while True:
if connect:
try:
s.open()
except InterfaceIOError as e:
time.sleep(1.0)
continue
c.start()
connect = False
try:
print('ask...')
data = input()
d = c.ask(data.encode('utf-8'))
print('data:',d)
if len(d) > 0 and d[0] == ord('p'):
break
except InterfaceIOError as e:
print('ask io error', str(e))
c.stop()
s.close()
connect = True
except InterfaceTimeoutError as e:
print('timeout')
c.stop()
s.close()
| [
"logging.basicConfig",
"qupy.comm.client.CommClient",
"time.sleep",
"qupy.interface.serial.SerialPort",
"qupy.framing.slip.Slip"
]
| [((240, 280), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (259, 280), False, 'import logging\n'), ((318, 330), 'qupy.interface.serial.SerialPort', 'SerialPort', ([], {}), '()\n', (328, 330), False, 'from qupy.interface.serial import SerialPort\n'), ((339, 345), 'qupy.framing.slip.Slip', 'Slip', ([], {}), '()\n', (343, 345), False, 'from qupy.framing.slip import Slip\n'), ((355, 371), 'qupy.comm.client.CommClient', 'CommClient', (['s', 'f'], {}), '(s, f)\n', (365, 371), False, 'from qupy.comm.client import CommClient\n'), ((537, 552), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (547, 552), False, 'import time\n')] |
import requests
from bs4 import BeautifulSoup
import json
import re
# Range of Roll Number - User Input
start_roll = int(input("Starting Roll Number: "))
end_roll = int(input("Ending Roll Number: "))
# Semester - User Input
sem = int(input("Which Semester[1-8]: "))
# Verbosity
verbose = int(input("Verbosity Level (1 for just data, 2 for detailed data): "))
# Roll Number Tuple
roll_tuple = tuple(range(start_roll, end_roll+1))
# Getting the Websites
result_url = 'https://makaut.ucanapply.com/smartexam/public/result-details'
get_result_details = 'https://makaut.ucanapply.com/smartexam/public//get-result-details'
# Semester Codes
semcode = ('SM01', 'SM02', 'SM03', 'SM04', 'SM05', 'SM06', 'SM07', 'SM08')
def get_marks_of(rollNo, semester):
# Handle session cookies appropriately
s = requests.Session()
with s.get(result_url) as r:
while r.status_code != 200:
r = s.get(result_url)
# Parse CSRF-Token
soup = BeautifulSoup(r.text, 'html.parser')
csrf_token = soup.find("meta", {"name":"csrf-token"})['content']
# Create dict for post request
form_data = {'_token': csrf_token, 'p1':'', 'ROLLNO':str(rollNo), 'SEMCODE':semcode[semester-1], 'examtype':'result-details', 'all':''}
# Get Result Data
with s.post(get_result_details, data=form_data) as r:
while r.status_code != 200:
r = s.post(get_result_details, data=form_data)
result_data = json.loads(r.text)['html']
soup = BeautifulSoup(result_data, 'html.parser')
result_data = soup.find("div", {"id":"page-wrap"})
try:
result_data = result_data.get_text()
except AttributeError:
# This result has not yet been published
return
# Basic Data
name = re.findall("Name[^a-zA-Z]*([a-zA-Z ]+)", result_data)[0]
stream = re.findall("B.Tech[^A-Z]*([A-Z]+)", result_data)[0]
roll_num = re.findall("Roll[^0-9]*([0-9]+)", result_data)[0]
reg_num, batch = re.findall("Registration[^0-9]*([0-9]+) OF ([0-9-]+)", result_data)[0]
# Subject Data
def get_subject_data(result_data):
re_mp_fl = [ i for i in filter(lambda x: x!='', [i for i in map(lambda x: x.strip(), re.findall("([^\n]+)", result_data))])]
for i in range(re_mp_fl.index("Subject Code")+6, re_mp_fl.index("Total"),6):
yield(tuple([re_mp_fl[j] for j in range(i, i+6)]))
subject_data = get_subject_data(result_data)
# SGPA YGPA MAR - Prone to errors for odd and even sem
sgpa_odd, odd_year, sgpa_even, even_year, ygpa, cgpa = -1, -1, -1, -1, -1, -1
try:
sgpa_odd = re.findall("ODD\.*\s*\(.*\)[^0-9.]*([0-9.]+)", result_data)[0]
odd_year = re.findall("ODD[^0-9]*([0-9])", result_data)[0]
sgpa_even = re.findall("EVEN\s*\(.*\)[^0-9.]*([0-9.]+)", result_data)[0]
even_year = re.findall("EVEN[^0-9]*([0-9])", result_data)[0]
ygpa = re.findall("YGPA[^0-9]*([0-9.]+)", result_data)[0]
cgpa = re.findall("DGPA[^EVEN]*EVEN\s*\(.*\)[^0-9.]*[0-9.]+\s*([0-9.]+)[^YGPA]*YGPA", result_data)[0]
except IndexError:
pass
return {
'name': name,
'stream': stream,
'roll': roll_num,
'reg_num': reg_num,
'batch': batch,
'marks_per_subject': subject_data,
'sgpa_odd': sgpa_odd,
'odd_year': odd_year,
'sgpa_even': None if sgpa_even == -1 else sgpa_even,
'even_year': None if even_year == -1 else even_year,
'ygpa': None if ygpa == -1 else ygpa,
'cgpa': None if cgpa == -1 else cgpa
}
def print_marks_properly(roll, sem):
data = get_marks_of(roll, sem)
if data != "<TBD>":
for key, value in data.items():
if key == 'marks_per_subject':
print(key,"->")
for x in value:
print(x)
else:
print(key, "->", value)
if verbose == 1:
# Disply most recent
for roll in roll_tuple:
data = get_marks_of(roll, sem)
try:
print(f"({data['name']}, {data['sgpa_odd' if sem%2!=0 else 'sgpa_even']})")
except:
pass
elif verbose == 2:
for roll in roll_tuple:
print_marks_properly(roll, sem)
else:
print("[!] Verbosity Level Wrong!")
| [
"bs4.BeautifulSoup",
"re.findall",
"json.loads",
"requests.Session"
]
| [((830, 848), 'requests.Session', 'requests.Session', ([], {}), '()\n', (846, 848), False, 'import requests\n'), ((997, 1033), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""html.parser"""'], {}), "(r.text, 'html.parser')\n", (1010, 1033), False, 'from bs4 import BeautifulSoup\n'), ((1530, 1571), 'bs4.BeautifulSoup', 'BeautifulSoup', (['result_data', '"""html.parser"""'], {}), "(result_data, 'html.parser')\n", (1543, 1571), False, 'from bs4 import BeautifulSoup\n'), ((1489, 1507), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (1499, 1507), False, 'import json\n'), ((1818, 1871), 're.findall', 're.findall', (['"""Name[^a-zA-Z]*([a-zA-Z ]+)"""', 'result_data'], {}), "('Name[^a-zA-Z]*([a-zA-Z ]+)', result_data)\n", (1828, 1871), False, 'import re\n'), ((1889, 1937), 're.findall', 're.findall', (['"""B.Tech[^A-Z]*([A-Z]+)"""', 'result_data'], {}), "('B.Tech[^A-Z]*([A-Z]+)', result_data)\n", (1899, 1937), False, 'import re\n'), ((1957, 2003), 're.findall', 're.findall', (['"""Roll[^0-9]*([0-9]+)"""', 'result_data'], {}), "('Roll[^0-9]*([0-9]+)', result_data)\n", (1967, 2003), False, 'import re\n'), ((2029, 2096), 're.findall', 're.findall', (['"""Registration[^0-9]*([0-9]+) OF ([0-9-]+)"""', 'result_data'], {}), "('Registration[^0-9]*([0-9]+) OF ([0-9-]+)', result_data)\n", (2039, 2096), False, 'import re\n'), ((2673, 2736), 're.findall', 're.findall', (['"""ODD\\\\.*\\\\s*\\\\(.*\\\\)[^0-9.]*([0-9.]+)"""', 'result_data'], {}), "('ODD\\\\.*\\\\s*\\\\(.*\\\\)[^0-9.]*([0-9.]+)', result_data)\n", (2683, 2736), False, 'import re\n'), ((2756, 2800), 're.findall', 're.findall', (['"""ODD[^0-9]*([0-9])"""', 'result_data'], {}), "('ODD[^0-9]*([0-9])', result_data)\n", (2766, 2800), False, 'import re\n'), ((2825, 2885), 're.findall', 're.findall', (['"""EVEN\\\\s*\\\\(.*\\\\)[^0-9.]*([0-9.]+)"""', 'result_data'], {}), "('EVEN\\\\s*\\\\(.*\\\\)[^0-9.]*([0-9.]+)', result_data)\n", (2835, 2885), False, 'import re\n'), ((2907, 2952), 're.findall', 're.findall', (['"""EVEN[^0-9]*([0-9])"""', 'result_data'], {}), "('EVEN[^0-9]*([0-9])', result_data)\n", (2917, 2952), False, 'import re\n'), ((2972, 3019), 're.findall', 're.findall', (['"""YGPA[^0-9]*([0-9.]+)"""', 'result_data'], {}), "('YGPA[^0-9]*([0-9.]+)', result_data)\n", (2982, 3019), False, 'import re\n'), ((3039, 3143), 're.findall', 're.findall', (['"""DGPA[^EVEN]*EVEN\\\\s*\\\\(.*\\\\)[^0-9.]*[0-9.]+\\\\s*([0-9.]+)[^YGPA]*YGPA"""', 'result_data'], {}), "(\n 'DGPA[^EVEN]*EVEN\\\\s*\\\\(.*\\\\)[^0-9.]*[0-9.]+\\\\s*([0-9.]+)[^YGPA]*YGPA',\n result_data)\n", (3049, 3143), False, 'import re\n'), ((2256, 2291), 're.findall', 're.findall', (['"""([^\n]+)"""', 'result_data'], {}), "('([^\\n]+)', result_data)\n", (2266, 2291), False, 'import re\n')] |
from dataclasses import dataclass
import json
import re
@dataclass
class KeyMapper(dict):
"""
Example:
    km = KeyMapper({'messages': {'message1': 'Hello World!'}})
print(km['messages.message1'])
Variables:
__delimiter__ is set to dot-notation by default, unless specified otherwise.
"""
__delimiter__ = "." # Default
__schema__ = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if kwargs:
if 'delimiter' in kwargs:
self.__delimiter__ = kwargs['delimiter']
elif 'schema' in kwargs:
self.__schema__ = kwargs['schema']
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
if self.__schema__:
if self.__schema__[k] == type(v):
self.__dict__.update({k: v})
else:
raise ValueError(
f'TypeMismatchError: value {type(v)} does not match type {type(self.__schema__[k])} defined in schema')
else:
self.__dict__.update({k: v})
def __repr__(self):
return '{}(dict={})'.format(self.__class__, self.__dict__)
def __str__(self):
return '{}'.format(self.__dict__)
def __getattr__(self, attr):
try:
return self.get(attr)
except Exception as e:
raise e
def __setattr__(self, key, value):
try:
self.__setitem__(key, value)
except Exception as e:
raise e
def __delattr__(self, item):
try:
self.__delitem__(item)
except Exception as e:
raise e
def __getitem__(self, key):
try:
if self.__delimiter__ in key:
return self.__mapper__(self.__dict__, key.split(self.__delimiter__), self.__getitem__.__name__)
else:
return self.get(key)
except Exception as e:
raise e
def __setitem__(self, key, value):
try:
if self.__delimiter__ in key:
self.__mapper__(self.__dict__, key.split(
self.__delimiter__), self.__setitem__.__name__, value)
else:
super().__setitem__(key, value)
self.__dict__.update({key: value})
except Exception as e:
raise e
def __delitem__(self, key):
try:
if self.__delimiter__ in key:
self.__mapper__(self.__dict__, key.split(
self.__delimiter__), self.__delitem__.__name__)
else:
super().__delitem__(key)
del self.__dict__[key]
except Exception as e:
raise e
def pprint(self, *args):
try:
if len(args) > 0:
return json.dumps(args[0], indent=4, ensure_ascii=False)
return json.dumps(self, indent=4, ensure_ascii=False)
except Exception as e:
raise e
@classmethod
def __mapper__(cls, d, m, callback, *args, **kwargs):
for i, k in enumerate(m):
key = k if not re.search(r'^[0-9]+$', k) else int(k)
try:
if str(key) in d or type(key) == int and d[key]:
if str(key) != m[-1] or i != len(m) - 1:
return cls.__mapper__(d[key], m[1:], callback, *args, **kwargs)
elif str(key) == m[-1] and i == len(m) - 1:
if callback == '__setitem__':
d[key] = args[0]
return None
elif callback == '__delitem__':
del d[key]
return None
else:
return d[key]
except Exception as e:
raise e
else:
if i == len(m) - 1:
if callback == '__setitem__':
d[m[-1]] = args[0]
return None
else:
raise KeyError('{}'.format(m[i]))
else:
if callback == '__getitem__':
return d
| [
"json.dumps",
"re.search"
]
| [((3028, 3074), 'json.dumps', 'json.dumps', (['self'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(self, indent=4, ensure_ascii=False)\n', (3038, 3074), False, 'import json\n'), ((2959, 3008), 'json.dumps', 'json.dumps', (['args[0]'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(args[0], indent=4, ensure_ascii=False)\n', (2969, 3008), False, 'import json\n'), ((3263, 3287), 're.search', 're.search', (['"""^[0-9]+$"""', 'k'], {}), "('^[0-9]+$', k)\n", (3272, 3287), False, 'import re\n')] |
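# A minimal usage sketch for the KeyMapper class above, assuming the default
# '.' delimiter; the nested dictionary and key names are illustrative only.
km = KeyMapper({'messages': {'message1': 'Hello World!'}})
print(km['messages.message1'])   # nested read via the default '.' delimiter
km['messages.message2'] = 'Bye'  # nested write creates the missing leaf key
del km['messages.message1']      # nested delete removes only that leaf
print(km['messages.message2'])   # -> 'Bye'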
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Kay management script.
:Copyright: (c) 2009 Accense Technology, Inc. All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import logging
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
import kay
kay.setup_env(manage_py_env=True)
from werkzeug import script
from kay.management import *
import appengine_config
if __name__ == '__main__':
if len(sys.argv) == 1:
sys.argv.append("--help")
script.run()
| [
"sys.argv.append",
"os.path.dirname",
"kay.setup_env",
"werkzeug.script.run"
]
| [((308, 341), 'kay.setup_env', 'kay.setup_env', ([], {'manage_py_env': '(True)'}), '(manage_py_env=True)\n', (321, 341), False, 'import kay\n'), ((516, 528), 'werkzeug.script.run', 'script.run', ([], {}), '()\n', (526, 528), False, 'from werkzeug import script\n'), ((486, 511), 'sys.argv.append', 'sys.argv.append', (['"""--help"""'], {}), "('--help')\n", (501, 511), False, 'import sys\n'), ((257, 282), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (272, 282), False, 'import os\n')] |
#!/usr/bin/python
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""This entry point runs all script tests."""
import logging.config
import unittest
if __name__ == '__main__':
logging.config.fileConfig('logging.conf')
suite = unittest.TestLoader().loadTestsFromNames([
'templateloader_test', 'pegparser_test', 'idlparser_test',
'idlnode_test', 'idlrenderer_test', 'database_test',
'databasebuilder_test', 'emitter_test', 'dartgenerator_test',
'multiemitter_test'
])
unittest.TextTestRunner().run(suite)
| [
"unittest.TextTestRunner",
"unittest.TestLoader"
]
| [((402, 423), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (421, 423), False, 'import unittest\n'), ((682, 707), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (705, 707), False, 'import unittest\n')] |
from __future__ import print_function, division, absolute_import
import copy
import numpy as np
import skimage.draw
import skimage.measure
from .. import imgaug as ia
from .utils import normalize_shape, project_coords
# TODO functions: square(), to_aspect_ratio(), contains_point()
class BoundingBox(object):
"""Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right
corners. Both are given as x and y-coordinates. The corners are intended
to lie inside the bounding box area. As a result, a bounding box that lies
completely inside the image but has maximum extensions would have
coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that
coordinates are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or str, optional
Label of the bounding box, e.g. a string representing the class.
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
if y1 > y2:
y2, y1 = y1, y2
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def coords(self):
"""Get the top-left and bottom-right coordinates as one array.
Returns
-------
ndarray
A ``(N, 2)`` numpy array with ``N=2`` containing the top-left
and bottom-right coordinates.
"""
arr = np.empty((2, 2), dtype=np.float32)
arr[0, :] = (self.x1, self.y1)
arr[1, :] = (self.x2, self.y2)
return arr
@property
def x1_int(self):
"""Get the x-coordinate of the top left corner as an integer.
Returns
-------
int
X-coordinate of the top left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.x1))
@property
def y1_int(self):
"""Get the y-coordinate of the top left corner as an integer.
Returns
-------
int
Y-coordinate of the top left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.y1))
@property
def x2_int(self):
"""Get the x-coordinate of the bottom left corner as an integer.
Returns
-------
int
            X-coordinate of the bottom right corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.x2))
@property
def y2_int(self):
"""Get the y-coordinate of the bottom left corner as an integer.
Returns
-------
int
            Y-coordinate of the bottom right corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.y2))
@property
def height(self):
"""Estimate the height of the bounding box.
Returns
-------
number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""Estimate the width of the bounding box.
Returns
-------
number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""Estimate the area of the bounding box.
Returns
-------
number
Area of the bounding box, i.e. ``height * width``.
"""
return self.height * self.width
# TODO add test for tuple of number
def contains(self, other):
"""Estimate whether the bounding box contains a given point.
Parameters
----------
other : tuple of number or imgaug.augmentables.kps.Keypoint
Point to check for.
Returns
-------
bool
``True`` if the point is contained in the bounding box,
``False`` otherwise.
"""
if isinstance(other, tuple):
x, y = other
else:
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2
# TODO add tests for ndarray inputs
def project(self, from_shape, to_shape):
"""Project the bounding box onto a differently shaped image.
E.g. if the bounding box is on its original image at
``x1=(10 of 100 pixels)`` and ``y1=(20 of 100 pixels)`` and is
projected onto a new image with size ``(width=200, height=200)``,
its new position will be ``(x1=20, y1=40)``.
(Analogous for ``x2``/``y2``.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int or ndarray
Shape of the original image. (Before resize.)
to_shape : tuple of int or ndarray
Shape of the new image. (After resize.)
Returns
-------
imgaug.augmentables.bbs.BoundingBox
``BoundingBox`` instance with new coordinates.
"""
coords_proj = project_coords([(self.x1, self.y1), (self.x2, self.y2)],
from_shape, to_shape)
return self.copy(
x1=coords_proj[0][0],
y1=coords_proj[0][1],
x2=coords_proj[1][0],
y2=coords_proj[1][1],
label=self.label)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all
sides.
top : number, optional
Value by which to extend the bounding box size along its top
side.
right : number, optional
Value by which to extend the bounding box size along its right
side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom
side.
left : number, optional
Value by which to extend the bounding box size along its left
side.
Returns
-------
imgaug.BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""Compute the intersection BB between this BB and another BB.
Note that in extreme cases, the intersection can be a single point.
In that case the intersection bounding box exists and it will be
returned, but it will have a height and width of zero.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.augmentables.bbs.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is
an intersection.
If there is no intersection, the default value will be returned,
which can by anything.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i > x2_i or y1_i > y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""Compute the union BB between this BB and another BB.
This is equivalent to drawing a bounding box around all corner points
of both bounding boxes.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""Compute the IoU between this bounding box and another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B))
/ (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes.
"""
inters = self.intersection(other)
if inters is None:
return 0.0
area_union = self.area + other.area - inters.area
return inters.area / area_union if area_union > 0 else 0.0
def is_fully_within_image(self, image):
"""Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
``True`` if the bounding box is fully inside the image area.
``False`` otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
return (
self.x1 >= 0
and self.x2 < width
and self.y1 >= 0
and self.y2 < height)
def is_partly_within_image(self, image):
"""Estimate whether the BB is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
``True`` if the bounding box is at least partially inside the
image area.
``False`` otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""Estimate whether the BB is partially/fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape and
must contain at least two integers.
fully : bool, optional
Whether to return ``True`` if the bounding box is fully outside
of the image area.
partly : bool, optional
Whether to return ``True`` if the bounding box is at least
            partially outside of the image area.
Returns
-------
bool
``True`` if the bounding box is partially/fully outside of the
image area, depending on defined parameters.
``False`` otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
return fully
@ia.deprecated(alt_func="BoundingBox.clip_out_of_image()",
comment="clip_out_of_image() has the exactly same "
"interface.")
def cut_out_of_image(self, *args, **kwargs):
return self.clip_out_of_image(*args, **kwargs)
def clip_out_of_image(self, image):
"""Clip off all parts of the BB box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape and
must contain at least two integers.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
assert height > 0, (
"Expected image with height>0, got shape %s." % (image.shape,))
assert width > 0, (
"Expected image with width>0, got shape %s." % (image.shape,))
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
# TODO convert this to x/y params?
def shift(self, top=None, right=None, bottom=None, left=None):
"""Move this bounding box along the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift this object *from* the
top (towards the bottom).
right : None or int, optional
Amount of pixels by which to shift this object *from* the
right (towards the left).
bottom : None or int, optional
Amount of pixels by which to shift this object *from* the
bottom (towards the top).
left : None or int, optional
Amount of pixels by which to shift this object *from* the
left (towards the right).
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""Draw the bounding box on an image.
Parameters
----------
image : (H,W,C) ndarray
The image onto which to draw the bounding box.
Currently expected to be ``uint8``.
color : iterable of int, optional
The color to use, corresponding to the channel layout of the
image. Usually RGB.
alpha : float, optional
The transparency of the drawn bounding box, where ``1.0`` denotes
no transparency and ``0.0`` is invisible.
size : int, optional
The thickness of the bounding box in pixels. If the value is
larger than ``1``, then additional pixels will be added around
the bounding box (i.e. extension towards the outside).
copy : bool, optional
Whether to copy the input image or change it in-place.
raise_if_out_of_image : bool, optional
Whether to raise an error if the bounding box is fully outside of
the image. If set to ``False``, no error will be raised and only
the parts inside the image will be drawn.
thickness : None or int, optional
Deprecated.
Returns
-------
(H,W,C) ndarray(uint8)
Image with bounding box drawn on it.
"""
if thickness is not None:
ia.warn_deprecated(
"Usage of argument 'thickness' in BoundingBox.draw_on_image() "
"is deprecated. The argument was renamed to 'size'.")
size = thickness
if raise_if_out_of_image and self.is_out_of_image(image):
raise Exception(
"Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f "
"on image with shape %s." % (
self.x1, self.y1, self.x2, self.y2, image.shape))
result = np.copy(image) if copy else image
if isinstance(color, (tuple, list)):
color = np.uint8(color)
for i in range(size):
y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
# When y values get into the range (H-0.5, H), the *_int functions
# round them to H. That is technically sensible, but in the case
# of drawing means that the border lies just barely outside of
# the image, making the border disappear, even though the BB is
# fully inside the image. Here we correct for that because of
# beauty reasons. Same is the case for x coordinates.
if self.is_fully_within_image(image):
y1 = np.clip(y1, 0, image.shape[0]-1)
y2 = np.clip(y2, 0, image.shape[0]-1)
x1 = np.clip(x1, 0, image.shape[1]-1)
x2 = np.clip(x2, 0, image.shape[1]-1)
y = [y1-i, y1-i, y2+i, y2+i]
x = [x1-i, x2+i, x2+i, x1-i]
rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
if alpha >= 0.99:
result[rr, cc, :] = color
else:
if ia.is_float_array(result):
# TODO use blend_alpha here
result[rr, cc, :] = (
(1 - alpha) * result[rr, cc, :]
+ alpha * color)
result = np.clip(result, 0, 255)
else:
input_dtype = result.dtype
result = result.astype(np.float32)
result[rr, cc, :] = (
(1 - alpha) * result[rr, cc, :]
+ alpha * color)
result = np.clip(result, 0, 255).astype(input_dtype)
return result
# TODO add tests for pad and pad_max
def extract_from_image(self, image, pad=True, pad_max=None,
prevent_zero_size=True):
"""Extract the image pixels within the bounding box.
This function will zero-pad the image if the bounding box is
partially/fully outside of the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the bounding box.
pad : bool, optional
Whether to zero-pad the image if the object is partially/fully
outside of it.
pad_max : None or int, optional
            The maximum number of pixels that may be zero-padded on any side,
i.e. if this has value ``N`` the total maximum of added pixels
is ``4*N``.
This option exists to prevent extremely large images as a result of
single points being moved very far away during augmentation.
prevent_zero_size : bool, optional
Whether to prevent the height or width of the extracted image from
becoming zero.
If this is set to ``True`` and the height or width of the bounding
box is below ``1``, the height/width will be increased to ``1``.
This can be useful to prevent problems, e.g. with image saving or
plotting.
If it is set to ``False``, images will be returned as ``(H', W')``
or ``(H', W', 3)`` with ``H`` or ``W`` potentially being 0.
Returns
-------
(H',W') ndarray or (H',W',C) ndarray
Pixels within the bounding box. Zero-padded if the bounding box
is partially/fully outside of the image.
            If `prevent_zero_size` is activated, it is guaranteed that
``H'>0`` and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
"""
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
height, width = image.shape[0], image.shape[1]
x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
# When y values get into the range (H-0.5, H), the *_int functions
# round them to H. That is technically sensible, but in the case of
# extraction leads to a black border, which is both ugly and
# unexpected after calling cut_out_of_image(). Here we correct for
# that because of beauty reasons. Same is the case for x coordinates.
fully_within = self.is_fully_within_image(image)
if fully_within:
y1, y2 = np.clip([y1, y2], 0, height-1)
x1, x2 = np.clip([x1, x2], 0, width-1)
# TODO add test
if prevent_zero_size:
if abs(x2 - x1) < 1:
x2 = x1 + 1
if abs(y2 - y1) < 1:
y2 = y1 + 1
if pad:
# if the bb is outside of the image area, the following pads the
# image first with black pixels until the bb is inside the image
# and only then extracts the image area
# TODO probably more efficient to initialize an array of zeros
# and copy only the portions of the bb into that array that
# are natively inside the image area
if x1 < 0:
pad_left = abs(x1)
x2 = x2 + pad_left
width = width + pad_left
x1 = 0
if y1 < 0:
pad_top = abs(y1)
y2 = y2 + pad_top
height = height + pad_top
y1 = 0
if x2 >= width:
pad_right = x2 - width
if y2 >= height:
pad_bottom = y2 - height
paddings = [pad_top, pad_right, pad_bottom, pad_left]
any_padded = any([val > 0 for val in paddings])
if any_padded:
if pad_max is None:
pad_max = max(paddings)
image = ia.pad(
image,
top=min(pad_top, pad_max),
right=min(pad_right, pad_max),
bottom=min(pad_bottom, pad_max),
left=min(pad_left, pad_max)
)
return image[y1:y2, x1:x2]
else:
within_image = (
(0, 0, 0, 0)
<= (x1, y1, x2, y2)
< (width, height, width, height)
)
out_height, out_width = (y2 - y1), (x2 - x1)
nonzero_height = (out_height > 0)
nonzero_width = (out_width > 0)
if within_image and nonzero_height and nonzero_width:
return image[y1:y2, x1:x2]
if prevent_zero_size:
out_height = 1
out_width = 1
else:
out_height = 0
out_width = 0
if image.ndim == 2:
return np.zeros((out_height, out_width), dtype=image.dtype)
return np.zeros((out_height, out_width, image.shape[-1]),
dtype=image.dtype)
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
"""Convert the BB's corners to keypoints (clockwise, from top left).
Returns
-------
list of imgaug.augmentables.kps.Keypoint
Corners of the bounding box as keypoints.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.kps import Keypoint
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def coords_almost_equals(self, other, max_distance=1e-4):
"""Estimate if this and another BB have almost identical coordinates.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox or iterable
The other bounding box with which to compare this one.
If this is an ``iterable``, it is assumed to represent the top-left
and bottom-right coordinates of that bounding box, given as e.g.
an ``(2,2)`` ndarray or an ``(4,)`` ndarray or as a similar list.
max_distance : number, optional
The maximum euclidean distance between a corner on one bounding
box and the closest corner on the other bounding box. If the
distance is exceeded for any such pair, the two BBs are not
viewed as equal.
Returns
-------
bool
Whether the two bounding boxes have almost identical corner
coordinates.
"""
if ia.is_np_array(other):
# we use flat here in case other is (N,2) instead of (4,)
coords_b = other.flat
elif ia.is_iterable(other):
coords_b = list(ia.flatten(other))
else:
assert isinstance(other, BoundingBox), (
"Expected 'other' to be an iterable containing two "
"(x,y)-coordinate pairs or a BoundingBox. "
"Got type %s." % (type(other),))
coords_b = other.coords.flat
coords_a = self.coords
return np.allclose(coords_a.flat, coords_b, atol=max_distance, rtol=0)
def almost_equals(self, other, max_distance=1e-4):
"""Compare this and another BB's label and coordinates.
This is the same as
:func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals` but
additionally compares the labels.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox or iterable
The other object to compare against. Expected to be a
``BoundingBox``.
max_distance : number, optional
See
:func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals`.
Returns
-------
bool
``True`` if the coordinates are almost equal and additionally
the labels are equal. Otherwise ``False``.
"""
if self.label != other.label:
return False
return self.coords_almost_equals(other, max_distance=max_distance)
@classmethod
def from_point_soup(cls, xy):
"""Convert a ``(2P,) or (P,2) ndarray`` to a BB instance.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xy : (2P,) ndarray or (P, 2) array or iterable of number or iterable of iterable of number
Array containing ``P`` points in xy-form denoting a soup of
points around which to place a bounding box.
The array should usually be of dtype ``float32``.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Bounding box around the points.
"""
xy = np.array(xy, dtype=np.float32)
assert len(xy) > 0, (
"Expected to get at least one point to place a bounding box "
"around, got shape %s." % (xy.shape,))
assert xy.ndim == 1 or (xy.ndim == 2 and xy.shape[-1] == 2), (
"Expected input array of shape (P,) or (P, 2), "
"got shape %s." % (xy.shape,))
if xy.ndim == 1:
xy = xy.reshape((-1, 2))
x1, y1 = np.min(xy, axis=0)
x2, y2 = np.max(xy, axis=0)
return cls(x1=x1, y1=y1, x2=x2, y2=y2)
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""Create a shallow copy of this BoundingBox instance.
Parameters
----------
x1 : None or number
If not ``None``, then the ``x1`` coordinate of the copied object
will be set to this value.
y1 : None or number
If not ``None``, then the ``y1`` coordinate of the copied object
will be set to this value.
x2 : None or number
If not ``None``, then the ``x2`` coordinate of the copied object
will be set to this value.
y2 : None or number
If not ``None``, then the ``y2`` coordinate of the copied object
will be set to this value.
label : None or string
If not ``None``, then the ``label`` of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=copy.deepcopy(self.label) if label is None else label
)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not ``None``, then the ``x1`` coordinate of the copied object
will be set to this value.
y1 : None or number
If not ``None``, then the ``y1`` coordinate of the copied object
will be set to this value.
x2 : None or number
If not ``None``, then the ``x2`` coordinate of the copied object
will be set to this value.
y2 : None or number
If not ``None``, then the ``y2`` coordinate of the copied object
will be set to this value.
label : None or string
If not ``None``, then the ``label`` of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Deep copy.
"""
# TODO write specific copy routine with deepcopy for label and remove
# the deepcopy from copy()
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (
self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
"""Container for the list of all bounding boxes on a single image.
Parameters
----------
bounding_boxes : list of imgaug.augmentables.bbs.BoundingBox
List of bounding boxes on the image.
shape : tuple of int or ndarray
The shape of the image on which the objects are placed.
Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting
such an image shape.
Examples
--------
>>> import numpy as np
>>> from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
>>>
>>> image = np.zeros((100, 100))
>>> bbs = [
>>> BoundingBox(x1=10, y1=20, x2=20, y2=30),
>>> BoundingBox(x1=25, y1=50, x2=30, y2=70)
>>> ]
>>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
"""
def __init__(self, bounding_boxes, shape):
self.bounding_boxes = bounding_boxes
self.shape = normalize_shape(shape)
@property
def items(self):
"""Get the bounding boxes in this container.
Returns
-------
list of BoundingBox
Bounding boxes within this container.
"""
return self.bounding_boxes
# TODO remove this? here it is image height, but in BoundingBox it is
# bounding box height
@property
def height(self):
"""Get the height of the image on which the bounding boxes fall.
Returns
-------
int
Image height.
"""
return self.shape[0]
# TODO remove this? here it is image width, but in BoundingBox it is
# bounding box width
@property
def width(self):
"""Get the width of the image on which the bounding boxes fall.
Returns
-------
int
Image width.
"""
return self.shape[1]
@property
def empty(self):
"""Determine whether this instance contains zero bounding boxes.
Returns
-------
bool
True if this object contains zero bounding boxes.
"""
return len(self.bounding_boxes) == 0
def on(self, image):
"""Project bounding boxes from one image (shape) to a another one.
Parameters
----------
image : ndarray or tuple of int
New image onto which the bounding boxes are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing the same bounding boxes after projection to
the new image shape.
"""
shape = normalize_shape(image)
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
bounding_boxes = [bb.project(self.shape, shape)
for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bounding_boxes, shape)
@classmethod
def from_xyxy_array(cls, xyxy, shape):
"""Convert an ``(N, 4) or (N, 2, 2) ndarray`` to a BBsOI instance.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xyxy : (N, 4) ndarray or (N, 2, 2) array
Array containing the corner coordinates of ``N`` bounding boxes.
Each bounding box is represented by its top-left and bottom-right
coordinates.
The array should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing a list of :class:`BoundingBox` instances
derived from the provided corner coordinates.
"""
xyxy = np.array(xyxy, dtype=np.float32)
# note that np.array([]) is (0,), not (0, 2)
if xyxy.shape[0] == 0:
return BoundingBoxesOnImage([], shape)
assert (
(xyxy.ndim == 2 and xyxy.shape[-1] == 4)
or (xyxy.ndim == 3 and xyxy.shape[1:3] == (2, 2))), (
"Expected input array of shape (N, 4) or (N, 2, 2), "
"got shape %s." % (xyxy.shape,))
xyxy = xyxy.reshape((-1, 2, 2))
boxes = [BoundingBox.from_point_soup(row) for row in xyxy]
return cls(boxes, shape)
@classmethod
def from_point_soups(cls, xy, shape):
"""Convert an ``(N, 2P) or (N, P, 2) ndarray`` to a BBsOI instance.
Parameters
----------
xy : (N, 2P) ndarray or (N, P, 2) array or iterable of iterable of number or iterable of iterable of iterable of number
Array containing the corner coordinates of ``N`` bounding boxes.
Each bounding box is represented by a soup of ``P`` points.
If ``(N, P)`` then the second axis is expected to be in
xy-form (e.g. ``x1``, ``y1``, ``x2``, ``y2``, ...).
The final bounding box coordinates will be derived using ``min``
and ``max`` operations on the xy-values.
The array should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing a list of :class:`BoundingBox` instances
derived from the provided point soups.
"""
xy = np.array(xy, dtype=np.float32)
# from_xy_array() already checks the ndim/shape, so we don't have to
# do it here
boxes = [BoundingBox.from_point_soup(row) for row in xy]
return cls(boxes, shape)
def to_xyxy_array(self, dtype=np.float32):
"""Convert the ``BoundingBoxesOnImage`` object to an ``(N,4) ndarray``.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.
Parameters
----------
dtype : numpy.dtype, optional
Desired output datatype of the ndarray.
Returns
-------
ndarray
``(N,4) ndarray``, where ``N`` denotes the number of bounding
boxes and ``4`` denotes the top-left and bottom-right bounding
box corner coordinates in form ``(x1, y1, x2, y2)``.
"""
xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
return xyxy_array.astype(dtype)
def to_xy_array(self):
"""Convert the ``BoundingBoxesOnImage`` object to an ``(N,2) ndarray``.
Returns
-------
ndarray
``(2*B,2) ndarray`` of xy-coordinates, where ``B`` denotes the
number of bounding boxes.
"""
return self.to_xyxy_array().reshape((-1, 2))
def fill_from_xyxy_array_(self, xyxy):
"""Modify the BB coordinates of this instance in-place.
.. note ::
This currently expects exactly one entry in `xyxy` per bounding
            box in this instance. (I.e. two corner coordinates per instance.)
Otherwise, an ``AssertionError`` will be raised.
.. note ::
This method will automatically flip x-coordinates if ``x1>x2``
for a bounding box. (Analogous for y-coordinates.)
Parameters
----------
xyxy : (N, 4) ndarray or iterable of iterable of number
Coordinates of ``N`` bounding boxes on an image, given as
a ``(N,4)`` array of two corner xy-coordinates per bounding box.
``N`` must match the number of bounding boxes in this instance.
Returns
-------
BoundingBoxesOnImage
This instance itself, with updated bounding box coordinates.
Note that the instance was modified in-place.
"""
xyxy = np.array(xyxy, dtype=np.float32)
# note that np.array([]) is (0,), not (0, 4)
assert xyxy.shape[0] == 0 or (xyxy.ndim == 2 and xyxy.shape[-1] == 4), (
"Expected input array to have shape (N,4), "
"got shape %s." % (xyxy.shape,))
assert len(xyxy) == len(self.bounding_boxes), (
"Expected to receive an array with as many rows there are "
"bounding boxes in this instance. Got %d rows, expected %d." % (
len(xyxy), len(self.bounding_boxes)))
for bb, (x1, y1, x2, y2) in zip(self.bounding_boxes, xyxy):
bb.x1 = min([x1, x2])
bb.y1 = min([y1, y2])
bb.x2 = max([x1, x2])
bb.y2 = max([y1, y2])
return self
def fill_from_xy_array_(self, xy):
"""Modify the BB coordinates of this instance in-place.
See
:func:`imgaug.augmentables.bbs.BoundingBoxesOnImage.fill_from_xyxy_array_`.
Parameters
----------
xy : (2*B, 2) ndarray or iterable of iterable of number
Coordinates of ``B`` bounding boxes on an image, given as
a ``(2*B,2)`` array of two corner xy-coordinates per bounding box.
``B`` must match the number of bounding boxes in this instance.
Returns
-------
BoundingBoxesOnImage
This instance itself, with updated bounding box coordinates.
Note that the instance was modified in-place.
"""
xy = np.array(xy, dtype=np.float32)
return self.fill_from_xyxy_array_(xy.reshape((-1, 4)))
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as set in
``BoundingBoxesOnImage.shape``.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all bounding boxes.
If a single ``int`` ``C``, then that is equivalent to ``(C,C,C)``.
alpha : float, optional
Alpha/transparency of the bounding box.
size : int, optional
Thickness in pixels.
copy : bool, optional
Whether to copy the image before drawing the bounding boxes.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any bounding box is outside of the
image.
thickness : None or int, optional
Deprecated.
Returns
-------
(H,W,3) ndarray
Image with drawn bounding boxes.
"""
image = np.copy(image) if copy else image
for bb in self.bounding_boxes:
image = bb.draw_on_image(
image,
color=color,
alpha=alpha,
size=size,
copy=False,
raise_if_out_of_image=raise_if_out_of_image,
thickness=thickness
)
return image
def remove_out_of_image(self, fully=True, partly=False):
"""Remove all BBs that are fully/partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove bounding boxes that are fully outside of the
image.
partly : bool, optional
Whether to remove bounding boxes that are partially outside of
the image.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Reduced set of bounding boxes, with those that were
fully/partially outside of the image being removed.
"""
bbs_clean = [
bb
for bb
in self.bounding_boxes
if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
return BoundingBoxesOnImage(bbs_clean, shape=self.shape)
@ia.deprecated(alt_func="BoundingBoxesOnImage.clip_out_of_image()",
comment="clip_out_of_image() has the exactly same "
"interface.")
def cut_out_of_image(self):
return self.clip_out_of_image()
def clip_out_of_image(self):
"""Clip off all parts from all BBs that are outside of the image.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions.
"""
bbs_cut = [
bb.clip_out_of_image(self.shape)
for bb
in self.bounding_boxes
if bb.is_partly_within_image(self.shape)]
return BoundingBoxesOnImage(bbs_cut, shape=self.shape)
def shift(self, top=None, right=None, bottom=None, left=None):
"""Move all all BBs along the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift all objects *from* the
top (towards the bottom).
right : None or int, optional
Amount of pixels by which to shift all objects *from* the
            right (towards the left).
bottom : None or int, optional
Amount of pixels by which to shift all objects *from* the
bottom (towards the top).
left : None or int, optional
Amount of pixels by which to shift all objects *from* the
left (towards the right).
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Shifted bounding boxes.
"""
bbs_new = [
bb.shift(top=top, right=right, bottom=bottom, left=left)
for bb
in self.bounding_boxes]
return BoundingBoxesOnImage(bbs_new, shape=self.shape)
def to_keypoints_on_image(self):
"""Convert the bounding boxes to one ``KeypointsOnImage`` instance.
Returns
-------
imgaug.augmentables.kps.KeypointsOnImage
A keypoints instance containing ``N*4`` coordinates for ``N``
bounding boxes. Order matches the order in ``bounding_boxes``.
"""
from .kps import KeypointsOnImage
# This currently uses 4 points instead of 2 points as the method
# is primarily used during augmentation and 4 points are overall
# the better choice there.
arr = np.zeros((len(self.bounding_boxes), 2*4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
arr[i] = [
box.x1, box.y1,
box.x2, box.y1,
box.x2, box.y2,
box.x1, box.y2
]
return KeypointsOnImage.from_xy_array(
arr.reshape((-1, 2)),
shape=self.shape
)
def invert_to_keypoints_on_image_(self, kpsoi):
"""Invert the output of ``to_keypoints_on_image()`` in-place.
This function writes in-place into this ``BoundingBoxesOnImage``
instance.
Parameters
----------
kpsoi : imgaug.augmentables.kps.KeypointsOnImages
Keypoints to convert back to bounding boxes, i.e. the outputs
of ``to_keypoints_on_image()``.
Returns
-------
BoundingBoxesOnImage
Bounding boxes container with updated coordinates.
Note that the instance is also updated in-place.
"""
assert len(kpsoi.keypoints) == len(self.bounding_boxes) * 4, (
"Expected %d coordinates, got %d." % (
                len(self.bounding_boxes) * 4, len(kpsoi.keypoints)))
for i, bb in enumerate(self.bounding_boxes):
xx = [kpsoi.keypoints[4*i+0].x, kpsoi.keypoints[4*i+1].x,
kpsoi.keypoints[4*i+2].x, kpsoi.keypoints[4*i+3].x]
yy = [kpsoi.keypoints[4*i+0].y, kpsoi.keypoints[4*i+1].y,
kpsoi.keypoints[4*i+2].y, kpsoi.keypoints[4*i+3].y]
bb.x1 = min(xx)
bb.y1 = min(yy)
bb.x2 = max(xx)
bb.y2 = max(yy)
self.shape = kpsoi.shape
return self
def copy(self):
"""Create a shallow copy of the ``BoundingBoxesOnImage`` instance.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""Create a deep copy of the ``BoundingBoxesOnImage`` object.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Deep copy.
"""
# Manual copy is far faster than deepcopy for BoundingBoxesOnImage,
# so use manual copy here too
bbs = [bb.deepcopy() for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs, tuple(self.shape))
def __repr__(self):
return self.__str__()
def __str__(self):
return (
"BoundingBoxesOnImage(%s, shape=%s)"
% (str(self.bounding_boxes), self.shape))
| [
"numpy.clip",
"numpy.copy",
"numpy.uint8",
"numpy.allclose",
"copy.deepcopy",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.min",
"numpy.finfo",
"copy.copy",
"imgaug.augmentables.kps.Keypoint",
"numpy.round"
]
| [((1845, 1879), 'numpy.empty', 'np.empty', (['(2, 2)'], {'dtype': 'np.float32'}), '((2, 2), dtype=np.float32)\n', (1853, 1879), True, 'import numpy as np\n'), ((14585, 14617), 'numpy.clip', 'np.clip', (['self.x1', '(0)', '(width - eps)'], {}), '(self.x1, 0, width - eps)\n', (14592, 14617), True, 'import numpy as np\n'), ((14631, 14663), 'numpy.clip', 'np.clip', (['self.x2', '(0)', '(width - eps)'], {}), '(self.x2, 0, width - eps)\n', (14638, 14663), True, 'import numpy as np\n'), ((14677, 14710), 'numpy.clip', 'np.clip', (['self.y1', '(0)', '(height - eps)'], {}), '(self.y1, 0, height - eps)\n', (14684, 14710), True, 'import numpy as np\n'), ((14724, 14757), 'numpy.clip', 'np.clip', (['self.y2', '(0)', '(height - eps)'], {}), '(self.y2, 0, height - eps)\n', (14731, 14757), True, 'import numpy as np\n'), ((27448, 27511), 'numpy.allclose', 'np.allclose', (['coords_a.flat', 'coords_b'], {'atol': 'max_distance', 'rtol': '(0)'}), '(coords_a.flat, coords_b, atol=max_distance, rtol=0)\n', (27459, 27511), True, 'import numpy as np\n'), ((29124, 29154), 'numpy.array', 'np.array', (['xy'], {'dtype': 'np.float32'}), '(xy, dtype=np.float32)\n', (29132, 29154), True, 'import numpy as np\n'), ((29568, 29586), 'numpy.min', 'np.min', (['xy'], {'axis': '(0)'}), '(xy, axis=0)\n', (29574, 29586), True, 'import numpy as np\n'), ((29604, 29622), 'numpy.max', 'np.max', (['xy'], {'axis': '(0)'}), '(xy, axis=0)\n', (29610, 29622), True, 'import numpy as np\n'), ((36293, 36325), 'numpy.array', 'np.array', (['xyxy'], {'dtype': 'np.float32'}), '(xyxy, dtype=np.float32)\n', (36301, 36325), True, 'import numpy as np\n'), ((38022, 38052), 'numpy.array', 'np.array', (['xy'], {'dtype': 'np.float32'}), '(xy, dtype=np.float32)\n', (38030, 38052), True, 'import numpy as np\n'), ((40495, 40527), 'numpy.array', 'np.array', (['xyxy'], {'dtype': 'np.float32'}), '(xyxy, dtype=np.float32)\n', (40503, 40527), True, 'import numpy as np\n'), ((41999, 42029), 'numpy.array', 'np.array', (['xy'], {'dtype': 'np.float32'}), '(xy, dtype=np.float32)\n', (42007, 42029), True, 'import numpy as np\n'), ((49025, 49040), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (49034, 49040), False, 'import copy\n'), ((2345, 2362), 'numpy.round', 'np.round', (['self.x1'], {}), '(self.x1)\n', (2353, 2362), True, 'import numpy as np\n'), ((2732, 2749), 'numpy.round', 'np.round', (['self.y1'], {}), '(self.y1)\n', (2740, 2749), True, 'import numpy as np\n'), ((3125, 3142), 'numpy.round', 'np.round', (['self.x2'], {}), '(self.x2)\n', (3133, 3142), True, 'import numpy as np\n'), ((3518, 3535), 'numpy.round', 'np.round', (['self.y2'], {}), '(self.y2)\n', (3526, 3535), True, 'import numpy as np\n'), ((12070, 12090), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (12078, 12090), True, 'import numpy as np\n'), ((14547, 14567), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (14555, 14567), True, 'import numpy as np\n'), ((18292, 18306), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (18299, 18306), True, 'import numpy as np\n'), ((18392, 18407), 'numpy.uint8', 'np.uint8', (['color'], {}), '(color)\n', (18400, 18407), True, 'import numpy as np\n'), ((22753, 22785), 'numpy.clip', 'np.clip', (['[y1, y2]', '(0)', '(height - 1)'], {}), '([y1, y2], 0, height - 1)\n', (22760, 22785), True, 'import numpy as np\n'), ((22805, 22836), 'numpy.clip', 'np.clip', (['[x1, x2]', '(0)', '(width - 1)'], {}), '([x1, x2], 0, width - 1)\n', (22812, 22836), True, 'import numpy as np\n'), ((25169, 25238), 'numpy.zeros', 'np.zeros', 
(['(out_height, out_width, image.shape[-1])'], {'dtype': 'image.dtype'}), '((out_height, out_width, image.shape[-1]), dtype=image.dtype)\n', (25177, 25238), True, 'import numpy as np\n'), ((25727, 25757), 'imgaug.augmentables.kps.Keypoint', 'Keypoint', ([], {'x': 'self.x1', 'y': 'self.y1'}), '(x=self.x1, y=self.y1)\n', (25735, 25757), False, 'from imgaug.augmentables.kps import Keypoint\n'), ((25771, 25801), 'imgaug.augmentables.kps.Keypoint', 'Keypoint', ([], {'x': 'self.x2', 'y': 'self.y1'}), '(x=self.x2, y=self.y1)\n', (25779, 25801), False, 'from imgaug.augmentables.kps import Keypoint\n'), ((25815, 25845), 'imgaug.augmentables.kps.Keypoint', 'Keypoint', ([], {'x': 'self.x2', 'y': 'self.y2'}), '(x=self.x2, y=self.y2)\n', (25823, 25845), False, 'from imgaug.augmentables.kps import Keypoint\n'), ((25859, 25889), 'imgaug.augmentables.kps.Keypoint', 'Keypoint', ([], {'x': 'self.x1', 'y': 'self.y2'}), '(x=self.x1, y=self.y2)\n', (25867, 25889), False, 'from imgaug.augmentables.kps import Keypoint\n'), ((43348, 43362), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (43355, 43362), True, 'import numpy as np\n'), ((19038, 19072), 'numpy.clip', 'np.clip', (['y1', '(0)', '(image.shape[0] - 1)'], {}), '(y1, 0, image.shape[0] - 1)\n', (19045, 19072), True, 'import numpy as np\n'), ((19092, 19126), 'numpy.clip', 'np.clip', (['y2', '(0)', '(image.shape[0] - 1)'], {}), '(y2, 0, image.shape[0] - 1)\n', (19099, 19126), True, 'import numpy as np\n'), ((19146, 19180), 'numpy.clip', 'np.clip', (['x1', '(0)', '(image.shape[1] - 1)'], {}), '(x1, 0, image.shape[1] - 1)\n', (19153, 19180), True, 'import numpy as np\n'), ((19200, 19234), 'numpy.clip', 'np.clip', (['x2', '(0)', '(image.shape[1] - 1)'], {}), '(x2, 0, image.shape[1] - 1)\n', (19207, 19234), True, 'import numpy as np\n'), ((25097, 25149), 'numpy.zeros', 'np.zeros', (['(out_height, out_width)'], {'dtype': 'image.dtype'}), '((out_height, out_width), dtype=image.dtype)\n', (25105, 25149), True, 'import numpy as np\n'), ((19746, 19769), 'numpy.clip', 'np.clip', (['result', '(0)', '(255)'], {}), '(result, 0, 255)\n', (19753, 19769), True, 'import numpy as np\n'), ((30907, 30932), 'copy.deepcopy', 'copy.deepcopy', (['self.label'], {}), '(self.label)\n', (30920, 30932), False, 'import copy\n'), ((20062, 20085), 'numpy.clip', 'np.clip', (['result', '(0)', '(255)'], {}), '(result, 0, 255)\n', (20069, 20085), True, 'import numpy as np\n')] |
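# A brief sketch of how the bounding-box helpers above compose, assuming the
# imgaug package is importable; the coordinates and image shapes are arbitrary
# example values.
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage

box_a = BoundingBox(x1=10, y1=20, x2=40, y2=60, label="person")
box_b = BoundingBox(x1=30, y1=40, x2=80, y2=90)
inter = box_a.intersection(box_b)          # overlap region (30, 40, 40, 60)
print(box_a.iou(box_b), inter.area)        # IoU = intersection / union area
bbs = BoundingBoxesOnImage([box_a, box_b], shape=(100, 100, 3))
bbs_half = bbs.on((50, 50, 3))             # project onto a resized 50x50 image
print(bbs_half.to_xyxy_array())            # (2, 4) float32 corner coordinates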
import tensorflow as tf
def _smooth_l1_loss(y_true, y_pred):
t = tf.abs(y_pred - y_true)
return tf.where(t < 1, 0.5 * t ** 2, t - 0.5)
def MultiBoxLoss(num_class=2, neg_pos_ratio=3):
"""multi-box loss"""
def multi_box_loss(y_true, y_pred):
num_batch = tf.shape(y_true)[0]
num_prior = tf.shape(y_true)[1]
loc_pred = tf.reshape(y_pred[0], [num_batch * num_prior, 4])
landm_pred = tf.reshape(y_pred[1], [num_batch * num_prior, 8])
class_pred = tf.reshape(y_pred[2], [num_batch * num_prior, num_class])
loc_true = tf.reshape(y_true[..., :4], [num_batch * num_prior, 4])
landm_true = tf.reshape(y_true[..., 4:12], [num_batch * num_prior, 8])
landm_valid = tf.reshape(y_true[..., 12], [num_batch * num_prior, 1])
class_true = tf.reshape(y_true[..., 13], [num_batch * num_prior, 1])
# define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore)
# landm_valid = 1 (w landm), 0 (w/o landm)
mask_pos = tf.equal(class_true, 1)
mask_neg = tf.equal(class_true, 0)
mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos)
# landm loss (smooth L1)
mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b),
tf.boolean_mask(landm_pred, mask_landm_b))
loss_landm = tf.reduce_mean(loss_landm)
# localization loss (smooth L1)
mask_pos_b = tf.broadcast_to(mask_pos, tf.shape(loc_true))
loss_loc = _smooth_l1_loss(tf.boolean_mask(loc_true, mask_pos_b),
tf.boolean_mask(loc_pred, mask_pos_b))
loss_loc = tf.reduce_mean(loss_loc)
# classification loss (crossentropy)
# 1. compute max conf across batch for hard negative mining
loss_class = tf.where(mask_neg,
1 - class_pred[:, 0][..., tf.newaxis], 0)
# 2. hard negative mining
loss_class = tf.reshape(loss_class, [num_batch, num_prior])
loss_class_idx = tf.argsort(loss_class, axis=1, direction='DESCENDING')
loss_class_idx_rank = tf.argsort(loss_class_idx, axis=1)
mask_pos_per_batch = tf.reshape(mask_pos, [num_batch, num_prior])
num_pos_per_batch = tf.reduce_sum(
tf.cast(mask_pos_per_batch, tf.float32), 1, keepdims=True)
num_pos_per_batch = tf.maximum(num_pos_per_batch, 1)
num_neg_per_batch = tf.minimum(neg_pos_ratio * num_pos_per_batch,
tf.cast(num_prior, tf.float32) - 1)
mask_hard_neg = tf.reshape(
tf.cast(loss_class_idx_rank, tf.float32) < num_neg_per_batch,
[num_batch * num_prior, 1])
# 3. classification loss including positive and negative examples
loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg)
loss_class_mask_b = tf.broadcast_to(loss_class_mask,
tf.shape(class_pred))
filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32),
loss_class_mask)
filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b)
filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class])
loss_class = tf.keras.losses.sparse_categorical_crossentropy(
y_true=filter_class_true, y_pred=filter_class_pred)
loss_class = tf.reduce_mean(loss_class)
return loss_loc, loss_landm, loss_class
return multi_box_loss
| [
"tensorflow.equal",
"tensorflow.maximum",
"tensorflow.shape",
"tensorflow.logical_or",
"tensorflow.boolean_mask",
"tensorflow.argsort",
"tensorflow.where",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.reshape",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.abs"
]
| [((71, 94), 'tensorflow.abs', 'tf.abs', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (77, 94), True, 'import tensorflow as tf\n'), ((106, 144), 'tensorflow.where', 'tf.where', (['(t < 1)', '(0.5 * t ** 2)', '(t - 0.5)'], {}), '(t < 1, 0.5 * t ** 2, t - 0.5)\n', (114, 144), True, 'import tensorflow as tf\n'), ((360, 409), 'tensorflow.reshape', 'tf.reshape', (['y_pred[0]', '[num_batch * num_prior, 4]'], {}), '(y_pred[0], [num_batch * num_prior, 4])\n', (370, 409), True, 'import tensorflow as tf\n'), ((431, 480), 'tensorflow.reshape', 'tf.reshape', (['y_pred[1]', '[num_batch * num_prior, 8]'], {}), '(y_pred[1], [num_batch * num_prior, 8])\n', (441, 480), True, 'import tensorflow as tf\n'), ((502, 559), 'tensorflow.reshape', 'tf.reshape', (['y_pred[2]', '[num_batch * num_prior, num_class]'], {}), '(y_pred[2], [num_batch * num_prior, num_class])\n', (512, 559), True, 'import tensorflow as tf\n'), ((579, 634), 'tensorflow.reshape', 'tf.reshape', (['y_true[..., :4]', '[num_batch * num_prior, 4]'], {}), '(y_true[..., :4], [num_batch * num_prior, 4])\n', (589, 634), True, 'import tensorflow as tf\n'), ((656, 713), 'tensorflow.reshape', 'tf.reshape', (['y_true[..., 4:12]', '[num_batch * num_prior, 8]'], {}), '(y_true[..., 4:12], [num_batch * num_prior, 8])\n', (666, 713), True, 'import tensorflow as tf\n'), ((736, 791), 'tensorflow.reshape', 'tf.reshape', (['y_true[..., 12]', '[num_batch * num_prior, 1]'], {}), '(y_true[..., 12], [num_batch * num_prior, 1])\n', (746, 791), True, 'import tensorflow as tf\n'), ((813, 868), 'tensorflow.reshape', 'tf.reshape', (['y_true[..., 13]', '[num_batch * num_prior, 1]'], {}), '(y_true[..., 13], [num_batch * num_prior, 1])\n', (823, 868), True, 'import tensorflow as tf\n'), ((1033, 1056), 'tensorflow.equal', 'tf.equal', (['class_true', '(1)'], {}), '(class_true, 1)\n', (1041, 1056), True, 'import tensorflow as tf\n'), ((1076, 1099), 'tensorflow.equal', 'tf.equal', (['class_true', '(0)'], {}), '(class_true, 0)\n', (1084, 1099), True, 'import tensorflow as tf\n'), ((1460, 1486), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_landm'], {}), '(loss_landm)\n', (1474, 1486), True, 'import tensorflow as tf\n'), ((1762, 1786), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_loc'], {}), '(loss_loc)\n', (1776, 1786), True, 'import tensorflow as tf\n'), ((1922, 1982), 'tensorflow.where', 'tf.where', (['mask_neg', '(1 - class_pred[:, 0][..., tf.newaxis])', '(0)'], {}), '(mask_neg, 1 - class_pred[:, 0][..., tf.newaxis], 0)\n', (1930, 1982), True, 'import tensorflow as tf\n'), ((2069, 2115), 'tensorflow.reshape', 'tf.reshape', (['loss_class', '[num_batch, num_prior]'], {}), '(loss_class, [num_batch, num_prior])\n', (2079, 2115), True, 'import tensorflow as tf\n'), ((2141, 2195), 'tensorflow.argsort', 'tf.argsort', (['loss_class'], {'axis': '(1)', 'direction': '"""DESCENDING"""'}), "(loss_class, axis=1, direction='DESCENDING')\n", (2151, 2195), True, 'import tensorflow as tf\n'), ((2226, 2260), 'tensorflow.argsort', 'tf.argsort', (['loss_class_idx'], {'axis': '(1)'}), '(loss_class_idx, axis=1)\n', (2236, 2260), True, 'import tensorflow as tf\n'), ((2290, 2334), 'tensorflow.reshape', 'tf.reshape', (['mask_pos', '[num_batch, num_prior]'], {}), '(mask_pos, [num_batch, num_prior])\n', (2300, 2334), True, 'import tensorflow as tf\n'), ((2481, 2513), 'tensorflow.maximum', 'tf.maximum', (['num_pos_per_batch', '(1)'], {}), '(num_pos_per_batch, 1)\n', (2491, 2513), True, 'import tensorflow as tf\n'), ((2914, 2952), 'tensorflow.logical_or', 'tf.logical_or', (['mask_pos', 
'mask_hard_neg'], {}), '(mask_pos, mask_hard_neg)\n', (2927, 2952), True, 'import tensorflow as tf\n'), ((3244, 3290), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['class_pred', 'loss_class_mask_b'], {}), '(class_pred, loss_class_mask_b)\n', (3259, 3290), True, 'import tensorflow as tf\n'), ((3319, 3365), 'tensorflow.reshape', 'tf.reshape', (['filter_class_pred', '[-1, num_class]'], {}), '(filter_class_pred, [-1, num_class])\n', (3329, 3365), True, 'import tensorflow as tf\n'), ((3387, 3490), 'tensorflow.keras.losses.sparse_categorical_crossentropy', 'tf.keras.losses.sparse_categorical_crossentropy', ([], {'y_true': 'filter_class_true', 'y_pred': 'filter_class_pred'}), '(y_true=filter_class_true,\n y_pred=filter_class_pred)\n', (3434, 3490), True, 'import tensorflow as tf\n'), ((3521, 3547), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_class'], {}), '(loss_class)\n', (3535, 3547), True, 'import tensorflow as tf\n'), ((280, 296), 'tensorflow.shape', 'tf.shape', (['y_true'], {}), '(y_true)\n', (288, 296), True, 'import tensorflow as tf\n'), ((320, 336), 'tensorflow.shape', 'tf.shape', (['y_true'], {}), '(y_true)\n', (328, 336), True, 'import tensorflow as tf\n'), ((1136, 1160), 'tensorflow.equal', 'tf.equal', (['landm_valid', '(1)'], {}), '(landm_valid, 1)\n', (1144, 1160), True, 'import tensorflow as tf\n'), ((1257, 1277), 'tensorflow.shape', 'tf.shape', (['landm_true'], {}), '(landm_true)\n', (1265, 1277), True, 'import tensorflow as tf\n'), ((1316, 1357), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['landm_true', 'mask_landm_b'], {}), '(landm_true, mask_landm_b)\n', (1331, 1357), True, 'import tensorflow as tf\n'), ((1396, 1437), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['landm_pred', 'mask_landm_b'], {}), '(landm_pred, mask_landm_b)\n', (1411, 1437), True, 'import tensorflow as tf\n'), ((1575, 1593), 'tensorflow.shape', 'tf.shape', (['loc_true'], {}), '(loc_true)\n', (1583, 1593), True, 'import tensorflow as tf\n'), ((1630, 1667), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['loc_true', 'mask_pos_b'], {}), '(loc_true, mask_pos_b)\n', (1645, 1667), True, 'import tensorflow as tf\n'), ((1704, 1741), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['loc_pred', 'mask_pos_b'], {}), '(loc_pred, mask_pos_b)\n', (1719, 1741), True, 'import tensorflow as tf\n'), ((2394, 2433), 'tensorflow.cast', 'tf.cast', (['mask_pos_per_batch', 'tf.float32'], {}), '(mask_pos_per_batch, tf.float32)\n', (2401, 2433), True, 'import tensorflow as tf\n'), ((3058, 3078), 'tensorflow.shape', 'tf.shape', (['class_pred'], {}), '(class_pred)\n', (3066, 3078), True, 'import tensorflow as tf\n'), ((3124, 3153), 'tensorflow.cast', 'tf.cast', (['mask_pos', 'tf.float32'], {}), '(mask_pos, tf.float32)\n', (3131, 3153), True, 'import tensorflow as tf\n'), ((2627, 2657), 'tensorflow.cast', 'tf.cast', (['num_prior', 'tf.float32'], {}), '(num_prior, tf.float32)\n', (2634, 2657), True, 'import tensorflow as tf\n'), ((2711, 2751), 'tensorflow.cast', 'tf.cast', (['loss_class_idx_rank', 'tf.float32'], {}), '(loss_class_idx_rank, tf.float32)\n', (2718, 2751), True, 'import tensorflow as tf\n')] |
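# A minimal sketch of invoking the MultiBoxLoss factory above; the batch size,
# prior count and random tensors are placeholder assumptions standing in for
# real anchor targets and network outputs.
import tensorflow as tf

loss_fn = MultiBoxLoss(num_class=2, neg_pos_ratio=3)
num_batch, num_prior = 4, 16
# y_true layout per prior: [box(4) | landmarks(8) | landm_valid(1) | class(1)]
cls_col = tf.concat([tf.ones([num_batch, num_prior // 2, 1]),    # positives
                    tf.zeros([num_batch, num_prior // 2, 1])], 1)  # negatives
y_true = tf.concat([tf.zeros([num_batch, num_prior, 12]),
                    tf.ones([num_batch, num_prior, 1]),              # landm_valid
                    cls_col], axis=-1)
y_pred = [tf.random.normal([num_batch, num_prior, 4]),                # box offsets
          tf.random.normal([num_batch, num_prior, 8]),                # landmark offsets
          tf.nn.softmax(tf.random.normal([num_batch, num_prior, 2]))]  # class probabilities
loc_loss, landm_loss, class_loss = loss_fn(y_true, y_pred)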
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-24 16:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('awardapp', '0003_auto_20191024_1606'),
]
operations = [
migrations.AlterField(
model_name='project',
name='link',
field=models.TextField(max_length=130),
),
]
| [
"django.db.models.TextField"
]
| [((399, 431), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(130)'}), '(max_length=130)\n', (415, 431), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests the coordinator module
"""
from fabric.api import env
from mock import patch
from prestoadmin import coordinator
from prestoadmin.util.exception import ConfigurationError
from tests.base_test_case import BaseTestCase
class TestCoordinator(BaseTestCase):
def test_build_all_defaults(self):
env.roledefs['coordinator'] = 'a'
env.roledefs['workers'] = ['b', 'c']
actual_default = coordinator.Coordinator().build_all_defaults()
expected = {'node.properties':
{'node.environment': 'presto',
'node.data-dir': '/var/lib/presto/data',
'node.launcher-log-file': '/var/log/presto/launcher.log',
'node.server-log-file': '/var/log/presto/server.log',
'catalog.config-dir': '/etc/presto/catalog',
'plugin.dir': '/usr/lib/presto/lib/plugin'},
'jvm.config': ['-server',
'-Xmx16G',
'-XX:-UseBiasedLocking',
'-XX:+UseG1GC',
'-XX:G1HeapRegionSize=32M',
'-XX:+ExplicitGCInvokesConcurrent',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseGCOverheadLimit',
'-XX:+ExitOnOutOfMemoryError',
'-XX:ReservedCodeCacheSize=512M',
'-DHADOOP_USER_NAME=hive'],
'config.properties': {
'coordinator': 'true',
'discovery-server.enabled': 'true',
'discovery.uri': 'http://a:8080',
'http-server.http.port': '8080',
'node-scheduler.include-coordinator': 'false',
'query.max-memory': '50GB',
'query.max-memory-per-node': '8GB'}
}
self.assertEqual(actual_default, expected)
def test_defaults_coord_is_worker(self):
env.roledefs['coordinator'] = ['a']
env.roledefs['worker'] = ['a', 'b', 'c']
actual_default = coordinator.Coordinator().build_all_defaults()
expected = {'node.properties': {
'node.environment': 'presto',
'node.data-dir': '/var/lib/presto/data',
'node.launcher-log-file': '/var/log/presto/launcher.log',
'node.server-log-file': '/var/log/presto/server.log',
'catalog.config-dir': '/etc/presto/catalog',
'plugin.dir': '/usr/lib/presto/lib/plugin'},
'jvm.config': ['-server',
'-Xmx16G',
'-XX:-UseBiasedLocking',
'-XX:+UseG1GC',
'-XX:G1HeapRegionSize=32M',
'-XX:+ExplicitGCInvokesConcurrent',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseGCOverheadLimit',
'-XX:+ExitOnOutOfMemoryError',
'-XX:ReservedCodeCacheSize=512M',
'-DHADOOP_USER_NAME=hive'],
'config.properties': {
'coordinator': 'true',
'discovery-server.enabled': 'true',
'discovery.uri': 'http://a:8080',
'http-server.http.port': '8080',
'node-scheduler.include-coordinator': 'true',
'query.max-memory': '50GB',
'query.max-memory-per-node': '8GB'}
}
self.assertEqual(actual_default, expected)
def test_validate_valid(self):
conf = {'node.properties': {},
'jvm.config': [],
'config.properties': {'coordinator': 'true',
'discovery.uri': 'http://uri'}}
self.assertEqual(conf, coordinator.Coordinator.validate(conf))
def test_validate_default(self):
env.roledefs['coordinator'] = 'localhost'
env.roledefs['workers'] = ['localhost']
conf = coordinator.Coordinator().build_all_defaults()
self.assertEqual(conf, coordinator.Coordinator.validate(conf))
def test_invalid_conf(self):
conf = {'node.propoerties': {}}
self.assertRaisesRegexp(ConfigurationError,
'Missing configuration for required file: ',
coordinator.Coordinator.validate, conf)
def test_invalid_conf_missing_coordinator(self):
conf = {'node.properties': {},
'jvm.config': [],
'config.properties': {'discovery.uri': 'http://uri'}
}
self.assertRaisesRegexp(ConfigurationError,
'Must specify coordinator=true in '
'coordinator\'s config.properties',
coordinator.Coordinator.validate, conf)
def test_invalid_conf_coordinator(self):
conf = {'node.properties': {},
'jvm.config': [],
'config.properties': {'coordinator': 'false',
'discovery.uri': 'http://uri'}
}
self.assertRaisesRegexp(ConfigurationError,
'Coordinator cannot be false in the '
'coordinator\'s config.properties',
coordinator.Coordinator.validate, conf)
@patch('prestoadmin.node.config.write_conf_to_file')
@patch('prestoadmin.node.get_presto_conf')
def test_get_conf_empty_is_default(self, get_conf_from_file_mock,
write_mock):
env.roledefs['coordinator'] = 'j'
env.roledefs['workers'] = ['K', 'L']
get_conf_from_file_mock.return_value = {}
self.assertEqual(coordinator.Coordinator().get_conf(),
coordinator.Coordinator().build_all_defaults())
@patch('prestoadmin.node.config.write_conf_to_file')
@patch('prestoadmin.node.get_presto_conf')
def test_get_conf(self, get_conf_from_file_mock, write_mock):
env.roledefs['coordinator'] = 'j'
env.roledefs['workers'] = ['K', 'L']
file_conf = {'node.properties': {'my-property': 'value',
'node.environment': 'test'}}
get_conf_from_file_mock.return_value = file_conf
expected = {'node.properties':
{'my-property': 'value',
'node.environment': 'test'},
'jvm.config': ['-server',
'-Xmx16G',
'-XX:-UseBiasedLocking',
'-XX:+UseG1GC',
'-XX:G1HeapRegionSize=32M',
'-XX:+ExplicitGCInvokesConcurrent',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseGCOverheadLimit',
'-XX:+ExitOnOutOfMemoryError',
'-XX:ReservedCodeCacheSize=512M',
'-DHADOOP_USER_NAME=hive'],
'config.properties': {
'coordinator': 'true',
'discovery-server.enabled': 'true',
'discovery.uri': 'http://j:8080',
'http-server.http.port': '8080',
'node-scheduler.include-coordinator': 'false',
'query.max-memory': '50GB',
'query.max-memory-per-node': '8GB'}
}
self.assertEqual(coordinator.Coordinator().get_conf(), expected)
| [
"prestoadmin.coordinator.Coordinator",
"mock.patch",
"prestoadmin.coordinator.Coordinator.validate"
]
| [((6360, 6411), 'mock.patch', 'patch', (['"""prestoadmin.node.config.write_conf_to_file"""'], {}), "('prestoadmin.node.config.write_conf_to_file')\n", (6365, 6411), False, 'from mock import patch\n'), ((6417, 6458), 'mock.patch', 'patch', (['"""prestoadmin.node.get_presto_conf"""'], {}), "('prestoadmin.node.get_presto_conf')\n", (6422, 6458), False, 'from mock import patch\n'), ((6860, 6911), 'mock.patch', 'patch', (['"""prestoadmin.node.config.write_conf_to_file"""'], {}), "('prestoadmin.node.config.write_conf_to_file')\n", (6865, 6911), False, 'from mock import patch\n'), ((6917, 6958), 'mock.patch', 'patch', (['"""prestoadmin.node.get_presto_conf"""'], {}), "('prestoadmin.node.get_presto_conf')\n", (6922, 6958), False, 'from mock import patch\n'), ((4764, 4802), 'prestoadmin.coordinator.Coordinator.validate', 'coordinator.Coordinator.validate', (['conf'], {}), '(conf)\n', (4796, 4802), False, 'from prestoadmin import coordinator\n'), ((5033, 5071), 'prestoadmin.coordinator.Coordinator.validate', 'coordinator.Coordinator.validate', (['conf'], {}), '(conf)\n', (5065, 5071), False, 'from prestoadmin import coordinator\n'), ((986, 1011), 'prestoadmin.coordinator.Coordinator', 'coordinator.Coordinator', ([], {}), '()\n', (1009, 1011), False, 'from prestoadmin import coordinator\n'), ((2824, 2849), 'prestoadmin.coordinator.Coordinator', 'coordinator.Coordinator', ([], {}), '()\n', (2847, 2849), False, 'from prestoadmin import coordinator\n'), ((4955, 4980), 'prestoadmin.coordinator.Coordinator', 'coordinator.Coordinator', ([], {}), '()\n', (4978, 4980), False, 'from prestoadmin import coordinator\n'), ((6743, 6768), 'prestoadmin.coordinator.Coordinator', 'coordinator.Coordinator', ([], {}), '()\n', (6766, 6768), False, 'from prestoadmin import coordinator\n'), ((6806, 6831), 'prestoadmin.coordinator.Coordinator', 'coordinator.Coordinator', ([], {}), '()\n', (6829, 6831), False, 'from prestoadmin import coordinator\n'), ((8601, 8626), 'prestoadmin.coordinator.Coordinator', 'coordinator.Coordinator', ([], {}), '()\n', (8624, 8626), False, 'from prestoadmin import coordinator\n')] |
import pygame, sys, random, time
from pygame.locals import *
class Missile:
def __init__(self, screen, x):
# Store the data. Initialize: y to 591 and exploded to False.
self.screen = screen
self.x = x
self.y = 591
self.exploded = False
def move(self):
# Make self.y 5 smaller than it was (which will cause the Missile to move UP).
self.y = self.y - 5
def draw(self):
# Draw a vertical, 4 pixels thick, 8 pixels long, red (or green) line on the screen,
# where the line starts at the current position of this Missile.
pygame.draw.line(self.screen, (0, 255, 0), (self.x, self.y), (self.x, self.y - 8), 4)
class Fighter:
def __init__(self, screen, x, y):
# Store the data.
# Set self.missiles to the empty list.
# Load the file "fighter.png" as the image
        # Set the colorkey to white (it has a white background that needs to be removed)
self.screen = screen
self.x = x
self.y = y
self.missiles = []
self.image = pygame.image.load("fighter.png")
self.image.set_colorkey(pygame.Color("White"))
def draw(self):
# Draw this Fighter, using its image at its current (x, y) position.
self.screen.blit(self.image, (self.x, self.y))
def fire(self):
# Construct a new Missile 50 pixels to the right of this Fighter.
# Append that Missile to this Fighter's list of Missile objects.
new_missile = Missile(self.screen, self.x + 50)
self.missiles.append(new_missile)
def remove_exploded_missiles(self):
# Already complete
for k in range(len(self.missiles) - 1, -1, -1):
if self.missiles[k].exploded or self.missiles[k].y < 0:
del self.missiles[k]
class Badguy:
def __init__(self, screen, x, y):
# Store the data.
# Set dead to False and original_x to x and move_right to True.
# Load the file "badguy.png" as the image. and set its colorkey to black.
self.screen = screen
self.x = x
self.y = y
self.dead = False
self.original_x = x
self.move_right = True
self.image = pygame.image.load("badguy.png")
self.image.set_colorkey(pygame.Color("Black"))
def move(self):
        # Move 8 pixels in the current horizontal direction.
        # When this Badguy drifts more than 100 pixels from its original x position,
        # reverse direction and drop down 15 pixels.
if self.move_right:
self.x = self.x + 8
if self.x > self.original_x + 100:
self.move_right = False
self.y = self.y + 15
else:
self.x = self.x - 8
if self.x < self.original_x - 100:
self.move_right = True
self.y = self.y + 15
def draw(self):
# Draw this Badguy, using its image at its current (x, y) position.
self.screen.blit(self.image, (self.x, self.y))
def hit_by(self, missile):
# Return True if a 70x45 rectangle at this Badguy's current position
# collides with the xy point of the given missile.
# Return False otherwise.
return pygame.Rect(self.x, self.y, 70, 45).collidepoint(missile.x, missile.y)
class EnemyFleet:
def __init__(self, screen, enemy_rows):
# Already done. Prepares the list of Badguys.
self.badguys = []
for j in range(enemy_rows):
for k in range(8):
self.badguys.append(Badguy(screen, 80 * k, 50 * j + 20))
@property
def is_defeated(self):
# Return True if the number of badguys in this Enemy Fleet is 0,
# otherwise return False.
return len(self.badguys) == 0
def move(self):
# Make each badguy in this EnemyFleet move.
for badguy in self.badguys:
badguy.move()
def draw(self):
# Make each badguy in this EnemyFleet draw itself.
for badguy in self.badguys:
badguy.draw()
def remove_dead_badguys(self):
for k in range(len(self.badguys) - 1, -1, -1):
if self.badguys[k].dead:
del self.badguys[k]
# Create a Scoreboard class (from scratch)
# Instance variables: screen, x, y, score, and font (size 30)
# Methods: draw (and __init__)
# Create a scoreboard at location 5, 5
# Draw the scoreboard in the game loop
class Scoreboard:
def __init__(self, screen):
self.screen = screen
self.score = 0
self.font = pygame.font.Font(None, 30)
def draw(self):
text_as_image = self.font.render("Score: " + str(self.score), True, (255, 255, 255))
self.screen.blit(text_as_image, (5, 5))
def main():
pygame.init()
clock = pygame.time.Clock()
pygame.display.set_caption("SPACE INVADERS!")
screen = pygame.display.set_mode((640, 650))
enemy_rows = 3
enemy = EnemyFleet(screen, enemy_rows)
fighter = Fighter(screen, 320, 590)
scoreboard = Scoreboard(screen)
gameover_image = pygame.image.load("gameover.png")
is_game_over = False
while True:
clock.tick(60)
for event in pygame.event.get():
pressed_keys = pygame.key.get_pressed()
if event.type == KEYDOWN and pressed_keys[K_SPACE]:
fighter.fire()
if event.type == QUIT:
sys.exit()
screen.fill((0, 0, 0))
pressed_keys = pygame.key.get_pressed()
if pressed_keys[K_LEFT] and fighter.x > -50:
fighter.x = fighter.x - 5
if pressed_keys[K_RIGHT] and fighter.x < 590:
fighter.x = fighter.x + 5
fighter.draw()
enemy.move()
enemy.draw()
for missile in fighter.missiles:
missile.move()
missile.draw()
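        # Collision check: test every missile against every badguy; a hit scores 100
        # points and flags both the badguy and the missile for removal below.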
for badguy in enemy.badguys:
for missile in fighter.missiles:
if badguy.hit_by(missile):
scoreboard.score = scoreboard.score + 100
badguy.dead = True
missile.exploded = True
fighter.remove_exploded_missiles()
enemy.remove_dead_badguys()
if enemy.is_defeated:
enemy_rows = enemy_rows + 1
enemy = EnemyFleet(screen, enemy_rows)
scoreboard.draw()
if not is_game_over:
pygame.display.update()
for badguy in enemy.badguys:
if badguy.y > 545:
screen.blit(gameover_image, (170, 200))
pygame.display.update()
is_game_over = True
if __name__ == "__main__":
    main()
| [
"sys.exit",
"pygame.init",
"pygame.draw.line",
"pygame.event.get",
"pygame.Color",
"pygame.display.set_mode",
"pygame.time.Clock",
"pygame.key.get_pressed",
"pygame.display.set_caption",
"pygame.image.load",
"pygame.font.Font",
"pygame.display.update",
"pygame.Rect"
]
| [((4760, 4773), 'pygame.init', 'pygame.init', ([], {}), '()\n', (4771, 4773), False, 'import pygame, sys, random, time\n'), ((4786, 4805), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (4803, 4805), False, 'import pygame, sys, random, time\n'), ((4810, 4855), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""SPACE INVADERS!"""'], {}), "('SPACE INVADERS!')\n", (4836, 4855), False, 'import pygame, sys, random, time\n'), ((4869, 4904), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(640, 650)'], {}), '((640, 650))\n', (4892, 4904), False, 'import pygame, sys, random, time\n'), ((5066, 5099), 'pygame.image.load', 'pygame.image.load', (['"""gameover.png"""'], {}), "('gameover.png')\n", (5083, 5099), False, 'import pygame, sys, random, time\n'), ((620, 710), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', '(0, 255, 0)', '(self.x, self.y)', '(self.x, self.y - 8)', '(4)'], {}), '(self.screen, (0, 255, 0), (self.x, self.y), (self.x, self.\n y - 8), 4)\n', (636, 710), False, 'import pygame, sys, random, time\n'), ((1089, 1121), 'pygame.image.load', 'pygame.image.load', (['"""fighter.png"""'], {}), "('fighter.png')\n", (1106, 1121), False, 'import pygame, sys, random, time\n'), ((2242, 2273), 'pygame.image.load', 'pygame.image.load', (['"""badguy.png"""'], {}), "('badguy.png')\n", (2259, 2273), False, 'import pygame, sys, random, time\n'), ((4553, 4579), 'pygame.font.Font', 'pygame.font.Font', (['None', '(30)'], {}), '(None, 30)\n', (4569, 4579), False, 'import pygame, sys, random, time\n'), ((5186, 5204), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (5202, 5204), False, 'import pygame, sys, random, time\n'), ((5470, 5494), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (5492, 5494), False, 'import pygame, sys, random, time\n'), ((1154, 1175), 'pygame.Color', 'pygame.Color', (['"""White"""'], {}), "('White')\n", (1166, 1175), False, 'import pygame, sys, random, time\n'), ((2306, 2327), 'pygame.Color', 'pygame.Color', (['"""Black"""'], {}), "('Black')\n", (2318, 2327), False, 'import pygame, sys, random, time\n'), ((5233, 5257), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (5255, 5257), False, 'import pygame, sys, random, time\n'), ((6382, 6405), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6403, 6405), False, 'import pygame, sys, random, time\n'), ((3228, 3263), 'pygame.Rect', 'pygame.Rect', (['self.x', 'self.y', '(70)', '(45)'], {}), '(self.x, self.y, 70, 45)\n', (3239, 3263), False, 'import pygame, sys, random, time\n'), ((5404, 5414), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5412, 5414), False, 'import pygame, sys, random, time\n'), ((6563, 6586), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6584, 6586), False, 'import pygame, sys, random, time\n')] |
from guniflask.config import settings
from guniflask.web import blueprint, get_route
@blueprint
class ConfigController:
def __init__(self):
pass
@get_route('/settings/<name>')
def get_setting(self, name):
return {name: settings[name]}
| [
"guniflask.web.get_route"
]
| [((165, 194), 'guniflask.web.get_route', 'get_route', (['"""/settings/<name>"""'], {}), "('/settings/<name>')\n", (174, 194), False, 'from guniflask.web import blueprint, get_route\n')] |
import sys,os
sys.path.append('/home/zongdaoming/cv/multi-organ/multi-organ-ijcai')
from lib.losses.BaseClass import _AbstractDiceLoss
from lib.losses.basic import *
class DiceLoss(_AbstractDiceLoss):
"""Computes Dice Loss according to https://arxiv.org/abs/1606.04797.
For multi-class segmentation `weight` parameter can be used to assign different weights per class.
"""
def __init__(self, classes=4, skip_index_after=None, weight=None, sigmoid_normalization=True ):
super().__init__(weight, sigmoid_normalization)
self.classes = classes
if skip_index_after is not None:
self.skip_index_after = skip_index_after
def dice(self, input, target, weight):
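        # Note: the per-call `weight` argument is ignored here; the weight passed to
        # __init__ (stored as self.weight) is used instead.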
return compute_per_channel_dice(input, target, weights=self.weight)
| [
"sys.path.append"
]
| [((14, 83), 'sys.path.append', 'sys.path.append', (['"""/home/zongdaoming/cv/multi-organ/multi-organ-ijcai"""'], {}), "('/home/zongdaoming/cv/multi-organ/multi-organ-ijcai')\n", (29, 83), False, 'import sys, os\n')] |
import datetime
from threading import Thread
from time import sleep
import DBC.dbcreate as dbc
class Tracker(Thread):
max_idle_time = 720 # minutes
default_sleep = 3600 # secs
def track(self):
dbcl = dbc.DBClient()
# print(dbcl.getlasttime())
print("Tracker activated")
while True:
date = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H:%M')
string = date.rsplit("-", 1)
yearmonthday = (string[0].rsplit("-", 3))
hoursminutes = (string[1].rsplit(":", 2))
# print(yearmonthday)
# print(hoursminutes)
year = int(yearmonthday[0])
month = int(yearmonthday[1])
day = int(yearmonthday[2])
hour = int(hoursminutes[0])
minute = int(hoursminutes[1])
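            # Parse the timestamp of the last recorded activity from the database the same way.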
date = dbcl.getlasttime()
string = date.rsplit("-", 1)
yearmonthday = (string[0].rsplit("-", 3))
hoursminutes = (string[1].rsplit(":", 2))
#print(yearmonthday)
#print(hoursminutes)
yeard = int(yearmonthday[0])
monthd = int(yearmonthday[1])
dayd = int(yearmonthday[2])
hourd = int(hoursminutes[0])
minuted = int(hoursminutes[1])
# tรคmรค loopitus tyhmรครค, voisi kรคyttรครค valmista kirjastoa
if year == yeard:
if month == monthd:
if day == dayd:
if hour == hourd:
away = minute - minuted
else:
away = ((hour*60) + minute) - ((hourd*60) + minuted)
else:
if hour == hourd:
away = ((hourd + (day-dayd)*24 - hour) * 60) + minute - minuted
else:
away = ((day*hour*60) + minute) - ((dayd*hourd*60) + minuted)
else:
                # incomplete: the different-month case is not handled properly, fall back to a fixed value
away = 3
#print(away)
self.actions(away, dbcl.getlastaway())
sleep(self.default_sleep)
def run(self):
self.track()
def actions(self, time, away):
if time < self.max_idle_time:
print("Everything ok")
else:
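            # Assumption: getlastaway() returns a duration in hours; convert it to
            # minutes so it can be compared with the elapsed idle time.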
away = (int(away) * 60)
if time > away:
print("Contacting users")
else:
print("Holiday mode") | [
"DBC.dbcreate.DBClient",
"datetime.datetime.now",
"time.sleep"
]
| [((225, 239), 'DBC.dbcreate.DBClient', 'dbc.DBClient', ([], {}), '()\n', (237, 239), True, 'import DBC.dbcreate as dbc\n'), ((2140, 2165), 'time.sleep', 'sleep', (['self.default_sleep'], {}), '(self.default_sleep)\n', (2145, 2165), False, 'from time import sleep\n'), ((378, 401), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (399, 401), False, 'import datetime\n')] |
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import google.api_core.gapic_v1.method
import mock
TABLE_NAME = "citizens"
COLUMNS = ["email", "first_name", "last_name", "age"]
SQL_QUERY = """\
SELECT first_name, last_name, age FROM citizens ORDER BY age"""
SQL_QUERY_WITH_PARAM = """
SELECT first_name, last_name, email FROM citizens WHERE age <= @max_age"""
PARAMS = {"max_age": 30}
PARAM_TYPES = {"max_age": "INT64"}
SQL_QUERY_WITH_BYTES_PARAM = """\
SELECT image_name FROM images WHERE @bytes IN image_data"""
PARAMS_WITH_BYTES = {"bytes": b"FACEDACE"}
RESUME_TOKEN = b"<PASSWORD>"
TXN_ID = b"DEAFBEAD"
SECONDS = 3
MICROS = 123456
class Test_restart_on_unavailable(unittest.TestCase):
def _call_fut(self, restart):
from google.cloud.spanner_v1.snapshot import _restart_on_unavailable
return _restart_on_unavailable(restart)
def _make_item(self, value, resume_token=b""):
return mock.Mock(
value=value, resume_token=resume_token, spec=["value", "resume_token"]
)
def test_iteration_w_empty_raw(self):
raw = _MockIterator()
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), [])
def test_iteration_w_non_empty_raw(self):
ITEMS = (self._make_item(0), self._make_item(1))
raw = _MockIterator(*ITEMS)
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(ITEMS))
restart.assert_called_once_with()
    def test_iteration_w_raw_w_resume_token(self):
ITEMS = (
self._make_item(0),
self._make_item(1, resume_token=RESUME_TOKEN),
self._make_item(2),
self._make_item(3),
)
raw = _MockIterator(*ITEMS)
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(ITEMS))
restart.assert_called_once_with()
def test_iteration_w_raw_raising_unavailable_no_token(self):
ITEMS = (
self._make_item(0),
self._make_item(1, resume_token=RESUME_TOKEN),
self._make_item(2),
)
before = _MockIterator(fail_after=True)
after = _MockIterator(*ITEMS)
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(ITEMS))
self.assertEqual(restart.mock_calls, [mock.call(), mock.call(resume_token=b"")])
def test_iteration_w_raw_raising_unavailable(self):
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2),) # discarded after 503
LAST = (self._make_item(3),)
before = _MockIterator(*(FIRST + SECOND), fail_after=True)
after = _MockIterator(*LAST)
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(FIRST + LAST))
self.assertEqual(
restart.mock_calls, [mock.call(), mock.call(resume_token=RESUME_TOKEN)]
)
def test_iteration_w_raw_raising_unavailable_after_token(self):
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2), self._make_item(3))
before = _MockIterator(*FIRST, fail_after=True)
after = _MockIterator(*SECOND)
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(FIRST + SECOND))
self.assertEqual(
restart.mock_calls, [mock.call(), mock.call(resume_token=RESUME_TOKEN)]
)
class Test_SnapshotBase(unittest.TestCase):
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
DATABASE_ID = "database-id"
DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID
SESSION_ID = "session-id"
SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID
def _getTargetClass(self):
from google.cloud.spanner_v1.snapshot import _SnapshotBase
return _SnapshotBase
def _make_one(self, session):
return self._getTargetClass()(session)
def _makeDerived(self, session):
class _Derived(self._getTargetClass()):
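            # Minimal concrete subclass: defaults to a single-use strong read-only
            # selector; uses begin/id selectors when multi-use or a transaction id is set.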
_transaction_id = None
_multi_use = False
def _make_txn_selector(self):
from google.cloud.spanner_v1.proto.transaction_pb2 import (
TransactionOptions,
TransactionSelector,
)
if self._transaction_id:
return TransactionSelector(id=self._transaction_id)
options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
if self._multi_use:
return TransactionSelector(begin=options)
return TransactionSelector(single_use=options)
return _Derived(session)
def _make_spanner_api(self):
import google.cloud.spanner_v1.gapic.spanner_client
return mock.create_autospec(
google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True
)
def test_ctor(self):
session = _Session()
base = self._make_one(session)
self.assertIs(base._session, session)
self.assertEqual(base._execute_sql_count, 0)
def test__make_txn_selector_virtual(self):
session = _Session()
base = self._make_one(session)
with self.assertRaises(NotImplementedError):
base._make_txn_selector()
def test_read_other_error(self):
from google.cloud.spanner_v1.keyset import KeySet
keyset = KeySet(all_=True)
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.streaming_read.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
with self.assertRaises(RuntimeError):
list(derived.read(TABLE_NAME, COLUMNS, keyset))
def _read_helper(self, multi_use, first=True, count=0, partition=None):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1.proto.result_set_pb2 import (
PartialResultSet,
ResultSetMetadata,
ResultSetStats,
)
from google.cloud.spanner_v1.proto.transaction_pb2 import (
TransactionSelector,
TransactionOptions,
)
from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType
from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1._helpers import _make_value_pb
VALUES = [[u"bharney", 31], [u"phred", 32]]
VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES]
struct_type_pb = StructType(
fields=[
StructType.Field(name="name", type=Type(code=STRING)),
StructType.Field(name="age", type=Type(code=INT64)),
]
)
metadata_pb = ResultSetMetadata(row_type=struct_type_pb)
stats_pb = ResultSetStats(
query_stats=Struct(fields={"rows_returned": _make_value_pb(2)})
)
result_sets = [
PartialResultSet(values=VALUE_PBS[0], metadata=metadata_pb),
PartialResultSet(values=VALUE_PBS[1], stats=stats_pb),
]
KEYS = [["<EMAIL>"], ["<EMAIL>"]]
keyset = KeySet(keys=KEYS)
INDEX = "email-address-index"
LIMIT = 20
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.streaming_read.return_value = _MockIterator(*result_sets)
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
derived._read_request_count = count
if not first:
derived._transaction_id = TXN_ID
if partition is not None: # 'limit' and 'partition' incompatible
result_set = derived.read(
TABLE_NAME, COLUMNS, keyset, index=INDEX, partition=partition
)
else:
result_set = derived.read(
TABLE_NAME, COLUMNS, keyset, index=INDEX, limit=LIMIT
)
self.assertEqual(derived._read_request_count, count + 1)
if multi_use:
self.assertIs(result_set._source, derived)
else:
self.assertIsNone(result_set._source)
self.assertEqual(list(result_set), VALUES)
self.assertEqual(result_set.metadata, metadata_pb)
self.assertEqual(result_set.stats, stats_pb)
txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
if multi_use:
if first:
expected_transaction = TransactionSelector(begin=txn_options)
else:
expected_transaction = TransactionSelector(id=TXN_ID)
else:
expected_transaction = TransactionSelector(single_use=txn_options)
if partition is not None:
expected_limit = 0
else:
expected_limit = LIMIT
api.streaming_read.assert_called_once_with(
self.SESSION_NAME,
TABLE_NAME,
COLUMNS,
keyset._to_pb(),
transaction=expected_transaction,
index=INDEX,
limit=expected_limit,
partition_token=partition,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_read_wo_multi_use(self):
self._read_helper(multi_use=False)
def test_read_wo_multi_use_w_read_request_count_gt_0(self):
with self.assertRaises(ValueError):
self._read_helper(multi_use=False, count=1)
def test_read_w_multi_use_wo_first(self):
self._read_helper(multi_use=True, first=False)
def test_read_w_multi_use_wo_first_w_count_gt_0(self):
self._read_helper(multi_use=True, first=False, count=1)
def test_read_w_multi_use_w_first_w_partition(self):
PARTITION = b"FADEABED"
self._read_helper(multi_use=True, first=True, partition=PARTITION)
def test_read_w_multi_use_w_first_w_count_gt_0(self):
with self.assertRaises(ValueError):
self._read_helper(multi_use=True, first=True, count=1)
def test_execute_sql_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.execute_streaming_sql.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
with self.assertRaises(RuntimeError):
list(derived.execute_sql(SQL_QUERY))
self.assertEqual(derived._execute_sql_count, 1)
def test_execute_sql_w_params_wo_param_types(self):
database = _Database()
session = _Session(database)
derived = self._makeDerived(session)
with self.assertRaises(ValueError):
derived.execute_sql(SQL_QUERY_WITH_PARAM, PARAMS)
def _execute_sql_helper(
self,
multi_use,
first=True,
count=0,
partition=None,
sql_count=0,
query_options=None,
timeout=google.api_core.gapic_v1.method.DEFAULT,
retry=google.api_core.gapic_v1.method.DEFAULT,
):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1.proto.result_set_pb2 import (
PartialResultSet,
ResultSetMetadata,
ResultSetStats,
)
from google.cloud.spanner_v1.proto.transaction_pb2 import (
TransactionSelector,
TransactionOptions,
)
from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType
from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64
from google.cloud.spanner_v1._helpers import (
_make_value_pb,
_merge_query_options,
)
VALUES = [[u"bharney", u"rhubbyl", 31], [u"phred", u"phlyntstone", 32]]
VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES]
MODE = 2 # PROFILE
struct_type_pb = StructType(
fields=[
StructType.Field(name="first_name", type=Type(code=STRING)),
StructType.Field(name="last_name", type=Type(code=STRING)),
StructType.Field(name="age", type=Type(code=INT64)),
]
)
metadata_pb = ResultSetMetadata(row_type=struct_type_pb)
stats_pb = ResultSetStats(
query_stats=Struct(fields={"rows_returned": _make_value_pb(2)})
)
result_sets = [
PartialResultSet(values=VALUE_PBS[0], metadata=metadata_pb),
PartialResultSet(values=VALUE_PBS[1], stats=stats_pb),
]
iterator = _MockIterator(*result_sets)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.execute_streaming_sql.return_value = iterator
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
derived._read_request_count = count
derived._execute_sql_count = sql_count
if not first:
derived._transaction_id = TXN_ID
result_set = derived.execute_sql(
SQL_QUERY_WITH_PARAM,
PARAMS,
PARAM_TYPES,
query_mode=MODE,
query_options=query_options,
partition=partition,
retry=retry,
timeout=timeout,
)
self.assertEqual(derived._read_request_count, count + 1)
if multi_use:
self.assertIs(result_set._source, derived)
else:
self.assertIsNone(result_set._source)
self.assertEqual(list(result_set), VALUES)
self.assertEqual(result_set.metadata, metadata_pb)
self.assertEqual(result_set.stats, stats_pb)
txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
if multi_use:
if first:
expected_transaction = TransactionSelector(begin=txn_options)
else:
expected_transaction = TransactionSelector(id=TXN_ID)
else:
expected_transaction = TransactionSelector(single_use=txn_options)
expected_params = Struct(
fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()}
)
expected_query_options = database._instance._client._query_options
if query_options:
expected_query_options = _merge_query_options(
expected_query_options, query_options
)
api.execute_streaming_sql.assert_called_once_with(
self.SESSION_NAME,
SQL_QUERY_WITH_PARAM,
transaction=expected_transaction,
params=expected_params,
param_types=PARAM_TYPES,
query_mode=MODE,
query_options=expected_query_options,
partition_token=partition,
seqno=sql_count,
metadata=[("google-cloud-resource-prefix", database.name)],
timeout=timeout,
retry=retry,
)
self.assertEqual(derived._execute_sql_count, sql_count + 1)
def test_execute_sql_wo_multi_use(self):
self._execute_sql_helper(multi_use=False)
def test_execute_sql_wo_multi_use_w_read_request_count_gt_0(self):
with self.assertRaises(ValueError):
self._execute_sql_helper(multi_use=False, count=1)
def test_execute_sql_w_multi_use_wo_first(self):
self._execute_sql_helper(multi_use=True, first=False, sql_count=1)
def test_execute_sql_w_multi_use_wo_first_w_count_gt_0(self):
self._execute_sql_helper(multi_use=True, first=False, count=1)
def test_execute_sql_w_multi_use_w_first(self):
self._execute_sql_helper(multi_use=True, first=True)
def test_execute_sql_w_multi_use_w_first_w_count_gt_0(self):
with self.assertRaises(ValueError):
self._execute_sql_helper(multi_use=True, first=True, count=1)
def test_execute_sql_w_retry(self):
self._execute_sql_helper(multi_use=False, retry=None)
def test_execute_sql_w_timeout(self):
self._execute_sql_helper(multi_use=False, timeout=None)
def test_execute_sql_w_query_options(self):
from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest
self._execute_sql_helper(
multi_use=False,
query_options=ExecuteSqlRequest.QueryOptions(optimizer_version="3"),
)
def _partition_read_helper(
self, multi_use, w_txn, size=None, max_partitions=None, index=None
):
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1.types import Partition
from google.cloud.spanner_v1.types import PartitionOptions
from google.cloud.spanner_v1.types import PartitionResponse
from google.cloud.spanner_v1.types import Transaction
from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector
keyset = KeySet(all_=True)
new_txn_id = b"ABECAB91"
token_1 = b"<PASSWORD>"
token_2 = b"<PASSWORD>"
response = PartitionResponse(
partitions=[
Partition(partition_token=token_1),
Partition(partition_token=token_2),
],
transaction=Transaction(id=new_txn_id),
)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.partition_read.return_value = response
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
if w_txn:
derived._transaction_id = TXN_ID
tokens = list(
derived.partition_read(
TABLE_NAME,
COLUMNS,
keyset,
index=index,
partition_size_bytes=size,
max_partitions=max_partitions,
)
)
self.assertEqual(tokens, [token_1, token_2])
expected_txn_selector = TransactionSelector(id=TXN_ID)
expected_partition_options = PartitionOptions(
partition_size_bytes=size, max_partitions=max_partitions
)
api.partition_read.assert_called_once_with(
session=self.SESSION_NAME,
table=TABLE_NAME,
columns=COLUMNS,
key_set=keyset._to_pb(),
transaction=expected_txn_selector,
index=index,
partition_options=expected_partition_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_partition_read_single_use_raises(self):
with self.assertRaises(ValueError):
self._partition_read_helper(multi_use=False, w_txn=True)
def test_partition_read_wo_existing_transaction_raises(self):
with self.assertRaises(ValueError):
self._partition_read_helper(multi_use=True, w_txn=False)
def test_partition_read_other_error(self):
from google.cloud.spanner_v1.keyset import KeySet
keyset = KeySet(all_=True)
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.partition_read.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(RuntimeError):
list(derived.partition_read(TABLE_NAME, COLUMNS, keyset))
def test_partition_read_ok_w_index_no_options(self):
self._partition_read_helper(multi_use=True, w_txn=True, index="index")
def test_partition_read_ok_w_size(self):
self._partition_read_helper(multi_use=True, w_txn=True, size=2000)
def test_partition_read_ok_w_max_partitions(self):
self._partition_read_helper(multi_use=True, w_txn=True, max_partitions=4)
def _partition_query_helper(self, multi_use, w_txn, size=None, max_partitions=None):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1.types import Partition
from google.cloud.spanner_v1.types import PartitionOptions
from google.cloud.spanner_v1.types import PartitionResponse
from google.cloud.spanner_v1.types import Transaction
from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector
from google.cloud.spanner_v1._helpers import _make_value_pb
new_txn_id = b"ABECAB91"
token_1 = b"FACE0FFF"
token_2 = b"BADE8CAF"
response = PartitionResponse(
partitions=[
Partition(partition_token=token_1),
Partition(partition_token=token_2),
],
transaction=Transaction(id=new_txn_id),
)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.partition_query.return_value = response
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
if w_txn:
derived._transaction_id = TXN_ID
tokens = list(
derived.partition_query(
SQL_QUERY_WITH_PARAM,
PARAMS,
PARAM_TYPES,
partition_size_bytes=size,
max_partitions=max_partitions,
)
)
self.assertEqual(tokens, [token_1, token_2])
expected_params = Struct(
fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()}
)
expected_txn_selector = TransactionSelector(id=TXN_ID)
expected_partition_options = PartitionOptions(
partition_size_bytes=size, max_partitions=max_partitions
)
api.partition_query.assert_called_once_with(
session=self.SESSION_NAME,
sql=SQL_QUERY_WITH_PARAM,
transaction=expected_txn_selector,
params=expected_params,
param_types=PARAM_TYPES,
partition_options=expected_partition_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_partition_query_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.partition_query.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(RuntimeError):
list(derived.partition_query(SQL_QUERY))
def test_partition_query_w_params_wo_param_types(self):
database = _Database()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(ValueError):
list(derived.partition_query(SQL_QUERY_WITH_PARAM, PARAMS))
def test_partition_query_single_use_raises(self):
with self.assertRaises(ValueError):
self._partition_query_helper(multi_use=False, w_txn=True)
def test_partition_query_wo_transaction_raises(self):
with self.assertRaises(ValueError):
self._partition_query_helper(multi_use=True, w_txn=False)
def test_partition_query_ok_w_index_no_options(self):
self._partition_query_helper(multi_use=True, w_txn=True)
def test_partition_query_ok_w_size(self):
self._partition_query_helper(multi_use=True, w_txn=True, size=2000)
def test_partition_query_ok_w_max_partitions(self):
self._partition_query_helper(multi_use=True, w_txn=True, max_partitions=4)
class TestSnapshot(unittest.TestCase):
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
DATABASE_ID = "database-id"
DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID
SESSION_ID = "session-id"
SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID
def _getTargetClass(self):
from google.cloud.spanner_v1.snapshot import Snapshot
return Snapshot
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _make_spanner_api(self):
import google.cloud.spanner_v1.gapic.spanner_client
return mock.create_autospec(
google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True
)
def _makeTimestamp(self):
import datetime
from google.cloud._helpers import UTC
return datetime.datetime.utcnow().replace(tzinfo=UTC)
def _makeDuration(self, seconds=1, microseconds=0):
import datetime
return datetime.timedelta(seconds=seconds, microseconds=microseconds)
def test_ctor_defaults(self):
session = _Session()
snapshot = self._make_one(session)
self.assertIs(snapshot._session, session)
self.assertTrue(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_multiple_options(self):
timestamp = self._makeTimestamp()
duration = self._makeDuration()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, read_timestamp=timestamp, max_staleness=duration)
def test_ctor_w_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertEqual(snapshot._read_timestamp, timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_min_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, min_read_timestamp=timestamp)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertEqual(snapshot._min_read_timestamp, timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_max_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, max_staleness=duration)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertEqual(snapshot._max_staleness, duration)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_exact_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertEqual(snapshot._exact_staleness, duration)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_multi_use(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertTrue(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertTrue(snapshot._multi_use)
def test_ctor_w_multi_use_and_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertFalse(snapshot._strong)
self.assertEqual(snapshot._read_timestamp, timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertTrue(snapshot._multi_use)
def test_ctor_w_multi_use_and_min_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, min_read_timestamp=timestamp, multi_use=True)
def test_ctor_w_multi_use_and_max_staleness(self):
duration = self._makeDuration()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, max_staleness=duration, multi_use=True)
def test_ctor_w_multi_use_and_exact_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertEqual(snapshot._exact_staleness, duration)
self.assertTrue(snapshot._multi_use)
def test__make_txn_selector_w_transaction_id(self):
session = _Session()
snapshot = self._make_one(session)
snapshot._transaction_id = TXN_ID
selector = snapshot._make_txn_selector()
self.assertEqual(selector.id, TXN_ID)
def test__make_txn_selector_strong(self):
session = _Session()
snapshot = self._make_one(session)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertTrue(options.read_only.strong)
def test__make_txn_selector_w_read_timestamp(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(
_pb_timestamp_to_datetime(options.read_only.read_timestamp), timestamp
)
def test__make_txn_selector_w_min_read_timestamp(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, min_read_timestamp=timestamp)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(
_pb_timestamp_to_datetime(options.read_only.min_read_timestamp), timestamp
)
def test__make_txn_selector_w_max_staleness(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, max_staleness=duration)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(options.read_only.max_staleness.seconds, 3)
self.assertEqual(options.read_only.max_staleness.nanos, 123456000)
def test__make_txn_selector_w_exact_staleness(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(options.read_only.exact_staleness.seconds, 3)
self.assertEqual(options.read_only.exact_staleness.nanos, 123456000)
def test__make_txn_selector_strong_w_multi_use(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertTrue(options.read_only.strong)
def test__make_txn_selector_w_read_timestamp_w_multi_use(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertEqual(
_pb_timestamp_to_datetime(options.read_only.read_timestamp), timestamp
)
def test__make_txn_selector_w_exact_staleness_w_multi_use(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertEqual(options.read_only.exact_staleness.seconds, 3)
self.assertEqual(options.read_only.exact_staleness.nanos, 123456000)
def test_begin_wo_multi_use(self):
session = _Session()
snapshot = self._make_one(session)
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_read_request_count_gt_0(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
snapshot._read_request_count = 1
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_existing_txn_id(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
snapshot._transaction_id = TXN_ID
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.begin_transaction.side_effect = RuntimeError()
timestamp = self._makeTimestamp()
session = _Session(database)
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
with self.assertRaises(RuntimeError):
snapshot.begin()
def test_begin_ok_exact_staleness(self):
from google.protobuf.duration_pb2 import Duration
from google.cloud.spanner_v1.proto.transaction_pb2 import (
Transaction as TransactionPB,
TransactionOptions,
)
transaction_pb = TransactionPB(id=TXN_ID)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.begin_transaction.return_value = transaction_pb
duration = self._makeDuration(seconds=SECONDS, microseconds=MICROS)
session = _Session(database)
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
txn_id = snapshot.begin()
self.assertEqual(txn_id, TXN_ID)
self.assertEqual(snapshot._transaction_id, TXN_ID)
expected_duration = Duration(seconds=SECONDS, nanos=MICROS * 1000)
expected_txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(exact_staleness=expected_duration)
)
api.begin_transaction.assert_called_once_with(
session.name,
expected_txn_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_begin_ok_exact_strong(self):
from google.cloud.spanner_v1.proto.transaction_pb2 import (
Transaction as TransactionPB,
TransactionOptions,
)
transaction_pb = TransactionPB(id=TXN_ID)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.begin_transaction.return_value = transaction_pb
session = _Session(database)
snapshot = self._make_one(session, multi_use=True)
txn_id = snapshot.begin()
self.assertEqual(txn_id, TXN_ID)
self.assertEqual(snapshot._transaction_id, TXN_ID)
expected_txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
api.begin_transaction.assert_called_once_with(
session.name,
expected_txn_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
class _Client(object):
def __init__(self):
from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest
self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1")
class _Instance(object):
def __init__(self):
self._client = _Client()
class _Database(object):
def __init__(self):
self.name = "testing"
self._instance = _Instance()
class _Session(object):
def __init__(self, database=None, name=TestSnapshot.SESSION_NAME):
self._database = database
self.name = name
class _MockIterator(object):
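    # Iterator stub for streaming results; when fail_after is set it raises
    # ServiceUnavailable once its canned values are exhausted.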
def __init__(self, *values, **kw):
self._iter_values = iter(values)
self._fail_after = kw.pop("fail_after", False)
def __iter__(self):
return self
def __next__(self):
from google.api_core.exceptions import ServiceUnavailable
try:
return next(self._iter_values)
except StopIteration:
if self._fail_after:
raise ServiceUnavailable("testing")
raise
next = __next__
| [
"mock.Mock",
"google.cloud.spanner_v1.types.Transaction",
"mock.create_autospec",
"datetime.timedelta",
"google.cloud.spanner_v1._helpers._make_value_pb",
"google.cloud.spanner_v1.proto.transaction_pb2.TransactionSelector",
"google.cloud.spanner_v1.proto.result_set_pb2.ResultSetMetadata",
"google.cloud.spanner_v1.snapshot._restart_on_unavailable",
"google.api_core.exceptions.ServiceUnavailable",
"google.cloud.spanner_v1.keyset.KeySet",
"google.protobuf.duration_pb2.Duration",
"google.cloud.spanner_v1.types.Partition",
"google.cloud.spanner_v1.proto.spanner_pb2.ExecuteSqlRequest.QueryOptions",
"mock.call",
"google.cloud.spanner_v1._helpers._merge_query_options",
"google.cloud.spanner_v1.proto.transaction_pb2.TransactionOptions.ReadOnly",
"google.cloud.spanner_v1.types.PartitionOptions",
"datetime.datetime.utcnow",
"google.cloud.spanner_v1.proto.transaction_pb2.Transaction",
"google.cloud.spanner_v1.proto.result_set_pb2.PartialResultSet",
"google.cloud._helpers._pb_timestamp_to_datetime",
"google.cloud.spanner_v1.proto.type_pb2.Type"
]
| [((1385, 1417), 'google.cloud.spanner_v1.snapshot._restart_on_unavailable', '_restart_on_unavailable', (['restart'], {}), '(restart)\n', (1408, 1417), False, 'from google.cloud.spanner_v1.snapshot import _restart_on_unavailable\n'), ((1485, 1570), 'mock.Mock', 'mock.Mock', ([], {'value': 'value', 'resume_token': 'resume_token', 'spec': "['value', 'resume_token']"}), "(value=value, resume_token=resume_token, spec=['value',\n 'resume_token'])\n", (1494, 1570), False, 'import mock\n'), ((1680, 1716), 'mock.Mock', 'mock.Mock', ([], {'spec': '[]', 'return_value': 'raw'}), '(spec=[], return_value=raw)\n', (1689, 1716), False, 'import mock\n'), ((1965, 2001), 'mock.Mock', 'mock.Mock', ([], {'spec': '[]', 'return_value': 'raw'}), '(spec=[], return_value=raw)\n', (1974, 2001), False, 'import mock\n'), ((2431, 2467), 'mock.Mock', 'mock.Mock', ([], {'spec': '[]', 'return_value': 'raw'}), '(spec=[], return_value=raw)\n', (2440, 2467), False, 'import mock\n'), ((2930, 2977), 'mock.Mock', 'mock.Mock', ([], {'spec': '[]', 'side_effect': '[before, after]'}), '(spec=[], side_effect=[before, after])\n', (2939, 2977), False, 'import mock\n'), ((3528, 3575), 'mock.Mock', 'mock.Mock', ([], {'spec': '[]', 'side_effect': '[before, after]'}), '(spec=[], side_effect=[before, after])\n', (3537, 3575), False, 'import mock\n'), ((4126, 4173), 'mock.Mock', 'mock.Mock', ([], {'spec': '[]', 'side_effect': '[before, after]'}), '(spec=[], side_effect=[before, after])\n', (4135, 4173), False, 'import mock\n'), ((5909, 6009), 'mock.create_autospec', 'mock.create_autospec', (['google.cloud.spanner_v1.gapic.spanner_client.SpannerClient'], {'instance': '(True)'}), '(google.cloud.spanner_v1.gapic.spanner_client.\n SpannerClient, instance=True)\n', (5929, 6009), False, 'import mock\n'), ((6541, 6558), 'google.cloud.spanner_v1.keyset.KeySet', 'KeySet', ([], {'all_': '(True)'}), '(all_=True)\n', (6547, 6558), False, 'from google.cloud.spanner_v1.keyset import KeySet\n'), ((7999, 8041), 'google.cloud.spanner_v1.proto.result_set_pb2.ResultSetMetadata', 'ResultSetMetadata', ([], {'row_type': 'struct_type_pb'}), '(row_type=struct_type_pb)\n', (8016, 8041), False, 'from google.cloud.spanner_v1.proto.result_set_pb2 import PartialResultSet, ResultSetMetadata, ResultSetStats\n'), ((8396, 8413), 'google.cloud.spanner_v1.keyset.KeySet', 'KeySet', ([], {'keys': 'KEYS'}), '(keys=KEYS)\n', (8402, 8413), False, 'from google.cloud.spanner_v1.keyset import KeySet\n'), ((13477, 13519), 'google.cloud.spanner_v1.proto.result_set_pb2.ResultSetMetadata', 'ResultSetMetadata', ([], {'row_type': 'struct_type_pb'}), '(row_type=struct_type_pb)\n', (13494, 13519), False, 'from google.cloud.spanner_v1.proto.result_set_pb2 import PartialResultSet, ResultSetMetadata, ResultSetStats\n'), ((18194, 18211), 'google.cloud.spanner_v1.keyset.KeySet', 'KeySet', ([], {'all_': '(True)'}), '(all_=True)\n', (18200, 18211), False, 'from google.cloud.spanner_v1.keyset import KeySet\n'), ((19248, 19278), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionSelector', 'TransactionSelector', ([], {'id': 'TXN_ID'}), '(id=TXN_ID)\n', (19267, 19278), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((19317, 19391), 'google.cloud.spanner_v1.types.PartitionOptions', 'PartitionOptions', ([], {'partition_size_bytes': 'size', 'max_partitions': 'max_partitions'}), '(partition_size_bytes=size, max_partitions=max_partitions)\n', (19333, 19391), False, 'from google.cloud.spanner_v1.types import PartitionOptions\n'), 
((20285, 20302), 'google.cloud.spanner_v1.keyset.KeySet', 'KeySet', ([], {'all_': '(True)'}), '(all_=True)\n', (20291, 20302), False, 'from google.cloud.spanner_v1.keyset import KeySet\n'), ((22836, 22866), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionSelector', 'TransactionSelector', ([], {'id': 'TXN_ID'}), '(id=TXN_ID)\n', (22855, 22866), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((22905, 22979), 'google.cloud.spanner_v1.types.PartitionOptions', 'PartitionOptions', ([], {'partition_size_bytes': 'size', 'max_partitions': 'max_partitions'}), '(partition_size_bytes=size, max_partitions=max_partitions)\n', (22921, 22979), False, 'from google.cloud.spanner_v1.types import PartitionOptions\n'), ((25648, 25748), 'mock.create_autospec', 'mock.create_autospec', (['google.cloud.spanner_v1.gapic.spanner_client.SpannerClient'], {'instance': '(True)'}), '(google.cloud.spanner_v1.gapic.spanner_client.\n SpannerClient, instance=True)\n', (25668, 25748), False, 'import mock\n'), ((26027, 26089), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'seconds', 'microseconds': 'microseconds'}), '(seconds=seconds, microseconds=microseconds)\n', (26045, 26089), False, 'import datetime\n'), ((36210, 36234), 'google.cloud.spanner_v1.proto.transaction_pb2.Transaction', 'TransactionPB', ([], {'id': 'TXN_ID'}), '(id=TXN_ID)\n', (36223, 36234), True, 'from google.cloud.spanner_v1.proto.transaction_pb2 import Transaction as TransactionPB, TransactionOptions\n'), ((36751, 36797), 'google.protobuf.duration_pb2.Duration', 'Duration', ([], {'seconds': 'SECONDS', 'nanos': '(MICROS * 1000)'}), '(seconds=SECONDS, nanos=MICROS * 1000)\n', (36759, 36797), False, 'from google.protobuf.duration_pb2 import Duration\n'), ((37363, 37387), 'google.cloud.spanner_v1.proto.transaction_pb2.Transaction', 'TransactionPB', ([], {'id': 'TXN_ID'}), '(id=TXN_ID)\n', (37376, 37387), True, 'from google.cloud.spanner_v1.proto.transaction_pb2 import Transaction as TransactionPB, TransactionOptions\n'), ((38256, 38309), 'google.cloud.spanner_v1.proto.spanner_pb2.ExecuteSqlRequest.QueryOptions', 'ExecuteSqlRequest.QueryOptions', ([], {'optimizer_version': '"""1"""'}), "(optimizer_version='1')\n", (38286, 38309), False, 'from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest\n'), ((8199, 8258), 'google.cloud.spanner_v1.proto.result_set_pb2.PartialResultSet', 'PartialResultSet', ([], {'values': 'VALUE_PBS[0]', 'metadata': 'metadata_pb'}), '(values=VALUE_PBS[0], metadata=metadata_pb)\n', (8215, 8258), False, 'from google.cloud.spanner_v1.proto.result_set_pb2 import PartialResultSet, ResultSetMetadata, ResultSetStats\n'), ((8272, 8325), 'google.cloud.spanner_v1.proto.result_set_pb2.PartialResultSet', 'PartialResultSet', ([], {'values': 'VALUE_PBS[1]', 'stats': 'stats_pb'}), '(values=VALUE_PBS[1], stats=stats_pb)\n', (8288, 8325), False, 'from google.cloud.spanner_v1.proto.result_set_pb2 import PartialResultSet, ResultSetMetadata, ResultSetStats\n'), ((9957, 10000), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionSelector', 'TransactionSelector', ([], {'single_use': 'txn_options'}), '(single_use=txn_options)\n', (9976, 10000), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((13677, 13736), 'google.cloud.spanner_v1.proto.result_set_pb2.PartialResultSet', 'PartialResultSet', ([], {'values': 'VALUE_PBS[0]', 'metadata': 'metadata_pb'}), '(values=VALUE_PBS[0], 
metadata=metadata_pb)\n', (13693, 13736), False, 'from google.cloud.spanner_v1.proto.result_set_pb2 import PartialResultSet, ResultSetMetadata, ResultSetStats\n'), ((13750, 13803), 'google.cloud.spanner_v1.proto.result_set_pb2.PartialResultSet', 'PartialResultSet', ([], {'values': 'VALUE_PBS[1]', 'stats': 'stats_pb'}), '(values=VALUE_PBS[1], stats=stats_pb)\n', (13766, 13803), False, 'from google.cloud.spanner_v1.proto.result_set_pb2 import PartialResultSet, ResultSetMetadata, ResultSetStats\n'), ((15329, 15372), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionSelector', 'TransactionSelector', ([], {'single_use': 'txn_options'}), '(single_use=txn_options)\n', (15348, 15372), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((15640, 15699), 'google.cloud.spanner_v1._helpers._merge_query_options', '_merge_query_options', (['expected_query_options', 'query_options'], {}), '(expected_query_options, query_options)\n', (15660, 15699), False, 'from google.cloud.spanner_v1._helpers import _make_value_pb, _merge_query_options\n'), ((32059, 32118), 'google.cloud._helpers._pb_timestamp_to_datetime', '_pb_timestamp_to_datetime', (['options.read_only.read_timestamp'], {}), '(options.read_only.read_timestamp)\n', (32084, 32118), False, 'from google.cloud._helpers import _pb_timestamp_to_datetime\n'), ((32539, 32602), 'google.cloud._helpers._pb_timestamp_to_datetime', '_pb_timestamp_to_datetime', (['options.read_only.min_read_timestamp'], {}), '(options.read_only.min_read_timestamp)\n', (32564, 32602), False, 'from google.cloud._helpers import _pb_timestamp_to_datetime\n'), ((34231, 34290), 'google.cloud._helpers._pb_timestamp_to_datetime', '_pb_timestamp_to_datetime', (['options.read_only.read_timestamp'], {}), '(options.read_only.read_timestamp)\n', (34256, 34290), False, 'from google.cloud._helpers import _pb_timestamp_to_datetime\n'), ((3123, 3134), 'mock.call', 'mock.call', ([], {}), '()\n', (3132, 3134), False, 'import mock\n'), ((3136, 3163), 'mock.call', 'mock.call', ([], {'resume_token': "b''"}), "(resume_token=b'')\n", (3145, 3163), False, 'import mock\n'), ((3741, 3752), 'mock.call', 'mock.call', ([], {}), '()\n', (3750, 3752), False, 'import mock\n'), ((3754, 3790), 'mock.call', 'mock.call', ([], {'resume_token': 'RESUME_TOKEN'}), '(resume_token=RESUME_TOKEN)\n', (3763, 3790), False, 'import mock\n'), ((4341, 4352), 'mock.call', 'mock.call', ([], {}), '()\n', (4350, 4352), False, 'import mock\n'), ((4354, 4390), 'mock.call', 'mock.call', ([], {'resume_token': 'RESUME_TOKEN'}), '(resume_token=RESUME_TOKEN)\n', (4363, 4390), False, 'import mock\n'), ((5725, 5764), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionSelector', 'TransactionSelector', ([], {'single_use': 'options'}), '(single_use=options)\n', (5744, 5764), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((7698, 7718), 'google.cloud.spanner_v1._helpers._make_value_pb', '_make_value_pb', (['item'], {}), '(item)\n', (7712, 7718), False, 'from google.cloud.spanner_v1._helpers import _make_value_pb\n'), ((9646, 9686), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionOptions.ReadOnly', 'TransactionOptions.ReadOnly', ([], {'strong': '(True)'}), '(strong=True)\n', (9673, 9686), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((9781, 9819), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionSelector', 'TransactionSelector', 
([], {'begin': 'txn_options'}), '(begin=txn_options)\n', (9800, 9819), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((9877, 9907), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionSelector', 'TransactionSelector', ([], {'id': 'TXN_ID'}), '(id=TXN_ID)\n', (9896, 9907), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((13066, 13086), 'google.cloud.spanner_v1._helpers._make_value_pb', '_make_value_pb', (['item'], {}), '(item)\n', (13080, 13086), False, 'from google.cloud.spanner_v1._helpers import _make_value_pb\n'), ((15018, 15058), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionOptions.ReadOnly', 'TransactionOptions.ReadOnly', ([], {'strong': '(True)'}), '(strong=True)\n', (15045, 15058), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((15153, 15191), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionSelector', 'TransactionSelector', ([], {'begin': 'txn_options'}), '(begin=txn_options)\n', (15172, 15191), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((15249, 15279), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionSelector', 'TransactionSelector', ([], {'id': 'TXN_ID'}), '(id=TXN_ID)\n', (15268, 15279), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((17595, 17648), 'google.cloud.spanner_v1.proto.spanner_pb2.ExecuteSqlRequest.QueryOptions', 'ExecuteSqlRequest.QueryOptions', ([], {'optimizer_version': '"""3"""'}), "(optimizer_version='3')\n", (17625, 17648), False, 'from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest\n'), ((18515, 18541), 'google.cloud.spanner_v1.types.Transaction', 'Transaction', ([], {'id': 'new_txn_id'}), '(id=new_txn_id)\n', (18526, 18541), False, 'from google.cloud.spanner_v1.types import Transaction\n'), ((21988, 22014), 'google.cloud.spanner_v1.types.Transaction', 'Transaction', ([], {'id': 'new_txn_id'}), '(id=new_txn_id)\n', (21999, 22014), False, 'from google.cloud.spanner_v1.types import Transaction\n'), ((25883, 25909), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (25907, 25909), False, 'import datetime\n'), ((36871, 36933), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionOptions.ReadOnly', 'TransactionOptions.ReadOnly', ([], {'exact_staleness': 'expected_duration'}), '(exact_staleness=expected_duration)\n', (36898, 36933), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((37847, 37887), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionOptions.ReadOnly', 'TransactionOptions.ReadOnly', ([], {'strong': '(True)'}), '(strong=True)\n', (37874, 37887), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((5424, 5468), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionSelector', 'TransactionSelector', ([], {'id': 'self._transaction_id'}), '(id=self._transaction_id)\n', (5443, 5468), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((5667, 5701), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionSelector', 'TransactionSelector', ([], {'begin': 'options'}), '(begin=options)\n', (5686, 5701), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import 
TransactionOptions, TransactionSelector\n'), ((15433, 15454), 'google.cloud.spanner_v1._helpers._make_value_pb', '_make_value_pb', (['value'], {}), '(value)\n', (15447, 15454), False, 'from google.cloud.spanner_v1._helpers import _make_value_pb\n'), ((18388, 18422), 'google.cloud.spanner_v1.types.Partition', 'Partition', ([], {'partition_token': 'token_1'}), '(partition_token=token_1)\n', (18397, 18422), False, 'from google.cloud.spanner_v1.types import Partition\n'), ((18440, 18474), 'google.cloud.spanner_v1.types.Partition', 'Partition', ([], {'partition_token': 'token_2'}), '(partition_token=token_2)\n', (18449, 18474), False, 'from google.cloud.spanner_v1.types import Partition\n'), ((21861, 21895), 'google.cloud.spanner_v1.types.Partition', 'Partition', ([], {'partition_token': 'token_1'}), '(partition_token=token_1)\n', (21870, 21895), False, 'from google.cloud.spanner_v1.types import Partition\n'), ((21913, 21947), 'google.cloud.spanner_v1.types.Partition', 'Partition', ([], {'partition_token': 'token_2'}), '(partition_token=token_2)\n', (21922, 21947), False, 'from google.cloud.spanner_v1.types import Partition\n'), ((22735, 22756), 'google.cloud.spanner_v1._helpers._make_value_pb', '_make_value_pb', (['value'], {}), '(value)\n', (22749, 22756), False, 'from google.cloud.spanner_v1._helpers import _make_value_pb\n'), ((39112, 39141), 'google.api_core.exceptions.ServiceUnavailable', 'ServiceUnavailable', (['"""testing"""'], {}), "('testing')\n", (39130, 39141), False, 'from google.api_core.exceptions import ServiceUnavailable\n'), ((5545, 5585), 'google.cloud.spanner_v1.proto.transaction_pb2.TransactionOptions.ReadOnly', 'TransactionOptions.ReadOnly', ([], {'strong': '(True)'}), '(strong=True)\n', (5572, 5585), False, 'from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions, TransactionSelector\n'), ((7864, 7881), 'google.cloud.spanner_v1.proto.type_pb2.Type', 'Type', ([], {'code': 'STRING'}), '(code=STRING)\n', (7868, 7881), False, 'from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType\n'), ((7934, 7950), 'google.cloud.spanner_v1.proto.type_pb2.Type', 'Type', ([], {'code': 'INT64'}), '(code=INT64)\n', (7938, 7950), False, 'from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType\n'), ((8133, 8150), 'google.cloud.spanner_v1._helpers._make_value_pb', '_make_value_pb', (['(2)'], {}), '(2)\n', (8147, 8150), False, 'from google.cloud.spanner_v1._helpers import _make_value_pb\n'), ((13266, 13283), 'google.cloud.spanner_v1.proto.type_pb2.Type', 'Type', ([], {'code': 'STRING'}), '(code=STRING)\n', (13270, 13283), False, 'from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType\n'), ((13342, 13359), 'google.cloud.spanner_v1.proto.type_pb2.Type', 'Type', ([], {'code': 'STRING'}), '(code=STRING)\n', (13346, 13359), False, 'from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType\n'), ((13412, 13428), 'google.cloud.spanner_v1.proto.type_pb2.Type', 'Type', ([], {'code': 'INT64'}), '(code=INT64)\n', (13416, 13428), False, 'from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType\n'), ((13611, 13628), 'google.cloud.spanner_v1._helpers._make_value_pb', '_make_value_pb', (['(2)'], {}), '(2)\n', (13625, 13628), False, 'from google.cloud.spanner_v1._helpers import _make_value_pb\n')] |
from typing import Callable, Tuple
import numpy as np
def posterior_factory(y: np.ndarray, sigma_y: float, sigma_theta: float) -> Tuple[Callable]:
"""The banana distribution is a distribution that exhibits a characteristic
banana-shaped ridge that resembles the posterior that can emerge from
models that are not identifiable. The distribution is the posterior of the
following generative model.
y ~ Normal(theta[0] + theta[1]**2, sigma_sq_y)
theta[i] ~ Normal(0, sigma_sq_theta)
Args:
y: Observations of the banana model.
sigma_y: Standard deviation of the observations.
sigma_theta: Standard deviation of prior over linear coefficients.
Returns:
log_posterior: Function to compute the log-posterior.
        metric: Function to compute the Fisher information metric.
        log_posterior_and_metric: Function to compute the log-posterior and the
            Fisher information metric together.
euclidean_auxiliaries: Function to compute the log-posterior and its
gradient.
riemannian_auxiliaries: Function to compute the log-posterior, the
gradient of the log-posterior, the Fisher information metric, and the
derivatives of the Fisher information metric.
"""
sigma_sq_y = np.square(sigma_y)
sigma_sq_theta = np.square(sigma_theta)
def log_posterior(theta: np.ndarray) -> float:
"""The banana-shaped distribution posterior.
Args:
theta: Linear coefficients.
Returns:
out: The log-posterior of the banana-shaped distribution.
"""
p = theta[0] + np.square(theta[1])
ll = -0.5 / sigma_sq_y * np.square(y - p).sum()
lp = -0.5 / sigma_sq_theta * np.square(theta).sum()
return ll + lp
def grad_log_posterior(theta: np.ndarray) -> np.ndarray:
"""Gradient of the banana-shaped distribution with respect to the linear
coefficients.
Args:
theta: Linear coefficients.
Returns:
out: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
"""
p = theta[0] + np.square(theta[1])
d = np.sum(y - p)
ga = d / sigma_sq_y - theta[0] / sigma_sq_theta
gb = 2.0*d / sigma_sq_y * theta[1] - theta[1] / sigma_sq_theta
return np.hstack((ga, gb))
def metric(theta: np.ndarray) -> np.ndarray:
"""The Fisher information is the negative expected outer product of the
gradient of the posterior.
Args:
theta: Linear coefficients.
Returns:
G: The Fisher information metric of the banana-shaped distribution.
"""
n = y.size
s = 2.0*n*theta[1] / sigma_sq_y
G = np.array([[n / sigma_sq_y + 1.0 / sigma_sq_theta, s],
[s, 4.0*n*np.square(theta[1]) / sigma_sq_y + 1.0 / sigma_sq_theta]])
return G
def grad_metric(theta: np.ndarray) -> np.ndarray:
"""The gradient of the Fisher information metric with respect to the linear
coefficients.
Args:
theta: Linear coefficients.
Returns:
dG: The gradient of the Fisher information metric with respect to the
linear coefficients.
"""
n = y.size
dG = np.array([
[[0.0, 0.0], [0.0, 2.0*n / sigma_sq_y]],
[[0.0, 2.0*n / sigma_sq_y], [0.0, 8.0*n*theta[1] / sigma_sq_y]]
])
return dG
def euclidean_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]:
"""Function to compute the log-posterior and the gradient of the
log-posterior.
Args:
theta: Linear coefficients.
Returns:
lp: The log-posterior of the banana-shaped distribution.
glp: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
"""
lp = log_posterior(theta)
glp = grad_log_posterior(theta)
return lp, glp
    def riemannian_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]:
"""Function to compute the log-posterior, the gradient of the log-posterior,
the Fisher information metric and the derivatives of the Fisher
information metric.
Args:
theta: Linear coefficients.
Returns:
lp: The log-posterior of the banana-shaped distribution.
glp: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
G: The Fisher information metric of the banana-shaped distribution.
dG: The gradient of the Fisher information metric with respect to the
linear coefficients.
"""
lp = log_posterior(theta)
glp = grad_log_posterior(theta)
G = metric(theta)
dG = grad_metric(theta)
return lp, glp, G, dG
def log_posterior_and_metric(theta: np.ndarray) -> Tuple[np.ndarray]:
lp = log_posterior(theta)
G = metric(theta)
return lp, G
    return log_posterior, metric, log_posterior_and_metric, euclidean_auxiliaries, riemannian_auxiliaries
def generate_data(t: float, sigma_y: float, sigma_theta: float, num_obs: int) -> np.ndarray:
"""Generate data from the banana-shaped posterior distribution.
Args:
        t: Free parameter (must satisfy t <= 1) determining the thetas.
sigma_y: Noise standard deviation.
sigma_theta: Prior standard deviation over the thetas.
num_obs: Number of observations to generate.
Returns:
theta: Linear coefficients of the banana-shaped distribution.
y: Observations from the unidentifiable model.
"""
theta = np.array([t, np.sqrt(1.0 - t)])
y = theta[0] + np.square(theta[1]) + sigma_y * np.random.normal(size=(num_obs, ))
return theta, y
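# Minimal usage sketch; the concrete values below (t=0.5, sigma_y=2.0,
# sigma_theta=2.0, num_obs=100) are illustrative assumptions, not values
# prescribed by the module above.
if __name__ == '__main__':
    theta_true, y_obs = generate_data(t=0.5, sigma_y=2.0, sigma_theta=2.0, num_obs=100)
    log_posterior, metric = posterior_factory(y_obs, sigma_y=2.0, sigma_theta=2.0)[:2]
    print('log-posterior at the generating theta:', log_posterior(theta_true))
    print('Fisher metric at the generating theta:')
    print(metric(theta_true))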
| [
"numpy.random.normal",
"numpy.sqrt",
"numpy.hstack",
"numpy.square",
"numpy.sum",
"numpy.array"
]
| [((1187, 1205), 'numpy.square', 'np.square', (['sigma_y'], {}), '(sigma_y)\n', (1196, 1205), True, 'import numpy as np\n'), ((1227, 1249), 'numpy.square', 'np.square', (['sigma_theta'], {}), '(sigma_theta)\n', (1236, 1249), True, 'import numpy as np\n'), ((2141, 2154), 'numpy.sum', 'np.sum', (['(y - p)'], {}), '(y - p)\n', (2147, 2154), True, 'import numpy as np\n'), ((2297, 2316), 'numpy.hstack', 'np.hstack', (['(ga, gb)'], {}), '((ga, gb))\n', (2306, 2316), True, 'import numpy as np\n'), ((3279, 3407), 'numpy.array', 'np.array', (['[[[0.0, 0.0], [0.0, 2.0 * n / sigma_sq_y]], [[0.0, 2.0 * n / sigma_sq_y], [\n 0.0, 8.0 * n * theta[1] / sigma_sq_y]]]'], {}), '([[[0.0, 0.0], [0.0, 2.0 * n / sigma_sq_y]], [[0.0, 2.0 * n /\n sigma_sq_y], [0.0, 8.0 * n * theta[1] / sigma_sq_y]]])\n', (3287, 3407), True, 'import numpy as np\n'), ((1534, 1553), 'numpy.square', 'np.square', (['theta[1]'], {}), '(theta[1])\n', (1543, 1553), True, 'import numpy as np\n'), ((2109, 2128), 'numpy.square', 'np.square', (['theta[1]'], {}), '(theta[1])\n', (2118, 2128), True, 'import numpy as np\n'), ((5746, 5762), 'numpy.sqrt', 'np.sqrt', (['(1.0 - t)'], {}), '(1.0 - t)\n', (5753, 5762), True, 'import numpy as np\n'), ((5784, 5803), 'numpy.square', 'np.square', (['theta[1]'], {}), '(theta[1])\n', (5793, 5803), True, 'import numpy as np\n'), ((5816, 5849), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num_obs,)'}), '(size=(num_obs,))\n', (5832, 5849), True, 'import numpy as np\n'), ((1587, 1603), 'numpy.square', 'np.square', (['(y - p)'], {}), '(y - p)\n', (1596, 1603), True, 'import numpy as np\n'), ((1647, 1663), 'numpy.square', 'np.square', (['theta'], {}), '(theta)\n', (1656, 1663), True, 'import numpy as np\n'), ((2805, 2824), 'numpy.square', 'np.square', (['theta[1]'], {}), '(theta[1])\n', (2814, 2824), True, 'import numpy as np\n')] |
import pytest
from thefuck.rules.pacman_invalid_option import get_new_command
from thefuck.rules.pacman_invalid_option import match
from thefuck.types import Command
good_output = """community/shared_meataxe 1.0-3
A set of programs for working with matrix representations over finite fields
"""
bad_output = "error: invalid option '-"
@pytest.mark.parametrize("option", "SURQFDVT")
def test_not_match_good_output(option):
assert not match(Command("pacman -{}s meat".format(option), good_output))
@pytest.mark.parametrize("option", "azxcbnm")
def test_not_match_bad_output(option):
assert not match(Command("pacman -{}v meat".format(option), bad_output))
@pytest.mark.parametrize("option", "surqfdvt")
def test_match(option):
assert match(Command("pacman -{}v meat".format(option), bad_output))
@pytest.mark.parametrize("option", "surqfdvt")
def test_get_new_command(option):
new_command = get_new_command(
Command("pacman -{}v meat".format(option), ""))
assert new_command == "pacman -{}v meat".format(option.upper())
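# Taken together, these cases pin down the rule's contract: it fires only when pacman
# reports "error: invalid option" for a lower-case operation flag (one of s/u/r/q/f/d/v/t),
# and the suggested fix upper-cases that flag, e.g. "pacman -sv meat" -> "pacman -Sv meat".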
| [
"pytest.mark.parametrize"
]
| [((345, 390), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""option"""', '"""SURQFDVT"""'], {}), "('option', 'SURQFDVT')\n", (368, 390), False, 'import pytest\n'), ((512, 556), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""option"""', '"""azxcbnm"""'], {}), "('option', 'azxcbnm')\n", (535, 556), False, 'import pytest\n'), ((676, 721), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""option"""', '"""surqfdvt"""'], {}), "('option', 'surqfdvt')\n", (699, 721), False, 'import pytest\n'), ((822, 867), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""option"""', '"""surqfdvt"""'], {}), "('option', 'surqfdvt')\n", (845, 867), False, 'import pytest\n')] |