ext | sha | content |
---|---|---|
py | b41742b9ed877085556492817d0943bcd6863d16 | #!/usr/bin/env python
import os, sys, re, platform
import subprocess
import ConfigParser
import collections
from optparse import OptionParser
HANDLE_DUPS = True
PKG_NOT_INSTALLED=0
PKG_INSTALLED=0x1
PKG_HOLD=0x2
class MultiDict(dict):
def __setitem__(self, key, value):
if isinstance(value, list) and key in self:
self[key].extend(value)
else:
super(self.__class__, self).__setitem__(key, value)
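# Illustrative behaviour (assumed, not part of the original file): MultiDict is
# intended to be passed as ConfigParser(dict_type=MultiDict) so that duplicate
# option names accumulate their values instead of overwriting each other, e.g.
#   d = MultiDict()
#   d['git'] = ['latest']
#   d['git'] = ['1.9.1']
#   d['git']  # -> ['latest', '1.9.1']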
class PackageInfo(object):
'''Information describing a package'''
def __init__(self, name=None, state=0, version=None, requiredVersion=None, candidateVersion=None, arch=None):
self._name = name
self._state = state
self._version = version
self._required_version = requiredVersion
self._arch = arch
self._custom_action = None
self._candidate_version = candidateVersion
def isInstalled(self):
return 0 != (self._state & PKG_INSTALLED)
def isOnHold(self):
return 0 != (self._state & PKG_INSTALLED) and 0 != (self._state & PKG_HOLD)
def getName(self):
return self._name
def getVersion(self, raw=False):
if raw:
return self._version or 'none'
return self._version or 'latest'
def getRequiredVersion(self, raw=False):
if raw:
return self._required_version or 'none'
return self._required_version or 'latest'
def getCandidateVersion(self, raw=False):
if raw:
return self._candidate_version or 'none'
if self.getRequiredVersion() == 'latest':
return 'latest'
return self._candidate_version or 'latest'
def getCustomAction(self):
return self._custom_action
allPackageMgrs = {}
class PackageMgr(object):
'''Generic package manager class
'''
def __init__(self, name=None, noroot=False):
self._name = name
self._noroot = noroot
@classmethod
def addPackageMgr(cls):
allPackageMgrs[cls.__name__] = cls
@staticmethod
def getPackageMgr(name):
return allPackageMgrs.get(name)
def getName(self):
return self._name
def getPackageInfo(self, names, debug=True):
'''Get installed list of PackageInfo instances
Override this in derived classes.
'''
raise NotImplementedError()
def installPackages(self, packageList, update, debug, verbose):
'''Install packages.
Override this in derived classes.
Args:
packageList: list of PackageInfo instances
update: boolean indicating if an update of available packages is
required.
debug: If true commands are sent to stdout but not executed.
'''
raise NotImplementedError()
def refreshPackageCandidates(self, packageList, debug):
'''Refresh candidate version.
Override this in derived classes.
Args:
packageList: list of PackageInfo instances
debug: If true commands are sent to stdout but not executed.
'''
raise NotImplementedError()
def execute(self, command, debug, verbose=False):
if debug:
print(command)
return True
else:
if verbose:
print(command)
return 0 == os.system(command)
def executeAsRoot(self, command, debug, verbose=False):
if not self._noroot and os.getuid() != 0:
command = 'sudo ' + command
return self.execute(command, debug=debug, verbose=verbose)
def checkVersionLess(self, ver1, ver2):
'''Check if ver1 < ver2
'''
ver1 = ver1.replace('-','.').split('.')
ver2 = ver2.replace('-','.').split('.')
for i in range(min(len(ver1),len(ver2))):
v1 = int(ver1[i])
v2 = int(ver2[i])
if v1 < v2:
return True
elif v1 > v2:
return False
# must be equal so far
return len(ver1) < len(ver2)
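# Worked example (illustrative): versions are compared numerically component by
# component after '-' is folded into '.', so
#   checkVersionLess('1.9.1', '1.10.0')  # -> True  (9 < 10)
#   checkVersionLess('2.0', '2.0.1')     # -> True  (equal prefix, fewer parts)
#   checkVersionLess('3.2.2-2', '3.2.2') # -> False (equal prefix, more parts)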
def prepareInstall(self, pkgs):
# Prepare package install into two lists
to_install = []
needs_update = []
for p in pkgs:
if not p.isInstalled():
if p.getRequiredVersion() == 'latest':
to_install.append(p)
elif p.getCandidateVersion() != 'latest' and \
self.checkVersionLess(p.getRequiredVersion(), p.getCandidateVersion()):
to_install.append(p)
else:
needs_update.append(p)
elif p.isOnHold():
warning('%s is on hold, ignoring install request' % p.getName())
elif p.isInstalled() and p.getRequiredVersion() != 'latest' \
and self.checkVersionLess(p.getVersion(),p.getRequiredVersion()):
needs_update.append(p)
return (to_install, needs_update)
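# Illustrative outcome (assumed): an uninstalled package pinned to a version
# older than the repository candidate goes straight into to_install, an
# installed package whose version is below its required version goes into
# needs_update, and packages on hold are skipped with a warning.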
class AptPackageMgr(PackageMgr):
'''Apt'''
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self._threshold = 500 # just a guess
# dpkg-query sample output
# un www-browser
# ii x11-common 1:7.7+1ubuntu8.1
# ii x11proto-composite-dev 1:0.4.2-2
# ii x11proto-core-dev 7.0.26-1~ubuntu2
# ii x11proto-xinerama-dev 1.2.1-2
# ii xauth 1:1.0.7-1ubuntu1
# ii xfonts-encodings 1:1.0.4-1ubuntu1
# ii xfonts-utils 1:7.7+1
# ii cmake 3.2.2-2~ubuntu14.04.1~ppa1
self._dpkg_query = re.compile(r'^([a-zA-Z ]{3})\s([+.\w_-]+)(?::\w+)?\s(?:\d+:)?(\d[.\d-]*\d)[~+.\w_-]*\s*$')
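# Illustrative match (assumed): for a line such as
#   'ii  cmake 3.2.2-2~ubuntu14.04.1~ppa1'
# the groups are ('ii ', 'cmake', '3.2.2-2'): the three-character status
# abbreviation, the package name, and the numeric version with any epoch
# prefix and '~...' suffix stripped.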
# apt-cache policy git sample output
# git:
# Installed: 1:1.9.1-1ubuntu0.3
# Candidate: 1:1.9.1-1ubuntu0.3
# Version table:
# 1:1.9.1-1ubuntu0.3 0
# 500 http://us-east-1.ec2.archive.ubuntu.com/ubuntu/ trusty-updates/main amd64 Packages
# 500 http://security.ubuntu.com/ubuntu/ trusty-security/main amd64 Packages
# *** 1:1.9.1-1ubuntu0.3 0
# 100 /var/lib/dpkg/status
# 1:1.9.1-1 0
# 500 http://us-east-1.ec2.archive.ubuntu.com/ubuntu/ trusty/main amd64 Packages
# cmake:
# Installed: 3.2.2-2~ubuntu14.04.1~ppa1
# Candidate: 3.2.2-2~ubuntu14.04.1~ppa1
# Version table:
# *** 3.2.2-2~ubuntu14.04.1~ppa1 0
# 500 http://ppa.launchpad.net/george-edison55/cmake-3.x/ubuntu/ trusty/main amd64 Packages
# 100 /var/lib/dpkg/status
# 2.8.12.2-0ubuntu3 0
# 500 http://us-east-1.ec2.archive.ubuntu.com/ubuntu/ trusty/main amd64 Packages
#self._apt_query = re.compile(r'^\s*installed:(?:\d+:)?(?P<installed>\d[.\d-]*\d)[~+.\w_-]*\s*$|^\s*candidate:(?:\d+:)?(?P<candidate>\d[.\d-]*\d)[~+.\w_-]*\s*$', \
self._apt_query = re.compile(r'^\s*installed:\s*(?:\d+:)?(?P<installed>\d[.\d-]*\d)\D.*$|^\s*candidate:\s*(?:\d+:)?(?P<candidate>\d[.\d-]*\d)\D.*$', \
flags=re.MULTILINE|re.IGNORECASE)
def _parseDpkgQuery(self, result):
# ii zlib1g-dev:amd64 1:1.2.8.dfsg-1ubuntu1
srch = self._dpkg_query.match(result)
if srch is not None:
#print("DEBUG " + str((srch.group(0),srch.group(1),srch.group(2),srch.group(3))))
pkg = PackageInfo(name=srch.group(2), version=srch.group(3))
if srch.group(1)[0] == 'i':
pkg._state = PKG_INSTALLED
elif srch.group(1)[0] == 'h':
pkg._state = PKG_INSTALLED|PKG_HOLD
return pkg
return None
def _queryApt(self, pkgname, FNULL=None, debug=False):
if debug:
print(' Running apt-cache policy %s' % pkgname)
if FNULL is None:
with open(os.devnull, 'w') as FNULL:
result = subprocess.Popen(['apt-cache', 'policy', pkgname], \
stderr=FNULL, stdout=subprocess.PIPE).communicate()[0]
else:
result = subprocess.Popen(['apt-cache', 'policy', pkgname], \
stderr=FNULL, stdout=subprocess.PIPE).communicate()[0]
srch = self._apt_query.search(result)
if srch is not None:
candidate = None
installed = None
if srch.group('installed') is not None and len(srch.group('installed')):
installed = srch.group('installed')
if srch.group('candidate') is not None and len(srch.group('candidate')) != 0:
candidate = srch.group('candidate')
if debug:
if installed is None and candidate is None:
print(' result inconclusive')
print('--------------')
print(result)
print('--------------')
else:
print(' result = (%r,%r)' % (installed, candidate))
return (installed, candidate)
elif debug:
print(' no results')
print('--------------')
print(result)
print('--------------')
return (None,None)
def installPackages(self, packageList, update=False, debug=False, verbose=False):
'''Install packages.
Args:
packageList: list of PackageInfo instances
update: if True then an update of available packages is required
before install.
debug: If true commands are sent to stdout but not executed.
'''
if self._noroot or os.getuid() == 0:
updatefmt = 'aptitude %s'
installfmt = 'aptitude -y install %s'
else:
updatefmt = 'sudo aptitude %s'
installfmt = 'sudo aptitude -y install %s'
if update:
self.execute(updatefmt % 'update', debug=debug, verbose=verbose)
self.execute(updatefmt % '-y upgrade', debug=debug, verbose=verbose)
args = ''
for pkg in packageList:
if (len(pkg.getName()) + 1 + len(args)) > 80:
self.execute(installfmt % args, debug=debug, verbose=verbose)
args = ''
args += ' ' + pkg.getName()
if len(args) != 0:
self.execute(installfmt % args, debug=debug, verbose=verbose)
def refreshPackageCandidates(self, packageList, debug=False):
'''Refresh candidate version.
Args:
packageList: list of PackageInfo instances
debug: If true commands are sent to stdout but not executed.
'''
with open(os.devnull, 'w') as FNULL:
for pkg in packageList:
pkg._version,pkg._candidate_version = self._queryApt(pkg.getName(), FNULL, debug=debug)
def getPackageInfo(self, names, debug=True):
'''Get packages in the same order as names
Args:
names: list of package names.
debug: If true commands are sent to stdout but not executed.
Returns:
A tuple (list of PackageInfo instances, list of missing names).
'''
if not isinstance(names, collections.Iterable):
names = { names: 'latest' }
pkgs = []
missing = []
if len(names) > self._threshold and not debug:
# At some threshold its cheaper to get all packages installed.
allpkgs = {}
with open(os.devnull, 'w') as FNULL:
results = subprocess.Popen(['dpkg-query', '-f=${db:Status-Abbrev} ${binary:Package} ${Version}\n', '-W', '*'], \
stderr=FNULL, stdout=subprocess.PIPE).communicate()[0]
results = results.split('\n')
for result in results:
if len(result) == 0:
continue
pkg = self._parseDpkgQuery(result)
if pkg is not None:
allpkgs[pkg.getName()] = pkg
for nm in names:
pkg = allpkgs.get(nm[0])
if pkg is not None:
pkg._required_version = nm[1]
if len(nm) == 3:
pkg._custom_action = nm[2]
if pkg.getRequiredVersion() != 'latest':
# Check using apt-cache
_,pkg._candidate_version = self._queryApt(nm[0], debug=debug)
pkgs.append(pkg)
else:
# Check using apt-cache
installed, candidate = self._queryApt(nm[0], debug=debug)
if installed is not None:
warning('dpkg-query parse failure - check regex in this script')
pkg = PackageInfo(name=nm[0],state=PKG_INSTALLED,version=installed,requiredVersion=nm[1],candidateVersion=candidate)
pkgs.append(pkg)
elif candidate is not None:
pkg = PackageInfo(name=nm[0],version=installed,requiredVersion=nm[1],candidateVersion=candidate)
pkgs.append(pkg)
else:
# will need to do apt-get update
missing.append(nm)
else:
for nm in names:
with open(os.devnull, 'w') as FNULL:
result = subprocess.Popen(['dpkg-query', '-f=${db:Status-Abbrev} ${binary:Package} ${Version}\n', '-W', nm[0]], \
stderr=FNULL, stdout=subprocess.PIPE).communicate()[0]
result = result.strip('\n')
if debug:
print('dpkg-query %s result=%s' % (nm[0], result))
pkg = self._parseDpkgQuery(result)
if pkg is not None:
pkg._required_version = nm[1]
if len(nm) == 3:
pkg._custom_action = nm[2]
if pkg.getRequiredVersion() != 'latest':
# Check using apt-cache
_,pkg._candidate_version = self._queryApt(nm[0], debug=debug)
pkgs.append(pkg)
else:
# Check using apt-cache
installed, candidate = self._queryApt(nm[0], debug=debug)
if installed is not None:
warning('dpkg-query parse failure - check regex in this script')
pkg = PackageInfo(name=nm[0],state=PKG_INSTALLED,version=installed,requiredVersion=nm[1],candidateVersion=candidate)
pkgs.append(pkg)
elif candidate is not None:
pkg = PackageInfo(name=nm[0],version=installed,requiredVersion=nm[1],candidateVersion=candidate)
pkgs.append(pkg)
else:
# will need to do apt-get update
missing.append(nm)
return (pkgs, missing)
class PipPackageMgr(PackageMgr):
'''Pip'''
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
# scikit-image (0.12.3)
# scikit-learn (0.17.1)
# scipy (0.17.1)
self._pip_list = re.compile(r'(?P<pkgname>[\w.~-]+)\s*\((?P<installed>\d+[.\d-]*)\)')
self._pip_search1 = re.compile(r'^(?P<pkgname>[\w.~-]+)\s*\((?P<candidate>\d+[.\d-]*)\)')
self._pip_search2 = re.compile(r'^\s*INSTALLED:\s*(?P<installed>\d+[.\d-]*)')
def _parsePipSearch1(self, result):
srch = self._pip_search1.search(result)
if srch is not None:
pkg = PackageInfo(name=srch.group('pkgname'), candidateVersion=srch.group('candidate'))
return pkg
return None
def _parsePipSearch2(self, pkg, result):
srch = self._pip_search2.search(result)
if srch is not None:
pkg._version = srch.group('installed')
return True
return False
def _parsePipList(self, result):
srch = self._pip_list.search(result)
if srch is not None:
pkg = PackageInfo(name=srch.group('pkgname'), version=srch.group('installed'))
pkg._state = PKG_INSTALLED
return pkg
return None
def getPackageInfo(self, names, debug=True):
'''Get packages in the same order as names
Args:
names: list of package names.
debug: If true commands are sent to stdout but not executed.
Returns:
A tuple (list of PackageInfo instances, list of missing names).
'''
if not isinstance(names, collections.Iterable):
names = { names: 'latest' }
pkgs = []
missing = []
allpkgs = {}
with open(os.devnull, 'w') as FNULL:
results = subprocess.Popen(['pip', 'list'], stderr=FNULL, stdout=subprocess.PIPE).communicate()[0]
results = results.split('\n')
for result in results:
if len(result) == 0:
continue
pkg = self._parsePipList(result)
if pkg is not None:
allpkgs[pkg.getName()] = pkg
todo = []
for nm in names:
pkg = allpkgs.get(nm[0])
if pkg is not None:
pkg._required_version = nm[1]
if len(nm) == 3:
pkg._custom_action = nm[2]
pkgs.append(pkg)
else:
todo.append(nm)
with open(os.devnull, 'w') as FNULL:
for nm in todo:
pkg = None
# Handle pip's woeful search facility inherited from PyPI.
target = re.split('[\d,.~-]', nm[0])
results = subprocess.Popen(['pip', 'search', target[0]], stderr=FNULL, stdout=subprocess.PIPE).communicate()[0]
if debug:
print('Searching for %s' % nm[0])
print('---------')
print(results)
print('---------')
results = results.split('\n')
for r in results:
if pkg is not None:
self._parsePipSearch2(pkg, r)
break
if r.find(nm[0]) == 0:
if debug: print('found partial match: %s' % r)
pkg = self._parsePipSearch1(r)
if pkg is not None and pkg.getName() != nm[0]:
pkg = None
continue
elif pkg is not None:
pkgs.append(pkg)
if pkg is None:
missing.append(nm)
return (pkgs, missing)
def installPackages(self, packageList, update, debug, verbose):
'''Install packages.
Args:
packageList: list of PackageInfo instances
update: Ignored for PIP.
debug: If true commands are sent to stdout but not executed.
'''
for pkg in packageList:
if pkg.getRequiredVersion() != 'latest':
self.executeAsRoot('pip install %s==%s' % (pkg.getName(), pkg.getRequiredVersion()), debug=debug, verbose=verbose)
else:
self.executeAsRoot('pip install ' + pkg.getName(), debug=debug, verbose=verbose)
def refreshPackageCandidates(self, packageList, debug):
'''Refresh candidate version.
Does nothing for PIP.
Args:
packageList: list of PackageInfo instances
debug: If true commands are sent to stdout but not executed.
'''
pass
def die(msg=None):
if msg is not None:
print("Error: %s" % msg)
sys.exit(1)
def warning(msg):
print("Warning: %s" % msg)
def prepCustomAction(items):
m = {}
for k,v in items:
m[k] = v
if m.get('run') is None:
return None
if m.get('version') is None:
m['version'] = 'latest'
return m
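# Illustrative (assumed) custom-action layout in the .conf file: a package entry
# points at a section whose 'run' key names a script relative to the first
# configuration file, e.g.
#   [apt]
#   mytool = mytool-custom
#   [mytool-custom]
#   version = 1.2
#   run = scripts/install_mytool.sh
# prepCustomAction(cfg.items('mytool-custom')) then returns
# {'version': '1.2', 'run': 'scripts/install_mytool.sh'}, or None if 'run' is missing.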
def extractPackageNames(pkgs):
'''Extract package names from a list of PackageInfo instances.
Args:
pkgs: A list of PackageInfo instances.
Returns:
A list of package names
'''
if not isinstance(pkgs, collections.Iterable):
pkgs = [ pkgs ]
names = []
for p in pkgs:
names.append(p.getName())
return names
def grep_platform(regex):
try:
regex = regex.split('/')
if len(regex) == 3 and len(regex[0]) == 0:
flags = 0
if 'i' in regex[2]:
flags |= re.IGNORECASE
return re.search(regex[1], platform.platform(), flags=flags)
except:
pass
die('bad regex %s in package-selector section' % regex)
return False
def printPackageInfo(packages):
# 0 1 2 3 4 5 6
# 0123456789012345678901234567890123456789012345678901234567890
print(' Package Name Installed Required Candidate')
print(' ------------ --------- -------- ---------')
for u in packages:
print(' %-16s %-14s %-12s %-14s' % (u.getName(), u.getVersion(True), u.getRequiredVersion(True), u.getCandidateVersion(True)))
if __name__ == '__main__':
print('Package Dependency Helper V0.1')
# Parse command line
usage = '%prog [[options] [file1.conf ...]]'
parser = OptionParser(usage)
parser.add_option('-l', '--list', action='store_true', dest='pkglist', help='list selected packages on stdout')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='verbose output')
parser.add_option('-i', '--install', action='store_true', dest='pkginstall', help='install selected packages')
parser.add_option('-r', '--noroot', action='store_true', dest='noroot', help='do not install as root')
parser.add_option('-d', '--debug', action='store_true', dest='debug', help='used with install, print commands but don\'t execute')
(options, args) = parser.parse_args()
if args is None or len(args) == 0:
die('no configuration file so nothing to do')
# FIXME: custom action paths should be relative to the conf file
# At the moment they are relative to the first conf file
if len(args) > 1:
warning("Custom actions are relative the first configuration file path.")
base_path = os.path.abspath(os.path.dirname(args[0]))
# Add supported package managers
AptPackageMgr.addPackageMgr()
PipPackageMgr.addPackageMgr()
# Parse configuration files
if HANDLE_DUPS:
cfg = ConfigParser.ConfigParser(dict_type=MultiDict)
else:
cfg = ConfigParser.ConfigParser(dict_type=dict)
cfg.optionxform = str # preserve case on keys
success = cfg.read(args)
mgrs = {}
if not cfg.has_section('package-managers'):
die('no package-managers in configuration file(s)')
mgrMap = {}
items = cfg.items('package-managers')
for k,v in items:
cls = PackageMgr.getPackageMgr(v)
if cls is None:
warning('%s package-manager class not found' % v)
continue
mgrMap[k] = cls(name=k, noroot=options.noroot)
if len(mgrMap) == 0:
die('no package-managers map to classes')
# Disabled.
#
# Map package manager to platform
# if not cfg.has_section('package-selector'):
# die('no package-selector in configuration file(s)')
if cfg.has_section('package-selector'):
# Remove package-managers not related to this platform
mgr = None
items = cfg.items('package-selector')
for k,v in items:
if mgrMap.get(k) is None:
# No package-manager for this key so remove sections
cfg.remove_section(k)
elif not grep_platform(v):
# This platform is not being used so remove from map and remove sections
# in configuration
mgrMap.pop(k)
cfg.remove_section(k)
if len(mgrMap) == 0:
die('no package-managers after processing package-selector section')
# Load packages for each package-manager
selections = []
for k,mgr in mgrMap.iteritems():
if not cfg.has_section(k):
continue
items = cfg.items(k)
# Check duplicates
if HANDLE_DUPS:
for T in items:
tmp = T[1].split('\n')
if len(tmp) > 1:
die('requested multiple versions for package %s:%s' % (T[0],tmp))
# Check for custom actions
custom = []
xitems = []
for T in items:
if cfg.has_section(T[1]):
CA = prepCustomAction(cfg.items(T[1]))
if CA is None:
die('incomplete custom action %s' % T)
custom.append((T[0],CA['version'],CA['run']))
else:
xitems.append(T)
pkgs,missing = mgr.getPackageInfo(xitems, options.debug)
selections.append((mgr, pkgs, missing, custom))
if options.pkglist:
# First print packages not installed
for mgr,pkgs,missing,custom in selections:
print('%s PACKAGES' % mgr.getName().upper())
for p in pkgs:
if not options.verbose:
if not p.isInstalled():
print(' _ %s' % p.getName())
elif p.isOnHold():
print(' h %s' % p.getName())
elif p.isInstalled():
print(' i %s' % p.getName())
else:
print(' _ %s' % p.getName())
for m in missing:
print(' m %s' % m[0])
for c in custom:
print(' c %s' % c[0])
if options.pkginstall:
for mgr,pkgs,missing,custom in selections:
# Install packages first
todo, update = mgr.prepareInstall(pkgs)
if len(todo) == 0 and len(update) == 0:
print('Packages up to date - no action required')
else:
mgr.installPackages(packageList=todo, update=len(update) != 0, debug=options.debug, verbose=options.verbose)
mgr.refreshPackageCandidates(update, debug=options.debug)
todo, update = mgr.prepareInstall(update)
mgr.installPackages(packageList=todo, update=False, debug=options.debug, verbose=options.verbose)
if len(update) !=0:
warning('unable to satisfy all package constraints')
printPackageInfo(update)
# Now handle custom actions
caPkgs,caMissing = mgr.getPackageInfo(custom, options.debug)
todo, update = mgr.prepareInstall(caPkgs)
if len(caMissing) != 0 or len(update) != 0:
for ca in custom:
exe_path = os.path.join(base_path, ca[2])
if os.path.isfile(exe_path):
if not mgr.executeAsRoot(exe_path, debug=options.debug, verbose=options.verbose):
die('execution failed %s' % exe_path)
else:
die('cannot locate custom action %s' % exe_path)
elif len(todo) == 0:
print('Custom packages up to date - no action required')
else:
mgr.installPackages(packageList=todo, update=len(update) != 0, debug=options.debug, verbose=options.verbose)
mgr.refreshPackageCandidates(update, debug=options.debug)
todo, update = mgr.prepareInstall(update)
mgr.installPackages(packageList=todo, update=False, debug=options.debug, verbose=options.verbose)
if len(update) !=0:
warning('unable to satisfy all package constraints for custom actions')
printPackageInfo(update)
# if we had missing custom packages must do one more retry
if len(caMissing) > 0:
caPkgs,caMissing = mgr.getPackageInfo(custom, options.debug)
todo, updateRequired = mgr.prepareInstall(caPkgs)
mgr.installPackages(packageList=todo, update=True, debug=options.debug, verbose=options.verbose)
caPkgs,caMissing = mgr.getPackageInfo(custom, options.debug)
if len(caMissing):
warning('cannot resolve missing packages %s' % str(caMissing))
|
py | b41742bd9b4bfcb7e570786ea1892c92c61fe3cd | from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QTreeWidgetItem
from qtawesome import icon
from ...types import TreeItemStatus
class TreeItem(QTreeWidgetItem):
_STATUSES = {
TreeItemStatus.Normal: (None, None),
TreeItemStatus.Loading: ('mdi.progress-clock', None),
TreeItemStatus.Error: ('mdi.alert', 'red'),
}
def __init__(self, texts):
super().__init__(texts)
self.setStatus(TreeItemStatus.Normal)
def setStatus(self, status, toolTip=''):
name, color = self._STATUSES[status]
if not name:
self.setIcon(1, icon())
else:
self.setIcon(1, icon(name, color=color))
self.setToolTip(1, toolTip)
self._status = status
@property
def name(self):
return self.text(0)
@name.setter
def name(self, newValue):
self.setText(0, newValue)
if self.parent():
self.parent().sortChildren(0, Qt.AscendingOrder)
@property
def status(self):
return self._status
def illegalNames(self):
target = self.parent() or self.treeWidget()
return target.illegalChildNames() - {self.name}
def illegalChildNames(self):
return {'', *(self.child(i).name for i in range(self.childCount()))}
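# Minimal usage sketch (assumed, not part of the original module): add an item
# to an existing QTreeWidget and flip its status icon while work is in flight.
#   item = TreeItem(['my-node', ''])
#   tree.addTopLevelItem(item)                      # tree: a QTreeWidget
#   item.setStatus(TreeItemStatus.Loading)
#   item.setStatus(TreeItemStatus.Error, toolTip='failed to load')
#   item.setStatus(TreeItemStatus.Normal)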
|
py | b41743710266461b9ed1c7dd42b8ea2ea62553a3 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import itertools
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from watcher.applier import rpcapi as aapi
from watcher.common import utils
from watcher.db import api as db_api
from watcher import objects
from watcher.tests.api import base as api_base
from watcher.tests.objects import utils as obj_utils
class TestListActionPlan(api_base.FunctionalTest):
def setUp(self):
super(TestListActionPlan, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
def test_empty(self):
response = self.get_json('/action_plans')
self.assertEqual([], response['action_plans'])
def _assert_action_plans_fields(self, action_plan):
action_plan_fields = [
'uuid', 'audit_uuid', 'strategy_uuid', 'strategy_name',
'state', 'global_efficacy', 'efficacy_indicators']
for field in action_plan_fields:
self.assertIn(field, action_plan)
def test_one(self):
action_plan = obj_utils.create_test_action_plan(self.context)
response = self.get_json('/action_plans')
self.assertEqual(action_plan.uuid,
response['action_plans'][0]["uuid"])
self._assert_action_plans_fields(response['action_plans'][0])
def test_one_soft_deleted(self):
action_plan = obj_utils.create_test_action_plan(self.context)
action_plan.soft_delete()
response = self.get_json('/action_plans',
headers={'X-Show-Deleted': 'True'})
self.assertEqual(action_plan.uuid,
response['action_plans'][0]["uuid"])
self._assert_action_plans_fields(response['action_plans'][0])
response = self.get_json('/action_plans')
self.assertEqual([], response['action_plans'])
def test_get_one_ok(self):
action_plan = obj_utils.create_test_action_plan(self.context)
obj_utils.create_test_efficacy_indicator(
self.context, action_plan_id=action_plan['id'])
response = self.get_json('/action_plans/%s' % action_plan['uuid'])
self.assertEqual(action_plan.uuid, response['uuid'])
self._assert_action_plans_fields(response)
self.assertEqual(
[{'description': 'Test indicator',
'name': 'test_indicator',
'value': 0.0,
'unit': '%'}],
response['efficacy_indicators'])
def test_get_one_soft_deleted(self):
action_plan = obj_utils.create_test_action_plan(self.context)
action_plan.soft_delete()
response = self.get_json('/action_plans/%s' % action_plan['uuid'],
headers={'X-Show-Deleted': 'True'})
self.assertEqual(action_plan.uuid, response['uuid'])
self._assert_action_plans_fields(response)
response = self.get_json('/action_plans/%s' % action_plan['uuid'],
expect_errors=True)
self.assertEqual(404, response.status_int)
def test_detail(self):
action_plan = obj_utils.create_test_action_plan(self.context)
response = self.get_json('/action_plans/detail')
self.assertEqual(action_plan.uuid,
response['action_plans'][0]["uuid"])
self._assert_action_plans_fields(response['action_plans'][0])
def test_detail_soft_deleted(self):
action_plan = obj_utils.create_test_action_plan(self.context)
action_plan.soft_delete()
response = self.get_json('/action_plans/detail',
headers={'X-Show-Deleted': 'True'})
self.assertEqual(action_plan.uuid,
response['action_plans'][0]["uuid"])
self._assert_action_plans_fields(response['action_plans'][0])
response = self.get_json('/action_plans/detail')
self.assertEqual([], response['action_plans'])
def test_detail_against_single(self):
action_plan = obj_utils.create_test_action_plan(self.context)
response = self.get_json(
'/action_plan/%s/detail' % action_plan['uuid'],
expect_errors=True)
self.assertEqual(404, response.status_int)
def test_many(self):
action_plan_list = []
for id_ in range(5):
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
action_plan_list.append(action_plan.uuid)
response = self.get_json('/action_plans')
self.assertEqual(len(action_plan_list), len(response['action_plans']))
uuids = [s['uuid'] for s in response['action_plans']]
self.assertEqual(sorted(action_plan_list), sorted(uuids))
def test_many_with_soft_deleted_audit_uuid(self):
action_plan_list = []
audit1 = obj_utils.create_test_audit(self.context,
id=2,
uuid=utils.generate_uuid())
audit2 = obj_utils.create_test_audit(self.context,
id=3,
uuid=utils.generate_uuid())
for id_ in range(0, 2):
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid(),
audit_id=audit1.id)
action_plan_list.append(action_plan.uuid)
for id_ in range(2, 4):
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid(),
audit_id=audit2.id)
action_plan_list.append(action_plan.uuid)
self.delete('/audits/%s' % audit1.uuid)
response = self.get_json('/action_plans')
self.assertEqual(len(action_plan_list), len(response['action_plans']))
for id_ in range(0, 2):
action_plan = response['action_plans'][id_]
self.assertIsNone(action_plan['audit_uuid'])
for id_ in range(2, 4):
action_plan = response['action_plans'][id_]
self.assertEqual(audit2.uuid, action_plan['audit_uuid'])
def test_many_with_audit_uuid(self):
action_plan_list = []
audit = obj_utils.create_test_audit(self.context,
id=2,
uuid=utils.generate_uuid())
for id_ in range(2, 5):
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid(),
audit_id=audit.id)
action_plan_list.append(action_plan.uuid)
response = self.get_json('/action_plans')
self.assertEqual(len(action_plan_list), len(response['action_plans']))
for action in response['action_plans']:
self.assertEqual(audit.uuid, action['audit_uuid'])
def test_many_with_audit_uuid_filter(self):
action_plan_list1 = []
audit1 = obj_utils.create_test_audit(self.context,
id=2,
uuid=utils.generate_uuid())
for id_ in range(2, 5):
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid(),
audit_id=audit1.id)
action_plan_list1.append(action_plan.uuid)
audit2 = obj_utils.create_test_audit(self.context,
id=3,
uuid=utils.generate_uuid())
action_plan_list2 = []
for id_ in [5, 6, 7]:
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid(),
audit_id=audit2.id)
action_plan_list2.append(action_plan.uuid)
response = self.get_json('/action_plans?audit_uuid=%s' % audit2.uuid)
self.assertEqual(len(action_plan_list2), len(response['action_plans']))
for action in response['action_plans']:
self.assertEqual(audit2.uuid, action['audit_uuid'])
def test_many_without_soft_deleted(self):
action_plan_list = []
for id_ in [1, 2, 3]:
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
action_plan_list.append(action_plan.uuid)
for id_ in [4, 5]:
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
action_plan.soft_delete()
response = self.get_json('/action_plans')
self.assertEqual(3, len(response['action_plans']))
uuids = [s['uuid'] for s in response['action_plans']]
self.assertEqual(sorted(action_plan_list), sorted(uuids))
def test_many_with_soft_deleted(self):
action_plan_list = []
for id_ in [1, 2, 3]:
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
action_plan_list.append(action_plan.uuid)
for id_ in [4, 5]:
action_plan = obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
action_plan.soft_delete()
action_plan_list.append(action_plan.uuid)
response = self.get_json('/action_plans',
headers={'X-Show-Deleted': 'True'})
self.assertEqual(5, len(response['action_plans']))
uuids = [s['uuid'] for s in response['action_plans']]
self.assertEqual(sorted(action_plan_list), sorted(uuids))
def test_many_with_sort_key_audit_uuid(self):
audit_list = []
for id_ in range(2, 5):
audit = obj_utils.create_test_audit(self.context,
id=id_,
uuid=utils.generate_uuid())
obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid(),
audit_id=audit.id)
audit_list.append(audit.uuid)
response = self.get_json('/action_plans/?sort_key=audit_uuid')
self.assertEqual(3, len(response['action_plans']))
uuids = [s['audit_uuid'] for s in response['action_plans']]
self.assertEqual(sorted(audit_list), uuids)
def test_links(self):
uuid = utils.generate_uuid()
obj_utils.create_test_action_plan(self.context, id=1, uuid=uuid)
response = self.get_json('/action_plans/%s' % uuid)
self.assertIn('links', response.keys())
self.assertEqual(2, len(response['links']))
self.assertIn(uuid, response['links'][0]['href'])
for l in response['links']:
bookmark = l['rel'] == 'bookmark'
self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
def test_collection_links(self):
for id_ in range(5):
obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
response = self.get_json('/action_plans/?limit=3')
self.assertEqual(3, len(response['action_plans']))
next_marker = response['action_plans'][-1]['uuid']
self.assertIn(next_marker, response['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
for id_ in range(5):
obj_utils.create_test_action_plan(
self.context, id=id_, uuid=utils.generate_uuid())
response = self.get_json('/action_plans')
self.assertEqual(3, len(response['action_plans']))
next_marker = response['action_plans'][-1]['uuid']
self.assertIn(next_marker, response['next'])
class TestDelete(api_base.FunctionalTest):
def setUp(self):
super(TestDelete, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
self.action_plan = obj_utils.create_test_action_plan(
self.context)
p = mock.patch.object(db_api.BaseConnection, 'destroy_action_plan')
self.mock_action_plan_delete = p.start()
self.mock_action_plan_delete.side_effect = \
self._simulate_rpc_action_plan_delete
self.addCleanup(p.stop)
def _simulate_rpc_action_plan_delete(self, audit_uuid):
action_plan = objects.ActionPlan.get_by_uuid(self.context, audit_uuid)
action_plan.destroy()
def test_delete_action_plan_without_action(self):
self.delete('/action_plans/%s' % self.action_plan.uuid)
response = self.get_json('/action_plans/%s' % self.action_plan.uuid,
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_delete_action_plan_with_action(self):
action = obj_utils.create_test_action(
self.context, id=1)
self.delete('/action_plans/%s' % self.action_plan.uuid)
ap_response = self.get_json('/action_plans/%s' % self.action_plan.uuid,
expect_errors=True)
acts_response = self.get_json(
'/actions/?action_plan_uuid=%s' % self.action_plan.uuid)
act_response = self.get_json(
'/actions/%s' % action.uuid,
expect_errors=True)
# The action plan does not exist anymore
self.assertEqual(404, ap_response.status_int)
self.assertEqual('application/json', ap_response.content_type)
self.assertTrue(ap_response.json['error_message'])
# Nor does the action
self.assertEqual(0, len(acts_response['actions']))
self.assertEqual(404, act_response.status_int)
self.assertEqual('application/json', act_response.content_type)
self.assertTrue(act_response.json['error_message'])
def test_delete_action_plan_not_found(self):
uuid = utils.generate_uuid()
response = self.delete('/action_plans/%s' % uuid, expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
class TestPatch(api_base.FunctionalTest):
def setUp(self):
super(TestPatch, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
self.action_plan = obj_utils.create_test_action_plan(
self.context, state=objects.action_plan.State.RECOMMENDED)
p = mock.patch.object(db_api.BaseConnection, 'update_action_plan')
self.mock_action_plan_update = p.start()
self.mock_action_plan_update.side_effect = \
self._simulate_rpc_action_plan_update
self.addCleanup(p.stop)
def _simulate_rpc_action_plan_update(self, action_plan):
action_plan.save()
return action_plan
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_denied(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
new_state = objects.action_plan.State.DELETED
response = self.get_json(
'/action_plans/%s' % self.action_plan.uuid)
self.assertNotEqual(new_state, response['state'])
response = self.patch_json(
'/action_plans/%s' % self.action_plan.uuid,
[{'path': '/state', 'value': new_state, 'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_replace_non_existent_action_plan_denied(self):
response = self.patch_json(
'/action_plans/%s' % utils.generate_uuid(),
[{'path': '/state',
'value': objects.action_plan.State.PENDING,
'op': 'replace'}],
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_add_non_existent_property_denied(self):
response = self.patch_json(
'/action_plans/%s' % self.action_plan.uuid,
[{'path': '/foo', 'value': 'bar', 'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_remove_denied(self):
# We should not be able to remove the state of an action plan
response = self.get_json(
'/action_plans/%s' % self.action_plan.uuid)
self.assertIsNotNone(response['state'])
response = self.patch_json(
'/action_plans/%s' % self.action_plan.uuid,
[{'path': '/state', 'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_remove_uuid_denied(self):
response = self.patch_json(
'/action_plans/%s' % self.action_plan.uuid,
[{'path': '/uuid', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_remove_non_existent_property_denied(self):
response = self.patch_json(
'/action_plans/%s' % self.action_plan.uuid,
[{'path': '/non-existent', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_code)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
@mock.patch.object(aapi.ApplierAPI, 'launch_action_plan')
def test_replace_state_pending_ok(self, applier_mock):
new_state = objects.action_plan.State.PENDING
response = self.get_json(
'/action_plans/%s' % self.action_plan.uuid)
self.assertNotEqual(new_state, response['state'])
response = self.patch_json(
'/action_plans/%s' % self.action_plan.uuid,
[{'path': '/state', 'value': new_state,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
applier_mock.assert_called_once_with(mock.ANY,
self.action_plan.uuid)
ALLOWED_TRANSITIONS = [
{"original_state": objects.action_plan.State.RECOMMENDED,
"new_state": objects.action_plan.State.PENDING},
{"original_state": objects.action_plan.State.RECOMMENDED,
"new_state": objects.action_plan.State.CANCELLED},
{"original_state": objects.action_plan.State.ONGOING,
"new_state": objects.action_plan.State.CANCELLING},
{"original_state": objects.action_plan.State.PENDING,
"new_state": objects.action_plan.State.CANCELLED},
]
class TestPatchStateTransitionDenied(api_base.FunctionalTest):
STATES = [
ap_state for ap_state in objects.action_plan.State.__dict__
if not ap_state.startswith("_")
]
scenarios = [
(
"%s -> %s" % (original_state, new_state),
{"original_state": original_state,
"new_state": new_state},
)
for original_state, new_state
in list(itertools.product(STATES, STATES))
# from DELETED to ...
# NOTE: Any state transition from DELETED (To RECOMMENDED, PENDING,
# ONGOING, CANCELLED, SUCCEEDED and FAILED) will cause a 404 Not Found
# because we cannot retrieve them with a GET (soft_deleted state).
# This is the reason why they are not listed here but they have a
# special test to cover it
if original_state != objects.action_plan.State.DELETED
and original_state != new_state
and {"original_state": original_state,
"new_state": new_state} not in ALLOWED_TRANSITIONS
]
def setUp(self):
super(TestPatchStateTransitionDenied, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
@mock.patch.object(
db_api.BaseConnection, 'update_action_plan',
mock.Mock(side_effect=lambda ap: ap.save() or ap))
def test_replace_state_pending_denied(self):
action_plan = obj_utils.create_test_action_plan(
self.context, state=self.original_state)
initial_ap = self.get_json('/action_plans/%s' % action_plan.uuid)
response = self.patch_json(
'/action_plans/%s' % action_plan.uuid,
[{'path': '/state', 'value': self.new_state,
'op': 'replace'}],
expect_errors=True)
updated_ap = self.get_json('/action_plans/%s' % action_plan.uuid)
self.assertNotEqual(self.new_state, initial_ap['state'])
self.assertEqual(self.original_state, updated_ap['state'])
self.assertEqual(400, response.status_code)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
class TestPatchStateTransitionOk(api_base.FunctionalTest):
scenarios = [
(
"%s -> %s" % (transition["original_state"],
transition["new_state"]),
transition
)
for transition in ALLOWED_TRANSITIONS
]
def setUp(self):
super(TestPatchStateTransitionOk, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
@mock.patch.object(
db_api.BaseConnection, 'update_action_plan',
mock.Mock(side_effect=lambda ap: ap.save() or ap))
@mock.patch.object(aapi.ApplierAPI, 'launch_action_plan', mock.Mock())
def test_replace_state_pending_ok(self):
action_plan = obj_utils.create_test_action_plan(
self.context, state=self.original_state)
initial_ap = self.get_json('/action_plans/%s' % action_plan.uuid)
response = self.patch_json(
'/action_plans/%s' % action_plan.uuid,
[{'path': '/state', 'value': self.new_state, 'op': 'replace'}])
updated_ap = self.get_json('/action_plans/%s' % action_plan.uuid)
self.assertNotEqual(self.new_state, initial_ap['state'])
self.assertEqual(self.new_state, updated_ap['state'])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
class TestActionPlanPolicyEnforcement(api_base.FunctionalTest):
def setUp(self):
super(TestActionPlanPolicyEnforcement, self).setUp()
obj_utils.create_test_goal(self.context)
obj_utils.create_test_strategy(self.context)
obj_utils.create_test_audit(self.context)
def _common_policy_check(self, rule, func, *arg, **kwarg):
self.policy.set_rules({
"admin_api": "(role:admin or role:administrator)",
"default": "rule:admin_api",
rule: "rule:defaut"})
response = func(*arg, **kwarg)
self.assertEqual(403, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(
"Policy doesn't allow %s to be performed." % rule,
jsonutils.loads(response.json['error_message'])['faultstring'])
def test_policy_disallow_get_all(self):
self._common_policy_check(
"action_plan:get_all", self.get_json, '/action_plans',
expect_errors=True)
def test_policy_disallow_get_one(self):
action_plan = obj_utils.create_test_action_plan(self.context)
self._common_policy_check(
"action_plan:get", self.get_json,
'/action_plans/%s' % action_plan.uuid,
expect_errors=True)
def test_policy_disallow_detail(self):
self._common_policy_check(
"action_plan:detail", self.get_json,
'/action_plans/detail',
expect_errors=True)
def test_policy_disallow_update(self):
action_plan = obj_utils.create_test_action_plan(self.context)
self._common_policy_check(
"action_plan:update", self.patch_json,
'/action_plans/%s' % action_plan.uuid,
[{'path': '/state',
'value': objects.action_plan.State.DELETED,
'op': 'replace'}],
expect_errors=True)
def test_policy_disallow_delete(self):
action_plan = obj_utils.create_test_action_plan(self.context)
self._common_policy_check(
"action_plan:delete", self.delete,
'/action_plans/%s' % action_plan.uuid, expect_errors=True)
class TestActionPlanPolicyEnforcementWithAdminContext(TestListActionPlan,
api_base.AdminRoleTest):
def setUp(self):
super(TestActionPlanPolicyEnforcementWithAdminContext, self).setUp()
self.policy.set_rules({
"admin_api": "(role:admin or role:administrator)",
"default": "rule:admin_api",
"action_plan:delete": "rule:default",
"action_plan:detail": "rule:default",
"action_plan:get": "rule:default",
"action_plan:get_all": "rule:default",
"action_plan:update": "rule:default"})
|
py | b417445c10ae254489683cedc31de9ba3717f970 | from typing import Callable, Optional, Union
import torch
from ignite.metrics.metrics_lambda import MetricsLambda
from ignite.metrics.precision import Precision
from ignite.metrics.recall import Recall
__all__ = ["Fbeta"]
def Fbeta(
beta: float,
average: bool = True,
precision: Optional[Precision] = None,
recall: Optional[Recall] = None,
output_transform: Optional[Callable] = None,
device: Union[str, torch.device] = torch.device("cpu"),
) -> MetricsLambda:
r"""Calculates F-beta score.
.. math::
F_\beta = \left( 1 + \beta^2 \right) * \frac{ \text{precision} * \text{recall} }
{ \left( \beta^2 * \text{precision} \right) + \text{recall} }
where :math:`\beta` is a positive real factor.
Args:
beta (float): weight of precision in harmonic mean
average (bool, optional): if True, F-beta score is computed as the unweighted average (across all classes
in multiclass case), otherwise, returns a tensor with F-beta score for each class in multiclass case.
precision (Precision, optional): precision object metric with `average=False` to compute F-beta score
recall (Recall, optional): recall object metric with `average=False` to compute F-beta score
output_transform (callable, optional): a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. It is used only if precision or recall are not provided.
device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Returns:
MetricsLambda, F-beta metric
"""
if not (beta > 0):
raise ValueError(f"Beta should be a positive integer, but given {beta}")
if precision is not None and output_transform is not None:
raise ValueError("If precision argument is provided, output_transform should be None")
if recall is not None and output_transform is not None:
raise ValueError("If recall argument is provided, output_transform should be None")
if precision is None:
precision = Precision(
output_transform=(lambda x: x) if output_transform is None else output_transform, # type: ignore[arg-type]
average=False,
device=device,
)
elif precision._average:
raise ValueError("Input precision metric should have average=False")
if recall is None:
recall = Recall(
output_transform=(lambda x: x) if output_transform is None else output_transform, # type: ignore[arg-type]
average=False,
device=device,
)
elif recall._average:
raise ValueError("Input recall metric should have average=False")
fbeta = (1.0 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall + 1e-15)
if average:
fbeta = fbeta.mean().item()
return fbeta
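# Minimal usage sketch (assumed, not part of the original module): attach the
# returned MetricsLambda to an evaluator engine.
#   from ignite.engine import create_supervised_evaluator
#   evaluator = create_supervised_evaluator(model, metrics={"f2": Fbeta(beta=2.0)})
#   state = evaluator.run(val_loader)   # model and val_loader are assumed to exist
#   print(state.metrics["f2"])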
|
py | b41744e8d4836476ec86b154bd3892b11012a0d3 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/meta_graph.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from tensorflow.core.framework import graph_pb2 as tensorflow_dot_core_dot_framework_dot_graph__pb2
from tensorflow.core.framework import op_def_pb2 as tensorflow_dot_core_dot_framework_dot_op__def__pb2
from tensorflow.core.framework import tensor_shape_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2
from tensorflow.core.framework import types_pb2 as tensorflow_dot_core_dot_framework_dot_types__pb2
from tensorflow.core.protobuf import saver_pb2 as tensorflow_dot_core_dot_protobuf_dot_saver__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/protobuf/meta_graph.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n)tensorflow/core/protobuf/meta_graph.proto\x12\ntensorflow\x1a\x19google/protobuf/any.proto\x1a%tensorflow/core/framework/graph.proto\x1a&tensorflow/core/framework/op_def.proto\x1a,tensorflow/core/framework/tensor_shape.proto\x1a%tensorflow/core/framework/types.proto\x1a$tensorflow/core/protobuf/saver.proto\"\xe3\x05\n\x0cMetaGraphDef\x12;\n\rmeta_info_def\x18\x01 \x01(\x0b\x32$.tensorflow.MetaGraphDef.MetaInfoDef\x12\'\n\tgraph_def\x18\x02 \x01(\x0b\x32\x14.tensorflow.GraphDef\x12\'\n\tsaver_def\x18\x03 \x01(\x0b\x32\x14.tensorflow.SaverDef\x12\x43\n\x0e\x63ollection_def\x18\x04 \x03(\x0b\x32+.tensorflow.MetaGraphDef.CollectionDefEntry\x12\x41\n\rsignature_def\x18\x05 \x03(\x0b\x32*.tensorflow.MetaGraphDef.SignatureDefEntry\x12\x30\n\x0e\x61sset_file_def\x18\x06 \x03(\x0b\x32\x18.tensorflow.AssetFileDef\x1a\xe9\x01\n\x0bMetaInfoDef\x12\x1a\n\x12meta_graph_version\x18\x01 \x01(\t\x12,\n\x10stripped_op_list\x18\x02 \x01(\x0b\x32\x12.tensorflow.OpList\x12&\n\x08\x61ny_info\x18\x03 \x01(\x0b\x32\x14.google.protobuf.Any\x12\x0c\n\x04tags\x18\x04 \x03(\t\x12\x1a\n\x12tensorflow_version\x18\x05 \x01(\t\x12\x1e\n\x16tensorflow_git_version\x18\x06 \x01(\t\x12\x1e\n\x16stripped_default_attrs\x18\x07 \x01(\x08\x1aO\n\x12\x43ollectionDefEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.tensorflow.CollectionDef:\x02\x38\x01\x1aM\n\x11SignatureDefEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.tensorflow.SignatureDef:\x02\x38\x01\"\xdf\x03\n\rCollectionDef\x12\x37\n\tnode_list\x18\x01 \x01(\x0b\x32\".tensorflow.CollectionDef.NodeListH\x00\x12\x39\n\nbytes_list\x18\x02 \x01(\x0b\x32#.tensorflow.CollectionDef.BytesListH\x00\x12\x39\n\nint64_list\x18\x03 \x01(\x0b\x32#.tensorflow.CollectionDef.Int64ListH\x00\x12\x39\n\nfloat_list\x18\x04 \x01(\x0b\x32#.tensorflow.CollectionDef.FloatListH\x00\x12\x35\n\x08\x61ny_list\x18\x05 \x01(\x0b\x32!.tensorflow.CollectionDef.AnyListH\x00\x1a\x19\n\x08NodeList\x12\r\n\x05value\x18\x01 \x03(\t\x1a\x1a\n\tBytesList\x12\r\n\x05value\x18\x01 \x03(\x0c\x1a\x1e\n\tInt64List\x12\x11\n\x05value\x18\x01 \x03(\x03\x42\x02\x10\x01\x1a\x1e\n\tFloatList\x12\x11\n\x05value\x18\x01 \x03(\x02\x42\x02\x10\x01\x1a.\n\x07\x41nyList\x12#\n\x05value\x18\x01 \x03(\x0b\x32\x14.google.protobuf.AnyB\x06\n\x04kind\"\xa0\x02\n\nTensorInfo\x12\x0e\n\x04name\x18\x01 \x01(\tH\x00\x12\x36\n\ncoo_sparse\x18\x04 \x01(\x0b\x32 .tensorflow.TensorInfo.CooSparseH\x00\x12#\n\x05\x64type\x18\x02 \x01(\x0e\x32\x14.tensorflow.DataType\x12\x32\n\x0ctensor_shape\x18\x03 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\x1a\x65\n\tCooSparse\x12\x1a\n\x12values_tensor_name\x18\x01 \x01(\t\x12\x1b\n\x13indices_tensor_name\x18\x02 \x01(\t\x12\x1f\n\x17\x64\x65nse_shape_tensor_name\x18\x03 \x01(\tB\n\n\x08\x65ncoding\"\xa0\x02\n\x0cSignatureDef\x12\x34\n\x06inputs\x18\x01 \x03(\x0b\x32$.tensorflow.SignatureDef.InputsEntry\x12\x36\n\x07outputs\x18\x02 \x03(\x0b\x32%.tensorflow.SignatureDef.OutputsEntry\x12\x13\n\x0bmethod_name\x18\x03 \x01(\t\x1a\x45\n\x0bInputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.tensorflow.TensorInfo:\x02\x38\x01\x1a\x46\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.tensorflow.TensorInfo:\x02\x38\x01\"M\n\x0c\x41ssetFileDef\x12+\n\x0btensor_info\x18\x01 \x01(\x0b\x32\x16.tensorflow.TensorInfo\x12\x10\n\x08\x66ilename\x18\x02 \x01(\tB0\n\x18org.tensorflow.frameworkB\x0fMetaGraphProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_graph__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_op__def__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,tensorflow_dot_core_dot_protobuf_dot_saver__pb2.DESCRIPTOR,])
_METAGRAPHDEF_METAINFODEF = _descriptor.Descriptor(
name='MetaInfoDef',
full_name='tensorflow.MetaGraphDef.MetaInfoDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='meta_graph_version', full_name='tensorflow.MetaGraphDef.MetaInfoDef.meta_graph_version', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stripped_op_list', full_name='tensorflow.MetaGraphDef.MetaInfoDef.stripped_op_list', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='any_info', full_name='tensorflow.MetaGraphDef.MetaInfoDef.any_info', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tags', full_name='tensorflow.MetaGraphDef.MetaInfoDef.tags', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensorflow_version', full_name='tensorflow.MetaGraphDef.MetaInfoDef.tensorflow_version', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensorflow_git_version', full_name='tensorflow.MetaGraphDef.MetaInfoDef.tensorflow_git_version', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stripped_default_attrs', full_name='tensorflow.MetaGraphDef.MetaInfoDef.stripped_default_attrs', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=633,
serialized_end=866,
)
_METAGRAPHDEF_COLLECTIONDEFENTRY = _descriptor.Descriptor(
name='CollectionDefEntry',
full_name='tensorflow.MetaGraphDef.CollectionDefEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.MetaGraphDef.CollectionDefEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.MetaGraphDef.CollectionDefEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=868,
serialized_end=947,
)
_METAGRAPHDEF_SIGNATUREDEFENTRY = _descriptor.Descriptor(
name='SignatureDefEntry',
full_name='tensorflow.MetaGraphDef.SignatureDefEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.MetaGraphDef.SignatureDefEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.MetaGraphDef.SignatureDefEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=949,
serialized_end=1026,
)
_METAGRAPHDEF = _descriptor.Descriptor(
name='MetaGraphDef',
full_name='tensorflow.MetaGraphDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='meta_info_def', full_name='tensorflow.MetaGraphDef.meta_info_def', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='graph_def', full_name='tensorflow.MetaGraphDef.graph_def', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='saver_def', full_name='tensorflow.MetaGraphDef.saver_def', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='collection_def', full_name='tensorflow.MetaGraphDef.collection_def', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='signature_def', full_name='tensorflow.MetaGraphDef.signature_def', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='asset_file_def', full_name='tensorflow.MetaGraphDef.asset_file_def', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_METAGRAPHDEF_METAINFODEF, _METAGRAPHDEF_COLLECTIONDEFENTRY, _METAGRAPHDEF_SIGNATUREDEFENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=287,
serialized_end=1026,
)
_COLLECTIONDEF_NODELIST = _descriptor.Descriptor(
name='NodeList',
full_name='tensorflow.CollectionDef.NodeList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.CollectionDef.NodeList.value', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1335,
serialized_end=1360,
)
_COLLECTIONDEF_BYTESLIST = _descriptor.Descriptor(
name='BytesList',
full_name='tensorflow.CollectionDef.BytesList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.CollectionDef.BytesList.value', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1362,
serialized_end=1388,
)
_COLLECTIONDEF_INT64LIST = _descriptor.Descriptor(
name='Int64List',
full_name='tensorflow.CollectionDef.Int64List',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.CollectionDef.Int64List.value', index=0,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1390,
serialized_end=1420,
)
_COLLECTIONDEF_FLOATLIST = _descriptor.Descriptor(
name='FloatList',
full_name='tensorflow.CollectionDef.FloatList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.CollectionDef.FloatList.value', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1422,
serialized_end=1452,
)
_COLLECTIONDEF_ANYLIST = _descriptor.Descriptor(
name='AnyList',
full_name='tensorflow.CollectionDef.AnyList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.CollectionDef.AnyList.value', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1454,
serialized_end=1500,
)
_COLLECTIONDEF = _descriptor.Descriptor(
name='CollectionDef',
full_name='tensorflow.CollectionDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node_list', full_name='tensorflow.CollectionDef.node_list', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bytes_list', full_name='tensorflow.CollectionDef.bytes_list', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='int64_list', full_name='tensorflow.CollectionDef.int64_list', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='float_list', full_name='tensorflow.CollectionDef.float_list', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='any_list', full_name='tensorflow.CollectionDef.any_list', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_COLLECTIONDEF_NODELIST, _COLLECTIONDEF_BYTESLIST, _COLLECTIONDEF_INT64LIST, _COLLECTIONDEF_FLOATLIST, _COLLECTIONDEF_ANYLIST, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='kind', full_name='tensorflow.CollectionDef.kind',
index=0, containing_type=None, fields=[]),
],
serialized_start=1029,
serialized_end=1508,
)
_TENSORINFO_COOSPARSE = _descriptor.Descriptor(
name='CooSparse',
full_name='tensorflow.TensorInfo.CooSparse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='values_tensor_name', full_name='tensorflow.TensorInfo.CooSparse.values_tensor_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='indices_tensor_name', full_name='tensorflow.TensorInfo.CooSparse.indices_tensor_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dense_shape_tensor_name', full_name='tensorflow.TensorInfo.CooSparse.dense_shape_tensor_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1686,
serialized_end=1787,
)
_TENSORINFO = _descriptor.Descriptor(
name='TensorInfo',
full_name='tensorflow.TensorInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.TensorInfo.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='coo_sparse', full_name='tensorflow.TensorInfo.coo_sparse', index=1,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dtype', full_name='tensorflow.TensorInfo.dtype', index=2,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor_shape', full_name='tensorflow.TensorInfo.tensor_shape', index=3,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TENSORINFO_COOSPARSE, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='encoding', full_name='tensorflow.TensorInfo.encoding',
index=0, containing_type=None, fields=[]),
],
serialized_start=1511,
serialized_end=1799,
)
_SIGNATUREDEF_INPUTSENTRY = _descriptor.Descriptor(
name='InputsEntry',
full_name='tensorflow.SignatureDef.InputsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.SignatureDef.InputsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.SignatureDef.InputsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1949,
serialized_end=2018,
)
_SIGNATUREDEF_OUTPUTSENTRY = _descriptor.Descriptor(
name='OutputsEntry',
full_name='tensorflow.SignatureDef.OutputsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.SignatureDef.OutputsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.SignatureDef.OutputsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2020,
serialized_end=2090,
)
_SIGNATUREDEF = _descriptor.Descriptor(
name='SignatureDef',
full_name='tensorflow.SignatureDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='inputs', full_name='tensorflow.SignatureDef.inputs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='outputs', full_name='tensorflow.SignatureDef.outputs', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='method_name', full_name='tensorflow.SignatureDef.method_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SIGNATUREDEF_INPUTSENTRY, _SIGNATUREDEF_OUTPUTSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1802,
serialized_end=2090,
)
_ASSETFILEDEF = _descriptor.Descriptor(
name='AssetFileDef',
full_name='tensorflow.AssetFileDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tensor_info', full_name='tensorflow.AssetFileDef.tensor_info', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='filename', full_name='tensorflow.AssetFileDef.filename', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2092,
serialized_end=2169,
)
_METAGRAPHDEF_METAINFODEF.fields_by_name['stripped_op_list'].message_type = tensorflow_dot_core_dot_framework_dot_op__def__pb2._OPLIST
_METAGRAPHDEF_METAINFODEF.fields_by_name['any_info'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_METAGRAPHDEF_METAINFODEF.containing_type = _METAGRAPHDEF
_METAGRAPHDEF_COLLECTIONDEFENTRY.fields_by_name['value'].message_type = _COLLECTIONDEF
_METAGRAPHDEF_COLLECTIONDEFENTRY.containing_type = _METAGRAPHDEF
_METAGRAPHDEF_SIGNATUREDEFENTRY.fields_by_name['value'].message_type = _SIGNATUREDEF
_METAGRAPHDEF_SIGNATUREDEFENTRY.containing_type = _METAGRAPHDEF
_METAGRAPHDEF.fields_by_name['meta_info_def'].message_type = _METAGRAPHDEF_METAINFODEF
_METAGRAPHDEF.fields_by_name['graph_def'].message_type = tensorflow_dot_core_dot_framework_dot_graph__pb2._GRAPHDEF
_METAGRAPHDEF.fields_by_name['saver_def'].message_type = tensorflow_dot_core_dot_protobuf_dot_saver__pb2._SAVERDEF
_METAGRAPHDEF.fields_by_name['collection_def'].message_type = _METAGRAPHDEF_COLLECTIONDEFENTRY
_METAGRAPHDEF.fields_by_name['signature_def'].message_type = _METAGRAPHDEF_SIGNATUREDEFENTRY
_METAGRAPHDEF.fields_by_name['asset_file_def'].message_type = _ASSETFILEDEF
_COLLECTIONDEF_NODELIST.containing_type = _COLLECTIONDEF
_COLLECTIONDEF_BYTESLIST.containing_type = _COLLECTIONDEF
_COLLECTIONDEF_INT64LIST.containing_type = _COLLECTIONDEF
_COLLECTIONDEF_FLOATLIST.containing_type = _COLLECTIONDEF
_COLLECTIONDEF_ANYLIST.fields_by_name['value'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_COLLECTIONDEF_ANYLIST.containing_type = _COLLECTIONDEF
_COLLECTIONDEF.fields_by_name['node_list'].message_type = _COLLECTIONDEF_NODELIST
_COLLECTIONDEF.fields_by_name['bytes_list'].message_type = _COLLECTIONDEF_BYTESLIST
_COLLECTIONDEF.fields_by_name['int64_list'].message_type = _COLLECTIONDEF_INT64LIST
_COLLECTIONDEF.fields_by_name['float_list'].message_type = _COLLECTIONDEF_FLOATLIST
_COLLECTIONDEF.fields_by_name['any_list'].message_type = _COLLECTIONDEF_ANYLIST
_COLLECTIONDEF.oneofs_by_name['kind'].fields.append(
_COLLECTIONDEF.fields_by_name['node_list'])
_COLLECTIONDEF.fields_by_name['node_list'].containing_oneof = _COLLECTIONDEF.oneofs_by_name['kind']
_COLLECTIONDEF.oneofs_by_name['kind'].fields.append(
_COLLECTIONDEF.fields_by_name['bytes_list'])
_COLLECTIONDEF.fields_by_name['bytes_list'].containing_oneof = _COLLECTIONDEF.oneofs_by_name['kind']
_COLLECTIONDEF.oneofs_by_name['kind'].fields.append(
_COLLECTIONDEF.fields_by_name['int64_list'])
_COLLECTIONDEF.fields_by_name['int64_list'].containing_oneof = _COLLECTIONDEF.oneofs_by_name['kind']
_COLLECTIONDEF.oneofs_by_name['kind'].fields.append(
_COLLECTIONDEF.fields_by_name['float_list'])
_COLLECTIONDEF.fields_by_name['float_list'].containing_oneof = _COLLECTIONDEF.oneofs_by_name['kind']
_COLLECTIONDEF.oneofs_by_name['kind'].fields.append(
_COLLECTIONDEF.fields_by_name['any_list'])
_COLLECTIONDEF.fields_by_name['any_list'].containing_oneof = _COLLECTIONDEF.oneofs_by_name['kind']
_TENSORINFO_COOSPARSE.containing_type = _TENSORINFO
_TENSORINFO.fields_by_name['coo_sparse'].message_type = _TENSORINFO_COOSPARSE
_TENSORINFO.fields_by_name['dtype'].enum_type = tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_TENSORINFO.fields_by_name['tensor_shape'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_TENSORINFO.oneofs_by_name['encoding'].fields.append(
_TENSORINFO.fields_by_name['name'])
_TENSORINFO.fields_by_name['name'].containing_oneof = _TENSORINFO.oneofs_by_name['encoding']
_TENSORINFO.oneofs_by_name['encoding'].fields.append(
_TENSORINFO.fields_by_name['coo_sparse'])
_TENSORINFO.fields_by_name['coo_sparse'].containing_oneof = _TENSORINFO.oneofs_by_name['encoding']
_SIGNATUREDEF_INPUTSENTRY.fields_by_name['value'].message_type = _TENSORINFO
_SIGNATUREDEF_INPUTSENTRY.containing_type = _SIGNATUREDEF
_SIGNATUREDEF_OUTPUTSENTRY.fields_by_name['value'].message_type = _TENSORINFO
_SIGNATUREDEF_OUTPUTSENTRY.containing_type = _SIGNATUREDEF
_SIGNATUREDEF.fields_by_name['inputs'].message_type = _SIGNATUREDEF_INPUTSENTRY
_SIGNATUREDEF.fields_by_name['outputs'].message_type = _SIGNATUREDEF_OUTPUTSENTRY
_ASSETFILEDEF.fields_by_name['tensor_info'].message_type = _TENSORINFO
DESCRIPTOR.message_types_by_name['MetaGraphDef'] = _METAGRAPHDEF
DESCRIPTOR.message_types_by_name['CollectionDef'] = _COLLECTIONDEF
DESCRIPTOR.message_types_by_name['TensorInfo'] = _TENSORINFO
DESCRIPTOR.message_types_by_name['SignatureDef'] = _SIGNATUREDEF
DESCRIPTOR.message_types_by_name['AssetFileDef'] = _ASSETFILEDEF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MetaGraphDef = _reflection.GeneratedProtocolMessageType('MetaGraphDef', (_message.Message,), dict(
MetaInfoDef = _reflection.GeneratedProtocolMessageType('MetaInfoDef', (_message.Message,), dict(
DESCRIPTOR = _METAGRAPHDEF_METAINFODEF,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MetaGraphDef.MetaInfoDef)
))
,
CollectionDefEntry = _reflection.GeneratedProtocolMessageType('CollectionDefEntry', (_message.Message,), dict(
DESCRIPTOR = _METAGRAPHDEF_COLLECTIONDEFENTRY,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MetaGraphDef.CollectionDefEntry)
))
,
SignatureDefEntry = _reflection.GeneratedProtocolMessageType('SignatureDefEntry', (_message.Message,), dict(
DESCRIPTOR = _METAGRAPHDEF_SIGNATUREDEFENTRY,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MetaGraphDef.SignatureDefEntry)
))
,
DESCRIPTOR = _METAGRAPHDEF,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MetaGraphDef)
))
_sym_db.RegisterMessage(MetaGraphDef)
_sym_db.RegisterMessage(MetaGraphDef.MetaInfoDef)
_sym_db.RegisterMessage(MetaGraphDef.CollectionDefEntry)
_sym_db.RegisterMessage(MetaGraphDef.SignatureDefEntry)
CollectionDef = _reflection.GeneratedProtocolMessageType('CollectionDef', (_message.Message,), dict(
NodeList = _reflection.GeneratedProtocolMessageType('NodeList', (_message.Message,), dict(
DESCRIPTOR = _COLLECTIONDEF_NODELIST,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.CollectionDef.NodeList)
))
,
BytesList = _reflection.GeneratedProtocolMessageType('BytesList', (_message.Message,), dict(
DESCRIPTOR = _COLLECTIONDEF_BYTESLIST,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.CollectionDef.BytesList)
))
,
Int64List = _reflection.GeneratedProtocolMessageType('Int64List', (_message.Message,), dict(
DESCRIPTOR = _COLLECTIONDEF_INT64LIST,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.CollectionDef.Int64List)
))
,
FloatList = _reflection.GeneratedProtocolMessageType('FloatList', (_message.Message,), dict(
DESCRIPTOR = _COLLECTIONDEF_FLOATLIST,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.CollectionDef.FloatList)
))
,
AnyList = _reflection.GeneratedProtocolMessageType('AnyList', (_message.Message,), dict(
DESCRIPTOR = _COLLECTIONDEF_ANYLIST,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.CollectionDef.AnyList)
))
,
DESCRIPTOR = _COLLECTIONDEF,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.CollectionDef)
))
_sym_db.RegisterMessage(CollectionDef)
_sym_db.RegisterMessage(CollectionDef.NodeList)
_sym_db.RegisterMessage(CollectionDef.BytesList)
_sym_db.RegisterMessage(CollectionDef.Int64List)
_sym_db.RegisterMessage(CollectionDef.FloatList)
_sym_db.RegisterMessage(CollectionDef.AnyList)
TensorInfo = _reflection.GeneratedProtocolMessageType('TensorInfo', (_message.Message,), dict(
CooSparse = _reflection.GeneratedProtocolMessageType('CooSparse', (_message.Message,), dict(
DESCRIPTOR = _TENSORINFO_COOSPARSE,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorInfo.CooSparse)
))
,
DESCRIPTOR = _TENSORINFO,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorInfo)
))
_sym_db.RegisterMessage(TensorInfo)
_sym_db.RegisterMessage(TensorInfo.CooSparse)
SignatureDef = _reflection.GeneratedProtocolMessageType('SignatureDef', (_message.Message,), dict(
InputsEntry = _reflection.GeneratedProtocolMessageType('InputsEntry', (_message.Message,), dict(
DESCRIPTOR = _SIGNATUREDEF_INPUTSENTRY,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.SignatureDef.InputsEntry)
))
,
OutputsEntry = _reflection.GeneratedProtocolMessageType('OutputsEntry', (_message.Message,), dict(
DESCRIPTOR = _SIGNATUREDEF_OUTPUTSENTRY,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.SignatureDef.OutputsEntry)
))
,
DESCRIPTOR = _SIGNATUREDEF,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.SignatureDef)
))
_sym_db.RegisterMessage(SignatureDef)
_sym_db.RegisterMessage(SignatureDef.InputsEntry)
_sym_db.RegisterMessage(SignatureDef.OutputsEntry)
AssetFileDef = _reflection.GeneratedProtocolMessageType('AssetFileDef', (_message.Message,), dict(
DESCRIPTOR = _ASSETFILEDEF,
__module__ = 'tensorflow.core.protobuf.meta_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.AssetFileDef)
))
_sym_db.RegisterMessage(AssetFileDef)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\017MetaGraphProtosP\001\370\001\001'))
_METAGRAPHDEF_COLLECTIONDEFENTRY.has_options = True
_METAGRAPHDEF_COLLECTIONDEFENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_METAGRAPHDEF_SIGNATUREDEFENTRY.has_options = True
_METAGRAPHDEF_SIGNATUREDEFENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_COLLECTIONDEF_INT64LIST.fields_by_name['value'].has_options = True
_COLLECTIONDEF_INT64LIST.fields_by_name['value']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_COLLECTIONDEF_FLOATLIST.fields_by_name['value'].has_options = True
_COLLECTIONDEF_FLOATLIST.fields_by_name['value']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_SIGNATUREDEF_INPUTSENTRY.has_options = True
_SIGNATUREDEF_INPUTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_SIGNATUREDEF_OUTPUTSENTRY.has_options = True
_SIGNATUREDEF_OUTPUTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
|
py | b41746717716f9a7f1ad217a7af99a013ce50e66 | import backports
import isodate
import itertools
import pandas as pd
import pytz
import tzlocal
from backports.datetime_fromisoformat import MonkeyPatch
from bson.codec_options import CodecOptions
from bson.objectid import ObjectId
from datetime import date, datetime, timedelta
from girderformindlogger.models.applet import Applet as AppletModel
from girderformindlogger.models.user import User as UserModel
from girderformindlogger.models.response_folder import ResponseItem
from girderformindlogger.utility import clean_empty
from pandas.api.types import is_numeric_dtype
from pymongo import ASCENDING, DESCENDING
MonkeyPatch.patch_fromisoformat()
def getSchedule(currentUser, timezone=None):
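    # Returns {applet _id: {activity _id: {'lastResponse': ISO timestamp or None}}} for the user's applets.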
from .jsonld_expander import formatLdObject
return({
applet['applet'].get('_id', ''): {
applet['activities'][activity].get('_id', ''): {
'lastResponse': getLatestResponseTime(
currentUser['_id'],
applet['applet']['_id'].split('applet/')[-1],
activity,
tz=timezone
) #,
# 'nextScheduled': None,
# 'lastScheduled': None
} for activity in list(
applet.get('activities', {}).keys()
)
} for applet in [
formatLdObject(
applet,
'applet',
currentUser
) for applet in AppletModel().getAppletsForUser(
user=currentUser,
role='user'
)
]
})
def getLatestResponse(informantId, appletId, activityURL):
from .jsonld_expander import reprolibCanonize, reprolibPrefix
responses = list(ResponseItem().find(
query={
"baseParentType": 'user',
"baseParentId": informantId if isinstance(
informantId,
ObjectId
) else ObjectId(informantId),
"meta.applet.@id": {
"$in": [
appletId,
ObjectId(appletId)
]
},
"meta.activity.url": {
"$in": [
activityURL,
reprolibPrefix(activityURL),
reprolibCanonize(activityURL)
]
}
},
force=True,
sort=[("updated", DESCENDING)]
))
if len(responses):
return(responses[0])
return(None)
def getLatestResponseTime(informantId, appletId, activityURL, tz=None):
latestResponse = getLatestResponse(informantId, appletId, activityURL)
try:
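        # Dry-run the timestamp formatting so unexpected 'updated' values get logged below;
        # TypeError (e.g. no prior response) is silently ignored.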
latestResponse['updated'].isoformat(
) if tz is None else latestResponse['updated'].astimezone(pytz.timezone(
tz
)).isoformat()
except TypeError:
pass
except:
import sys, traceback
print(sys.exc_info())
print(traceback.print_tb(sys.exc_info()[2]))
return(
(
latestResponse['updated'].astimezone(pytz.timezone(
tz
)).isoformat() if (
isinstance(tz, str) and tz in pytz.all_timezones
) else latestResponse['updated'].isoformat()
) if (
isinstance(latestResponse, dict) and isinstance(
latestResponse.get('updated'),
datetime
)
) else None
)
def aggregate(metadata, informant, startDate=None, endDate=None, getAll=False):
"""
    Calculate response aggregates for one informant/applet/activity/subject over
    [startDate, endDate); with getAll=True the individual dated values are returned
    instead of value counts.
"""
thisResponseTime = datetime.now(
tzlocal.get_localzone()
)
startDate = datetime.fromisoformat(startDate.isoformat(
)).astimezone(pytz.utc).replace(tzinfo=None) if startDate is not None else None
endDate = datetime.fromisoformat((
thisResponseTime if endDate is None else endDate
).isoformat()).astimezone(pytz.utc).replace(tzinfo=None)
query = {
"baseParentType": 'user',
"baseParentId": informant.get("_id") if isinstance(
informant,
dict
) else informant,
"updated": {
"$gte": startDate,
"$lt": endDate
} if startDate else {
"$lt": endDate
},
"meta.applet.@id": metadata.get("applet", {}).get("@id"),
"meta.activity.url": metadata.get("activity", {}).get("url"),
"meta.subject.@id": metadata.get("subject", {}).get("@id")
}
definedRange = list(ResponseItem().find(
query=query,
force=True,
sort=[("updated", ASCENDING)]
))
if not len(definedRange):
# TODO: I'm afraid of some asynchronous database writes
# that sometimes make defined range an empty list.
# For now I'm exiting, but this needs to be looked
# into.
print('\n\n defined range returns an empty list.')
return
# raise ValueError("The defined range doesn't have a length")
startDate = min([response.get(
'updated',
endDate
) for response in definedRange]) if startDate is None else startDate
duration = isodate.duration_isoformat(
delocalize(endDate) - delocalize(startDate)
)
responseIRIs = _responseIRIs(definedRange)
for itemIRI in responseIRIs:
for response in definedRange:
if itemIRI in response.get(
'meta',
{}
).get('responses', {}):
completedDate(response)
aggregated = {
"schema:startDate": startDate,
"schema:endDate": endDate,
"schema:duration": duration,
"responses": {
itemIRI: [
{
"value": response.get('meta', {}).get('responses', {}).get(
itemIRI
),
"date": completedDate(response)
} for response in definedRange if itemIRI in response.get(
'meta',
{}
).get('responses', {})
] for itemIRI in responseIRIs
} if getAll else countResponseValues(definedRange, responseIRIs)
}
return(aggregated)
def completedDate(response):
completed = response.get("updated", {})
return completed
def formatResponse(response):
try:
metadata = response.get('meta', response)
if any([
key not in metadata.keys() for key in [
'allTime',
'last7Days'
]
]):
aggregateAndSave(response, response.get('baseParentId'))
thisResponse = {
"thisResponse": {
"schema:startDate": isodatetime(
metadata.get(
'responseStarted',
response.get(
'updated',
datetime.now()
)
)
),
"schema:endDate": isodatetime(
metadata.get(
'responseCompleted',
response.get(
'updated',
datetime.now()
)
)
),
"responses": {
itemURI: metadata['responses'][
itemURI
] for itemURI in metadata.get('responses', {})
}
},
"allToDate": metadata.get("allTime"),
"last7Days": metadata.get("last7Days")
} if isinstance(metadata, dict) and all([
key in metadata.keys() for key in [
'responses',
'applet',
'activity',
'subject'
]
]) else {}
except Exception as e:
import sys, traceback
print(sys.exc_info())
print(traceback.print_tb(sys.exc_info()[2]))
thisResponse = None
return(clean_empty(thisResponse))
def string_or_ObjectID(s):
return([str(s), ObjectId(s)])
def _responseIRIs(definedRange):
return(list(set(itertools.chain.from_iterable([list(
response.get('meta', {}).get('responses', {}).keys()
) for response in definedRange if isinstance(response, dict)]))))
def _flattenDF(df, columnName):
if isinstance(columnName, list):
for c in columnName:
df = _flattenDF(df, c)
return(df)
prefix = columnName if columnName not in ['meta', 'responses'] else ""
newDf = pd.concat(
[
df[columnName].apply(
pd.Series
),
df.drop(columnName, axis=1)
],
axis=1
)
return(
(
newDf.rename(
{
col: "{}-{}".format(
prefix,
col
) for col in list(
df[columnName][0].keys()
)
},
axis='columns'
) if len(prefix) else newDf
).dropna('columns', 'all')
)
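# Illustrative sketch of _flattenDF above (hypothetical toy frame, not from the original data):
#   df = pd.DataFrame([{'meta': {'applet': {'@id': 'a1'}, 'responses': {'item1': 3}}}])
#   _flattenDF(df, ['meta', 'applet', 'responses'])
#   # -> columns 'applet-@id' and 'item1' ('meta' and 'responses' add no prefix)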
def countResponseValues(definedRange, responseIRIs=None):
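    # Returns {itemIRI: [{'value': v, 'count': n}, ...]} tallied across the defined range.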
responseIRIs = _responseIRIs(
definedRange
) if responseIRIs is None else responseIRIs
pd.set_option('display.max_colwidth', -1)
pd.set_option('display.max_columns', None)
df = pd.DataFrame(definedRange)
df = _flattenDF(df, ['meta', 'applet', 'activity', 'responses'])
counts = {
responseIRI: (
df[responseIRI].astype(str) if not(is_numeric_dtype(
df[responseIRI]
)) else df[responseIRI]
).value_counts().to_dict(
) for responseIRI in responseIRIs if isinstance(
df[responseIRI],
pd.Series
)
}
return(
{
responseIRI: [
{
"value": value,
"count": counts[responseIRI][value]
} for value in counts[responseIRI]
] for responseIRI in counts
}
)
def delocalize(dt):
print("delocalizing {} ({}; {})".format(
dt,
type(dt),
dt.tzinfo if isinstance(dt, datetime) else ""
))
if isinstance(dt, datetime):
if dt.tzinfo is None:
return(dt)
print(dt.astimezone(pytz.utc).replace(
tzinfo=None
))
return(dt.astimezone(pytz.utc).replace(
tzinfo=None
))
elif isinstance(dt, str):
return(datetime.fromisoformat(dt).astimezone(pytz.utc).replace(
tzinfo=None
))
print("Here's the problem: {}".format(dt))
raise TypeError
def aggregateAndSave(item, informant):
if item == {} or item is None:
return({})
metadata = item.get("meta", {})
# Save 1 (of 3)
if metadata and metadata != {}:
item = ResponseItem().setMetadata(item, metadata)
# sevenDay ...
metadata = item.get("meta", {})
endDate = datetime.now(
tzlocal.get_localzone()
)
startDate = (endDate - timedelta(days=7)).date()
print("From {} to {}".format(
startDate.strftime("%c"),
endDate.strftime("%c")
))
metadata["last7Days"] = aggregate(
metadata,
informant,
startDate=startDate,
endDate=endDate,
getAll=True
)
# save (2 of 3)
if metadata and metadata != {}:
item = ResponseItem().setMetadata(item, metadata)
# allTime
metadata = item.get("meta", {})
metadata["allTime"] = aggregate(
metadata,
informant,
endDate=endDate,
getAll=False
)
# save (3 of 3)
if metadata and metadata != {}:
item = ResponseItem().setMetadata(item, metadata)
return(item)
def last7Days(
appletId,
appletInfo,
informantId,
reviewer,
subject=None,
referenceDate=None
):
from bson import json_util
from .jsonld_expander import loadCache, reprolibCanonize, reprolibPrefix
referenceDate = delocalize(
datetime.now(
tzlocal.get_localzone()
) if referenceDate is None else referenceDate # TODO allow timeless dates
)
# we need to get the activities
cachedApplet = loadCache(appletInfo['cached'])
listOfActivities = [
reprolibPrefix(activity) for activity in list(
cachedApplet['activities'].keys()
)
]
getLatestResponsesByAct = lambda activityURI: list(ResponseItem().find(
query={
"baseParentType": 'user',
"baseParentId": informantId if isinstance(
informantId,
ObjectId
) else ObjectId(informantId),
"updated": {
"$lte": referenceDate
},
"meta.applet.@id": {
"$in": [
appletId,
ObjectId(appletId)
]
},
"meta.activity.url": {
"$in": [
activityURI,
reprolibPrefix(activityURI),
reprolibCanonize(activityURI)
]
}
},
force=True,
sort=[("updated", DESCENDING)]
))
latestResponses = [getLatestResponsesByAct(act) for act in listOfActivities]
# destructure the responses
# TODO: we are assuming here that activities don't share items.
# might not be the case later on, so watch out.
outputResponses = {}
for resp in latestResponses:
if len(resp):
latest = resp[0]
# the last 7 days for the most recent entry for the activity
l7 = latest.get('meta', {}).get('last7Days', {}).get('responses', {})
# the current response for the most recent entry for the activity
currentResp = latest.get('meta', {}).get('responses', {})
# update the l7 with values from currentResp
for (key, val) in currentResp.items():
if key in l7.keys():
l7[key].append(dict(date=latest['updated'], value=val))
else:
l7[key] = [dict(date=latest['updated'], value=val)]
outputResponses.update(l7)
l7d = {}
l7d["responses"] = _oneResponsePerDate(outputResponses)
endDate = referenceDate.date()
l7d["schema:endDate"] = endDate.isoformat()
startDate = endDate - timedelta(days=7)
l7d["schema:startDate"] = startDate.isoformat()
l7d["schema:duration"] = isodate.duration_isoformat(
endDate - startDate
)
return l7d
def determine_date(d):
if isinstance(d, int):
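        # Scale millisecond/microsecond epoch values down to seconds before conversion.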
while (d > 10000000000):
d = d/10
d = datetime.fromtimestamp(d)
return((
datetime.fromisoformat(
d
) if isinstance(d, str) else d
).date())
def isodatetime(d):
if isinstance(d, int):
while (d > 10000000000):
d = d/10
d = datetime.fromtimestamp(d)
return((
datetime.fromisoformat(
d
) if isinstance(d, str) else d
).isoformat())
def responseDateList(appletId, userId, reviewer):
    from girderformindlogger.models.profile import Profile as ProfileModel
userId = ProfileModel().getProfile(userId, reviewer)
if not isinstance(userId, dict):
return([])
userId = userId.get('userId')
rdl = list(set([
determine_date(
response.get("meta", {}).get(
"responseCompleted",
response.get("updated")
)
).isoformat() for response in list(ResponseItem().find(
query={
"baseParentType": 'user',
"baseParentId": userId,
"meta.applet.@id": appletId
},
sort=[("updated", DESCENDING)]
))
]))
rdl.sort(reverse=True)
return(rdl)
def _oneResponsePerDate(responses):
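    # For each item, keep only the most recent response on each calendar date.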
newResponses = {}
for response in responses:
df = pd.DataFrame(responses[response])
df["datetime"] = df.date
df["date"] = df.date.apply(determine_date)
df.sort_values(by=['datetime'], ascending=False, inplace=True)
df = df.groupby('date').first()
df.drop('datetime', axis=1, inplace=True)
df['date'] = df.index
newResponses[response] = df.to_dict(orient="records")
return(newResponses)
|
py | b41747456c10227ba4705b0582692784b4c875d6 | import sys
from dataclasses import (_FIELD, _FIELD_INITVAR, _FIELDS, _HAS_DEFAULT_FACTORY, _POST_INIT_NAME, MISSING, _create_fn,
_field_init, _init_param, _set_new_attribute, dataclass, field, fields)
# change of signature introduced in python 3.10.1
if sys.version_info >= (3, 10, 1):
_field_init_real = _field_init
def _field_init(f, frozen, locals, self_name):
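        # The trailing False fills the parameter added in 3.10.1 (the slots flag, going by
        # the upstream change), so the old four-argument call keeps working.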
return _field_init_real(f, frozen, locals, self_name, False)
|
py | b41748a5f494027e9776a10fd8bc09aa739831fa | import boto3
import os
import json
print(os.environ['AWS_ACCESS_KEY_ID'])
session = boto3.Session(
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY']
)
dynamodb = session.resource('dynamodb', region_name='us-east-2')
db = dynamodb.Table('esVerbDefs')
print(db.creation_date_time)
data = json.load(open('verbs.json'))
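# Note: for bulk loads, boto3's Table.batch_writer() context manager batches
# put_item calls automatically; the loop below issues one request per item.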
for key, value in data.items():
if(value[0]['tense'] == 'Infinitive'):
for en in value[0]['translation'].split('; '):
if(en[:3] != 'to '):
en = 'to ' + en
db.put_item(
Item = {
'EN': en,
'ES': key,
'score': 0
}
)
print("EN: " + en + " / ES: " + key) |
py | b41748caf0170bd36ab1210a4e1c728c8a124e93 | from django.apps import AppConfig as BaseAppConfig
from django.utils.translation import ugettext_lazy as _
class AppConfig(BaseAppConfig):
name = "pinax.points"
label = "pinax_points"
verbose_name = _("Pinax Points")
|
py | b417496d5037969bacd8a15834051e6f71cb9b97 | import os
from configparser import ConfigParser
from appdirs import user_data_dir
class ConfigManager:
def __init__(self):
self.config_file = None
self.config_dict = None
# get("UI.language") or get("UI", "language")
def get(self, *keys):
if len(keys) == 1:
keys = keys[0].split(".")
return self.config_dict.get(*keys)
# set("UI.language", "en") or set("UI", "language", "en")
def set(self, *keys_and_value):
if len(keys_and_value) == 2:
keys_and_value = keys_and_value[0].split(".") + [keys_and_value[1]]
self.config_dict.set(*keys_and_value)
self._save_config()
def init(self):
data_dir = self._get_app_data_dir()
if not os.path.exists(data_dir):
os.mkdir(data_dir)
self.config_file = os.path.join(data_dir, "config.ini")
if os.path.exists(self.config_file):
self.config_dict = ConfigParser()
self.config_dict.read(self.config_file)
else:
self.config_dict = self._get_default_config()
self._save_config()
self._ensure_dir_exists()
return self
def _save_config(self):
with open(self.config_file, 'w') as configfile:
self.config_dict.write(configfile)
def _ensure_dir_exists(self):
dir_list = [self.get("PATH", "notes_dir"), self.get("PATH", "notes_index_dir")]
for dir_item in dir_list:
if not os.path.exists(dir_item):
os.mkdir(dir_item)
@classmethod
def _get_app_data_dir(cls):
return user_data_dir(appname="OrchidNote", appauthor=False)
@classmethod
def _get_default_config(cls):
data_dir = cls._get_app_data_dir()
config_parser = ConfigParser()
config_parser["DEFAULT"] = {
"db_file": os.path.join(data_dir, "note.sqlite"),
"notes_dir": os.path.join(data_dir, "notes"),
"notes_index_dir": os.path.join(data_dir, "notes_index"),
"language": "en"
}
config_parser["PATH"] = {
"db_file": os.path.join(data_dir, "note.sqlite"),
"notes_dir": os.path.join(data_dir, "notes"),
"notes_index_dir": os.path.join(data_dir, "notes_index")
}
config_parser["APP"] = {
"language": "en",
"window_title": "Orchid Note"
}
config_parser["UI"] = {
"todo_finished_color": "#00FF00",
"todo_progress_bg_color": "#FFFFFF",
"todo_progress_bg_start": "#00FF00",
"todo_progress_bg_end": '#006600'
}
return config_parser
config = ConfigManager().init()
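# Minimal usage sketch (not part of the original module); section and option
# names are taken from the defaults defined above.
if __name__ == "__main__":
    print(config.get("APP", "window_title"))          # positional form
    print(config.get("APP.language"))                 # dotted form, equivalent
    config.set("UI.todo_finished_color", "#00CC00")   # writes config.ini immediately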
|
py | b41749bb9f6210509a3ff281d23502294cbb19ee | """
Django settings for breakout project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import json
from pathlib import Path
from django.utils.translation import ugettext_lazy as _
try:
with open('/etc/breakout_config.json') as config_file:
config = json.load(config_file)
except Exception:
    with open(os.path.expanduser('~/Documents/dev/srv/breakout/breakout_config.json')) as config_file:
config = json.load(config_file)
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
SITE_ID = 1
INTERNAL_IPS = [
# ...
'127.0.0.1',
'192.168.178.27',
# ...
]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# Application definition
DJANGO_CORE_APPS = [
'mailer',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
]
THIRD_PARTY_APPS = [
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.contrib.routable_page',
'wagtail.contrib.styleguide',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'wagtail.contrib.modeladmin',
"wagtail.contrib.table_block",
'wagtail.contrib.settings',
'wagtail_modeltranslation',
'wagtail_modeltranslation.makemigrations',
'wagtail_modeltranslation.migrate',
'wagtailfontawesome',
'modeltranslation',
'modelcluster',
'taggit',
'paypal.standard.ipn',
'cookie_consent',
# 'blog',
'django_extensions',
'captcha',
'wagtailcaptcha',
# 'wagtail_svgmap',
'django_crontab',
'wagtailyoast',
'wagtail.locales',
'django_inlinecss',
]
MY_APPS = [
'apps.wagtail.home.apps.HomePageConfig', # has to come before flex
'apps.wagtail.myblog.apps.MyblogConfig',
'apps.wagtail.flex.apps.FlexConfig',
'apps.wagtail.site_settings.apps.SiteSettingsConfig',
'apps.wagtail.streams.apps.StreamsConfig',
'apps.wagtail.search',
'apps.users.apps.UsersConfig',
'apps.booking.apps.BookingConfig',
'apps.wagtail.menus.apps.MenusConfig',
]
INSTALLED_APPS = DJANGO_CORE_APPS + THIRD_PARTY_APPS + MY_APPS
COOKIE_CONSENT_NAME = "cookie_consent"
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
'django.middleware.locale.LocaleMiddleware', # should be after SessionMiddleware and before CommonMiddleware
]
ROOT_URLCONF = 'breakout.urls'
WAGTAIL_SVGMAP_IE_COMPAT = False
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'wagtail.contrib.settings.context_processors.settings',
],
},
},
]
WSGI_APPLICATION = 'breakout.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
DEFAULT_AUTO_FIELD='django.db.models.AutoField'
# auth
AUTH_USER_MODEL = 'users.CustomUser'
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'de'
WY_LOCALE = 'de_DE'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
WAGTAIL_CONTENT_LANGUAGES = LANGUAGES = (
('de', _('German')),
('en', _('English')),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
# ManifestStaticFilesStorage is recommended in production, to prevent outdated
# JavaScript / CSS assets being served from cache (e.g. after a Wagtail upgrade).
# See https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/#manifeststaticfilesstorage
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = '/media/'
# Wagtail settings
WAGTAIL_SITE_NAME = "breakout"
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = 'https://breakout-escaperoom.de'
# SITE_ID_FOR_SETTINGS = 1
# email setup
# Overridden immediately below by django-mailer's queueing backend.
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_BACKEND = 'mailer.backend.DbBackend'
SERVER_EMAIL = '[email protected]'
EMAIL_HOST = 'smtp.strato.de'
EMAIL_PORT = 587
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = config['EMAIL_HOST_PASSWORD']
EMAIL_USE_TLS = True
#paypal
# recaptcha
NOCAPTCHA = True
# crontab
CRONJOBS = [
('0 3 * * *', 'apps.booking.cron.delete_unused_carts')
] |
py | b4174a6b42cd8f1b01d89e8330621dad35658103 | #!/usr/bin/env python
"""
Plot mesh connectivities, facet orientations, global and local DOF ids etc.
"""
from optparse import OptionParser
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, FEDomain
import sfepy.postprocess.plot_cmesh as pc
usage = '%prog [options] filename\n' + __doc__.rstrip()
helps = {
}
def main():
parser = OptionParser(usage=usage, version='%prog')
options, args = parser.parse_args()
if len(args) == 1:
filename = args[0]
else:
        parser.print_help()
return
mesh = Mesh.from_file(filename)
output('Mesh:')
output(' dimension: %d, vertices: %d, elements: %d'
% (mesh.dim, mesh.n_nod, mesh.n_el))
domain = FEDomain('domain', mesh)
output(domain.cmesh)
domain.cmesh.cprint(1)
dim = domain.cmesh.dim
ax = pc.plot_wireframe(None, domain.cmesh)
ax = pc.plot_entities(ax, domain.cmesh, 0, 'k')
ax = pc.label_global_entities(ax, domain.cmesh, 0, 'k', 12)
ax = pc.label_local_entities(ax, domain.cmesh, 0, 'k', 8)
ax = pc.plot_entities(ax, domain.cmesh, 1, 'b')
ax = pc.label_global_entities(ax, domain.cmesh, 1, 'b', 12)
ax = pc.label_local_entities(ax, domain.cmesh, 1, 'b', 8)
if dim == 3:
ax = pc.plot_entities(ax, domain.cmesh, 2, 'g')
ax = pc.label_global_entities(ax, domain.cmesh, 2, 'g', 12)
ax = pc.label_local_entities(ax, domain.cmesh, 2, 'g', 8)
ax = pc.plot_entities(ax, domain.cmesh, dim, 'r')
ax = pc.label_global_entities(ax, domain.cmesh, dim, 'r', 12)
pc.plt.show()
if __name__ == '__main__':
main()
|
py | b4174a8c23825c644f479575628d4e8e88638b3d | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="deepfake-ecg", # Replace with your own username
version="1.1.2",
author="Vajira Thambawita",
author_email="[email protected]",
description="Unlimited 10-sec 8-leads Deep Fake ECG generator.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/vlbthambawita/deepfake-ecg",
packages=setuptools.find_packages(),
include_package_data=True,
package_data={'deepfakeecg': ['checkpoints/g_stat.pt']},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
'numpy',
'tqdm',
'pandas',
],
) |
py | b4174bfe2d6e54ec61cefbed110b626aa55123fa |
import zmq
from .messages import serialize
class ZMQClientChannel(object):
def __init__(self, listening):
self.listening = listening
self.context = zmq.Context.instance()
self.socket = self.context.socket(zmq.DEALER)
self.socket.connect('tcp://127.0.0.1:5556')
def send(self, event):
msg = serialize(event)
if self.listening:
msg.append(b'Listening')
self.socket.send_multipart(msg)
msg = self.socket.recv_multipart()
print (msg)
def receive(self):
return self.socket.recv_multipart()
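# Illustrative peer sketch (assumes a ROUTER endpoint bound on port 5556; this
# server is not part of the module):
#   import zmq
#   router = zmq.Context.instance().socket(zmq.ROUTER)
#   router.bind('tcp://127.0.0.1:5556')
#   identity, *frames = router.recv_multipart()
#   router.send_multipart([identity, b'ack'])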
|
py | b4174c22e4ec72097c23297c9d4c4056e138a41c | from typing import List
class Solution:
    def longestCommonPrefix(self, strs: List[str]) -> str:
        # Shrink the candidate prefix (the first string) until every string starts with it.
        prefix = strs[0] if strs else ""
        for s in strs[1:]:
            while prefix and not s.startswith(prefix):
                prefix = prefix[:-1]
        return prefix
|
py | b4174d9cd7733df00bc213142918ac6f4cd14e8a | #!/usr/bin/env python
"""
Convert text data to embeddings using doc2vec
__author__ = "Hide Inada"
__copyright__ = "Copyright 2018, Hide Inada"
__license__ = "The MIT License"
__email__ = "[email protected]"
"""
import os
import logging
import re
import numpy as np
import keras
from gensim.models.doc2vec import Doc2Vec
from project.text_to_id import map_text_to_word_list
from project.normalize_words import normalize_words
log = logging.getLogger(__name__)
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO")) # Change the 2nd arg to INFO to suppress debug logging
RESERVED_WORD_LIST = ["<UNK>", "<EOS>", "<PAD>"]
MODEL_FILE = "/tmp/tp/doc2vec_newsgroup.model"
def map_text_list_to_embedding(text_list, label_for_text_list, num_labels, label_to_id):
"""
Parameters
----------
text_list: list of str
List of text
label_for_text_list: list of str
List of labels, which is the ground truth for each text on the text_list
num_labels:
Number of labels
label_to_id: dict
Label to integer id mapping
Returns
-------
x: ndarray
Numpy array of mean word embeddings for each text.
y: ndarray
Numpy array of indices representing labels
"""
model = Doc2Vec.load(MODEL_FILE)
x_list = list()
y_list = list()
for i, text in enumerate(text_list):
log.debug("Processing post: [%d]" % (i + 1))
word_list = normalize_words(text)
v = model.infer_vector(word_list)
# For now, do not change non-zero element to 1.
label_id = label_to_id[label_for_text_list[i]]
label_id = keras.utils.to_categorical(label_id, num_labels).astype(np.float32)
label_id = label_id.reshape(1, num_labels)
x_list.append(v)
y_list.append(label_id)
x = np.array(x_list)
print(x.shape)
y = np.concatenate(y_list)
return x, y |
py | b4174da166e95b215f168db284e5bc133c25092d | # =============================================================================
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See LICENSE.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
# =============================================================================
"""Common base class for writers"""
import os
print("loading", os.path.basename(__file__))
import smtk
import smtk.attribute
import smtk.model
from .shared_data import instance as shared
TypeStringMap = {
smtk.attribute.Item.DoubleType: "double",
smtk.attribute.Item.IntType: "int",
smtk.attribute.Item.StringType: "string",
smtk.attribute.Item.VoidType: "bool",
smtk.attribute.Item.FileType: "string",
}
FLOAT_FORMAT = r"{:e}"
def _fetch_subgroup_values(group, name):
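    # Collect <name> from every subgroup and render as "{v0,v1,...}" using FLOAT_FORMAT.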
values = []
for i in range(group.numberOfGroups()):
v = group.find(i, name, smtk.attribute.SearchStyle.IMMEDIATE)
values.append(v.value())
return r"{" + ",".join([FLOAT_FORMAT.format(x) for x in values]) + r"}"
class BaseWriter:
"""Base writer class for ATS input files.
Should ONLY contain methods (no member data)
"""
def __init__(self):
""""""
# Do NOT include any member data
pass
def _get_native_model_path(self, resource):
""""""
# We only support model resource filenames for now
model_res = smtk.model.Resource.CastTo(resource)
if model_res is None:
return None
# Unfortunately, this logic is all part of SMTK lore.
uuids = model_res.entitiesMatchingFlags(smtk.model.MODEL_ENTITY, True)
if not uuids:
raise RuntimeError("No model entities in model resource")
model_uuid = uuids.pop()
if not model_res.hasStringProperty(model_uuid, "url"):
raise RuntimeError('Model resource missing "url" property')
prop_list = model_res.stringProperty(model_uuid, "url")
return prop_list[0]
def _new_list(self, parent, list_name, list_type="ParameterList"):
"""Appends ParameterList element to parent
If list_type is None, then that xml attribute is omitted
"""
new_list = shared.xml_doc.createElement("ParameterList")
new_list.setAttribute("name", list_name)
if list_type is not None:
new_list.setAttribute("type", list_type)
parent.appendChild(new_list)
return new_list
def _new_param(self, list_elem, param_name, param_type, param_value):
"""Appends Parameter element to list_elem"""
if not isinstance(param_value, str):
raise TypeError(
"trying to insert and invalid value for: ({}: {})".format(
param_name, param_value
)
)
new_param = shared.xml_doc.createElement("Parameter")
new_param.setAttribute("name", param_name)
new_param.setAttribute("type", param_type)
new_param.setAttribute("value", param_value)
list_elem.appendChild(new_param)
return new_param
def _render_associations(self, parent_elem, att, elem_name_or_list):
"""Generates Parameter element for attribute associations."""
ref_item = att.associations()
if ref_item is None:
print(
'Warning: expected attribute "{}" to have associations'.format(
att.name()
)
)
return
n = ref_item.numberOfValues()
if n == 0:
print(
'Warning: expected attribute "{}" to have associations'.format(
att.name()
)
)
return
if isinstance(elem_name_or_list, (list, tuple)) and len(elem_name_or_list) > 1:
elem_name = elem_name_or_list[0]
array_name = elem_name_or_list[1]
else:
elem_name = elem_name_or_list
array_name = elem_name
# Generate list of attribute names
value_list = list()
for i in range(n):
if ref_item.isSet(i):
value_att = ref_item.value(i)
value_list.append(value_att.name())
if len(value_list) == 0:
print(
'Warning: expected attribute "{}" to have associations'.format(
att.name()
)
)
return
if len(value_list) == 1:
self._new_param(parent_elem, elem_name, "string", value_list[0])
return
# (else) n > 1
value_string = ",".join(value_list)
array_string = "{{{}}}".format(value_string)
self._new_param(parent_elem, array_name, "Array(string)", array_string)
def _render_items(
self, parent_elem, att, param_names, force_array=False, index=None
):
"""Generates Parameter elements for items specified by param_names"""
assert isinstance(param_names, list)
for param_name in param_names:
if index is not None:
item = att.find(index, param_name)
else:
item = att.find(param_name)
if item is None:
continue
# TODO: we need to handle `ComponentType`
# skip over optional items if not enabled. Bools are never optional... weird logic here.
if item.type() != smtk.attribute.Item.VoidType and not item.isEnabled():
continue
type_string = TypeStringMap.get(item.type())
value = None
if item.type() == smtk.attribute.Item.VoidType:
value = "true" if item.isEnabled() else "false"
elif hasattr(item, "numberOfValues") and (
force_array or item.numberOfValues() > 1
):
type_string = "Array({})".format(type_string)
value_list = list()
for i in range(item.numberOfValues()):
value_list.append(item.value(i))
string_list = [str(x) for x in value_list]
value = r"{" + ",".join(string_list) + r"}"
elif hasattr(item, "value"):
                value = item.value()  # keep the native type so floats get FLOAT_FORMAT below
if isinstance(value, float):
value = FLOAT_FORMAT.format(value)
else:
value = str(value)
else:
raise NotImplementedError(
"({}) for ({}) is not handled".format(item.type(), param_name)
)
self._new_param(parent_elem, param_name, type_string, value)
return
def _render_io_event_specs(self, parent_elem, io_event):
extensible_groups = {
"cycles start period stop": {
"array": ["Start Cycle", "Cycle Period", "Stop Cycle",],
"items": [],
},
"times start period stop": {
"array": ["Start Time", "Time Period", "Stop Time",],
"items": ["units"], # NOTE: assumes all items are string
},
"times": {"array": ["times"], "items": ["units"],},
}
sub_items = [
"cycles", # Int
]
# add the sub items
self._render_items(parent_elem, io_event, sub_items)
# add each array of values
dbl_type_string = "Array({})".format("double")
def _get_array_values(group, items, idx=0):
string_list = [str(group.find(idx, nm).value()) for nm in items]
values = r"{" + ",".join(string_list) + r"}"
return values
for group_name in extensible_groups.keys():
if group_name.startswith("cycle"):
type_string = "Array({})".format("int")
else:
type_string = "Array({})".format("double")
event_group = io_event.find(group_name)
if event_group.isEnabled():
meta = extensible_groups[group_name]
item_names = meta["items"]
array_names = meta["array"]
n = event_group.numberOfGroups()
if n > 1:
for i in range(n):
name = group_name + " {}".format(i)
values = _get_array_values(event_group, array_names, i)
self._new_param(parent_elem, name, type_string, values)
for item in item_names:
value = str(event_group.find(i, item).value())
self._new_param(
parent_elem, name + " " + item, "string", value
)
else:
values = _get_array_values(event_group, array_names)
self._new_param(parent_elem, group_name, type_string, values)
for item in item_names:
value = str(event_group.find(item).value())
self._new_param(
parent_elem, group_name + " " + item, "string", value
)
# TODO: handle times group (non extensible)
return
def _render_function(self, parent_elem, params):
"""`params` is the `function` string item: att.find(_i, "function")"""
func_type = params.value()
if func_type == "constant":
constant_elem = self._new_list(parent_elem, "function-constant")
self._render_items(constant_elem, params, ["value",])
elif func_type == "function-tabular":
tabular_elem = self._new_list(parent_elem, "function-tabular")
group = params.find("tabular-data")
x_values = _fetch_subgroup_values(group, "X")
y_values = _fetch_subgroup_values(group, "Y")
self._new_param(tabular_elem, "x values", "Array(double)", x_values)
self._new_param(tabular_elem, "y values", "Array(double)", y_values)
forms = params.find("forms")
values = []
if forms.find("linear").isEnabled():
values.append("linear")
if forms.find("constant").isEnabled():
values.append("constant")
if len(values):
forms_values = r"{" + ",".join([x for x in values]) + r"}"
self._new_param(tabular_elem, "forms", "Array(string)", forms_values)
elif func_type == "function-linear":
linear_elem = self._new_list(parent_elem, "function-linear")
# breakpoint()
y = params.find("y0").value()
self._new_param(linear_elem, "y0", "double", FLOAT_FORMAT.format(y))
group = params.find("linear-data")
x_values = _fetch_subgroup_values(group, "x0")
g_values = _fetch_subgroup_values(group, "gradient")
self._new_param(linear_elem, "x0", "Array(double)", x_values)
self._new_param(linear_elem, "gradient", "Array(double)", g_values)
elif func_type == "function-file":
tabular_elem = self._new_list(parent_elem, "function-tabular")
options = ["file", "x header", "y header"]
self._render_items(tabular_elem, params, options)
def _render_region_function(
self, parent_elem, att, name=None, _i=0, _recursive=True
):
if att.numberOfGroups() > 1 and _recursive:
for i in range(att.numberOfGroups()):
self._render_region_function(
parent_elem, att, name=None, _i=i, _recursive=False
)
return
if name is None:
name = att.find(_i, "function name").value()
the_group = self._new_list(parent_elem, name)
# add region
regions_comp = att.find(_i, "regions")
value_list = [
regions_comp.value(k).name() for k in range(regions_comp.numberOfValues())
]
if len(value_list) == 1:
self._new_param(the_group, "region", "string", value_list[0])
else:
regions = r"{" + ", ".join(value_list) + r"}"
self._new_param(the_group, "regions", "Array(string)", regions)
# add components
component = str(att.find(_i, "components").value())
if component in ("cell", "face", "boundary_face"):
self._new_param(the_group, "component", "string", component)
else:
components = "{" + component + "}"
self._new_param(the_group, "components", "Array(string)", components)
function_sub_elem = self._new_list(the_group, "function")
params = att.find(_i, "function")
self._render_function(function_sub_elem, params)
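# ---------------------------------------------------------------------------
# Hedged, standalone sketch (added for illustration; not used by BaseWriter).
# It shows the <ParameterList>/<Parameter> shape that _new_list()/_new_param()
# build through shared.xml_doc, using xml.dom.minidom directly so no smtk
# session is needed to follow it. The element names and values are made up.
def _demo_parameter_xml():
    from xml.dom import minidom
    doc = minidom.Document()
    plist = doc.createElement("ParameterList")
    plist.setAttribute("name", "demo function")
    plist.setAttribute("type", "ParameterList")
    doc.appendChild(plist)
    param = doc.createElement("Parameter")
    param.setAttribute("name", "value")
    param.setAttribute("type", "double")
    param.setAttribute("value", FLOAT_FORMAT.format(1.5))
    plist.appendChild(param)
    return doc.toprettyxml(indent="  ")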
|
py | b4174de912f2ba34e2087ce800a9205d1516232b | """
Django settings for info_service project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-y9cw-woo75!@e%s^=n6rx46t@5q93ot#-a(lpxpk7w_i*=%_z2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third-Party Apps
"rest_framework",
"django.contrib.postgres",
# Local Apps
"info_service.coreapp",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'info_service.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'info_service.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
if 'RDS_DB_NAME' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['RDS_DB_NAME'],
'USER': os.environ['RDS_USERNAME'],
'PASSWORD': os.environ['RDS_PASSWORD'],
'HOST': os.environ['RDS_HOSTNAME'],
'PORT': os.environ['RDS_PORT'],
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'cird',
'USER': 'cird',
'PASSWORD': 'cird',
'HOST': 'localhost',
'PORT': '5432',
}
}
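# Hedged usage note (assumption, not part of the original settings): when the
# environment defines the RDS_* variables, for example
#   export RDS_DB_NAME=info RDS_USERNAME=app RDS_PASSWORD=secret \
#          RDS_HOSTNAME=db.example.com RDS_PORT=5432
# the first branch above is taken (typical for AWS-hosted deployments);
# otherwise a local PostgreSQL database named 'cird' is assumed to exist.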
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
ALLOWED_HOSTS = ['*'] |
py | b4174eaa12142b28a66980219d9e793201eab417 | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_1
from isi_sdk_8_2_1.models.smb_openfiles import SmbOpenfiles # noqa: E501
from isi_sdk_8_2_1.rest import ApiException
class TestSmbOpenfiles(unittest.TestCase):
"""SmbOpenfiles unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSmbOpenfiles(self):
"""Test SmbOpenfiles"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_1.models.smb_openfiles.SmbOpenfiles() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b41752f6e4a92c93a60a49be63c8b07d70432271 | # Generated by Django 3.0.5 on 2020-04-15 21:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("core", "0001_initial"),
]
operations = [
migrations.AlterModelOptions(
name="service", options={"ordering": ["name", "uuid"]},
),
]
|
py | b417534bee39db15f1bebaf5c7ffaec967597c73 | import colors
import part1, part2
day = '13'
test1answer = 17
test2answer = None
if __name__ == '__main__':
print('AdventOfCode 2021 Day ' + day)
print('\nTESTING')
print('Part 1: ', end='')
outcome = part1.part1('day' + day + '/test1.txt')
if outcome == test1answer:
print(colors.bcolors.OK + 'Answer: ' + str(outcome) + ', Expected: ' + str(test1answer) + ' [PASSED]' + colors.bcolors.RESET)
else:
print(colors.bcolors.FAIL + 'Answer: ' + str(outcome) + ', Expected: ' + str(test1answer) + ' [FAILED]' + colors.bcolors.RESET)
print('Part 2: ', end='')
outcome = part2.part2('day' + day + '/test2.txt')
if outcome == test2answer:
print(colors.bcolors.OK + 'Answer: ' + str(outcome) + ', Expected: ' + str(test2answer) + ' [PASSED]' + colors.bcolors.RESET)
else:
print(colors.bcolors.FAIL + 'Answer: ' + str(outcome) + ', Expected: ' + str(test2answer) + ' [FAILED]' + colors.bcolors.RESET)
print('\nSOLUTIONS')
print('Solution part 1: ' + str(part1.part1('day' + day + '/input.txt')))
print('Solution part 2: ' + str(part2.part2('day' + day + '/input.txt'))) |
py | b417537aa1314c6ab762d09cf4072b0571006a11 | import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="customdatasrc", parent_name="funnel", **kwargs):
super(CustomdatasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
py | b417538daab4754d00fbcb99f883cf385519c48a | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains gsutil base unit test case class."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import logging
import os
import sys
import tempfile
import six
import boto
from boto.utils import get_utf8able_str
from gslib import project_id
from gslib import wildcard_iterator
from gslib.boto_translation import BotoTranslation
from gslib.cloud_api_delegator import CloudApiDelegator
from gslib.command_runner import CommandRunner
from gslib.cs_api_map import ApiMapConstants
from gslib.cs_api_map import ApiSelector
from gslib.discard_messages_queue import DiscardMessagesQueue
from gslib.tests.mock_logging_handler import MockLoggingHandler
from gslib.tests.testcase import base
import gslib.tests.util as util
from gslib.tests.util import unittest
from gslib.tests.util import WorkingDirectory
from gslib.utils.constants import UTF8
from gslib.utils.text_util import print_to_fd
def _AttemptToCloseSysFd(fd):
"""Suppress IOError when closing sys.stdout or sys.stderr in tearDown."""
# In PY2, if another sibling thread/process tried closing it at the same
# time we did, it succeeded either way, so we just continue. This approach
# was taken from https://github.com/pytest-dev/pytest/pull/3305.
if not six.PY2: # This doesn't happen in PY3, AFAICT.
fd.close()
return
try:
fd.close()
except IOError:
pass
class GsutilApiUnitTestClassMapFactory(object):
"""Class map factory for use in unit tests.
BotoTranslation is used for all cases so that GSMockBucketStorageUri can
be used to communicate with the mock XML service.
"""
@classmethod
def GetClassMap(cls):
"""Returns a class map for use in unit tests."""
gs_class_map = {
ApiSelector.XML: BotoTranslation,
ApiSelector.JSON: BotoTranslation
}
s3_class_map = {ApiSelector.XML: BotoTranslation}
class_map = {'gs': gs_class_map, 's3': s3_class_map}
return class_map
@unittest.skipUnless(util.RUN_UNIT_TESTS, 'Not running integration tests.')
class GsUtilUnitTestCase(base.GsUtilTestCase):
"""Base class for gsutil unit tests."""
@classmethod
def setUpClass(cls):
base.GsUtilTestCase.setUpClass()
cls.mock_bucket_storage_uri = util.GSMockBucketStorageUri
cls.mock_gsutil_api_class_map_factory = GsutilApiUnitTestClassMapFactory
cls.logger = logging.getLogger()
cls.command_runner = CommandRunner(
bucket_storage_uri_class=cls.mock_bucket_storage_uri,
gsutil_api_class_map_factory=cls.mock_gsutil_api_class_map_factory)
# Ensure unit tests don't fail if no default_project_id is defined in the
# boto config file.
project_id.UNIT_TEST_PROJECT_ID = 'mock-project-id-for-unit-tests'
def setUp(self):
super(GsUtilUnitTestCase, self).setUp()
self.bucket_uris = []
self.stdout_save = sys.stdout
self.stderr_save = sys.stderr
fd, self.stdout_file = tempfile.mkstemp()
sys.stdout = os.fdopen(fd, 'wb+')
fd, self.stderr_file = tempfile.mkstemp()
# do not set sys.stderr to be 'wb+' - it will blow up the logger
sys.stderr = os.fdopen(fd, 'w+')
self.accumulated_stdout = []
self.accumulated_stderr = []
self.root_logger = logging.getLogger()
self.is_debugging = self.root_logger.isEnabledFor(logging.DEBUG)
self.log_handlers_save = self.root_logger.handlers
fd, self.log_handler_file = tempfile.mkstemp()
self.log_handler_stream = os.fdopen(fd, 'w+')
self.temp_log_handler = logging.StreamHandler(self.log_handler_stream)
self.root_logger.handlers = [self.temp_log_handler]
def tearDown(self):
super(GsUtilUnitTestCase, self).tearDown()
self.root_logger.handlers = self.log_handlers_save
self.temp_log_handler.flush()
self.temp_log_handler.close()
self.log_handler_stream.seek(0)
log_output = self.log_handler_stream.read()
self.log_handler_stream.close()
os.unlink(self.log_handler_file)
sys.stdout.seek(0)
sys.stderr.seek(0)
if six.PY2:
stdout = sys.stdout.read()
stderr = sys.stderr.read()
else:
try:
stdout = sys.stdout.read()
stderr = sys.stderr.read()
except UnicodeDecodeError:
sys.stdout.seek(0)
sys.stderr.seek(0)
stdout = sys.stdout.buffer.read()
stderr = sys.stderr.buffer.read()
[six.ensure_text(string) for string in self.accumulated_stderr]
[six.ensure_text(string) for string in self.accumulated_stdout]
stdout = six.ensure_text(get_utf8able_str(stdout))
stderr = six.ensure_text(get_utf8able_str(stderr))
stdout += ''.join(self.accumulated_stdout)
stderr += ''.join(self.accumulated_stderr)
_AttemptToCloseSysFd(sys.stdout)
_AttemptToCloseSysFd(sys.stderr)
sys.stdout = self.stdout_save
sys.stderr = self.stderr_save
os.unlink(self.stdout_file)
os.unlink(self.stderr_file)
_id = six.ensure_text(self.id())
if self.is_debugging and stdout:
print_to_fd('==== stdout {} ====\n'.format(_id), file=sys.stderr)
print_to_fd(stdout, file=sys.stderr)
print_to_fd('==== end stdout ====\n', file=sys.stderr)
if self.is_debugging and stderr:
print_to_fd('==== stderr {} ====\n'.format(_id), file=sys.stderr)
print_to_fd(stderr, file=sys.stderr)
print_to_fd('==== end stderr ====\n', file=sys.stderr)
if self.is_debugging and log_output:
print_to_fd('==== log output {} ====\n'.format(_id), file=sys.stderr)
print_to_fd(log_output, file=sys.stderr)
print_to_fd('==== end log output ====\n', file=sys.stderr)
def RunCommand(self,
command_name,
args=None,
headers=None,
debug=0,
return_stdout=False,
return_stderr=False,
return_log_handler=False,
cwd=None):
"""Method for calling gslib.command_runner.CommandRunner.
Passes parallel_operations=False for all tests, optionally saving/returning
stdout output. We run all tests multi-threaded, to exercise those more
complicated code paths.
TODO: Change to run with parallel_operations=True for all tests. At
present when you do this it causes many test failures.
Args:
command_name: The name of the command being run.
args: Command-line args (arg0 = actual arg, not command name ala bash).
headers: Dictionary containing optional HTTP headers to pass to boto.
debug: Debug level to pass in to boto connection (range 0..3).
return_stdout: If True, will save and return stdout produced by command.
return_stderr: If True, will save and return stderr produced by command.
return_log_handler: If True, will return a MockLoggingHandler instance
that was attached to the command's logger while running.
cwd: The working directory that should be switched to before running the
command. The working directory will be reset back to its original
value after running the command. If not specified, the working
directory is left unchanged.
Returns:
One or a tuple of requested return values, depending on whether
return_stdout, return_stderr, and/or return_log_handler were specified.
Return Types:
stdout - binary
stderr - str (binary in Py2, text in Py3)
log_handler - MockLoggingHandler
"""
args = args or []
command_line = six.ensure_text(' '.join([command_name] + args))
if self.is_debugging:
print_to_fd('\nRunCommand of {}\n'.format(command_line),
file=self.stderr_save)
# Save and truncate stdout and stderr for the lifetime of RunCommand. This
# way, we can return just the stdout and stderr that was output during the
# RunNamedCommand call below.
sys.stdout.seek(0)
sys.stderr.seek(0)
stdout = sys.stdout.read()
stderr = sys.stderr.read()
if stdout:
self.accumulated_stdout.append(stdout)
if stderr:
self.accumulated_stderr.append(stderr)
sys.stdout.seek(0)
sys.stderr.seek(0)
sys.stdout.truncate()
sys.stderr.truncate()
mock_log_handler = MockLoggingHandler()
logging.getLogger(command_name).addHandler(mock_log_handler)
if debug:
logging.getLogger(command_name).setLevel(logging.DEBUG)
try:
with WorkingDirectory(cwd):
self.command_runner.RunNamedCommand(command_name,
args=args,
headers=headers,
debug=debug,
parallel_operations=False,
do_shutdown=False)
finally:
sys.stdout.seek(0)
sys.stderr.seek(0)
if six.PY2:
stdout = sys.stdout.read()
stderr = sys.stderr.read()
else:
try:
stdout = sys.stdout.read()
stderr = sys.stderr.read()
except UnicodeDecodeError:
sys.stdout.seek(0)
sys.stderr.seek(0)
stdout = sys.stdout.buffer.read().decode(UTF8)
stderr = sys.stderr.buffer.read().decode(UTF8)
logging.getLogger(command_name).removeHandler(mock_log_handler)
mock_log_handler.close()
log_output = '\n'.join(
'%s:\n ' % level + '\n '.join(records)
for level, records in six.iteritems(mock_log_handler.messages)
if records)
_id = six.ensure_text(self.id())
if self.is_debugging and log_output:
print_to_fd('==== logging RunCommand {} {} ====\n'.format(
_id, command_line),
file=self.stderr_save)
print_to_fd(log_output, file=self.stderr_save)
print_to_fd('\n==== end logging ====\n', file=self.stderr_save)
if self.is_debugging and stdout:
print_to_fd('==== stdout RunCommand {} {} ====\n'.format(
_id, command_line),
file=self.stderr_save)
print_to_fd(stdout, file=self.stderr_save)
print_to_fd('==== end stdout ====\n', file=self.stderr_save)
if self.is_debugging and stderr:
print_to_fd('==== stderr RunCommand {} {} ====\n'.format(
_id, command_line),
file=self.stderr_save)
print_to_fd(stderr, file=self.stderr_save)
print_to_fd('==== end stderr ====\n', file=self.stderr_save)
# Reset stdout and stderr files, so that we won't print them out again
# in tearDown if debugging is enabled.
sys.stdout.seek(0)
sys.stderr.seek(0)
sys.stdout.truncate()
sys.stderr.truncate()
to_return = []
if return_stdout:
to_return.append(stdout)
if return_stderr:
to_return.append(stderr)
if return_log_handler:
to_return.append(mock_log_handler)
if len(to_return) == 1:
return to_return[0]
return tuple(to_return)
@classmethod
def MakeGsUtilApi(cls, debug=0):
gsutil_api_map = {
ApiMapConstants.API_MAP:
(cls.mock_gsutil_api_class_map_factory.GetClassMap()),
ApiMapConstants.SUPPORT_MAP: {
'gs': [ApiSelector.XML, ApiSelector.JSON],
's3': [ApiSelector.XML]
},
ApiMapConstants.DEFAULT_MAP: {
'gs': ApiSelector.JSON,
's3': ApiSelector.XML
}
}
return CloudApiDelegator(cls.mock_bucket_storage_uri,
gsutil_api_map,
cls.logger,
DiscardMessagesQueue(),
debug=debug)
@classmethod
def _test_wildcard_iterator(cls, uri_or_str, debug=0):
"""Convenience method for instantiating a test instance of WildcardIterator.
This makes it unnecessary to specify all the params of that class
(like bucket_storage_uri_class=mock_storage_service.MockBucketStorageUri).
Also, naming the factory method this way makes it clearer in the test code
that WildcardIterator needs to be set up for testing.
Args are same as for wildcard_iterator.wildcard_iterator(), except
there are no class args for bucket_storage_uri_class or gsutil_api_class.
Args:
uri_or_str: StorageUri or string representing the wildcard string.
debug: debug level to pass to the underlying connection (0..3)
Returns:
WildcardIterator, over which caller can iterate.
"""
# TODO: Remove when tests no longer pass StorageUri arguments.
uri_string = uri_or_str
if hasattr(uri_or_str, 'uri'):
uri_string = uri_or_str.uri
return wildcard_iterator.CreateWildcardIterator(uri_string,
cls.MakeGsUtilApi())
@staticmethod
def _test_storage_uri(uri_str, default_scheme='file', debug=0, validate=True):
"""Convenience method for instantiating a testing instance of StorageUri.
This makes it unnecessary to specify
bucket_storage_uri_class=mock_storage_service.MockBucketStorageUri.
Also naming the factory method this way makes it clearer in the test
code that StorageUri needs to be set up for testing.
Args, Returns, and Raises are same as for boto.storage_uri(), except there's
no bucket_storage_uri_class arg.
Args:
uri_str: Uri string to create StorageUri for.
default_scheme: Default scheme for the StorageUri
debug: debug level to pass to the underlying connection (0..3)
validate: If True, validate the resource that the StorageUri refers to.
Returns:
StorageUri based on the arguments.
"""
return boto.storage_uri(uri_str, default_scheme, debug, validate,
util.GSMockBucketStorageUri)
def CreateBucket(self,
bucket_name=None,
test_objects=0,
storage_class=None,
provider='gs'):
"""Creates a test bucket.
The bucket and all of its contents will be deleted after the test.
Args:
bucket_name: Create the bucket with this name. If not provided, a
temporary test bucket name is constructed.
test_objects: The number of objects that should be placed in the bucket or
a list of object names to place in the bucket. Defaults to
0.
      storage_class: storage class to use. If not provided we use standard.
provider: string provider to use, default gs.
Returns:
StorageUri for the created bucket.
"""
bucket_name = bucket_name or self.MakeTempName('bucket')
bucket_uri = boto.storage_uri(
'%s://%s' % (provider, bucket_name.lower()),
suppress_consec_slashes=False,
bucket_storage_uri_class=util.GSMockBucketStorageUri)
bucket_uri.create_bucket(storage_class=storage_class)
self.bucket_uris.append(bucket_uri)
try:
iter(test_objects)
except TypeError:
test_objects = [self.MakeTempName('obj') for _ in range(test_objects)]
for i, name in enumerate(test_objects):
self.CreateObject(bucket_uri=bucket_uri,
object_name=name,
contents='test {}'.format(i).encode(UTF8))
return bucket_uri
def CreateObject(self, bucket_uri=None, object_name=None, contents=None):
"""Creates a test object.
Args:
bucket_uri: The URI of the bucket to place the object in. If not
specified, a new temporary bucket is created.
object_name: The name to use for the object. If not specified, a temporary
test object name is constructed.
contents: The contents to write to the object. If not specified, the key
is not written to, which means that it isn't actually created
yet on the server.
Returns:
A StorageUri for the created object.
"""
bucket_uri = bucket_uri or self.CreateBucket(provider=self.default_provider)
object_name = object_name or self.MakeTempName('obj')
key_uri = bucket_uri.clone_replace_name(object_name)
if contents is not None:
key_uri.set_contents_from_string(contents)
return key_uri
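# Hedged usage sketch (added for illustration; not part of gsutil's real test
# suite). It shows the typical shape of a unit test built on the helpers above;
# the class and test names are made up.
class _ExampleUnitTestSketch(GsUtilUnitTestCase):
  """Illustrative subclass exercising CreateBucket/CreateObject on the mock."""
  def test_create_bucket_and_object_sketch(self):
    bucket_uri = self.CreateBucket()
    key_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'hello')
    self.assertEqual(bucket_uri.bucket_name, key_uri.bucket_name)
    # Running a command against the mock would look roughly like
    # (shape only, not verified here):
    #   stdout = self.RunCommand('ls', [bucket_uri.uri], return_stdout=True)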
|
py | b4175413a395486a159c7b7438d893b87918dc80 | import seaborn as sns
print(sns.__version__)
"""# Analisando as notas em geral"""
import pandas as pd
notas = pd.read_csv("ratings.csv")
notas.head()
notas.shape
notas.columns = ["usuarioId", "filmeId", "nota", "momento"]
notas.head()
notas['nota'].unique()
notas['nota'].value_counts()
print("Media",notas['nota'].mean())
print("Mediana",notas['nota'].median())
notas.nota.head()
notas.nota.plot(kind='hist')
notas.nota.describe()
import seaborn as sns
sns.boxplot(notas.nota)
"""# Olhando os filmes"""
filmes = pd.read_csv("movies.csv")
filmes.columns = ["filmeId", "titulo", "generos"]
filmes.head()
notas.head()
"""# Analisando algumas notas especificas por filme"""
notas.query("filmeId==1").nota.mean()
notas.query("filmeId==2").nota.mean()
medias_por_filme = notas.groupby("filmeId").mean().nota
medias_por_filme.head()
medias_por_filme.plot(kind='hist')
import matplotlib.pyplot as plt
plt.figure(figsize=(5,8))
sns.boxplot(y=medias_por_filme)
medias_por_filme.describe()
sns.distplot(medias_por_filme)
plt.hist(medias_por_filme)
plt.title("Histograma das médias dos filmes")
tmdb = pd.read_csv("tmdb_5000_movies.csv")
tmdb.head()
tmdb.original_language.unique() # nominal categorical
# "primeiro grau" (primary school)
# "segundo grau" (secondary school)
# "terceiro grau" (higher education)
# 1st level < 2nd level < 3rd level # ordinal categorical
# budget => continuous quantitative
# number of votes => 1, 2, 3, 4; there is no such thing as 2.5 votes
# movielens ratings => 0.5, 1, 1.5, ..., 5; there is no 2.7
tmdb["original_language"].value_counts().index
tmdb["original_language"].value_counts().values
contagem_de_lingua = tmdb["original_language"].value_counts().to_frame().reset_index()
contagem_de_lingua.columns = ["original_language", "total"]
contagem_de_lingua.head()
sns.barplot(x="original_language", y = "total", data = contagem_de_lingua)
sns.catplot(x = "original_language", kind="count", data = tmdb)
plt.pie(contagem_de_lingua["total"], labels = contagem_de_lingua["original_language"])
total_por_lingua = tmdb["original_language"].value_counts()
total_geral = total_por_lingua.sum()
total_de_ingles = total_por_lingua.loc["en"]
total_do_resto = total_geral - total_de_ingles
print(total_de_ingles, total_do_resto)
dados = {
'lingua' : ['ingles','outros'],
'total' : [total_de_ingles, total_do_resto]
}
dados = pd.DataFrame(dados)
sns.barplot(x="lingua", y="total", data = dados)
plt.pie(dados["total"], labels = dados["lingua"])
total_por_lingua_de_outros_filmes = tmdb.query("original_language != 'en'").original_language.value_counts()
total_por_lingua_de_outros_filmes
filmes_sem_lingua_original_em_ingles = tmdb.query("original_language != 'en'")
sns.catplot(x = "original_language", kind="count",
data = filmes_sem_lingua_original_em_ingles)
sns.catplot(x = "original_language", kind="count",
data = filmes_sem_lingua_original_em_ingles,
aspect = 2,
palette="GnBu_d",
order = total_por_lingua_de_outros_filmes.index)
"""# Revisando o papel da média, mediana, medidas de tendência central, dispersão, desvio padrão, box plot, histograma"""
filmes.head(2)
notas_do_toy_story = notas.query("filmeId==1")
notas_do_jumanji = notas.query("filmeId==2")
print(len(notas_do_toy_story), len(notas_do_jumanji))
print("Nota média do Toy Story %.2f" % notas_do_toy_story.nota.mean())
print("Nota média do Jumanji %.2f" % notas_do_jumanji.nota.mean())
print(notas_do_toy_story.nota.std(), notas_do_jumanji.nota.std())
print("Nota mediana do Toy Story %.2f" % notas_do_toy_story.nota.median())
print("Nota mediana do Jumanji %.2f" % notas_do_jumanji.nota.median())
import numpy as np
filme1 = np.append(np.array([2.5] * 10), np.array([3.5] * 10))
filme2 = np.append(np.array([5] * 10), np.array([1] * 10))
print(filme1.mean(), filme2.mean())
print(np.std(filme1), np.std(filme2))
print(np.median(filme1), np.median(filme2))
plt.hist(filme1)
plt.hist(filme2)
plt.boxplot([filme1, filme2])
plt.boxplot([notas_do_toy_story.nota, notas_do_jumanji.nota])
sns.boxplot(x = "filmeId", y = "nota", data = notas.query("filmeId in [1,2,3,4,5]"))
|
py | b4175451c3bf3e6c152bf7acfd21496292072176 | # Generated by Django 3.2.7 on 2021-11-28 13:16
import cloudinary.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pics', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='tags',
new_name='Category',
),
migrations.AddField(
model_name='image',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='pics.category'),
),
migrations.AlterField(
model_name='image',
name='description',
field=models.TextField(max_length=60),
),
migrations.AlterField(
model_name='image',
name='image',
field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='image'),
),
]
|
py | b41754811e363f3dc0012eaac81d7acb4ae46fc5 | from torch import nn
class Encoder(nn.Module):
def __init__(self, seq_len, n_features, embedding_dim):
super(Encoder, self).__init__()
self.seq_len, self.n_features = seq_len, n_features
self.embedding_dim, self.hidden_dim = embedding_dim, 2 * embedding_dim
self.rnn1 = nn.LSTM(
input_size=n_features,
hidden_size=self.hidden_dim,
num_layers=1,
batch_first=True
)
self.rnn2 = nn.LSTM(
input_size=self.hidden_dim,
hidden_size=embedding_dim,
num_layers=1,
batch_first=True
)
def forward(self, x):
x = x.reshape((1, self.seq_len, self.n_features))
x, (_, _) = self.rnn1(x)
x, (hidden_n, _) = self.rnn2(x)
return hidden_n.reshape((self.n_features, self.embedding_dim))
class Decoder(nn.Module):
def __init__(self, seq_len, input_dim, n_features=1):
super(Decoder, self).__init__()
self.seq_len, self.input_dim = seq_len, input_dim
self.hidden_dim, self.n_features = 2 * input_dim, n_features
self.rnn1 = nn.LSTM(
input_size=input_dim,
hidden_size=input_dim,
num_layers=1,
batch_first=True
)
self.rnn2 = nn.LSTM(
input_size=input_dim,
hidden_size=self.hidden_dim,
num_layers=1,
batch_first=True
)
self.output_layer = nn.Linear(self.hidden_dim, n_features)
def forward(self, x):
x = x.repeat(self.seq_len, self.n_features)
x = x.reshape((self.n_features, self.seq_len, self.input_dim))
x, (hidden_n, cell_n) = self.rnn1(x)
x, (hidden_n, cell_n) = self.rnn2(x)
x = x.reshape((self.seq_len, self.hidden_dim))
return self.output_layer(x)
class RecurrentAutoencoder(nn.Module):
def __init__(self, seq_len, n_features, embedding_dim, device):
super(RecurrentAutoencoder, self).__init__()
self.encoder = Encoder(seq_len, n_features, embedding_dim).to(device)
self.decoder = Decoder(seq_len, embedding_dim, n_features).to(device)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
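# Hedged usage sketch (added for illustration): shapes follow the reshape calls
# above and assume the single-feature case that Decoder defaults to.
if __name__ == "__main__":
    import torch
    seq_len, n_features, embedding_dim = 30, 1, 64
    model = RecurrentAutoencoder(seq_len, n_features, embedding_dim, device="cpu")
    x = torch.randn(seq_len, n_features)    # one sequence of 30 time steps
    reconstruction = model(x)               # comes back as shape (30, 1)
    print(x.shape, reconstruction.shape)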
|
py | b41754a05c1aa10639fe385c873487cf47bedb9f | from open_publishing.core.enums import ValueStatus
from open_publishing.core import Field
from open_publishing.core.enums import Subscription
class SubscriptionsField(Field):
def __init__(self,
user):
super(SubscriptionsField, self).__init__(database_object=user,
aspect='email.*')
self._value = None
@property
def value(self):
if self.status is ValueStatus.none:
raise RuntimeError('Accessing to field which is not set')
else :
return self._value.copy()
def hard_set(self,
value):
if isinstance(value, set):
for sub in value:
if sub not in Subscription:
raise ValueError('Unknown subscriptions used : {0}'.format(sub))
self._value = value
self._status = ValueStatus.hard
else:
raise ValueError('Expected set of subscriptions, got {0}'.format(type(value)))
def update(self,
gjp):
if self._status is not ValueStatus.hard:
master_obj = self._master_object(gjp)
if 'email' in master_obj:
self._value = set()
if master_obj['email']['grin_info_mail']:
if master_obj['email']['buyer_newsletter']:
self._value.add(Subscription.buyer)
if master_obj['email']['author_newsletter']:
self._value.add(Subscription.author)
if master_obj['email']['general_newsletter']:
self._value.add(Subscription.general)
self._status = ValueStatus.soft
def gjp(self,
gjp):
if self._status is ValueStatus.hard:
if 'email' not in gjp:
gjp['email'] = {}
gjp['email']['grin_info_mail'] = True if self._value else False
gjp['email']['buyer_newsletter'] = Subscription.buyer in self._value
gjp['email']['author_newsletter'] = Subscription.author in self._value
gjp['email']['general_newsletter'] = Subscription.general in self._value
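# Hedged, illustrative walk-through (comments only, since constructing the field
# requires a real user database object): after field.hard_set({Subscription.author}),
# calling field.gjp(gjp) on an empty dict fills it, per the method above, with
#   {'email': {'grin_info_mail': True, 'buyer_newsletter': False,
#              'author_newsletter': True, 'general_newsletter': False}}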
|
py | b41754f9905c71ccb81712b268957f94d9b75608 | import click
from model.utils.data_generator import DataGenerator
from model.img2seq import Img2SeqModel
from model.utils.lr_schedule import LRSchedule
from model.utils.general import Config
from model.utils.text import Vocab
from model.utils.image import greyscale
@click.command()
@click.option('--data', default="configs/data_small.json",
help='Path to data json config')
@click.option('--vocab', default="configs/vocab_small.json",
help='Path to vocab json config')
@click.option('--training', default="configs/training_small.json",
help='Path to training json config')
@click.option('--model', default="configs/model.json",
help='Path to model json config')
@click.option('--output', default="results/small/",
help='Dir for results and model weights')
def main(data, vocab, training, model, output):
# Load configs
dir_output = output
config = Config([data, vocab, training, model])
config.save(dir_output)
vocab = Vocab(config)
# Load datasets
train_set = DataGenerator(path_formulas=config.path_formulas_train,
dir_images=config.dir_images_train, img_prepro=greyscale,
max_iter=config.max_iter, bucket=config.bucket_train,
path_matching=config.path_matching_train,
max_len=config.max_length_formula,
form_prepro=vocab.form_prepro)
val_set = DataGenerator(path_formulas=config.path_formulas_val,
dir_images=config.dir_images_val, img_prepro=greyscale,
max_iter=config.max_iter, bucket=config.bucket_val,
path_matching=config.path_matching_val,
max_len=config.max_length_formula,
form_prepro=vocab.form_prepro)
# Define learning rate schedule
n_batches_epoch = ((len(train_set) + config.batch_size - 1) //
config.batch_size)
lr_schedule = LRSchedule(lr_init=config.lr_init,
start_decay=config.start_decay*n_batches_epoch,
end_decay=config.end_decay*n_batches_epoch,
end_warm=config.end_warm*n_batches_epoch,
lr_warm=config.lr_warm,
lr_min=config.lr_min)
# Build model and train
model = Img2SeqModel(config, dir_output, vocab)
model.build_train(config)
model.train(config, train_set, val_set, lr_schedule)
if __name__ == "__main__":
main()
|
py | b41755358495baa31d9fe5a8e868fecc94dbd26d | import glob_conf
from util import Util
import pandas as pd
class Test_predictor():
def __init__(self, model, orig_df, label_encoder, name):
"""Constructor setting up name and configuration"""
self.model = model
self.orig_df = orig_df
self.label_encoder = label_encoder
self.target = glob_conf.config['DATA']['target']
self.util = Util()
self.name = name
def predict_and_store(self):
predictions = self.model.get_predictions()
df = pd.DataFrame(index = self.orig_df.index)
df['speaker'] = self.orig_df['speaker']
df['gender'] = self.orig_df['gender']
df[self.target] = self.label_encoder.inverse_transform(predictions)
df.to_csv(self.name) |
py | b41755a154e0ef3875b703d6feb30743f47c7b2e | # -*- coding: utf-8 -*-
# File : train.py
# Author : Kai Ao
# Email : [email protected]
# Date : 2020/12/12 11:10
#
# This file is part of Rotation-Decoupled Detector.
# https://github.com/Capino512/pytorch-rotation-decoupled-detector
# Distributed under MIT License.
import sys
sys.path.append('.')
import os
import tqdm
import torch
import gc
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from data.aug.compose import Compose
from data.aug import ops
from data.dataset import Deepscores
from model.rdd import RDD
from model.backbone import resnet
from utils.adjust_lr import adjust_lr_multi_step
from utils.parallel import convert_model, CustomDetDataParallel
def main():
gc.collect()
torch.cuda.empty_cache()
dir_weight = os.path.join(dir_save, 'weight')
dir_log = os.path.join(dir_save, 'log')
os.makedirs(dir_weight, exist_ok=True)
writer = SummaryWriter(dir_log)
indexes = [int(os.path.splitext(path)[0]) for path in os.listdir(dir_weight)]
current_step = max(indexes) if indexes else 0
image_size = 768
lr = 1e-3
batch_size = 4
num_workers = 4
max_step = 2500
lr_cfg = [[1000, lr], [2000, lr / 10], [max_step, lr / 50]]
warm_up = [10, lr / 5, lr]
save_interval = 1000
aug = Compose([
ops.ToFloat(),
ops.PhotometricDistort(),
ops.RandomHFlip(),
ops.RandomVFlip(),
ops.RandomRotate90(),
ops.ResizeJitter([0.8, 1.2]),
ops.PadSquare(),
ops.Resize(image_size),
ops.BBoxFilter(24 * 24 * 0.4)
])
dataset = Deepscores(dir_dataset, ['val'], aug)
loader = DataLoader(dataset, batch_size, shuffle=True, num_workers=num_workers, pin_memory=True, drop_last=True,
collate_fn=dataset.collate)
num_classes = len(dataset.names)
prior_box = {
'strides': [8, 16, 32, 64, 128],
'sizes': [3] * 5,
'aspects': [[1, 2, 4, 8]] * 5,
'scales': [[2 ** 0, 2 ** (1 / 3), 2 ** (2 / 3)]] * 5,
}
cfg = {
'prior_box': prior_box,
'num_classes': num_classes,
'extra': 2,
}
model = RDD(backbone(fetch_feature=True), cfg)
model.build_pipe(shape=[2, 3, image_size, image_size])
if current_step:
model.restore(os.path.join(dir_weight, '%d.pth' % current_step))
else:
model.init()
if len(device_ids) > 1:
model = convert_model(model)
model = CustomDetDataParallel(model, device_ids)
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
training = True
while training and current_step < max_step:
tqdm_loader = tqdm.tqdm(loader)
for images, targets, infos in tqdm_loader:
current_step += 1
adjust_lr_multi_step(optimizer, current_step, lr_cfg, warm_up)
images = images.cuda() / 255
losses = model(images, targets)
loss = sum(losses.values())
loss.backward()
optimizer.step()
optimizer.zero_grad()
for key, val in list(losses.items()):
losses[key] = val.item()
writer.add_scalar(key, val, global_step=current_step)
writer.flush()
tqdm_loader.set_postfix(losses)
tqdm_loader.set_description(f'<{current_step}/{max_step}>')
if current_step % save_interval == 0:
save_path = os.path.join(dir_weight, '%d.pth' % current_step)
state_dict = model.state_dict() if len(device_ids) == 1 else model.module.state_dict()
torch.save(state_dict, save_path)
cache_file = os.path.join(dir_weight, '%d.pth' % (current_step - save_interval))
if os.path.exists(cache_file):
os.remove(cache_file)
if current_step >= max_step:
training = False
writer.close()
break
if __name__ == '__main__':
torch.manual_seed(0)
torch.backends.cudnn.benchmark = True
#device_ids = [0, 1]
device_ids = [0]
torch.cuda.set_device(device_ids[0])
backbone = resnet.resnet101
dir_dataset = '../ds2_dense/'
dir_save = '../output_train/'
main()
|
py | b41755a9635c805290fc3a2f1cbd0dc24810161d | #!/usr/bin/env python
import signal
import os
import time
import pty
import sys  # needed for sys.stdout.fileno() in the child branches below
def signal_handler (signum, frame):
print 'Signal handler called with signal:', signum
print 'signal.SIGCHLD=', signal.SIGKILL
# First thing we do is set up a handler for SIGCHLD.
signal.signal (signal.SIGCHLD, signal.SIG_IGN)
print 'PART 1 -- Test signal handling with empty pipe.'
# Create a child process for us to kill.
try:
pid, fd = pty.fork()
except Exception as e:
print str(e)
if pid == 0:
# os.write (sys.stdout.fileno(), 'This is a test.\n This is a test.')
time.sleep(10000)
print 'Sending SIGKILL to child pid:', pid
os.kill (pid, signal.SIGKILL)
# SIGCHLD should interrupt sleep.
# Note that this is a race.
# It is possible that the signal handler will get called
# before we try to sleep, but this has not happened yet.
# But in that case we can only tell by order of printed output.
print 'Entering sleep...'
try:
time.sleep(10)
except:
print 'sleep was interrupted by signal.'
# Just for fun let's see if the process is alive.
try:
os.kill(pid, 0)
print 'Child is alive. This is ambiguous because it may be a Zombie.'
except OSError as e:
print 'Child appears to be dead.'
print 'PART 2 -- Test signal handling with full pipe.'
# Create a child process for us to kill.
try:
pid, fd = pty.fork()
except Exception as e:
print str(e)
if pid == 0:
os.write (sys.stdout.fileno(), 'This is a test.\n This is a test.')
time.sleep(10000)
print 'Sending SIGKILL to child pid:', pid
os.kill (pid, signal.SIGKILL)
# SIGCHLD should interrupt sleep.
# Note that this is a race.
# It is possible that the signal handler will get called
# before we try to sleep, but this has not happened yet.
# But in that case we can only tell by order of printed output.
print 'Entering sleep...'
try:
time.sleep(10)
except:
print 'sleep was interrupted by signal.'
# Just for fun let's see if the process is alive.
try:
os.kill(pid, 0)
print 'Child is alive. This is ambiguous because it may be a Zombie.'
except OSError as e:
print 'Child appears to be dead.'
|
py | b41757ee6741eed421f529e6a140b82e9c65ce47 | import re
from odoo import http
from odoo.http import request
from werkzeug.exceptions import Forbidden
import odoo.addons.website_sale.controllers.main as main
# from odoo.addons.br_base.tools.fiscal import validate_cnpj, validate_cpf
from odoo.addons.portal.controllers.portal import CustomerPortal
class L10nBrWebsiteSale(main.WebsiteSale):
def _get_mandatory_billing_fields(self):
res = super(L10nBrWebsiteSale, self)._get_mandatory_billing_fields()
res.remove("city")
return res + [
"l10n_br_cnpj_cpf",
"zip",
"l10n_br_number",
"l10n_br_district",
"state_id",
"city_id",
]
def _get_mandatory_shipping_fields(self):
res = super(L10nBrWebsiteSale, self)._get_mandatory_shipping_fields()
res.remove("city")
return res + [
"zip",
"l10n_br_number",
"l10n_br_district",
"state_id",
"city_id",
]
@http.route(
["/shop/get_cities"],
type="json",
auth="public",
methods=["POST"],
website=True,
)
def get_cities_json(self, state_id):
if state_id and state_id.isdigit():
cities = (
request.env["res.city"]
.sudo()
.search([("state_id", "=", int(state_id))])
)
return [(city.id, city.name) for city in cities]
return []
@http.route(
["/shop/get_states"],
type="json",
auth="public",
methods=["POST"],
website=True,
)
def get_states_json(self, country_id):
if country_id and country_id.isdigit():
states = (
request.env["res.country.state"]
.sudo()
.search([("country_id", "=", int(country_id))])
)
return [(state.id, state.name) for state in states]
return []
def checkout_form_validate(self, mode, all_form_values, data):
errors, error_msg = super(
L10nBrWebsiteSale, self
).checkout_form_validate(mode, all_form_values, data)
cnpj_cpf = data.get("l10n_br_cnpj_cpf", "0")
email = data.get("email", False)
        # TODO validate the CPF/CNPJ fields (see the commented-out checks below)
# if cnpj_cpf and len(cnpj_cpf) == 18:
# # if not validate_cnpj(cnpj_cpf):
# errors["l10n_br_cnpj_cpf"] = u"invalid"
# error_msg.append(('CNPJ Inválido!'))
# elif cnpj_cpf and len(cnpj_cpf) == 14:
# # if not validate_cpf(cnpj_cpf):
# errors["l10n_br_cnpj_cpf"] = u"invalid"
# error_msg.append(('CPF Inválido!'))
partner_id = data.get("partner_id", False)
if cnpj_cpf:
domain = [("l10n_br_cnpj_cpf", "=", cnpj_cpf)]
if partner_id and mode[0] == "edit":
domain.append(("id", "!=", partner_id))
existe = request.env["res.partner"].sudo().search_count(domain)
if existe > 0:
errors["l10n_br_cnpj_cpf"] = u"invalid"
error_msg.append(("CPF/CNPJ já cadastrado"))
if email:
domain = [("email", "=", email)]
if partner_id and mode[0] == "edit":
domain.append(("id", "!=", partner_id))
existe = request.env["res.partner"].sudo().search_count(domain)
if existe > 0:
errors["email"] = u"invalid"
error_msg.append(("E-mail já cadastrado"))
return errors, error_msg
def values_postprocess(self, order, mode, values, errors, error_msg):
new_values, errors, error_msg = super(
L10nBrWebsiteSale, self
).values_postprocess(order, mode, values, errors, error_msg)
new_values["l10n_br_cnpj_cpf"] = values.get("l10n_br_cnpj_cpf", None)
new_values["company_type"] = values.get("company_type", None)
is_comp = (
False if values.get("company_type", None) == "person" else True
)
new_values["is_company"] = is_comp
if "city_id" in values and values["city_id"] != "":
new_values["city_id"] = int(values.get("city_id", 0))
if "state_id" in values and values["state_id"] != "":
new_values["state_id"] = int(values.get("state_id", 0))
if "country_id" in values and values["country_id"] != "":
new_values["country_id"] = int(values.get("country_id", 0))
new_values["l10n_br_number"] = values.get("l10n_br_number", None)
new_values["street2"] = values.get("street2", None)
new_values["l10n_br_district"] = values.get("l10n_br_district", None)
return new_values, errors, error_msg
def _checkout_form_save(self, mode, checkout, all_values):
Partner = request.env["res.partner"]
if mode[0] == "new":
partner_id = Partner.sudo().create(checkout)
elif mode[0] == "edit":
partner_id = int(all_values.get("partner_id", 0))
if partner_id:
# double check
order = request.website.sale_get_order()
shippings = Partner.sudo().search(
[
(
"id",
"child_of",
order.partner_id.commercial_partner_id.ids,
)
]
)
if (
partner_id not in shippings.mapped("id")
and partner_id != order.partner_id.id
):
return Forbidden()
Partner.browse(partner_id).sudo().write(checkout)
return partner_id
@http.route()
def address(self, **kw):
result = super(L10nBrWebsiteSale, self).address(**kw)
partner_id = 0
if "partner_id" in result.qcontext:
partner_id = result.qcontext["partner_id"]
if partner_id > 0:
partner_id = request.env["res.partner"].sudo().browse(partner_id)
result.qcontext["city"] = partner_id.city_id.id
result.qcontext["state"] = partner_id.state_id.id
if "city_id" in kw and kw["city_id"]:
result.qcontext["city"] = kw["city_id"]
return result
@http.route(
["/shop/zip_search"],
type="json",
auth="public",
methods=["POST"],
website=True,
)
def search_zip_json(self, zip):
if len(zip) >= 8:
cep = re.sub("[^0-9]", "", zip)
vals = (
request.env["res.partner"].sudo().search_address_by_zip(cep)
)
if vals:
return {
"sucesso": True,
"street": vals["street"],
"l10n_br_district": vals["l10n_br_district"],
"city_id": vals["city_id"],
"state_id": vals["state_id"],
"country_id": vals["country_id"],
}
return {"sucesso": False}
class BrWebsiteMyAccount(CustomerPortal):
MANDATORY_BILLING_FIELDS = [
"name",
"phone",
"email",
"street",
"l10n_br_cnpj_cpf",
"l10n_br_number",
"l10n_br_district",
"zipcode",
"company_type",
"city_id",
"state_id",
"country_id",
]
OPTIONAL_BILLING_FIELDS = ["street2"]
@http.route(["/my/account"], type="http", auth="user", website=True)
def account(self, redirect=None, **post):
if "zip" in post:
post["zipcode"] = post.pop("zip")
return super(BrWebsiteMyAccount, self).account(
redirect=redirect, **post
)
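# Hedged client-side sketch (illustration only; host and sample values are
# assumptions): Odoo `type="json"` routes such as /shop/zip_search above expect
# a JSON-RPC 2.0 envelope, so a raw HTTP call could look roughly like:
#   import requests
#   resp = requests.post(
#       "http://localhost:8069/shop/zip_search",
#       json={"jsonrpc": "2.0", "method": "call", "params": {"zip": "01310-100"}},
#   )
#   print(resp.json().get("result"))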
|
py | b417582536589c830e68b792b02351bcb1693ea6 | import requests
from bs4 import BeautifulSoup
import JokeManager
url = "http://www.qiushibaike.com/text/"
headers = {
'Connection':'keep-alive',
'Cache-Control':'max-age=0',
'Accept': 'text/html, */*; q=0.01',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.8,ja;q=0.6'
}
s = requests.session()
s.headers.update(headers)
res = s.get(url,headers = headers)
if res.ok:
html = res.content.decode(encoding='utf-8')
html = BeautifulSoup(html,'html.parser')
divs = html.find_all(name='div',attrs={"class":"article block untagged mb15"})
jokes = []
for div in divs:
# print(div)
joke_id = int(div.get('id').split('_')[2])
content_div = div.find_all(name='div',attrs={'class':'content'})[0]
jokes.append((joke_id,content_div.get_text().strip()))
    # save the scraped jokes to Redis
JokeManager.saveJokesToRedis(jokes)
print('done.')
else:
print('error when accessing url:%s' % url) |
py | b41759287c72907cc0d0ab1620e27744ec74de6f | import numpy as np
import freud
from benchmark import Benchmark
from benchmarker import run_benchmarks
class BenchmarkDensityRDF(Benchmark):
def __init__(self, rmax, dr, rmin):
self.rmax = rmax
self.dr = dr
self.rmin = rmin
def bench_setup(self, N):
self.box_size = self.rmax*3.1
np.random.seed(0)
self.points = np.random.random_sample((N, 3)).astype(np.float32) \
* self.box_size - self.box_size/2
self.rdf = freud.density.RDF(self.rmax, self.dr, rmin=self.rmin)
self.box = freud.box.Box.cube(self.box_size)
def bench_run(self, N):
self.rdf.accumulate(self.box, self.points)
self.rdf.compute(self.box, self.points)
def run():
Ns = [1000, 10000]
rmax = 10.0
dr = 1.0
rmin = 0
number = 100
name = 'freud.density.RDF'
classobj = BenchmarkDensityRDF
return run_benchmarks(name, Ns, number, classobj,
rmax=rmax, dr=dr, rmin=rmin)
if __name__ == '__main__':
run()
|
py | b4175928ec00287f8823de3e03c440ed2bdeef8c | from tqdm import tqdm
from numpy import float32
import copy
import os
import itertools
from collections import OrderedDict
import tensorflow as tf
import numpy as np
# TODO check if optimizers are always ops? Maybe there is a better check
def is_optimizer(x):
return hasattr(x, 'op_def')
def is_summary(x):
return isinstance(x, tf.Tensor) and x.dtype is tf.string
def is_float(x):
return isinstance(x, tf.Tensor) and x.dtype is tf.float32
def is_scalar(x):
return isinstance(x, tf.Tensor) and x.dtype is tf.float32 and len(x.shape) == 0
class Evaluator(object):
def __init__(self, provided_sources, scope="", writer=None,
network=None, tokenizer=None): # debug purpose only, do not use in the code
self.provided_sources = provided_sources
self.scope = scope
self.writer = writer
if len(scope) > 0 and not scope.endswith("/"):
self.scope += "/"
self.use_summary = False
# Debug tools (should be removed on the long run)
self.network=network
self.tokenizer = tokenizer
    def process(self, sess, iterator, outputs, out_net=None, inference=False, listener=None, type_data="train"):
assert isinstance(outputs, list), "outputs must be a list"
original_outputs = list(outputs)
is_training = any([is_optimizer(x) for x in outputs])
# if listener is not None:
# outputs += [listener.require()] # add require outputs
# # outputs = flatten(outputs) # flatten list (when multiple requirement)
# outputs = list(OrderedDict.fromkeys(outputs)) # remove duplicate while preserving ordering
# listener.before_epoch(is_training)
n_iter = 1.
aggregated_outputs = [0.0 for v in outputs if is_scalar(v) and v in original_outputs]
good_predict = {}
bad_predict = {}
image_id = None
self.good_predict = 0
self.bad_predict = 0
for batch in tqdm(iterator):
# Appending is_training flag to the feed_dict
batch["is_training"] = is_training
if inference:
image_id = batch["image_id"]
batch_1 = {}
batch_2 = {}
question= batch["question_word"]
images_id = batch["image_id"]
# crops_id = batch["crop_id"]
for key,values in batch.items():
if key!="question_word" and key!= "image_id":
pass
elif type(values) != bool :
batch_1[key] = [values[0]]
batch_2[key] = [values[1]]
else:
batch_1[key] = values
batch_2[key] = values
if inference == False:
batch = {key:value for key,value in batch.items() if key!="question_word" and key!= "image_id"}
results = self.execute(sess, outputs,batch,type_data )
if inference:
old_batch = batch
batch = {key:value for key,value in batch.items() if key!="question_word" and key!= "image_id"}
prediction = self.execute(sess,out_net,batch, type_data)
results = self.execute(sess, outputs,batch, type_data )
# for question,gold,prediction in zip(old_batch["question"],old_batch["answer"],prediction):
# gold_argmax = np.argmax(gold)
# predict_argmax = np.argmax(prediction)
# self.good_predict += 1
# if gold_argmax == predict_argmax:
# self.good_predict += 1
# print("GOOD | Image_id ={}, Question= {}, Categorie_object={}, gold={}, prediction={}, proba_predict = {}".format(image_id,question,categories_object,gold_argmax,predict_argmax,prediction) )
# # print("GOOD | Question= {}, gold={}, prediction={}, proba_predict = {}".format(question,gold_argmax,predict_argmax,prediction) )
# else:
# self.bad_predict += 1
# print("BAD | Image_id ={}, Question= {}, Categorie_object={}, gold={}, prediction={}, proba_predict = {}".format(image_id,question,categories_object,gold_argmax,predict_argmax,prediction) )
# # print("BAD | Question= {}, gold={}, prediction={}, proba_predict = {}".format(question,gold_argmax,predict_argmax,prediction) )
# exit()
print(".... old__batch={} ".format(old_batch))
for image_id,question,categories_object,gold,prediction in zip(old_batch["image_id"],old_batch["question_word"],old_batch["category"],old_batch["answer"],prediction):
print("....... image ..............")
                    gold_argmax = np.argmax(gold)
                    predict_argmax = np.argmax(prediction)
                    if gold_argmax == predict_argmax:
                        self.good_predict += 1
print("GOOD | Image_id ={}, Question= {}, Categorie_object={}, gold={}, prediction={}, proba_predict = {}".format(image_id,question,categories_object,gold_argmax,predict_argmax,prediction) )
else:
self.bad_predict += 1
print("BAD | Image_id ={}, Question= {}, Categorie_object={}, gold={}, prediction={}, proba_predict = {}".format(image_id,question,categories_object,gold_argmax,predict_argmax,prediction) )
i = 0
for var, result in zip(outputs, results):
if is_scalar(var) and var in original_outputs:
# moving average
aggregated_outputs[i] = ((n_iter - 1.) / n_iter) * aggregated_outputs[i] + result / n_iter
i += 1
elif is_summary(var): # move into listener?
self.writer.add_summary(result)
if listener is not None and listener.valid(var):
listener.after_batch(result, batch, is_training)
n_iter += 1
if listener is not None:
listener.after_epoch(is_training)
print(" Result good_predict = {} , bad_predict={}".format(self.good_predict,self.bad_predict))
return aggregated_outputs
def execute(self, sess, output, batch,type_data = "Train"):
#print("+++++++++++++++++++++",batch.items())
feed_dict = {self.scope +key + ":0": value for key, value in batch.items() if key in self.provided_sources}
# print("-- [{}] question = {}--".format(type_data, feed_dict["oracle/question:0"]))
# print("shape = {} ".format(np.asarray(feed_dict["oracle/question:0"]).shape ))
# for key,value in feed_dict.items():
# print("-- [{}] Feed_Dict = {} : {}-- ".format(type_data,key,np.asarray(value).shape ))
# exit()
# print("answer = {}".format(feed_dict["oracle/answer:0"]))
# print("-- [{}] image_values = {}--".format(type_data, feed_dict["oracle/image:0"]))
# print("========================================================================")
# print("-- [{}] Output = {} ".format(type_data, output))
# print("-- [{}] result = {} ".format(type_data,sess.run(output, feed_dict=feed_dict)))
# print("")
if type_data == "Valid":
# out = output[0].eval(feed_dict = feed_dict)
# print("Sess_emb = {}".format(out))
# print("Shape = {}".format(out.shape))
# print("Other = {}".format(tf.get_variable("oracle/l2_normalize:0").eval(feed_dict = feed_dict)))
# print("ouput = {}".format(sess.run(output[1], feed_dict=feed_dict)))
# exit()
pass
# print("resultat_softmax = {}".format(sess.run(tf.get_variable("oracle/mlp/Softmax_1:0"), feed_dict=feed_dict)))
# exit()
return sess.run(output, feed_dict=feed_dict)
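# Minimal sketch of the placeholder naming used by execute() above: each batch entry
# listed in provided_sources is fed to the tensor "<scope><source>:0". The "oracle/"
# scope used in the example comment is only an illustrative assumption.
def _sketch_feed_dict(scope, batch, provided_sources):
    """Build the placeholder-name -> value mapping the way execute() does."""
    return {scope + key + ":0": value
            for key, value in batch.items() if key in provided_sources}
# e.g. _sketch_feed_dict("oracle/", {"question": [[1, 2]], "image_id": [7]}, ["question"])
# -> {"oracle/question:0": [[1, 2]]}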
class MultiGPUEvaluator(object):
"""
    Wrapper for evaluating on multiple GPUs
parameters
----------
provided_sources: list of sources
Each source has num_gpus placeholders with name:
name_scope[gpu_index]/network_scope/source
network_scope: str
Variable scope of the model
name_scopes: list of str
List that defines name_scope for each GPU
"""
def __init__(self, provided_sources, name_scopes, writer=None,
networks=None, tokenizer=None): #Debug purpose only, do not use here
# Dispatch sources
self.provided_sources = provided_sources
self.name_scopes = name_scopes
self.writer = writer
self.multi_gpu_sources = []
for source in self.provided_sources:
for name_scope in name_scopes:
self.multi_gpu_sources.append(os.path.join(name_scope, source))
# Debug tools, do not use in the code!
self.networks = networks
self.tokenizer = tokenizer
def process(self, sess, iterator, outputs, listener=None):
        assert listener is None, "Listeners are not yet supported with the multi-gpu evaluator"
assert isinstance(outputs, list), "outputs must be a list"
# check for optimizer to define training/eval mode
is_training = any([is_optimizer(x) for x in outputs])
# Prepare epoch
        n_iter = 0.  # incremented to 1 before the first aggregated batch is processed below
aggregated_outputs = [0.0 for v in outputs if is_scalar(v)]
scope_to_do = list(self.name_scopes)
multi_gpu_batch = dict()
for batch in tqdm(iterator):
assert len(scope_to_do) > 0
# apply training mode
batch['is_training'] = is_training
# update multi-gpu batch
name_scope = scope_to_do.pop()
for source, v in batch.items():
multi_gpu_batch[os.path.join(name_scope, source)] = v
if not scope_to_do: # empty list -> multi_gpu_batch is ready!
n_iter += 1
# Execute the batch
results = self.execute(sess, outputs, multi_gpu_batch)
# reset mini-batch
scope_to_do = list(self.name_scopes)
multi_gpu_batch = dict()
# process the results
i = 0
for var, result in zip(outputs, results):
if is_scalar(var) and var in outputs:
# moving average
aggregated_outputs[i] = ((n_iter - 1.) / n_iter) * aggregated_outputs[i] + result / n_iter
i += 1
elif is_summary(var): # move into listener?
self.writer.add_summary(result)
# No listener as "results" may arrive in different orders... need to find a way to unshuffle them
return aggregated_outputs
def execute(self, sess, output, batch):
feed_dict = {key + ":0": value for key, value in batch.items() if key in self.multi_gpu_sources}
return sess.run(output, feed_dict=feed_dict) |
py | b417599877c47ad536fa85be56d8389ea17dd3b0 | # -*- coding: utf-8 -*-
##
# Copyright (C) 2007 Ingeniweb
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; see the file LICENSE. If not, write to the
# Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
Unit test main script
"""
from Products.PloneGlossary.tests import PloneGlossaryTestCase
from Products.PloneGlossary.utils import html2text
from Products.PloneGlossary.utils import find_word
from Products.PloneGlossary.utils import encode_ascii
from Products.PloneGlossary.utils import LOG
class TestPloneGlossary(PloneGlossaryTestCase.PloneGlossaryTestCase):
def afterSetUp(self):
self.loginAsPortalOwner()
self.glossary = self.addGlossary(
self.portal,
u'General',
(u'Sport', u'Tennis \t\n', u'Open source'))
self.logout()
def testGetGlossaries(self):
self.loginAsPortalOwner()
medical_glossary = self.addGlossary(
self.portal, u'Medical', (u'ADN', u'Bone', u'Heart'))
uids = []
uids.append(self.glossary.UID())
uids.append(medical_glossary.UID())
uids.sort()
# Test PloneGlossaryTool->getGlossaryUIDs
glossary_uids = sorted(self.glossary_tool.getGlossaryUIDs())
self.assertEquals(glossary_uids, uids)
# Test PloneGlossaryTool->getGlossaries
glossary_uid = self.glossary.UID()
glossaries = self.glossary_tool.getGlossaries(
glossary_uids=[glossary_uid])
glossary = glossaries[0]
self.assertEquals(glossary.UID(), glossary_uid)
glossary_uids = [x.UID() for x in self.glossary_tool.getGlossaries()]
glossary_uids.sort()
self.assertEquals(glossary_uids, uids)
self.logout()
def testGetAvailableGlossaryMetaTypes(self):
self.loginAsPortalOwner()
tool = self.glossary_tool
available_metatypes = tool.getAvailableGlossaryMetaTypes()
glossary_metatypes = tool.glossary_metatypes
# test available metatypes, base glossary selected by default
self.assertEquals(available_metatypes,
('PloneGlossary', 'ExampleGlossary'))
self.assertEquals(glossary_metatypes, ('PloneGlossary',))
# test : only selected metatypes are returned by getGlossaryUIDs
glossary = self.glossary
glossaryuid = glossary.UID()
exampleglossary = self.addExampleGlossary(
self.portal,
'Example',
(u'Sport', u'Tennis', u'Open source'))
exampleuid = exampleglossary.UID()
# test :
glossary_uids = sorted(self.glossary_tool.getGlossaryUIDs())
self.assertEquals(glossary_uids, [glossaryuid])
# test : add a glossary type
tool.glossary_metatypes = ('PloneGlossary', 'ExampleGlossary')
glossary_uids = list(self.glossary_tool.getGlossaryUIDs())
glossary_uids.sort()
uids = [glossaryuid, exampleuid]
uids.sort()
self.assertEquals(glossary_uids, uids)
LOG.info("testGetAvailableGlossaryMetaTypes passed")
self.logout()
def testGetGeneralGlossaryUIDs(self):
self.loginAsPortalOwner()
medical_glossary = self.addGlossary(self.portal, u'Medical',
(u'ADN', u'Bone', u'Heart'))
all_uids = []
all_uids.append(self.glossary.UID())
all_uids.append(medical_glossary.UID())
all_uids.sort()
general_glossaries_uids = sorted(
self.glossary_tool.getGeneralGlossaryUIDs())
self.assertEquals(general_glossaries_uids, all_uids)
self.glossary_tool.general_glossary_uids = (medical_glossary.UID(),)
general_glossaries_uids = self.glossary_tool.getGeneralGlossaryUIDs()
self.assertEquals(list(general_glossaries_uids),
[medical_glossary.UID()])
def testTextRelatedTerms(self):
self.loginAsPortalOwner()
gtool = self.glossary_tool
glossary_uids = gtool.getGlossaryUIDs()
glossary_term_items = gtool._getGlossaryTermItems(glossary_uids)
terms = sorted(self.glossary_tool._getTextRelatedTermItems(
"Le tennis est un sport", glossary_term_items))
terms = [t['title'] for t in terms]
self.assertEquals(terms, ['Sport', u'Tennis \t\n'])
terms = list(self.glossary_tool._getTextRelatedTermItems(
"Le tennis est un sport", glossary_term_items,
excluded_terms=('Tennis',)))
terms.sort()
terms = [t['title'] for t in terms]
self.assertEquals(terms, ['Sport'])
def testObjectRelatedTerms(self):
self.loginAsPortalOwner()
# Add french document
doc = self.addFrenchDocument(
self.portal,
self.encodeInSiteCharset(u'Sport fran\xe7ais'))
glossary_uids = self.glossary_tool.getGlossaryUIDs()
terms = sorted(self.glossary_tool.getObjectRelatedTerms(
doc, glossary_uids))
result = ['Sport']
self.assertEquals(terms, result)
# Test terms using 2 words like "open source"
doc = self.addDocument(
self.portal,
self.encodeInSiteCharset(
u'English documentation'),
self.encodeInSiteCharset(u'This is an open source'),)
terms = list(self.glossary_tool.getObjectRelatedTerms(
doc, glossary_uids))
terms.sort()
result = ['Open source']
self.assertEquals(terms, result)
self.logout()
def testObjectRelatedDefinitions(self):
self.loginAsPortalOwner()
# Add french document
doc = self.addFrenchDocument(
self.portal,
self.encodeInSiteCharset(u'Sport fran\xe7ais'))
# no glossary_uid
result = self.glossary_tool.getObjectRelatedDefinitions(doc, ())
self.assertEquals(result, [])
# test normal
glossary_uids = self.glossary_tool.getGlossaryUIDs()
definitions = list(self.glossary_tool.getObjectRelatedDefinitions(
doc, glossary_uids))
self.assertEquals(len(definitions), 1)
definition = definitions[0]
self.assertEquals(definition['url'],
'http://nohost/plone/general/sport')
self.assertEquals(definition['description'], u'Definition of term')
self.assertEquals(definition['variants'], ())
self.assertEquals(definition['id'], 'sport')
self.assertEquals(definition['title'], u'Sport')
self.logout()
def testGlossaryTerms(self):
self.loginAsPortalOwner()
glossary_uids = self.glossary_tool.getGlossaryUIDs()
terms = sorted(self.glossary_tool.getGlossaryTerms(glossary_uids))
result = ['Open source', 'Sport', u'Tennis \t\n']
self.assertEquals(terms, result)
self.logout()
def testGetAbcedaire(self):
"""We should have the 1st letters of all words (merged)"""
self.loginAsPortalOwner()
abcedaire = self.glossary_tool.getAbcedaire([self.glossary.UID()])
result = ('o', 's', 't')
self.assertEquals(abcedaire, result)
brains = self.glossary_tool.getAbcedaireBrains([self.glossary.UID()],
letters=['s'])
self.assertEquals(len(brains), 1)
brain = brains[0]
self.assertEquals(brain.Title, 'Sport')
self.logout()
def testSearchResults(self):
self.loginAsPortalOwner()
brains = self.glossary_tool.searchResults(
[self.glossary.UID()], Title='Sport')
self.assertEquals(len(brains), 1)
brain = brains[0]
self.assertEquals(brain.Title, 'Sport')
self.logout()
def testFindWord(self):
"""Test find_word function in utils"""
# The text is the word
word = "ete"
text = "ete"
result = find_word(word, text)
expected_result = (0,)
self.assertEquals(result, expected_result)
# Many words
text = "l'ete ou ete"
result = find_word(word, text)
expected_result = (2, 9,)
self.assertEquals(result, expected_result)
def testVariants(self):
"""Test variants"""
self.loginAsPortalOwner()
# Add glossary
self.glossary = self.addGlossary(
self.portal,
u'Produits laitiers',
(u'Lait',
u'Beurre',
u'Fromage',
u'Crème',
u'Desserts lactés'))
# Variants of yaourt are yoghourt and yogourt
self.addGlossaryDefinition(
self.glossary,
title=u'Yaourt',
definition=u'Lait caillé ayant subi une fermentation acide.',
variants=(u'Yaourts',
u'Yoghourt',
u'Yoghourts \t',
u'yogourt',
u'yogourts'))
# Variants of fruits, to test white space in variants. But
# white space is stripped on save there. So not much
# interesting happens.
self.addGlossaryDefinition(
self.glossary,
title=u'Fruits',
definition=u'Commes des légumes, mais un peut autre.',
variants=(u'Apples',
u'Fraises \t',
u'Framboises'))
doc = self.addDocument(
self.portal,
"Dessert",
("Notre chef vous propose des fraises au yaourt et des yoghourts "
"à la vanille."))
brains = self.glossary_tool.searchResults([self.glossary.UID()],
SearchableText='Yoghourt')
self.assertEquals(brains[0].Title, 'Yaourt')
brains = self.glossary_tool.searchResults([self.glossary.UID()],
SearchableText='Fraises')
self.assertEquals(brains[0].Title, 'Fruits')
definitions = self.portal.portal_glossary.getObjectRelatedDefinitions(
doc, glossary_uids=[self.glossary.UID()])
self.assertEquals(len(definitions), 3)
definition = definitions[0]
self.assertEquals(definition['terms'], ['yaourt'])
self.assertEquals(definition['show'], 1)
definition = definitions[1]
self.assertEquals(definition['terms'], ['yoghourts'])
self.assertEquals(definition['show'], 0)
definition = definitions[2]
self.assertEquals(definition['terms'], ['fraises'])
self.assertEquals(definition['show'], 1)
def testEncoding(self):
"""Test encoding"""
self.loginAsPortalOwner()
# Add glossary
self.glossary = self.addGlossary(
self.portal,
u'Parfums Femme Chanel',
(u'Lancôme : Ô Oui',
u"Dior : J´Adore",
u'Cerruti 1881 pour Femme',
))
# Variants of yaourt are yoghourt and yogourt
self.addGlossaryDefinition(
self.glossary,
title=u'Chanel N° 5',
definition=(u"Un bouquet de fleurs abstraites d'une "
u"indéfinissable féminité."),
variants=(u'N° 5', ))
doc = self.addDocument(
self.portal,
"Le parfum de ma mère!",
("Alors pour vous dire, une très grande histoire d'amour!! et ce "
"n'est pas par hasard que ça fait maintenant plus de 80ans que "
"Chanel N° 5 se vend!"))
brains = self.glossary_tool.searchResults([self.glossary.UID()],
SearchableText='N° 5')
self.assertEquals(brains[0].Title, 'Chanel N° 5')
definitions = self.portal.portal_glossary.getObjectRelatedDefinitions(
doc, glossary_uids=[self.glossary.UID()])
definition = definitions[0]
self.assertEquals(definition['terms'], ['Chanel N° 5'])
self.assertEquals(definition['show'], 1)
def testEncodeAscii(self):
"""Test encode_ascii function from utils modules"""
utext = u'Ellipsis\u2026'
atext = encode_ascii(utext)
self.assertEquals(len(utext), len(atext))
self.assertEquals(atext, "ellipsis.")
def testHTML2Text(self):
"""Test correct splitting of HTML"""
text = html2text("<div>line1\r\nline2</div>")
self.assertEquals(text, "line1 line2")
text = html2text("<div>line1\r\n line2</div>")
self.assertEquals(text, "line1 line2")
text = html2text("<div>line1 \r\n line2</div>")
self.assertEquals(text, "line1 line2")
text = html2text("<div>line1 \r \n line2</div>")
self.assertEquals(text, "line1 line2")
text = html2text("<div><ul><li>Seleção campeã!</li></ul></div>")
self.assertEquals(text, u"- Seleção campeã!".encode("utf-8"))
text = html2text(
"<div><ul><li>Seleção campeã!</li>"
"</ul></div>")
self.assertEquals(text, u"- Seleção campeã!".encode("utf-8"))
text = html2text(
"<div><ul><li>Seleção campeã!</li></ul></div>")
self.assertEquals(text, u"- Seleção campeã!".encode("utf-8"))
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestPloneGlossary))
return suite
|
py | b4175a378d6f064f44dbb108abd0ace32ad70d23 | """
Python wrappers to external libraries
=====================================
lapack -- wrappers for LAPACK/ATLAS libraries
blas -- wrappers for BLAS/ATLAS libraries
"""
from __future__ import division, print_function, absolute_import
__all__ = ['lapack','blas']
from numpy.testing import Tester
test = Tester().test
|
py | b4175ac5d8951b4d07dd381a922feb50f8856fda | #!/usr/bin/env python3
# Copyright 1996-2020 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert world file from R2020a to R2020b keeping the NUE coordinate system."""
import sys
from transforms3d import quaternions
from webots_parser import WebotsParser
from converted_protos import converted_protos
def rotation(value, r):
q0 = quaternions.axangle2quat([float(value[0]), float(value[1]), float(value[2])], float(value[3]))
q1 = quaternions.axangle2quat([r[0], r[1], r[2]], r[3])
qr = quaternions.qmult(q0, q1)
v, theta = quaternions.quat2axangle(qr)
return [WebotsParser.str(v[0]), WebotsParser.str(v[1]), WebotsParser.str(v[2]), WebotsParser.str(theta)]
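# Minimal usage sketch for rotation(): it composes an existing axis-angle field value
# (strings, as produced by the parser) with a per-proto correction (floats). The
# quarter-turn values below are illustrative only; the helper is never called here.
def _example_compose_rotation():
    # 90 degrees about Y composed with a 90-degree correction about X
    return rotation(['0', '1', '0', '1.5708'], [1, 0, 0, 1.5708])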
def convert_to_nue(filename):
world = WebotsParser()
world.load(filename)
for node in world.content['root']:
if node['name'] == 'WorldInfo':
for field in node['fields']:
if field['name'] == 'gravity':
gravity = float(field['value'][1])
if gravity != 0:
gravity = -gravity
field['value'] = WebotsParser.str(gravity)
field['type'] = 'SFFloat'
if field['name'] == 'coordinateSystem': # world file already updated
return
node['fields'].append({'name': 'coordinateSystem', 'value': 'NUE', 'type': 'SFString'})
elif node['name'] in converted_protos:
print('Rotating', node['name'])
rotation_found = False
for field in node['fields']:
if field['name'] in ['rotation']:
rotation_found = True
field['value'] = rotation(field['value'], converted_protos[node['name']])
if not rotation_found:
node['fields'].append({'name': 'rotation',
'value': rotation(['0', '1', '0', '0'], converted_protos[node['name']]),
'type': 'SFRotation'})
world.save(filename)
if __name__ == "__main__":
# execute only if run as a script
for filename in sys.argv:
if not filename.endswith('.wbt'):
continue
print(filename)
convert_to_nue(filename)
|
py | b4175aeeab691dc765f3f8dcb63a8be972348ab8 | # =============================================================================
# OWSLib. Copyright (C) 2005 Sean C. Gillies
#
# Contact email: [email protected]
#
# $Id: wfs.py 503 2006-02-01 17:09:12Z dokai $
# =============================================================================
#owslib imports:
from owslib.ows import ServiceIdentification, ServiceProvider, OperationsMetadata
from owslib.etree import etree
from owslib.util import nspath, testXMLValue
from owslib.crs import Crs
from owslib.feature import WebFeatureService_
from owslib.namespaces import Namespaces
#other imports
import cgi
from cStringIO import StringIO
from urllib import urlencode
from urllib2 import urlopen
import logging
from owslib.util import log
# metadata parsers used when parse_remote_metadata is requested
from owslib.fgdc import Metadata
from owslib.iso import MD_Metadata
n = Namespaces()
WFS_NAMESPACE = n.get_namespace("wfs20")
OWS_NAMESPACE = n.get_namespace("ows110")
OGC_NAMESPACE = n.get_namespace("ogc")
GML_NAMESPACE = n.get_namespace("gml")
FES_NAMESPACE = n.get_namespace("fes")
class ServiceException(Exception):
pass
class WebFeatureService_2_0_0(WebFeatureService_):
"""Abstraction for OGC Web Feature Service (WFS).
Implements IWebFeatureService.
"""
def __new__(self,url, version, xml, parse_remote_metadata=False, timeout=30):
""" overridden __new__ method
@type url: string
@param url: url of WFS capabilities document
@type xml: string
@param xml: elementtree object
@type parse_remote_metadata: boolean
@param parse_remote_metadata: whether to fully process MetadataURL elements
@param timeout: time (in seconds) after which requests should timeout
@return: initialized WebFeatureService_2_0_0 object
"""
obj=object.__new__(self)
obj.__init__(url, version, xml, parse_remote_metadata, timeout)
return obj
def __getitem__(self,name):
''' check contents dictionary to allow dict like access to service layers'''
if name in self.__getattribute__('contents').keys():
return self.__getattribute__('contents')[name]
else:
raise KeyError, "No content named %s" % name
def __init__(self, url, version, xml=None, parse_remote_metadata=False, timeout=30):
"""Initialize."""
if log.isEnabledFor(logging.DEBUG):
log.debug('building WFS %s'%url)
self.url = url
self.version = version
self.timeout = timeout
self._capabilities = None
reader = WFSCapabilitiesReader(self.version)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url)
self._buildMetadata(parse_remote_metadata)
def _buildMetadata(self, parse_remote_metadata=False):
'''set up capabilities metadata objects: '''
#serviceIdentification metadata
serviceidentelem=self._capabilities.find(nspath('ServiceIdentification'))
self.identification=ServiceIdentification(serviceidentelem)
#need to add to keywords list from featuretypelist information:
featuretypelistelem=self._capabilities.find(nspath('FeatureTypeList', ns=WFS_NAMESPACE))
featuretypeelems=featuretypelistelem.findall(nspath('FeatureType', ns=WFS_NAMESPACE))
for f in featuretypeelems:
kwds=f.findall(nspath('Keywords/Keyword',ns=OWS_NAMESPACE))
if kwds is not None:
for kwd in kwds[:]:
if kwd.text not in self.identification.keywords:
self.identification.keywords.append(kwd.text)
#TODO: update serviceProvider metadata, miss it out for now
serviceproviderelem=self._capabilities.find(nspath('ServiceProvider'))
self.provider=ServiceProvider(serviceproviderelem)
#serviceOperations metadata
self.operations=[]
for elem in self._capabilities.find(nspath('OperationsMetadata'))[:]:
if elem.tag !=nspath('ExtendedCapabilities'):
self.operations.append(OperationsMetadata(elem))
#serviceContents metadata: our assumption is that services use a top-level
#layer as a metadata organizer, nothing more.
self.contents={}
featuretypelist=self._capabilities.find(nspath('FeatureTypeList',ns=WFS_NAMESPACE))
features = self._capabilities.findall(nspath('FeatureTypeList/FeatureType', ns=WFS_NAMESPACE))
for feature in features:
cm=ContentMetadata(feature, featuretypelist, parse_remote_metadata)
self.contents[cm.id]=cm
#exceptions
self.exceptions = [f.text for f \
in self._capabilities.findall('Capability/Exception/Format')]
def getcapabilities(self):
"""Request and return capabilities document from the WFS as a
file-like object.
NOTE: this is effectively redundant now"""
reader = WFSCapabilitiesReader(self.version)
return urlopen(reader.capabilities_url(self.url), timeout=self.timeout)
def items(self):
'''supports dict-like items() access'''
items=[]
for item in self.contents:
items.append((item,self.contents[item]))
return items
def getfeature(self, typename=None, filter=None, bbox=None, featureid=None,
featureversion=None, propertyname=None, maxfeatures=None,storedQueryID=None, storedQueryParams={},
method='Get', outputFormat=None, startindex=None):
"""Request and return feature data as a file-like object.
#TODO: NOTE: have changed property name from ['*'] to None - check the use of this in WFS 2.0
Parameters
----------
typename : list
List of typenames (string)
filter : string
XML-encoded OGC filter expression.
bbox : tuple
(left, bottom, right, top) in the feature type's coordinates == (minx, miny, maxx, maxy)
featureid : list
List of unique feature ids (string)
featureversion : string
Default is most recent feature version.
propertyname : list
List of feature property names. '*' matches all.
maxfeatures : int
Maximum number of features to be returned.
method : string
Qualified name of the HTTP DCP method to use.
outputFormat: string (optional)
Requested response format of the request.
startindex: int (optional)
Start position to return feature set (paging in combination with maxfeatures)
There are 3 different modes of use
1) typename and bbox (simple spatial query)
2) typename and filter (==query) (more expressive)
3) featureid (direct access to known features)
"""
url = data = None
if typename and type(typename) == type(""):
typename = [typename]
if method.upper() == "GET":
(url) = self.getGETGetFeatureRequest(typename, filter, bbox, featureid,
featureversion, propertyname,
maxfeatures, storedQueryID,
storedQueryParams, outputFormat, 'Get', startindex)
if log.isEnabledFor(logging.DEBUG):
log.debug('GetFeature WFS GET url %s'% url)
else:
(url,data) = self.getPOSTGetFeatureRequest()
# If method is 'Post', data will be None here
u = urlopen(url, data, self.timeout)
# check for service exceptions, rewrap, and return
# We're going to assume that anything with a content-length > 32k
# is data. We'll check anything smaller.
try:
length = int(u.info()['Content-Length'])
have_read = False
except KeyError:
data = u.read()
have_read = True
length = len(data)
if length < 32000:
if not have_read:
data = u.read()
try:
tree = etree.fromstring(data)
except BaseException:
# Not XML
return StringIO(data)
else:
if tree.tag == "{%s}ServiceExceptionReport" % OGC_NAMESPACE:
se = tree.find(nspath('ServiceException', OGC_NAMESPACE))
raise ServiceException(str(se.text).strip())
else:
return StringIO(data)
else:
if have_read:
return StringIO(data)
return u
def getpropertyvalue(self, query=None, storedquery_id=None, valuereference=None, typename=None, method=nspath('Get'),**kwargs):
''' the WFS GetPropertyValue method'''
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetPropertyValue').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
request = {'service': 'WFS', 'version': self.version, 'request': 'GetPropertyValue'}
if query:
request['query'] = str(query)
if valuereference:
request['valueReference'] = str(valuereference)
if storedquery_id:
request['storedQuery_id'] = str(storedquery_id)
if typename:
request['typename']=str(typename)
if kwargs:
for kw in kwargs.keys():
request[kw]=str(kwargs[kw])
encoded_request=urlencode(request)
u = urlopen(base_url + encoded_request)
return u.read()
def _getStoredQueries(self):
''' gets descriptions of the stored queries available on the server '''
sqs=[]
#This method makes two calls to the WFS - one ListStoredQueries, and one DescribeStoredQueries. The information is then
#aggregated in 'StoredQuery' objects
method=nspath('Get')
        #first make the ListStoredQueries request and save the results in a dictionary in the form {storedqueryid:(title, returnfeaturetype)}
try:
base_url = next((m.get('url') for m in self.getOperationByName('ListStoredQueries').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
request = {'service': 'WFS', 'version': self.version, 'request': 'ListStoredQueries'}
encoded_request = urlencode(request)
u = urlopen(base_url, data=encoded_request, timeout=self.timeout)
tree=etree.fromstring(u.read())
tempdict={}
for sqelem in tree[:]:
title=rft=id=None
id=sqelem.get('id')
for elem in sqelem[:]:
if elem.tag==nspath('Title', WFS_NAMESPACE):
title=elem.text
elif elem.tag==nspath('ReturnFeatureType', WFS_NAMESPACE):
rft=elem.text
tempdict[id]=(title,rft) #store in temporary dictionary
#then make the DescribeStoredQueries request and get the rest of the information about the stored queries
try:
base_url = next((m.get('url') for m in self.getOperationByName('DescribeStoredQueries').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
request = {'service': 'WFS', 'version': self.version, 'request': 'DescribeStoredQueries'}
encoded_request = urlencode(request)
        u = urlopen(base_url, data=encoded_request, timeout=self.timeout)
tree=etree.fromstring(u.read())
tempdict2={}
for sqelem in tree[:]:
params=[] #list to store parameters for the stored query description
id =sqelem.get('id')
for elem in sqelem[:]:
if elem.tag==nspath('Abstract', WFS_NAMESPACE):
abstract=elem.text
elif elem.tag==nspath('Parameter', WFS_NAMESPACE):
newparam=Parameter(elem.get('name'), elem.get('type'))
params.append(newparam)
tempdict2[id]=(abstract, params) #store in another temporary dictionary
#now group the results into StoredQuery objects:
for key in tempdict.keys():
abstract='blah'
parameters=[]
sqs.append(StoredQuery(key, tempdict[key][0], tempdict[key][1], tempdict2[key][0], tempdict2[key][1]))
return sqs
storedqueries = property(_getStoredQueries, None)
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError, "No operation named %s" % name
class StoredQuery(object):
'''' Class to describe a storedquery '''
def __init__(self, id, title, returntype, abstract, parameters):
self.id=id
self.title=title
self.returnfeaturetype=returntype
self.abstract=abstract
self.parameters=parameters
class Parameter(object):
def __init__(self, name, type):
self.name=name
self.type=type
class ContentMetadata:
"""Abstraction for WFS metadata.
Implements IMetadata.
"""
def __init__(self, elem, parent, parse_remote_metadata=False, timeout=30):
"""."""
self.id = elem.find(nspath('Name',ns=WFS_NAMESPACE)).text
self.title = elem.find(nspath('Title',ns=WFS_NAMESPACE)).text
abstract = elem.find(nspath('Abstract',ns=WFS_NAMESPACE))
if abstract is not None:
self.abstract = abstract.text
else:
self.abstract = None
self.keywords = [f.text for f in elem.findall(nspath('Keywords',ns=WFS_NAMESPACE))]
# bboxes
self.boundingBoxWGS84 = None
b = elem.find(nspath('WGS84BoundingBox',ns=OWS_NAMESPACE))
if b is not None:
lc = b.find(nspath("LowerCorner",ns=OWS_NAMESPACE))
uc = b.find(nspath("UpperCorner",ns=OWS_NAMESPACE))
ll = [float(s) for s in lc.text.split()]
ur = [float(s) for s in uc.text.split()]
self.boundingBoxWGS84 = (ll[0],ll[1],ur[0],ur[1])
        # there is no such thing as a bounding box
# make copy of the WGS84BoundingBox
self.boundingBox = (self.boundingBoxWGS84[0],
self.boundingBoxWGS84[1],
self.boundingBoxWGS84[2],
self.boundingBoxWGS84[3],
Crs("epsg:4326"))
# crs options
self.crsOptions = [Crs(srs.text) for srs in elem.findall(nspath('OtherCRS',ns=WFS_NAMESPACE))]
defaultCrs = elem.findall(nspath('DefaultCRS',ns=WFS_NAMESPACE))
if len(defaultCrs) > 0:
self.crsOptions.insert(0,Crs(defaultCrs[0].text))
# verbs
self.verbOptions = [op.tag for op \
in parent.findall(nspath('Operations/*',ns=WFS_NAMESPACE))]
        self.verbOptions += [op.tag for op \
            in elem.findall(nspath('Operations/*',ns=WFS_NAMESPACE)) \
            if op.tag not in self.verbOptions]
#others not used but needed for iContentMetadata harmonisation
self.styles=None
self.timepositions=None
self.defaulttimeposition=None
# MetadataURLs
self.metadataUrls = []
for m in elem.findall('MetadataURL'):
metadataUrl = {
'type': testXMLValue(m.attrib['type'], attrib=True),
'format': m.find('Format').text.strip(),
'url': testXMLValue(m.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href'], attrib=True)
}
if metadataUrl['url'] is not None and parse_remote_metadata: # download URL
try:
                    content = urlopen(metadataUrl['url'], timeout=timeout)
doc = etree.parse(content)
try: # FGDC
metadataUrl['metadata'] = Metadata(doc)
except: # ISO
metadataUrl['metadata'] = MD_Metadata(doc)
except Exception, err:
metadataUrl['metadata'] = None
self.metadataUrls.append(metadataUrl)
class WFSCapabilitiesReader(object):
"""Read and parse capabilities document into a lxml.etree infoset
"""
def __init__(self, version='2.0.0'):
"""Initialize"""
self.version = version
self._infoset = None
def capabilities_url(self, service_url):
"""Return a capabilities url
"""
qs = []
if service_url.find('?') != -1:
qs = cgi.parse_qsl(service_url.split('?')[1])
params = [x[0] for x in qs]
if 'service' not in params:
qs.append(('service', 'WFS'))
if 'request' not in params:
qs.append(('request', 'GetCapabilities'))
if 'version' not in params:
qs.append(('version', self.version))
urlqs = urlencode(tuple(qs))
return service_url.split('?')[0] + '?' + urlqs
def read(self, url, timeout=30):
"""Get and parse a WFS capabilities document, returning an
instance of WFSCapabilitiesInfoset
Parameters
----------
url : string
The URL to the WFS capabilities document.
timeout : number
A timeout value (in seconds) for the request.
"""
request = self.capabilities_url(url)
u = urlopen(request, timeout=timeout)
return etree.fromstring(u.read())
def readString(self, st):
"""Parse a WFS capabilities document, returning an
instance of WFSCapabilitiesInfoset
string should be an XML capabilities document
"""
if not isinstance(st, str):
raise ValueError("String must be of type string, not %s" % type(st))
return etree.fromstring(st)
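# Minimal sketch: capabilities_url() only appends the missing service/request/version
# query parameters, so it can be exercised without any network access. The URL below is
# a placeholder.
def _example_capabilities_url():
    reader = WFSCapabilitiesReader(version='2.0.0')
    return reader.capabilities_url('http://example.com/wfs?service=WFS')
    # -> 'http://example.com/wfs?service=WFS&request=GetCapabilities&version=2.0.0'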
|
py | b4175b4d5974de2bccad390f088ecf01c0709e10 | #!/usr/bin/env python3
'''
read in kmer counts
assemble into contigs based on simple frequency cut off
'''
import sys
import argparse
from kmer_module import *
def walk_forwards(contig,kmer_length,kmer_counts):
'walk the contig forwards through all unique extensions'
while True:
#extract the last kmer-length-minus-one bases of the contig
starting_seq = contig[-(kmer_length-1):]
#search for a unique extension in the remaining kmers
next_base = extend_sequence(starting_seq,kmer_counts)
#walk ends when no unique extension is found
if next_base == None: return contig
#extend the contig using the unique match
contig += next_base
def extend_sequence(seq,kmer_counts):
'try to forward-extend seq by one base using the kmer dictionary'
next_base = None
unique = True
#find all possible extensions and remove them from the kmer dictionary
for base in 'ATCG':
kmer = seq + base
#ensure we only look up canonical kmers
rev = reverse_complement(kmer)
if rev < kmer: kmer = rev
if kmer in kmer_counts:
del kmer_counts[kmer]
if next_base == None:
#possible unique extension
next_base = base
else:
#extension is not unique
unique = False
if next_base != None and unique == True: return next_base
return None
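# Minimal sketch of assembling a single contig from a seed kmer, mirroring the main loop
# below. kmer_counts must map canonical kmers to counts and is consumed as the walk
# proceeds (matched kmers are deleted).
def _example_assemble_one_contig(kmer_counts):
    seed_kmer = next(iter(kmer_counts))
    kmer_length = len(seed_kmer)
    del kmer_counts[seed_kmer]
    #walk forwards, then flip and walk the other direction
    contig = walk_forwards(seed_kmer, kmer_length, kmer_counts)
    contig = reverse_complement(contig)
    return walk_forwards(contig, kmer_length, kmer_counts)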
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--min-count', type=int,required=True, help='Minimum kmer count filter')
parser.add_argument('--kmer-counts', type=str, required=True, help='File containung kmer counts')
args = parser.parse_args()
input_file = args.kmer_counts
min_count = args.min_count
kmer_counts = {}
with open(input_file) as f:
for line in f:
column = line.strip().split()
kmer = column[0]
count = int(column[1])
if count < min_count: continue
kmer_counts[kmer] = count
contig_list = []
while len(kmer_counts) > 0:
#pick a seed kmer from the remaining kmers
seed_kmer = next(iter(kmer_counts))
kmer_length = len(seed_kmer)
contig = seed_kmer
del kmer_counts[seed_kmer]
#walk the contig forward through the kmer graph
contig = walk_forwards(contig,kmer_length,kmer_counts)
        #reverse complement the contig to allow walking backwards
contig = reverse_complement(contig)
#walk the (now reversed) contig forward through the kmer graph again
contig = walk_forwards(contig,kmer_length,kmer_counts)
#append completed contig to final list
contig_list.append(contig)
#print contigs
for seq_counter,contig in enumerate(contig_list):
print("contig" + str(seq_counter) + ' ' + contig)
|
py | b4175b52b8f7d39d9bab7ff3d762c394e261427e | from .losses import * # noqa
from .postprocess import * # noqa |
py | b4175bbe5d5f07b52d5dc7ba6bb8bf28549be0f7 | """This module implements classifier-based density ratio estimation."""
# Carl is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import RegressorMixin
from sklearn.base import clone
from sklearn.utils import check_random_state
from .base import DensityRatioMixin
from ..learning import as_classifier
class ClassifierRatio(BaseEstimator, DensityRatioMixin):
"""Classifier-based density ratio estimator.
This class approximates a density ratio `r(x) = p0(x) / p1(x)` as
    `s(x) / (1 - s(x))`, where `s` is a classifier trained to distinguish
    samples `x ~ p0` from samples `x ~ p1`, and where `s(x)` is the
    classifier's approximation of the probability `p0(x) / (p0(x) + p1(x))`.
This class can be used in the likelihood-free setup, i.e. either
- with known data `X` drawn from `p0` and `p1`, or
- with generators `p0` and `p1` implementing sampling through `rvs`.
"""
def __init__(self, base_estimator, random_state=None):
"""Constructor.
Parameters
----------
* `base_estimator` [`BaseEstimator`]:
A scikit-learn classifier or regressor.
* `random_state` [integer or RandomState object]:
The random seed.
"""
self.base_estimator = base_estimator
self.random_state = random_state
def fit(self, X=None, y=None, sample_weight=None,
numerator=None, denominator=None, n_samples=None, **kwargs):
"""Fit the density ratio estimator.
The density ratio estimator `r(x) = p0(x) / p1(x)` can be fit either
- from data, using `fit(X, y)` or
- from distributions, using
`fit(numerator=p0, denominator=p1, n_samples=N)`
Parameters
----------
* `X` [array-like, shape=(n_samples, n_features), optional]:
Training data.
* `y` [array-like, shape=(n_samples,), optional]:
Labels. Samples labeled with `y=0` correspond to data from the
numerator distribution, while samples labeled with `y=1` correspond
data from the denominator distribution.
* `sample_weight` [array-like, shape=(n_samples,), optional]:
The sample weights.
* `numerator` [`DistributionMixin`, optional]:
The numerator distribution `p0`, if `X` and `y` are not provided.
This object is required to implement sampling through the `rvs`
method.
* `denominator` [`DistributionMixin`, optional]:
The denominator distribution `p1`, if `X` and `y` are not provided.
This object is required to implement sampling through the `rvs`
method.
* `n_samples` [integer, optional]
The total number of samples to draw from the numerator and
denominator distributions, if `X` and `y` are not provided.
Returns
-------
* `self` [object]:
`self`.
"""
# Check for identity
self.identity_ = (numerator is not None) and (numerator is denominator)
if self.identity_:
return self
# Build training data
rng = check_random_state(self.random_state)
if (numerator is not None and denominator is not None and
n_samples is not None):
X = np.vstack(
(numerator.rvs(n_samples // 2,
random_state=rng, **kwargs),
denominator.rvs(n_samples - (n_samples // 2),
random_state=rng, **kwargs)))
y = np.zeros(n_samples, dtype=np.int)
y[n_samples // 2:] = 1
sample_weight = None
elif X is not None and y is not None:
if sample_weight is None:
n_num = (y == 0).sum()
n_den = (y == 1).sum()
if n_num != n_den:
sample_weight = np.ones(len(y), dtype=np.float)
sample_weight[y == 1] *= 1.0 * n_num / n_den
else:
sample_weight = None
else:
raise ValueError
# Fit base estimator
clf = clone(self.base_estimator)
if isinstance(clf, RegressorMixin):
clf = as_classifier(clf)
if sample_weight is None:
self.classifier_ = clf.fit(X, y)
else:
self.classifier_ = clf.fit(X, y, sample_weight=sample_weight)
return self
def predict(self, X, log=False, **kwargs):
"""Predict the density ratio `r(x_i)` for all `x_i` in `X`.
Parameters
----------
* `X` [array-like, shape=(n_samples, n_features)]:
The samples.
* `log` [boolean, default=False]:
If true, return the log-ratio `log r(x) = log(p0(x)) - log(p1(x))`.
Returns
-------
* `r` [array, shape=(n_samples,)]:
The predicted ratio `r(X)`.
"""
if self.identity_:
if log:
return np.zeros(len(X))
else:
return np.ones(len(X))
else:
p = self.classifier_.predict_proba(X)
if log:
return np.log(p[:, 0]) - np.log(p[:, 1])
else:
return np.divide(p[:, 0], p[:, 1])
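# Minimal usage sketch: fitting the ratio from labelled data. The Gaussian toy samples
# and the choice of LogisticRegression are assumptions for illustration only.
def _example_classifier_ratio():
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(0)
    X = np.vstack((rng.normal(0.0, 1.0, size=(500, 1)),    # x ~ p0, labelled 0
                   rng.normal(1.0, 1.0, size=(500, 1))))   # x ~ p1, labelled 1
    y = np.concatenate((np.zeros(500, dtype=int), np.ones(500, dtype=int)))
    ratio = ClassifierRatio(base_estimator=LogisticRegression())
    ratio.fit(X=X, y=y)
    return ratio.predict(np.array([[0.0]]))  # approximates p0(0) / p1(0)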
|
py | b4175cf448c7dedc3a8db8921831ad77ec14c691 | #Copyright 2008 Erik Tollerud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package contains a variety of utilities and algorithms for processing
and visualizing astronomical data.
**Modules**
* constants: physical constants and conversions, Cosmology objects for choosing
related sets of constants
* coords: Astronomical coordinate systems, distance measurements,
and related objects
* obstools: Tools and corrections for observations (mostly optical)
* models: Fitting functions/models and related calculations
* objcat: Object Catalog objects and functions
* phot: Photometry objects and related functions
* spec: Spectrum objects and related functions
* ccd: Functions and tools for processing CCD images.
"""
#If True, the add_docs and similar functions will only replace with empty
#strings - this is used by sphinx to build the documentation.
_ignore_add_docs = False
#Large-scale TODO list:
#Add Obsplots
#ccd tweaks/todos
#instrument registry along with site
#objcat testing w/ M31 catalog
#ZODB/Web objcat integration
#pipeline gui and mid-saving
#Phot reworking/clean up docs |
py | b4175f482a986ba32caaececb34ce662811f3c2b | import os
from urllib.parse import urljoin
from django.conf import settings
from django.core.files.storage import FileSystemStorage
class CustomStorage(FileSystemStorage):
"""Custom storage for django_ckeditor_5 images."""
location = os.path.join(settings.MEDIA_ROOT, "django_ckeditor_5")
base_url = urljoin(settings.MEDIA_URL, "django_ckeditor_5/")
|
py | b4175fe081dbee80c37ed2cb3e3e2d613c1d59e6 | from .user import User
from .users import Users
from .login import Login
from .signup import SignUp
from .logout import (LogoutAccess, LogoutRefresh)
from .token_refresh import TokenRefresh
from .policy import Policy
from .policies import Policies
|
py | b417604e3d60492b76f4b3d2e7d1a7323e015832 | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
# from rest_framework import status
class ModelTests(TestCase):
def test_user_with_email_created(self):
"""Test that a user with an email is created"""
email = '[email protected]'
password = 'jksj12af22'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test that new user email is normalized"""
email = '[email protected]'
user = get_user_model().objects.create_user(
email, 'asd321asd'
)
self.assertEqual(email.lower(), user.email)
def test_new_user_no_email(self):
"""Test creating user with no email raises error"""
email = None
with self.assertRaises(ValueError):
get_user_model().objects.create_user(
email, 'asdjhfksjdh'
)
    def test_new_user_invalid_email(self):
"""Test creating new user with an invalid email raises error"""
email1 = 'oijo@'
email2 = 'koko@loko'
email3 = '--'
with self.assertRaises(ValidationError):
get_user_model().objects.create_user(email1, 'ksdfjklsldk')
with self.assertRaises(ValidationError):
get_user_model().objects.create_user(email2, 'lsdkjfou5i')
with self.assertRaises(ValidationError):
get_user_model().objects.create_user(email3, 'laksflks11d')
def test_new_superuser(self):
"""Test creating a new superuser"""
email = '[email protected]'
password = 'jsdhfioiqw4'
user = get_user_model().objects.create_superuser(
email=email,
password=password,
name='Bobo'
)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)
|
py | b41760c8ba169589013cccae240072e597559df4 | class PathReinforcementType(ElementType,IDisposable):
""" An object that specifies the type of a Structural Path Reinforcement element in Autodesk Revit. """
@staticmethod
def CreateDefaultPathReinforcementType(ADoc):
"""
CreateDefaultPathReinforcementType(ADoc: Document) -> ElementId
Creates a new PathReinforcementType object with a default name.
ADoc: The document.
Returns: The newly created type id.
"""
pass
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
|
py | b417610ad55565611e17c97183cf2b2ba321b4c6 | import cv2, argparse
## build the argument parser
ap = argparse.ArgumentParser()
ap.add_argument("-i","--image", required = True, help="C:\\repos\\find_musk\\foto-oficial.jpg")
ap.add_argument("-t","--template", required=True, help="C:\\repos\\find_musk\\foto-gabriel.jpg")
args = vars(ap.parse_args())
print('vida')
## load the input images
print("[INFO] loading images..", args)
image = cv2.imread(args["image"])
template = cv2.imread(args["template"])
templateResiz = cv2.resize(template, (58,58))
cv2.imshow("Image", image)
cv2.imshow("Template", template)
cv2.waitKey()
## perform template matching
print("[INFO] template matching..")
result = cv2.matchTemplate(image,templateResiz, cv2.TM_CCOEFF_NORMED)
(minVal,maxVal,minLoc,maxLoc) = cv2.minMaxLoc(result)
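# With cv2.TM_CCOEFF_NORMED the best match is the MAXIMUM of the score map, so maxLoc
# (not minLoc) gives the top-left corner of the matched region used below.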
## determine where the template was found
(startX,startY) = maxLoc
endX = startX + templateResiz.shape[1]
endY = startY + templateResiz.shape[0]
## draw a rectangle around the matched region
cv2.rectangle(image, (startX,startY),(endX,endY), (255,0,0),10)
## show the full image with the match highlighted
cv2.namedWindow("output", cv2.WINDOW_NORMAL)
imgResiz = cv2.resize(image, (960,600))
cv2.imshow("Output", imgResiz)
cv2.waitKey(0) |
py | b41761e7fa771535fb776f1988347603366f08e9 | """Module to find insects from data."""
from typing import Optional, Tuple
import numpy as np
import numpy.ma as ma
from scipy.ndimage.filters import gaussian_filter
from cloudnetpy import utils
from cloudnetpy.categorize import droplet
from cloudnetpy.categorize.containers import ClassData
def find_insects(obs: ClassData,
melting_layer: np.ndarray,
liquid_layers: np.ndarray,
prob_lim: Optional[float] = 0.8) -> Tuple[np.ndarray, np.ndarray]:
"""Returns insect probability and boolean array of insect presence.
Insects are classified by estimating heuristic probability
of insects from various individual radar parameters and combining
these probabilities. Insects typically yield small echo and spectral width
but high linear depolarization ratio (ldr), and they are present in warm
temperatures.
The combination of echo, ldr and temperature is generally the best proxy
for insects. If ldr is not available, we use other radar parameters.
Insects are finally screened from liquid layers and melting layer - and
above melting layer.
Args:
obs: The :class:`ClassData` instance.
melting_layer: 2D array denoting melting layer.
liquid_layers: 2D array denoting liquid layers.
prob_lim: Probability higher than this will lead to positive detection. Default is 0.8.
Returns:
tuple: 2-element tuple containing
- 2-D probability of pixel containing insects.
- 2-D boolean flag of insects presence.
Notes:
This insect detection method is novel and needs to be validated.
"""
probabilities = _insect_probability(obs)
insect_prob = _screen_insects(*probabilities, melting_layer, liquid_layers, obs)
is_insects = insect_prob > prob_lim
return is_insects, ma.masked_where(insect_prob == 0, insect_prob)
def _insect_probability(obs: ClassData) -> Tuple[np.ndarray, np.ndarray]:
prob = _get_probabilities(obs)
prob_from_ldr = _calc_prob_from_ldr(prob)
prob_from_others = _calc_prob_from_all(prob)
prob_from_others = _adjust_for_radar(obs, prob, prob_from_others)
prob_combined = _fill_missing_pixels(prob_from_ldr, prob_from_others)
return prob_combined, prob_from_others
def _get_probabilities(obs: ClassData) -> dict:
smooth_v = _get_smoothed_v(obs)
lwp_interp = droplet.interpolate_lwp(obs)
fun = utils.array_to_probability
return {
'width': fun(obs.width, 1, 0.3, True) if hasattr(obs, 'width') else 1,
'z_strong': fun(obs.z, 0, 8, True),
'z_weak': fun(obs.z, -20, 8, True),
'ldr': fun(obs.ldr, -25, 5) if hasattr(obs, 'ldr') else None,
'temp_loose': fun(obs.tw, 268, 2),
'temp_strict': fun(obs.tw, 274, 1),
'v': fun(smooth_v, -3.5, 2),
'lwp': utils.transpose(fun(lwp_interp, 150, 50, invert=True)),
'v_sigma': fun(obs.v_sigma, 0.01, 0.1)}
def _get_smoothed_v(obs: ClassData,
sigma: Optional[Tuple[float, float]] = (5, 5)) -> np.ndarray:
smoothed_v = gaussian_filter(obs.v, sigma)
smoothed_v = ma.masked_where(obs.v.mask, smoothed_v)
return smoothed_v
def _calc_prob_from_ldr(prob: dict) -> np.ndarray:
"""This is the most reliable proxy for insects."""
if prob['ldr'] is None:
return np.zeros(prob['z_strong'].shape)
return prob['ldr'] * prob['temp_loose']
def _calc_prob_from_all(prob: dict) -> np.ndarray:
"""This can be tried when LDR is not available. To detect insects without LDR unambiguously is
difficult and might result in many false positives and/or false negatives."""
return prob['z_weak'] * prob['temp_strict'] * prob['width'] * prob['v']
def _adjust_for_radar(obs: ClassData,
prob: dict,
prob_from_others: np.ndarray) -> np.ndarray:
"""Adds radar-specific weighting to insect probabilities."""
if 'mira' in obs.radar_type.lower():
prob_from_others *= prob['lwp']
return prob_from_others
def _fill_missing_pixels(prob_from_ldr: np.ndarray,
prob_from_others: np.ndarray) -> np.ndarray:
prob_combined = np.copy(prob_from_ldr)
no_ldr = np.where(prob_from_ldr == 0)
prob_combined[no_ldr] = prob_from_others[no_ldr]
return prob_combined
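# Minimal sketch of _fill_missing_pixels: the LDR-based probability is kept wherever it
# is non-zero and the fallback estimate fills the remaining pixels. Values are illustrative.
def _example_fill_missing_pixels():
    prob_ldr = np.array([[0.9, 0.0], [0.0, 0.4]])
    prob_other = np.array([[0.2, 0.7], [0.1, 0.3]])
    return _fill_missing_pixels(prob_ldr, prob_other)
    # -> [[0.9, 0.7], [0.1, 0.4]]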
def _screen_insects(insect_prob, insect_prob_no_ldr, melting_layer,
liquid_layers, obs):
def _screen_liquid_layers():
prob[liquid_layers == 1] = 0
def _screen_above_melting():
above_melting = utils.ffill(melting_layer)
prob[above_melting == 1] = 0
def _screen_above_liquid():
above_liquid = utils.ffill(liquid_layers)
prob[(above_liquid == 1) & (insect_prob_no_ldr > 0)] = 0
def _screen_rainy_profiles():
prob[obs.is_rain == 1, :] = 0
prob = np.copy(insect_prob)
_screen_liquid_layers()
_screen_above_melting()
_screen_above_liquid()
_screen_rainy_profiles()
return prob
|
py | b41761f2865ca1dc28cf7b1eb03e8add108498a5 | # -*- coding: utf-8 -*-
{
'!langcode!': 'ru',
'!langname!': 'Русский',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Изменить" - необязательное выражение вида "field1=\'новое значение\'". Результаты операции JOIN нельзя изменить или удалить.',
'%d days ago': '%d %%{день} тому',
'%d hours ago': '%d %%{час} тому',
'%d minutes ago': '%d %%{минуту} тому',
'%d months ago': '%d %%{месяц} тому',
'%d seconds ago': '%d %%{секунду} тому',
'%d weeks ago': '%d %%{неделю} тому',
'%d years ago': '%d %%{год} тому',
'%d.%m.%Y %H:%M': '%d.%m.%Y %H:%M',
'%s %%{row} deleted': '%%{!удалена[0]} %s %%{строка[0]}',
'%s %%{row} updated': '%%{!изменена[0]} %s %%{строка[0]}',
'%s selected': '%%{!выбрана[0]} %s %%{запись[0]}',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(**%.0d MB**)': '(**%.0d MB**)',
'**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'1 day ago': '1 день тому',
'1 hour ago': '1 час тому',
'1 minute ago': '1 минуту тому',
'1 month ago': '1 месяц тому',
'1 second ago': '1 секунду тому',
'1 week ago': '1 неделю тому',
'1 year ago': '1 год тому',
'?': '?',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'@markmin\x01Number of entries: **%s**': 'Number of entries: **%s**',
'``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'About': 'About',
'Access Control': 'Access Control',
'admin': 'admin',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': 'административный интерфейс',
'Ajax Recipes': 'Ajax Recipes',
'An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'Are you sure you want to delete this object?': 'Вы уверены, что хотите удалить этот объект?',
'Available Databases and Tables': 'Базы данных и таблицы',
'Buy this book': 'Buy this book',
"Buy web2py's book": "Buy web2py's book",
'cache': 'cache',
'Cache': 'Cache',
'Cache Cleared': 'Cache Cleared',
'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Пустое значение недопустимо',
'Change Password': 'Смените пароль',
'Check to delete': 'Удалить',
'Check to delete:': 'Удалить:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'Client IP',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Config.ini': 'Config.ini',
'Confirm Password': 'Confirm Password',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Текущий запрос',
'Current response': 'Текущий ответ',
'Current session': 'Текущая сессия',
'customize me!': 'настройте внешний вид!',
'data uploaded': 'данные загружены',
'Database': 'Database',
'Database %s select': 'выбор базы данных %s',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'db': 'БД',
'DB Model': 'DB Model',
'Delete:': 'Удалить:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Описание',
'design': 'дизайн',
'Design': 'Design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'готово!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit current record': 'Редактировать текущую запись',
'Edit Profile': 'Редактировать профиль',
'Email and SMS': 'Email and SMS',
'Enter a number between %(min)g and %(max)g': 'Enter a number between %(min)g and %(max)g',
'Enter an integer between %(min)g and %(max)g': 'Enter an integer between %(min)g and %(max)g',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'Errors': 'Errors',
'export as csv file': 'экспорт в csv-файл',
'FAQ': 'FAQ',
'First name': 'Имя',
'Forgot username?': 'Forgot username?',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Graph Model': 'Graph Model',
'Group ID': 'Group ID',
'Groups': 'Groups',
'Hello World': 'Заработало!',
'Helping web2py': 'Helping web2py',
'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Импорт/экспорт',
'insert new': 'добавить',
'insert new %s': 'добавить %s',
'Internal State': 'Внутренне состояние',
'Introduction': 'Introduction',
'Invalid email': 'Неверный email',
'Invalid login': 'Неверный логин',
'Invalid password': 'Неверный пароль',
'Invalid Query': 'Неверный запрос',
'invalid request': 'неверный запрос',
'Key': 'Key',
'Last name': 'Фамилия',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'Log In': 'Log In',
'Log Out': 'Log Out',
'Logged in': 'Вход выполнен',
'Logged out': 'Выход выполнен',
'login': 'вход',
'Login': 'Вход',
'logout': 'выход',
'Logout': 'Выход',
'Lost Password': 'Забыли пароль?',
'Lost password?': 'Lost password?',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Manage Cache',
'Memberships': 'Memberships',
'Menu Model': 'Menu Model',
'My Sites': 'My Sites',
'Name': 'Name',
'New password': 'Новый пароль',
'New Record': 'Новая запись',
'new record inserted': 'новая запись добавлена',
'next %s rows': 'next %s rows',
'next 100 rows': 'следующие 100 строк',
'No databases in this application': 'В приложении нет баз данных',
'now': 'сейчас',
'Number of entries: **%s**': 'Number of entries: **%s**',
'Object or table name': 'Object or table name',
'Old password': 'Старый пароль',
'Online book': 'Online book',
'Online examples': 'примеры он-лайн',
'or import from csv file': 'или импорт из csv-файла',
'Origin': 'Происхождение',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Пароль',
'password': 'пароль',
"Password fields don't match": 'Пароли не совпадают',
'Permission': 'Permission',
'Permissions': 'Permissions',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous %s rows': 'previous %s rows',
'previous 100 rows': 'предыдущие 100 строк',
'Profile': 'Profile',
'profile': 'профиль',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'Python',
'Query:': 'Запрос:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Recipes': 'Recipes',
'Record': 'Record',
'record does not exist': 'запись не найдена',
'Record ID': 'ID записи',
'Record id': 'id записи',
'Register': 'Зарегистрироваться',
'Registration identifier': 'Registration identifier',
'Registration key': 'Ключ регистрации',
'Remember me (for 30 days)': 'Запомнить меня (на 30 дней)',
'Reset Password key': 'Сбросить ключ пароля',
'Role': 'Роль',
'Roles': 'Roles',
'Rows in Table': 'Строк в таблице',
'Rows selected': 'Выделено строк',
'Save model as...': 'Save model as...',
'Semantic': 'Semantic',
'Services': 'Services',
'Sign Up': 'Sign Up',
'Size of cache:': 'Size of cache:',
'state': 'состояние',
'Statistics': 'Statistics',
'Stylesheet': 'Stylesheet',
'submit': 'submit',
'Submit': 'Отправить',
'Support': 'Support',
'Sure you want to delete this object?': 'Подтвердите удаление объекта',
'Table': 'таблица',
'Table name': 'Имя таблицы',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Запрос" - это условие вида "db.table1.field1==\'значение\'". Выражение вида "db.table1.field1==db.table2.field2" формирует SQL JOIN.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Отметка времени',
'Too short': 'Too short',
'Traceback': 'Traceback',
'Twitter': 'Twitter',
'unable to parse csv file': 'нечитаемый csv-файл',
'Update:': 'Изменить:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Для построения сложных запросов используйте операторы "И": (...)&(...), "ИЛИ": (...)|(...), "НЕ": ~(...).',
'User': 'User',
'User %(id)s Logged-in': 'Пользователь %(id)s вошёл',
'User %(id)s Logged-out': 'Пользователь %(id)s вышел',
'User %(id)s Password changed': 'Пользователь %(id)s сменил пароль',
'User %(id)s Profile updated': 'Пользователь %(id)s обновил профиль',
'User %(id)s Registered': 'Пользователь %(id)s зарегистрировался',
'User ID': 'ID пользователя',
'Username': 'Username',
'Users': 'Users',
'Verify Password': 'Повторите пароль',
'Videos': 'Videos',
'View': 'View',
'Welcome': 'Welcome',
'Welcome to web2py': 'Добро пожаловать в web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'Working...': 'Working...',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
|
py | b41762cb6aaa5b53687976da7cfa08f0267c7613 | #!/usr/bin/env python
"""
<Program Name>
test_in_toto_run.py
<Author>
Lukas Puehringer <[email protected]>
<Started>
Nov 28, 2016
<Copyright>
See LICENSE for licensing information.
<Purpose>
Test in_toto_run command line tool.
"""
import os
import sys
import unittest
import glob
import tempfile
# Use external backport 'mock' on versions under 3.3
if sys.version_info >= (3, 3):
import unittest.mock as mock # pylint: disable=no-name-in-module,import-error
else:
import mock # pylint: disable=import-error
from in_toto.models.metadata import Metablock
from in_toto.in_toto_run import main as in_toto_run_main
from in_toto.models.link import FILENAME_FORMAT
from tests.common import CliTestCase, TmpDirMixin, GPGKeysMixin, GenKeysMixin
import securesystemslib.interface # pylint: disable=unused-import
class TestInTotoRunTool(CliTestCase, TmpDirMixin, GPGKeysMixin, GenKeysMixin):
"""Test in_toto_run's main() - requires sys.argv patching; and
in_toto_run calls runlib and error logs/exits on Exception. """
cli_main_func = staticmethod(in_toto_run_main)
@classmethod
def setUpClass(self):
"""Create and change into temporary directory,
generate key pair, dummy artifact and base arguments. """
self.set_up_test_dir()
self.set_up_gpg_keys()
self.set_up_keys()
self.test_step = "test_step"
self.test_link_rsa = FILENAME_FORMAT.format(
step_name=self.test_step, keyid=self.rsa_key_id)
self.test_link_ed25519 = FILENAME_FORMAT.format(
step_name=self.test_step, keyid=self.ed25519_key_id)
self.test_link_rsa_enc = FILENAME_FORMAT.format(
step_name=self.test_step, keyid=self.rsa_key_enc_id)
self.test_link_ed25519_enc = FILENAME_FORMAT.format(
step_name=self.test_step, keyid=self.ed25519_key_enc_id)
self.test_artifact = "test_artifact"
open(self.test_artifact, "w").close()
@classmethod
def tearDownClass(self):
self.tear_down_test_dir()
def tearDown(self):
for link in glob.glob("*.link"):
os.remove(link)
def test_main_required_args(self):
"""Test CLI command with required arguments. """
args = ["--step-name", self.test_step, "--key", self.rsa_key_path, "--",
"python", "--version"]
self.assert_cli_sys_exit(args, 0)
self.assertTrue(os.path.exists(self.test_link_rsa))
def test_main_optional_args(self):
"""Test CLI command with optional arguments. """
named_args = ["--step-name", self.test_step, "--key",
self.rsa_key_path, "--materials", self.test_artifact, "--products",
self.test_artifact, "--record-streams"]
positional_args = ["--", "python", "--version"]
# Test and assert recorded artifacts
args1 = named_args + positional_args
self.assert_cli_sys_exit(args1, 0)
link_metadata = Metablock.load(self.test_link_rsa)
self.assertTrue(self.test_artifact in
list(link_metadata.signed.materials.keys()))
self.assertTrue(self.test_artifact in
list(link_metadata.signed.products.keys()))
# Test and assert excluded artifacts
args2 = named_args + ["--exclude", "*test*"] + positional_args
self.assert_cli_sys_exit(args2, 0)
link_metadata = Metablock.load(self.test_link_rsa)
self.assertFalse(link_metadata.signed.materials)
self.assertFalse(link_metadata.signed.products)
# Test with base path
args3 = named_args + ["--base-path", self.test_dir] + positional_args
self.assert_cli_sys_exit(args3, 0)
link_metadata = Metablock.load(self.test_link_rsa)
self.assertListEqual(list(link_metadata.signed.materials.keys()),
[self.test_artifact])
self.assertListEqual(list(link_metadata.signed.products.keys()),
[self.test_artifact])
# Test with bogus base path
args4 = named_args + ["--base-path", "bogus/path"] + positional_args
self.assert_cli_sys_exit(args4, 1)
# Test with lstrip path
strip_prefix = self.test_artifact[:-1]
args5 = named_args + ["--lstrip-paths", strip_prefix] + positional_args
self.assert_cli_sys_exit(args5, 0)
link_metadata = Metablock.load(self.test_link_rsa)
self.assertListEqual(list(link_metadata.signed.materials.keys()),
[self.test_artifact[len(strip_prefix):]])
self.assertListEqual(list(link_metadata.signed.products.keys()),
[self.test_artifact[len(strip_prefix):]])
def test_main_with_metadata_directory(self):
"""Test CLI command with metadata directory. """
tmp_dir = os.path.realpath(tempfile.mkdtemp(dir=os.getcwd()))
args = ["--step-name", self.test_step, "--key", self.rsa_key_path,
"--metadata-directory", tmp_dir, "--", "python", "--version"]
self.assert_cli_sys_exit(args, 0)
linkpath = os.path.join(tmp_dir, self.test_link_rsa)
self.assertTrue(os.path.exists(linkpath))
def test_main_with_unencrypted_ed25519_key(self):
"""Test CLI command with ed25519 key. """
args = ["-n", self.test_step,
"--key", self.ed25519_key_path,
"--key-type", "ed25519", "--", "ls"]
self.assert_cli_sys_exit(args, 0)
self.assertTrue(os.path.exists(self.test_link_ed25519))
def test_main_with_encrypted_keys(self):
"""Test CLI command with encrypted ed25519 key. """
for key_type, key_path, link_path in [
("rsa", self.rsa_key_enc_path, self.test_link_rsa_enc),
("ed25519", self.ed25519_key_enc_path, self.test_link_ed25519_enc)]:
# Define common arguments passed to in-toto-run below
args = [
"-n", self.test_step,
"--key", key_path,
"--key-type", key_type]
cmd = ["--", "python", "--version"]
# Make sure the link file to be generated doesn't already exist
self.assertFalse(os.path.exists(link_path))
# Test 1: Call in-toto-run entering signing key password on prompt
with mock.patch('securesystemslib.interface.get_password',
return_value=self.key_pw):
self.assert_cli_sys_exit(args + ["--password"] + cmd, 0)
self.assertTrue(os.path.exists(link_path))
os.remove(link_path)
# Test 2: Call in-toto-run passing signing key password
self.assert_cli_sys_exit(args + ["--password", self.key_pw] + cmd, 0)
self.assertTrue(os.path.exists(link_path))
os.remove(link_path)
def test_main_with_specified_gpg_key(self):
"""Test CLI command with specified gpg key. """
args = ["-n", self.test_step,
"--gpg", self.gpg_key_85DA58,
"--gpg-home", self.gnupg_home, "--", "python", "--version"]
self.assert_cli_sys_exit(args, 0)
link_filename = FILENAME_FORMAT.format(step_name=self.test_step,
keyid=self.gpg_key_85DA58)
self.assertTrue(os.path.exists(link_filename))
def test_main_with_default_gpg_key(self):
"""Test CLI command with default gpg key. """
args = ["-n", self.test_step,
"--gpg", "--gpg-home", self.gnupg_home, "--", "python", "--version"]
self.assert_cli_sys_exit(args, 0)
link_filename = FILENAME_FORMAT.format(step_name=self.test_step,
keyid=self.gpg_key_D924E9)
self.assertTrue(os.path.exists(link_filename))
def test_main_no_command_arg(self):
"""Test CLI command with --no-command argument. """
args = ["in_toto_run.py", "--step-name", self.test_step, "--key",
self.rsa_key_path, "--no-command"]
self.assert_cli_sys_exit(args, 0)
self.assertTrue(os.path.exists(self.test_link_rsa))
def test_main_wrong_args(self):
"""Test CLI command with missing arguments. """
wrong_args_list = [
[],
["--step-name", "some"],
["--key", self.rsa_key_path],
["--", "echo", "blub"],
["--step-name", "test-step", "--key", self.rsa_key_path],
["--step-name", "--", "echo", "blub"],
["--key", self.rsa_key_path, "--", "echo", "blub"],
["--step-name", "test-step", "--key", self.rsa_key_path, "--"],
["--step-name", "test-step",
"--key", self.rsa_key_path, "--gpg", "--", "echo", "blub"]
]
for wrong_args in wrong_args_list:
self.assert_cli_sys_exit(wrong_args, 2)
self.assertFalse(os.path.exists(self.test_link_rsa))
def test_main_wrong_key_exits(self):
"""Test CLI command with wrong key argument, exits and logs error """
args = ["--step-name", self.test_step, "--key",
"non-existing-key", "--", "echo", "test"]
self.assert_cli_sys_exit(args, 1)
self.assertFalse(os.path.exists(self.test_link_rsa))
def test_main_encrypted_key_but_no_pw(self):
"""Test CLI command exits 1 with encrypted key but no pw. """
args = ["-n", self.test_step, "--key", self.rsa_key_enc_path, "-x"]
self.assert_cli_sys_exit(args, 1)
self.assertFalse(os.path.exists(self.test_link_rsa_enc))
args = ["-n", self.test_step, "--key", self.ed25519_key_enc_path, "-x"]
self.assert_cli_sys_exit(args, 1)
self.assertFalse(os.path.exists(self.test_link_ed25519_enc))
if __name__ == "__main__":
unittest.main()
|
py | b41763acd858027431428776cf743cab4cd866cb | from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
"""Serializes a name field for testing our APIView"""
name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
"""Serializes a user profile object"""
class Meta:
model = models.UserProfile
fields = ('id', 'email', 'name', 'password')
extra_kwargs = {
'password': {
'write_only': True,
'style': {'input_type': 'password'}
}
}
def create(self, validated_data):
"""Create and return a new user"""
user = models.UserProfile.objects.create_user(
email=validated_data['email'],
name=validated_data['name'],
password=validated_data['password']
)
return user
class ProfileFeedItemSerializer(serializers.ModelSerializer):
"""Serializes profile feed items"""
class Meta:
model = models.ProfileFeedItem
fields = ('id', 'user_profile', 'status_text', 'created_on')
extra_kwargs = {'user_profile': {'read_only': True}}
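# Hedged companion sketch, not part of the original module: if profile editing is
# exposed through this API, the serializer typically also overrides update() so a
# changed password is stored hashed rather than as plain text. It is written as a
# standalone mixin (the name is an assumption) to leave the classes above untouched;
# mix it into UserProfileSerializer before ModelSerializer if the project needs it.
class PasswordHashingUpdateMixin(object):
    """Pop a plaintext password from validated_data and store it hashed."""
    def update(self, instance, validated_data):
        password = validated_data.pop('password', None)
        if password:
            instance.set_password(password)
        return super(PasswordHashingUpdateMixin, self).update(instance, validated_data)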
|
py | b4176412a95a1784d7bd5df67cc9a79e4dc93092 | # Generated by Django 4.0.3 on 2022-03-26 03:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.author')),
],
),
]
|
py | b4176435029b253383206d045218417b7cf829af | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lambdamud.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | b417643518d435a69ed2f1dfb12d632a81c25110 | # Generated by Django 2.0.8 on 2018-10-05 13:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app_questions', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='question',
name='poll',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='app_polls.Poll'),
),
]
|
py | b417645155eb721a5453126ef90e0f0540a477b1 | import os.path
__dir__ = os.path.split(os.path.abspath(os.path.realpath(__file__)))[0]
data_location = os.path.join(__dir__, "verilog")
src = "https://github.com/RapidSilicon/uart_axi"
version_str = "1.1.0.post190"
version_tuple = (1, 1, 0, 190)
try:
from packaging.version import Version as V
pversion = V("1.1.0.post190")
except ImportError:
pass
data_version_str = "1.1.0.post40"
data_version_tuple = (1, 1, 0, 40)
try:
from packaging.version import Version as V
pdata_version = V("1.1.0.post40")
except ImportError:
pass
data_git_hash = "39789e40bc57f628507909e6ff493708980b25f0"
data_git_describe = "1.1.0-40-39789e4"
data_git_msg = """\
commit 39789e40bc57f628507909e6ff493708980b25f0
Author: asadaleem-rs <[email protected]>
Date: Fri Jan 7 02:39:27 2022 +0100
Makefile to build python package added
"""
tool_version_str = "0.0.post144"
tool_version_tuple = (0, 0, 144)
try:
from packaging.version import Version as V
ptool_version = V("0.0.post144")
except ImportError:
pass
def data_file(f):
"""Get absolute path for file inside pythondata_ip_uart16550."""
fn = os.path.join(data_location, f)
fn = os.path.abspath(fn)
if not os.path.exists(fn):
raise IOError("File {f} doesn't exist in pythondata_ip_uart16550".format(f=f))
return fn
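# Hedged usage sketch, not part of the generated package: data_file() simply joins a
# relative path onto data_location and checks that it exists. The filename
# "uart_top.v" below is an assumption for illustration and may not match the files
# actually bundled under the verilog/ directory.
def _example_lookup(relative_name="uart_top.v"):
    """Return the absolute path of a bundled source file, or None if it is missing."""
    try:
        return data_file(relative_name)
    except IOError:
        return None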
|
py | b4176584dda2025fc3284894978afd67f5f831c9 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import cv2
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
import time
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Webcam Demo")
parser.add_argument(
"--config-file",
default="../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.7,
help="Minimum score for the prediction to be shown",
)
parser.add_argument(
"--min-image-size",
type=int,
default=224,
help="Smallest size of the image to feed to the model. "
"Model was trained with 800, which gives best results",
)
parser.add_argument(
"--show-mask-heatmaps",
dest="show_mask_heatmaps",
help="Show a heatmap probability for the top masks-per-dim masks",
action="store_true",
)
parser.add_argument(
"--masks-per-dim",
type=int,
default=2,
help="Number of heatmaps per dimension to show",
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
# load config from file and command-line arguments
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=args.confidence_threshold,
show_mask_heatmaps=args.show_mask_heatmaps,
masks_per_dim=args.masks_per_dim,
min_image_size=args.min_image_size,
)
# cam = cv2.VideoCapture(0)
# while True:
# start_time = time.time()
# ret_val, img = cam.read()
# composite = coco_demo.run_on_opencv_image(img)
# print("Time: {:.2f} s / img".format(time.time() - start_time))
# cv2.imshow("COCO detections", composite)
# if cv2.waitKey(1) == 27:
# break # esc to quit
# cv2.destroyAllWindows()
start_time = time.time()
import os
root = "/workspace/mnt/group/ocr/qiutairu/code/maskrcnn-benchmark/demo"
img = cv2.imread(os.path.join(root, "test.jpg"))
composite = coco_demo.run_on_opencv_image(img)
print("Time: {:.2f} s / img".format(time.time() - start_time))
cv2.imwrite(os.path.join(root, "test_result.jpg"), composite)
if __name__ == "__main__":
main()
|
py | b41765858221c94abdfdfb75bd68b4a0afc4b22d | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# List of pretrained models: https://huggingface.co/transformers/pretrained_models.html
# Pretrained model name to a tuple of input names, opset_version, use_external_data_format and optimization model type
MODELS = {
# BERT
"bert-base-uncased": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-large-uncased": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-base-cased": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-large-cased": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-base-multilingual-uncased": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-base-multilingual-cased": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-base-chinese": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-base-german-cased": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-large-uncased-whole-word-masking": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-large-cased-whole-word-masking": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-large-uncased-whole-word-masking-finetuned-squad": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-large-cased-whole-word-masking-finetuned-squad": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-base-cased-finetuned-mrpc": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-base-german-dbmdz-cased": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
"bert-base-german-dbmdz-uncased": (["input_ids", "attention_mask", "token_type_ids"], 11, False, "bert"),
# todo: more models to add
# GPT
"openai-gpt": (["input_ids"], 11, False, "gpt2"), # no past state inputs
# GPT-2
"gpt2": (["input_ids"], 11, False, "gpt2"), # no past state inputs & outputs
"gpt2-medium": (["input_ids"], 11, False, "gpt2"),
"gpt2-large": (["input_ids"], 11, True, "gpt2"), # Model>2GB. Need use_external_data_format=True to export it. No past state inputs for GPT models.
"gpt2-xl": (["input_ids"], 11, True, "gpt2"),
"distilgpt2": (["input_ids"], 11, False, "gpt2"), # no past state inputs & outputs
# Transformer-XL
#"transfo-xl-wt103": (["input_ids"], 11, False, "bert"),
# XLNet
#"xlnet-base-cased": (["input_ids"], 12, False, "bert"), # Models uses Einsum, which need opset version 12 and PyTorch 1.5.0 or above.
#"xlnet-large-cased": (["input_ids"], 12, False, "bert"), # Models uses Einsum, which need opset version 12 and PyTorch 1.5.0 or above.
# XLM
"xlm-mlm-en-2048": (["input_ids"], 11, True, "bert"),
"xlm-mlm-ende-1024": (["input_ids"], 11, False, "bert"),
"xlm-mlm-enfr-1024": (["input_ids"], 11, False, "bert"),
"xlm-mlm-enro-1024": (["input_ids"], 11, False, "bert"),
"xlm-mlm-xnli15-1024": (["input_ids"], 11, False, "bert"),
"xlm-mlm-tlm-xnli15-1024": (["input_ids"], 11, False, "bert"),
"xlm-clm-enfr-1024": (["input_ids"], 11, False, "bert"),
"xlm-clm-ende-1024": (["input_ids"], 11, False, "bert"),
"xlm-mlm-17-1280": (["input_ids"], 11, True, "bert"),
"xlm-mlm-100-1280": (["input_ids"], 11, True, "bert"),
# RoBERTa
"roberta-base": (["input_ids", "attention_mask"], 11, False, "bert"),
"roberta-large": (["input_ids", "attention_mask"], 11, False, "bert"),
"roberta-large-mnli": (["input_ids", "attention_mask"], 11, False, "bert"),
"distilroberta-base": (["input_ids", "attention_mask"], 11, False, "bert"),
"roberta-base-openai-detector": (["input_ids", "attention_mask"], 11, False, "bert"),
"roberta-large-openai-detector": (["input_ids", "attention_mask"], 11, False, "bert"),
# DistilBERT
"distilbert-base-uncased": (["input_ids", "attention_mask"], 11, False, "bert"),
"distilbert-base-uncased-distilled-squad": (["input_ids", "attention_mask"], 11, False, "bert"),
"distilbert-base-cased": (["input_ids", "attention_mask"], 11, False, "bert"),
"distilbert-base-cased-distilled-squad": (["input_ids", "attention_mask"], 11, False, "bert"),
"distilbert-base-german-cased": (["input_ids", "attention_mask"], 11, False, "bert"),
"distilbert-base-multilingual-cased": (["input_ids", "attention_mask"], 11, False, "bert"),
# CTRL
"ctrl": (["input_ids"], 11, True, "bert"),
# CamemBERT
"camembert-base": (["input_ids"], 11, False, "bert"),
# ALBERT
# Models uses Einsum, which need opset version 12 and PyTorch 1.5.0 or above.
"albert-base-v1": (["input_ids"], 12, False, "bert"),
"albert-large-v1": (["input_ids"], 12, False, "bert"),
"albert-xlarge-v1": (["input_ids"], 12, True, "bert"),
"albert-xxlarge-v1": (["input_ids"], 12, True, "bert"),
"albert-base-v2": (["input_ids"], 12, False, "bert"),
"albert-large-v2": (["input_ids"], 12, False, "bert"),
"albert-xlarge-v2": (["input_ids"], 12, True, "bert"),
"albert-xxlarge-v2": (["input_ids"], 12, True, "bert"),
# T5
#"t5-small": (["input_ids"], 11, False, "bert"),
#"t5-base": (["input_ids"], 11, False, "bert"),
#"t5-large": (["input_ids"], 11, False, "bert"),
#"t5-3b": (["input_ids"], 11, False, "bert"),
#"t5-11b": (["input_ids"], 11, False, "bert"),
# XLM-RoBERTa
"xlm-roberta-base": (["input_ids"], 11, False, "bert"),
"xlm-roberta-large": (["input_ids"], 11, True, "bert"),
# FlauBERT
"flaubert/flaubert_small_cased": (["input_ids"], 11, False, "bert"),
"flaubert/flaubert_base_uncased": (["input_ids"], 11, False, "bert"),
"flaubert/flaubert_base_cased": (["input_ids"], 11, False, "bert"),
"flaubert/flaubert_large_cased": (["input_ids"], 11, False, "bert"),
# Bart
#"facebook/bart-large": (["input_ids"], 11, False, "bert"),
#"facebook/bart-base": (["input_ids"], 11, False, "bert"),
#"facebook/bart-large-mnli": (["input_ids"], 11, False, "bert"),
#"facebook/bart-large-cnn": (["input_ids"], 11, False, "bert"),
#"facebook/mbart-large-en-ro": (["input_ids"], 11, True, "bert"),
# DialoGPT
"microsoft/DialoGPT-small": (["input_ids"], 11, False, "gpt2"),
"microsoft/DialoGPT-medium": (["input_ids"], 11, False, "gpt2"),
"microsoft/DialoGPT-large": (["input_ids"], 11, True, "gpt2"),
# Reformer
#"google/reformer-enwik8": (["input_ids"], 11, False, "bert"),
#"google/reformer-crime-and-punishment": (["input_ids"], 11, False, "bert"),
# MarianMT
#"Helsinki-NLP/opus-mt-ROMANCE-en": (["input_ids"], 12, False, "bert"),
# Longformer
#"allenai/longformer-base-4096": (["input_ids"], 12, False, "bert"),
#"allenai/longformer-large-4096": (["input_ids"], 12, False, "bert"),
}
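# Hedged helper sketch, not part of the original script: each MODELS entry maps a
# pretrained model name to (input_names, opset_version, use_external_data_format,
# model_type), as described in the comment above the table. The function name
# describe_model is an assumption for illustration only.
def describe_model(name):
    """Summarise one MODELS entry in a single readable line."""
    input_names, opset_version, use_external_data_format, model_type = MODELS[name]
    return "%s: inputs=%s opset=%d external_data=%s optimizer=%s" % (
        name, ",".join(input_names), opset_version, use_external_data_format, model_type)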
|
py | b4176776f3b9be805507b120ca8fcc6f580f0740 | # coding=utf-8
import sys
from unittest import TestResult, TextTestRunner
import datetime
import re
from teamcity.messages import TeamcityServiceMessages
from teamcity.common import is_string, get_class_fullname, convert_error_to_string, limit_output, split_output
_real_stdout = sys.stdout
class TeamcityTestResult(TestResult):
separator2 = "\n"
# noinspection PyUnusedLocal
def __init__(self, stream=_real_stdout, descriptions=None, verbosity=None):
super(TeamcityTestResult, self).__init__()
# Some code may ask for self.failfast, see unittest2.case.TestCase.subTest
self.failfast = getattr(self, "failfast", False)
self.test_started_datetime_map = {}
self.failed_tests = set()
self.subtest_failures = {}
self.messages = TeamcityServiceMessages(_real_stdout)
@staticmethod
def get_test_id(test):
if is_string(test):
return test
# Force test_id for doctests
if get_class_fullname(test) != "doctest.DocTestCase":
desc = test.shortDescription()
test_method_name = getattr(test, "_testMethodName", "")
if desc and desc != test.id() and desc != test_method_name:
return "%s (%s)" % (test.id(), desc.replace('.', '_'))
return test.id()
def addSuccess(self, test):
super(TeamcityTestResult, self).addSuccess(test)
def addExpectedFailure(self, test, err):
_super = super(TeamcityTestResult, self)
if hasattr(_super, "addExpectedFailure"):
_super.addExpectedFailure(test, err)
err = convert_error_to_string(err)
test_id = self.get_test_id(test)
self.messages.testIgnored(test_id, message="Expected failure: " + err, flowId=test_id)
def get_subtest_block_id(self, test, subtest):
test_id = self.get_test_id(test)
subtest_id = self.get_test_id(subtest)
if subtest_id.startswith(test_id):
block_id = subtest_id[len(test_id):].strip()
else:
block_id = subtest_id
if len(block_id) == 0:
block_id = test_id
return block_id
def addSkip(self, test, reason=""):
if sys.version_info >= (2, 7):
super(TeamcityTestResult, self).addSkip(test, reason)
if reason:
reason_str = ": " + str(reason)
else:
reason_str = ""
test_class_name = get_class_fullname(test)
if test_class_name == "unittest.case._SubTest" or test_class_name == "unittest2.case._SubTest":
parent_test = test.test_case
parent_test_id = self.get_test_id(parent_test)
subtest = test
block_id = self.get_subtest_block_id(parent_test, subtest)
self.messages.subTestBlockOpened(block_id, subTestResult="Skip", flowId=parent_test_id)
self.messages.testStdOut(parent_test_id, out="SubTest skipped" + reason_str + "\n", flowId=parent_test_id)
self.messages.blockClosed(block_id, flowId=parent_test_id)
else:
test_id = self.get_test_id(test)
self.messages.testIgnored(test_id, message="Skipped" + reason_str, flowId=test_id)
def addUnexpectedSuccess(self, test):
_super = super(TeamcityTestResult, self)
if hasattr(_super, "addUnexpectedSuccess"):
_super.addUnexpectedSuccess(test)
test_id = self.get_test_id(test)
self.messages.testFailed(test_id, message='Failure',
details="Test should not succeed since it's marked with @unittest.expectedFailure",
flowId=test_id)
def addError(self, test, err, *k):
super(TeamcityTestResult, self).addError(test, err)
test_class = get_class_fullname(test)
if test_class == "unittest.suite._ErrorHolder" or test_class == "unittest2.suite._ErrorHolder":
# This is a standalone error
test_name = test.id()
# patch setUpModule (__main__) -> __main__.setUpModule
test_name = re.sub(r'^(.*) \((.*)\)$', r'\2.\1', test_name)
self.messages.testStarted(test_name, flowId=test_name)
# noinspection PyTypeChecker
self.report_fail(test_name, 'Failure', err)
self.messages.testFinished(test_name, flowId=test_name)
elif get_class_fullname(err[0]) == "unittest2.case.SkipTest":
message = ""
if hasattr(err[1], "message"):
message = getattr(err[1], "message", "")
elif hasattr(err[1], "args"):
message = getattr(err[1], "args", [""])[0]
self.addSkip(test, message)
else:
self.report_fail(test, 'Error', err)
def addFailure(self, test, err, *k):
super(TeamcityTestResult, self).addFailure(test, err)
self.report_fail(test, 'Failure', err)
def addSubTest(self, test, subtest, err):
_super = super(TeamcityTestResult, self)
if hasattr(_super, "addSubTest"):
_super.addSubTest(test, subtest, err)
test_id = self.get_test_id(test)
subtest_id = self.get_test_id(subtest)
if subtest_id.startswith(test_id):
# Replace "." -> "_" since '.' is a test hierarchy separator
# See i.e. https://github.com/JetBrains/teamcity-messages/issues/134 (https://youtrack.jetbrains.com/issue/PY-23846)
block_id = subtest_id[len(test_id):].strip().replace(".", "_")
else:
block_id = subtest_id
if len(block_id) == 0:
block_id = subtest_id
if err is not None:
self.add_subtest_failure(test_id, block_id)
if issubclass(err[0], test.failureException):
self.messages.subTestBlockOpened(block_id, subTestResult="Failure", flowId=test_id)
self.messages.testStdErr(test_id, out="SubTest failure: %s\n" % convert_error_to_string(err), flowId=test_id)
self.messages.blockClosed(block_id, flowId=test_id)
else:
self.messages.subTestBlockOpened(block_id, subTestResult="Error", flowId=test_id)
self.messages.testStdErr(test_id, out="SubTest error: %s\n" % convert_error_to_string(err), flowId=test_id)
self.messages.blockClosed(block_id, flowId=test_id)
else:
self.messages.subTestBlockOpened(block_id, subTestResult="Success", flowId=test_id)
self.messages.blockClosed(block_id, flowId=test_id)
def add_subtest_failure(self, test_id, subtest_block_id):
fail_array = self.subtest_failures.get(test_id, [])
fail_array.append(subtest_block_id)
self.subtest_failures[test_id] = fail_array
def get_subtest_failure(self, test_id):
fail_array = self.subtest_failures.get(test_id, [])
return ", ".join(fail_array)
def report_fail(self, test, fail_type, err):
test_id = self.get_test_id(test)
if is_string(err):
details = err
elif get_class_fullname(err) == "twisted.python.failure.Failure":
details = err.getTraceback()
else:
details = convert_error_to_string(err)
subtest_failures = self.get_subtest_failure(test_id)
if subtest_failures:
details = "Failed subtests list: " + subtest_failures + "\n\n" + details.strip()
details = details.strip()
self.messages.testFailed(test_id, message=fail_type, details=details, flowId=test_id)
self.failed_tests.add(test_id)
def startTest(self, test):
super(TeamcityTestResult, self).startTest(test)
test_id = self.get_test_id(test)
self.test_started_datetime_map[test_id] = datetime.datetime.now()
self.messages.testStarted(test_id, captureStandardOutput='true', flowId=test_id)
def stopTest(self, test):
test_id = self.get_test_id(test)
if getattr(self, 'buffer', None):
# Do not allow super() method to print output by itself
self._mirrorOutput = False
output = sys.stdout.getvalue()
if output:
for chunk in split_output(limit_output(output)):
self.messages.testStdOut(test_id, chunk, flowId=test_id)
error = sys.stderr.getvalue()
if error:
for chunk in split_output(limit_output(error)):
self.messages.testStdErr(test_id, chunk, flowId=test_id)
super(TeamcityTestResult, self).stopTest(test)
if test_id not in self.failed_tests:
subtest_failures = self.get_subtest_failure(test_id)
if subtest_failures:
self.report_fail(test, "One or more subtests failed", "")
time_diff = datetime.datetime.now() - self.test_started_datetime_map[test_id]
self.messages.testFinished(test_id, testDuration=time_diff, flowId=test_id)
def printErrors(self):
pass
class TeamcityTestRunner(TextTestRunner):
resultclass = TeamcityTestResult
if sys.version_info < (2, 7):
def _makeResult(self):
return TeamcityTestResult(self.stream, self.descriptions, self.verbosity)
def run(self, test):
# noinspection PyBroadException
try:
total_tests = test.countTestCases()
TeamcityServiceMessages(_real_stdout).testCount(total_tests)
except:
pass
return super(TeamcityTestRunner, self).run(test)
if __name__ == '__main__':
from unittest import main
main(module=None, testRunner=TeamcityTestRunner())
|
py | b41768abb79e067c956bef7a16adea968c6f8f10 | """
django-bitfield
~~~~~~~~~~~~~~~
"""
try:
VERSION = __import__('pkg_resources') \
.get_distribution('bitfield').version
except Exception:
VERSION = 'unknown'
from bitfield.models import Bit, BitHandler, CompositeBitField, BitField # noqa
|
py | b4176975490c60bb3860063ead576823b412535c | from flask import current_app
def add_to_index(index, model):
if not current_app.elasticsearch:
return
payload = {}
for field in model.__searchable__:
payload[field] = getattr(model, field)
current_app.elasticsearch.index(
index=index, doc_type=index, id=model.id, body=payload)
def remove_from_index(index, model):
if not current_app.elasticsearch:
return
current_app.elasticsearch.delete(index=index, doc_type=index, id=model.id)
def query_index(index, query, page, per_page):
if not current_app.elasticsearch:
return [], 0
search = current_app.elasticsearch.search(
index=index,
doc_type=index,
body={
'query': {
'multi_match': {
'query': query,
'fields': ['*']
}
},
'from': (page - 1) * per_page,
'size': per_page
})
ids = [int(hit['_id']) for hit in search['hits']['hits']]
return ids, search['hits']['total']
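# Hedged usage sketch, not part of the original module: a typical caller passes the
# lowercase table name as the index and then re-orders its database rows to match
# the Elasticsearch relevance ranking. The 'post' index named here is an assumption
# for illustration only.
def example_search(expression, page=1, per_page=10):
    """Return (ids, total) for a full-text query against the assumed 'post' index."""
    ids, total = query_index('post', expression, page, per_page)
    # ids come back in relevance order; total is the overall hit count for paging
    return ids, total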
|
py | b41769df42c3f07f7a11e6e9e031174991c5dc04 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import numpy as np
import pytest
import torch
from argparse import Namespace
from espnet.nets.pytorch_backend.e2e_tts_transformer import subsequent_mask
from espnet.nets.pytorch_backend.e2e_tts_transformer import Transformer
from espnet.nets.pytorch_backend.nets_utils import pad_list
def make_transformer_args(**kwargs):
defaults = dict(
embed_dim=32,
spk_embed_dim=None,
eprenet_conv_layers=2,
eprenet_conv_filts=5,
eprenet_conv_chans=32,
dprenet_layers=2,
dprenet_units=32,
adim=32,
aheads=4,
elayers=2,
eunits=32,
dlayers=2,
dunits=32,
postnet_layers=2,
postnet_filts=5,
postnet_chans=32,
eprenet_dropout_rate=0.1,
dprenet_dropout_rate=0.5,
postnet_dropout_rate=0.1,
transformer_enc_dropout_rate=0.1,
transformer_enc_positional_dropout_rate=0.1,
transformer_enc_attn_dropout_rate=0.0,
transformer_dec_dropout_rate=0.1,
transformer_dec_positional_dropout_rate=0.1,
transformer_dec_attn_dropout_rate=0.3,
transformer_enc_dec_attn_dropout_rate=0.0,
spk_embed_integration_type="add",
use_masking=True,
use_weighted_masking=False,
bce_pos_weight=1.0,
use_batch_norm=True,
use_scaled_pos_enc=True,
encoder_normalize_before=True,
decoder_normalize_before=True,
encoder_concat_after=False,
decoder_concat_after=False,
transformer_init="pytorch",
initial_encoder_alpha=1.0,
initial_decoder_alpha=1.0,
reduction_factor=1,
loss_type="L1",
use_guided_attn_loss=False,
num_heads_applied_guided_attn=2,
num_layers_applied_guided_attn=2,
guided_attn_loss_sigma=0.4,
guided_attn_loss_lambda=1.0,
modules_applied_guided_attn=["encoder", "decoder", "encoder-decoder"]
)
defaults.update(kwargs)
return defaults
def make_inference_args(**kwargs):
defaults = dict(
threshold=0.5,
maxlenratio=5.0,
minlenratio=0.0
)
defaults.update(kwargs)
return defaults
def prepare_inputs(idim, odim, ilens, olens, spk_embed_dim=None,
device=torch.device('cpu')):
xs = [np.random.randint(0, idim, l) for l in ilens]
ys = [np.random.randn(l, odim) for l in olens]
ilens = torch.LongTensor(ilens).to(device)
olens = torch.LongTensor(olens).to(device)
xs = pad_list([torch.from_numpy(x).long() for x in xs], 0).to(device)
ys = pad_list([torch.from_numpy(y).float() for y in ys], 0).to(device)
labels = ys.new_zeros(ys.size(0), ys.size(1))
for i, l in enumerate(olens):
labels[i, l - 1:] = 1
batch = {
"xs": xs,
"ilens": ilens,
"ys": ys,
"labels": labels,
"olens": olens,
}
if spk_embed_dim is not None:
batch["spembs"] = torch.FloatTensor(np.random.randn(len(ilens), spk_embed_dim)).to(device)
return batch
@pytest.mark.parametrize(
"model_dict", [
({}),
({"use_masking": False}),
({"spk_embed_dim": 16, "spk_embed_integration_type": "concat"}),
({"spk_embed_dim": 16, "spk_embed_integration_type": "add"}),
({"use_scaled_pos_enc": False}),
({"bce_pos_weight": 10.0}),
({"reduction_factor": 2}),
({"reduction_factor": 3}),
({"encoder_normalize_before": False}),
({"decoder_normalize_before": False}),
({"encoder_normalize_before": False, "decoder_normalize_before": False}),
({"encoder_concat_after": True}),
({"decoder_concat_after": True}),
({"encoder_concat_after": True, "decoder_concat_after": True}),
({"loss_type": "L1"}),
({"loss_type": "L2"}),
({"loss_type": "L1+L2"}),
({"use_masking": False}),
({"use_masking": False, "use_weighted_masking": True}),
({"use_guided_attn_loss": True}),
({"use_guided_attn_loss": True, "reduction_factor": 3}),
({"use_guided_attn_loss": True, "modules_applied_guided_attn": ["encoder-decoder"]}),
({"use_guided_attn_loss": True, "modules_applied_guided_attn": ["encoder", "decoder"]}),
({"use_guided_attn_loss": True, "num_heads_applied_guided_attn": -1}),
({"use_guided_attn_loss": True, "num_layers_applied_guided_attn": -1}),
({"use_guided_attn_loss": True, "modules_applied_guided_attn": ["encoder"], "elayers": 2, "dlayers": 3}),
])
def test_transformer_trainable_and_decodable(model_dict):
# make args
model_args = make_transformer_args(**model_dict)
inference_args = make_inference_args()
# setup batch
idim = 5
odim = 10
ilens = [10, 5]
olens = [20, 15]
batch = prepare_inputs(idim, odim, ilens, olens, model_args["spk_embed_dim"])
# define model
model = Transformer(idim, odim, Namespace(**model_args))
optimizer = torch.optim.Adam(model.parameters())
# trainable
loss = model(**batch).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# check gradient of ScaledPositionalEncoding
if model.use_scaled_pos_enc:
assert model.encoder.embed[1].alpha.grad is not None
assert model.decoder.embed[1].alpha.grad is not None
# decodable
model.eval()
with torch.no_grad():
if model_args["spk_embed_dim"] is None:
spemb = None
else:
spemb = batch["spembs"][0]
model.inference(batch["xs"][0][:batch["ilens"][0]], Namespace(**inference_args), spemb=spemb)
model.calculate_all_attentions(**batch)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="gpu required")
@pytest.mark.parametrize(
"model_dict", [
({}),
({"spk_embed_dim": 16, "spk_embed_integration_type": "concat"}),
({"spk_embed_dim": 16, "spk_embed_integration_type": "add"}),
({"use_masking": False}),
({"use_scaled_pos_enc": False}),
({"bce_pos_weight": 10.0}),
({"encoder_normalize_before": False}),
({"decoder_normalize_before": False}),
({"encoder_normalize_before": False, "decoder_normalize_before": False}),
({"decoder_concat_after": True}),
({"encoder_concat_after": True, "decoder_concat_after": True}),
({"use_masking": False}),
({"use_masking": False, "use_weighted_masking": True}),
])
def test_transformer_gpu_trainable_and_decodable(model_dict):
# make args
model_args = make_transformer_args(**model_dict)
inference_args = make_inference_args()
idim = 5
odim = 10
ilens = [10, 5]
olens = [20, 15]
device = torch.device('cuda')
batch = prepare_inputs(idim, odim, ilens, olens, model_args["spk_embed_dim"], device=device)
# define model
model = Transformer(idim, odim, Namespace(**model_args))
model.to(device)
optimizer = torch.optim.Adam(model.parameters())
# trainable
loss = model(**batch).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# check gradient of ScaledPositionalEncoding
if model.use_scaled_pos_enc:
assert model.encoder.embed[1].alpha.grad is not None
assert model.decoder.embed[1].alpha.grad is not None
# decodable
model.eval()
with torch.no_grad():
if model_args["spk_embed_dim"] is None:
spemb = None
else:
spemb = batch["spembs"][0]
model.inference(batch["xs"][0][:batch["ilens"][0]], Namespace(**inference_args), spemb=spemb)
model.calculate_all_attentions(**batch)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize(
"model_dict", [
({}),
({"spk_embed_dim": 16, "spk_embed_integration_type": "concat"}),
({"spk_embed_dim": 16, "spk_embed_integration_type": "add"}),
({"use_masking": False}),
({"use_scaled_pos_enc": False}),
({"bce_pos_weight": 10.0}),
({"encoder_normalize_before": False}),
({"decoder_normalize_before": False}),
({"encoder_normalize_before": False, "decoder_normalize_before": False}),
({"decoder_concat_after": True}),
({"encoder_concat_after": True, "decoder_concat_after": True}),
({"use_masking": False}),
({"use_masking": False, "use_weighted_masking": True}),
])
def test_transformer_multi_gpu_trainable(model_dict):
# make args
model_args = make_transformer_args(**model_dict)
# setup batch
idim = 5
odim = 10
ilens = [10, 5]
olens = [20, 15]
device = torch.device('cuda')
batch = prepare_inputs(idim, odim, ilens, olens, model_args["spk_embed_dim"], device=device)
# define model
ngpu = 2
device_ids = list(range(ngpu))
model = Transformer(idim, odim, Namespace(**model_args))
model = torch.nn.DataParallel(model, device_ids)
model.to(device)
optimizer = torch.optim.Adam(model.parameters())
# trainable
loss = model(**batch).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# check gradient of ScaledPositionalEncoding
if model.module.use_scaled_pos_enc:
assert model.module.encoder.embed[1].alpha.grad is not None
assert model.module.decoder.embed[1].alpha.grad is not None
@pytest.mark.parametrize(
"model_dict", [
({}),
])
def test_attention_masking(model_dict):
# make args
model_args = make_transformer_args(**model_dict)
# setup batch
idim = 5
odim = 10
ilens = [10, 5]
olens = [20, 15]
batch = prepare_inputs(idim, odim, ilens, olens)
# define model
model = Transformer(idim, odim, Namespace(**model_args))
# test encoder self-attention
xs = model.encoder.embed(batch["xs"])
xs[1, ilens[1]:] = float("nan")
x_masks = model._source_mask(batch["ilens"])
a = model.encoder.encoders[0].self_attn
a(xs, xs, xs, x_masks)
aws = a.attn.detach().numpy()
for aw, ilen in zip(aws, batch["ilens"]):
assert not np.isnan(aw[:, :ilen, :ilen]).any()
np.testing.assert_almost_equal(aw[:, :ilen, :ilen].sum(), float(aw.shape[0] * ilen), decimal=4)
assert aw[:, ilen:, ilen:].sum() == 0.0
# test encoder-decoder attention
ys = model.decoder.embed(batch["ys"])
ys[1, olens[1]:] = float("nan")
xy_masks = x_masks
a = model.decoder.decoders[0].src_attn
a(ys, xs, xs, xy_masks)
aws = a.attn.detach().numpy()
for aw, ilen, olen in zip(aws, batch["ilens"], batch["olens"]):
assert not np.isnan(aw[:, :olen, :ilen]).any()
np.testing.assert_almost_equal(aw[:, :olen, :ilen].sum(), float(aw.shape[0] * olen), decimal=4)
assert aw[:, olen:, ilen:].sum() == 0.0
# test decoder self-attention
y_masks = model._target_mask(batch["olens"])
a = model.decoder.decoders[0].self_attn
a(ys, ys, ys, y_masks)
aws = a.attn.detach().numpy()
for aw, olen in zip(aws, batch["olens"]):
assert not np.isnan(aw[:, :olen, :olen]).any()
np.testing.assert_almost_equal(aw[:, :olen, :olen].sum(), float(aw.shape[0] * olen), decimal=4)
assert aw[:, olen:, olen:].sum() == 0.0
@pytest.mark.parametrize(
"model_dict", [
({}),
({"reduction_factor": 3}),
({"reduction_factor": 4}),
({"decoder_normalize_before": False}),
({"encoder_normalize_before": False, "decoder_normalize_before": False}),
({"decoder_concat_after": True}),
({"encoder_concat_after": True, "decoder_concat_after": True}),
])
def test_forward_and_inference_are_equal(model_dict):
# make args
model_args = make_transformer_args(dprenet_dropout_rate=0.0, **model_dict)
# setup batch
idim = 5
odim = 10
ilens = [10]
olens = [20]
batch = prepare_inputs(idim, odim, ilens, olens)
xs = batch["xs"]
ilens = batch["ilens"]
ys = batch["ys"]
olens = batch["olens"]
# define model
model = Transformer(idim, odim, Namespace(**model_args))
model.eval()
# TODO(kan-bayashi): update following ugly part
with torch.no_grad():
# --------- forward calculation ---------
x_masks = model._source_mask(ilens)
hs_fp, h_masks = model.encoder(xs, x_masks)
if model.reduction_factor > 1:
ys_in = ys[:, model.reduction_factor - 1::model.reduction_factor]
olens_in = olens.new([olen // model.reduction_factor for olen in olens])
else:
ys_in, olens_in = ys, olens
ys_in = model._add_first_frame_and_remove_last_frame(ys_in)
y_masks = model._target_mask(olens_in)
zs, _ = model.decoder(ys_in, y_masks, hs_fp, h_masks)
before_outs = model.feat_out(zs).view(zs.size(0), -1, model.odim)
logits = model.prob_out(zs).view(zs.size(0), -1)
after_outs = before_outs + model.postnet(before_outs.transpose(1, 2)).transpose(1, 2)
# --------- forward calculation ---------
# --------- inference calculation ---------
hs_ir, _ = model.encoder(xs, None)
maxlen = ys_in.shape[1]
minlen = ys_in.shape[1]
idx = 0
# this is the inference calculation, but we use ground truth to check the behavior
ys_in_ = ys_in[0, idx].view(1, 1, model.odim)
np.testing.assert_array_equal(
ys_in_.new_zeros(1, 1, model.odim).detach().cpu().numpy(),
ys_in_.detach().cpu().numpy(),
)
outs, probs = [], []
while True:
idx += 1
y_masks = subsequent_mask(idx).unsqueeze(0)
z = model.decoder.forward_one_step(ys_in_, y_masks, hs_ir)[0] # (B, idx, adim)
outs += [model.feat_out(z).view(1, -1, model.odim)] # [(1, r, odim), ...]
probs += [torch.sigmoid(model.prob_out(z))[0]] # [(r), ...]
if idx >= maxlen:
if idx < minlen:
continue
outs = torch.cat(outs, dim=1).transpose(1, 2) # (1, L, odim) -> (1, odim, L)
if model.postnet is not None:
outs = outs + model.postnet(outs) # (1, odim, L)
outs = outs.transpose(2, 1).squeeze(0) # (L, odim)
probs = torch.cat(probs, dim=0)
break
ys_in_ = torch.cat((ys_in_, ys_in[0, idx].view(1, 1, model.odim)), dim=1) # (1, idx + 1, odim)
# --------- inference calculation ---------
# check both are equal
np.testing.assert_array_almost_equal(
hs_fp.detach().cpu().numpy(),
hs_ir.detach().cpu().numpy(),
)
np.testing.assert_array_almost_equal(
after_outs.squeeze(0).detach().cpu().numpy(),
outs.detach().cpu().numpy(),
)
np.testing.assert_array_almost_equal(
torch.sigmoid(logits.squeeze(0)).detach().cpu().numpy(),
probs.detach().cpu().numpy(),
)
|
py | b4176abab8bb0f0f10fe7fd2a0a68461e352f84b | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__all__ = ['__version__']
import pbr.version
version_info = pbr.version.VersionInfo('python-congressclient')
try:
__version__ = version_info.version_string()
except AttributeError:
__version__ = None
|
py | b4176c3a241e741c4a48ed53777d7205226fcf99 | from io import BytesIO
import pytest
from thefuck.specific.npm import get_scripts
run_script_stdout = b'''
Lifecycle scripts included in code-view-web:
test
jest
available via `npm run-script`:
build
cp node_modules/ace-builds/src-min/ -a resources/ace/ && webpack --progress --colors -p --config ./webpack.production.config.js
develop
cp node_modules/ace-builds/src/ -a resources/ace/ && webpack-dev-server --progress --colors
watch-test
jest --verbose --watch
'''
@pytest.mark.usefixtures('no_memoize')
def test_get_scripts(mocker):
patch = mocker.patch('thefuck.specific.npm.Popen')
patch.return_value.stdout = BytesIO(run_script_stdout)
assert get_scripts() == ['build', 'develop', 'watch-test']
|
py | b4176d0c1efb60793fd5b065128e84c0053246e9 | from __future__ import print_function
import os
import sys
import numpy as np
import torch
import random
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from copy import deepcopy
from q_net import NStepQNet, greedy_actions
sys.path.append('%s/../common' % os.path.dirname(os.path.realpath(__file__)))
from cmd_args import cmd_args
from rl_common import GraphEdgeEnv, local_args, load_graphs, load_base_model
from nstep_replay_mem import NstepReplayMem
sys.path.append('%s/../graph_classification' % os.path.dirname(os.path.realpath(__file__)))
from graph_common import loop_dataset
class Agent(object):
def __init__(self, g_list, test_g_list, env):
self.g_list = g_list
if test_g_list is None:
self.test_g_list = g_list
else:
self.test_g_list = test_g_list
self.mem_pool = NstepReplayMem(memory_size=50000, n_steps=2)
self.env = env
# self.net = QNet()
self.net = NStepQNet(2)
self.old_net = NStepQNet(2)
if cmd_args.ctx == 'gpu':
self.net = self.net.cuda()
self.old_net = self.old_net.cuda()
self.eps_start = 1.0
self.eps_end = 1.0
self.eps_step = 10000
self.burn_in = 100
self.step = 0
self.best_eval = None
self.pos = 0
self.sample_idxes = list(range(len(g_list)))
random.shuffle(self.sample_idxes)
self.take_snapshot()
def take_snapshot(self):
self.old_net.load_state_dict(self.net.state_dict())
def make_actions(self, time_t, greedy=False):
self.eps = self.eps_end + max(0., (self.eps_start - self.eps_end)
* (self.eps_step - max(0., self.step)) / self.eps_step)
if random.random() < self.eps and not greedy:
actions = self.env.uniformRandActions()
else:
cur_state = self.env.getStateRef()
actions, _, _ = self.net(time_t, cur_state, None, greedy_acts=True)
actions = list(actions.cpu().numpy())
return actions
def run_simulation(self):
# pos is the current batch index; reshuffle the sample order once all batches have been consumed
if (self.pos + 1) * cmd_args.batch_size > len(self.sample_idxes):
self.pos = 0
random.shuffle(self.sample_idxes)
# setup env with a batch
selected_idx = self.sample_idxes[self.pos * cmd_args.batch_size : (self.pos + 1) * cmd_args.batch_size]
self.pos += 1
self.env.setup([self.g_list[idx] for idx in selected_idx])
t = 0
while not env.isTerminal():
list_at = self.make_actions(t)
list_st = self.env.cloneState()
self.env.step(list_at)
assert (env.rewards is not None) == env.isTerminal()
if env.isTerminal():
rewards = env.rewards
s_prime = None
else:
rewards = np.zeros(len(list_at), dtype=np.float32)
s_prime = self.env.cloneState()
self.mem_pool.add_list(list_st, list_at, rewards, s_prime, [env.isTerminal()] * len(list_at), t)
t += 1
def eval(self):
self.env.setup(deepcopy(self.test_g_list))
t = 0
while not self.env.isTerminal():
list_at = self.make_actions(t, greedy=True)
self.env.step(list_at)
t += 1
test_loss = loop_dataset(env.g_list, env.classifier, list(range(len(env.g_list))))
print('\033[93m average test: loss %.5f acc %.5f\033[0m' % (test_loss[0], test_loss[1]))
if cmd_args.phase == 'train' and (self.best_eval is None or test_loss[1] < self.best_eval):
print('----saving to best attacker since this is the best attack rate so far.----')
torch.save(self.net.state_dict(), cmd_args.save_dir + '/epoch-best.model')
with open(cmd_args.save_dir + '/epoch-best.txt', 'w') as f:
f.write('%.4f\n' % test_loss[1])
self.best_eval = test_loss[1]
reward = np.mean(self.env.rewards)
print(reward)
return reward, test_loss[1]
def train(self):
log_out = open(cmd_args.logfile, 'w', 0)
# do a burn in phase
pbar = tqdm(range(self.burn_in), unit='batch')
for p in pbar:
self.run_simulation()
# local_args.num_steps = 100000
pbar = tqdm(range(local_args.num_steps), unit='steps')
optimizer = optim.Adam(self.net.parameters(), lr=cmd_args.learning_rate)
for self.step in pbar:
# run a simulation and add to self.mem_pool (memory replay buffer)
self.run_simulation()
# save/print statistics every 100 steps
if self.step % 100 == 0:
self.take_snapshot()
if self.step % 100 == 0:
r, acc = self.eval()
log_out.write('%d %.6f %.6f\n' % (self.step, r, acc))
# sample a minibatch of transitions from the replay memory
cur_time, list_st, list_at, list_rt, list_s_primes, list_term = self.mem_pool.sample(batch_size=cmd_args.batch_size)
list_target = torch.Tensor(list_rt)
if cmd_args.ctx == 'gpu':
list_target = list_target.cuda()
cleaned_sp = []
nonterms = []
for i in range(len(list_st)):
if not list_term[i]:
cleaned_sp.append(list_s_primes[i])
nonterms.append(i)
if len(cleaned_sp): # if len(cleaned_sp) > 0
_, _, banned = zip(*cleaned_sp)
_, q_t_plus_1, prefix_sum_prime = self.old_net(cur_time + 1, cleaned_sp, None)
_, q_rhs = greedy_actions(q_t_plus_1, prefix_sum_prime, banned)
list_target[nonterms] = q_rhs
# list_target = get_supervision(self.env.classifier, list_st, list_at)
list_target = Variable(list_target.view(-1, 1))
_, q_sa, _ = self.net(cur_time, list_st, list_at)
loss = F.mse_loss(q_sa, list_target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
pbar.set_description('exp: %.5f, loss: %0.5f' % (self.eps, loss) )
log_out.close()
if __name__ == '__main__':
# set seed
random.seed(cmd_args.seed)
np.random.seed(cmd_args.seed)
torch.manual_seed(cmd_args.seed)
# load test graphs
label_map, _, g_list = load_graphs()
random.shuffle(g_list)
base_classifier = load_base_model(label_map, g_list)
env = GraphEdgeEnv(base_classifier, n_edges = 1)
# hold out a fraction of the graphs for evaluation when frac_meta > 0
if cmd_args.frac_meta > 0:
num_train = int( len(g_list) * (1 - cmd_args.frac_meta) )
agent = Agent(g_list[:num_train], g_list[num_train:], env)
else:
agent = Agent(g_list, None, env)
if cmd_args.phase == 'train':
agent.train()
else:
agent.net.load_state_dict(torch.load(cmd_args.save_dir + '/epoch-best.model'))
agent.eval()
# env.setup([g_list[idx] for idx in selected_idx])
# t = 0
# while not env.isTerminal():
# policy_net = net_list[t]
# t += 1
# batch_graph, picked_nodes = env.getState()
# log_probs, prefix_sum = policy_net(batch_graph, picked_nodes)
# actions = env.sampleActions(torch.exp(log_probs).data.cpu().numpy(), prefix_sum.data.cpu().numpy(), greedy=True)
# env.step(actions)
# test_loss = loop_dataset(env.g_list, base_classifier, list(range(len(env.g_list))))
# print('\033[93maverage test: loss %.5f acc %.5f\033[0m' % (test_loss[0], test_loss[1]))
# print(np.mean(avg_rewards), np.mean(env.rewards)) |
py | b4176e897f956f932d301c79ba9b026c1129ddd8 | """Base class for validation."""
from __future__ import annotations
from time import monotonic
from typing import TYPE_CHECKING
from ..enums import HacsCategory
from ..exceptions import HacsException
if TYPE_CHECKING:
from ..repositories.base import HacsRepository
class ValidationException(HacsException):
"""Raise when there is a validation issue."""
class ActionValidationBase:
"""Base class for action validation."""
categories: list[HacsCategory] = []
more_info: str = "https://hacs.xyz/docs/publish/action"
def __init__(self, repository: HacsRepository) -> None:
self.hacs = repository.hacs
self.repository = repository
self.failed = False
@property
def slug(self) -> str:
"""Return the check slug."""
return self.__class__.__module__.rsplit(".", maxsplit=1)[-1]
async def async_validate(self) -> None:
"""Validate the repository."""
async def execute_validation(self, *_, **__) -> None:
"""Execute the task defined in subclass."""
self.hacs.log.info("<Validation %s> Starting validation", self.slug)
start_time = monotonic()
self.failed = False
try:
await self.async_validate()
except ValidationException as exception:
self.failed = True
self.hacs.log.error(
"<Validation %s> failed: %s (More info: %s)",
self.slug,
exception,
self.more_info,
)
else:
self.hacs.log.info(
"<Validation %s> took %.3f seconds to complete", self.slug, monotonic() - start_time
)
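# Hedged illustration, not shipped with HACS: a minimal concrete check built on
# ActionValidationBase. The rule itself (requiring a repository description) and the
# repository.data.description attribute access are assumptions for this sketch; real
# checks live in their own modules so that slug picks up the module name.
class DescriptionValidation(ActionValidationBase):
    """Fail when the repository appears to have no description."""
    async def async_validate(self) -> None:
        if not getattr(getattr(self.repository, "data", None), "description", None):
            raise ValidationException("Repository has no description")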
|
py | b4176eea28b7336155a6335403dc70d4af6bd082 | from rest_framework import serializers
from .models import Room, Booking
class BookingSerializer(serializers.ModelSerializer):
class Meta:
model = Booking
fields = '__all__'
# exclude = ['start_timing', 'end_timing']
class RoomSerializer(serializers.ModelSerializer):
bookings = BookingSerializer(many=True, read_only=True)
class Meta:
model = Room
fields = '__all__'
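# Hedged usage sketch, not part of the original module: serializing a Room pulls in
# its bookings through the nested read-only field above. This assumes Booking's
# ForeignKey to Room declares related_name='bookings'; without that the nested
# field would not resolve.
def serialize_room(room):
    """Return a dict for one Room, including a 'bookings' list of booking dicts."""
    return RoomSerializer(room).data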
|
py | b4176f4dfb75b04c6ff51473ef2d8fa6f6597d39 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import PY3
from six import StringIO
from tests.compat import unittest
from webob import Request, Response
import warnings
import mock
from webtest import TestApp
from webtest.compat import to_bytes
from webtest.lint import check_headers
from webtest.lint import check_content_type
from webtest.lint import check_environ
from webtest.lint import IteratorWrapper
from webtest.lint import WriteWrapper
from webtest.lint import ErrorWrapper
from webtest.lint import InputWrapper
from webtest.lint import to_string
from webtest.lint import middleware
from webtest.lint import _assert_latin1_str
from six import BytesIO
def application(environ, start_response):
req = Request(environ)
resp = Response()
env_input = environ['wsgi.input']
len_body = len(req.body)
env_input.input.seek(0)
if req.path_info == '/read':
resp.body = env_input.read(len_body)
elif req.path_info == '/read_line':
resp.body = env_input.readline(len_body)
elif req.path_info == '/read_lines':
resp.body = b'-'.join(env_input.readlines(len_body))
elif req.path_info == '/close':
resp.body = env_input.close()
return resp(environ, start_response)
class TestLatin1Assertion(unittest.TestCase):
def test_valid_type(self):
value = "useful-inførmation-5"
if not PY3:
value = value.encode("latin1")
assert value == _assert_latin1_str(value, "fail")
def test_invalid_type(self):
value = b"useful-information-5"
if not PY3:
value = value.decode("utf8")
self.assertRaises(AssertionError, _assert_latin1_str, value, "fail")
class TestToString(unittest.TestCase):
def test_to_string(self):
self.assertEqual(to_string('foo'), 'foo')
self.assertEqual(to_string(b'foo'), 'foo')
class TestMiddleware(unittest.TestCase):
def test_lint_too_few_args(self):
linter = middleware(application)
with self.assertRaisesRegexp(AssertionError, "Two arguments required"):
linter()
with self.assertRaisesRegexp(AssertionError, "Two arguments required"):
linter({})
def test_lint_no_keyword_args(self):
linter = middleware(application)
with self.assertRaisesRegexp(AssertionError, "No keyword arguments "
"allowed"):
linter({}, 'foo', baz='baz')
# TODO: test start_response_wrapper
@mock.patch.multiple('webtest.lint',
check_environ=lambda x: True, # don't block too early
InputWrapper=lambda x: True)
def test_lint_iterator_returned(self):
linter = middleware(lambda x, y: None) # None is not an iterator
msg = "The application must return an iterator, if only an empty list"
with self.assertRaisesRegexp(AssertionError, msg):
linter({'wsgi.input': 'foo', 'wsgi.errors': 'foo'}, 'foo')
class TestInputWrapper(unittest.TestCase):
def test_read(self):
app = TestApp(application)
resp = app.post('/read', 'hello')
self.assertEqual(resp.body, b'hello')
def test_readline(self):
app = TestApp(application)
resp = app.post('/read_line', 'hello\n')
self.assertEqual(resp.body, b'hello\n')
def test_readlines(self):
app = TestApp(application)
resp = app.post('/read_lines', 'hello\nt\n')
self.assertEqual(resp.body, b'hello\n-t\n')
def test_close(self):
input_wrapper = InputWrapper(None)
self.assertRaises(AssertionError, input_wrapper.close)
def test_iter(self):
data = to_bytes("A line\nAnother line\nA final line\n")
input_wrapper = InputWrapper(BytesIO(data))
        self.assertEqual(to_bytes("").join(input_wrapper), data, '')
def test_seek(self):
data = to_bytes("A line\nAnother line\nA final line\n")
input_wrapper = InputWrapper(BytesIO(data))
input_wrapper.seek(0)
        self.assertEqual(to_bytes("").join(input_wrapper), data, '')
class TestMiddleware2(unittest.TestCase):
def test_exc_info(self):
def application_exc_info(environ, start_response):
body = to_bytes('body stuff')
headers = [
('Content-Type', 'text/plain; charset=utf-8'),
('Content-Length', str(len(body)))]
# PEP 3333 requires native strings:
headers = [(str(k), str(v)) for k, v in headers]
start_response(to_bytes('200 OK'), headers, ('stuff',))
return [body]
app = TestApp(application_exc_info)
app.get('/')
# don't know what to assert here... a bit cheating, just covers code
class TestCheckContentType(unittest.TestCase):
def test_no_content(self):
status = "204 No Content"
headers = [
('Content-Type', 'text/plain; charset=utf-8'),
('Content-Length', '4')
]
self.assertRaises(AssertionError, check_content_type, status, headers)
def test_no_content_type(self):
status = "200 OK"
headers = [
('Content-Length', '4')
]
self.assertRaises(AssertionError, check_content_type, status, headers)
class TestCheckHeaders(unittest.TestCase):
@unittest.skipIf(PY3, 'unicode is str in Python3')
def test_header_unicode_name(self):
headers = [(u'X-Price', str('100'))]
self.assertRaises(AssertionError, check_headers, headers)
@unittest.skipIf(PY3, 'unicode is str in Python3')
def test_header_unicode_value(self):
headers = [(str('X-Price'), u'100')]
self.assertRaises(AssertionError, check_headers, headers)
@unittest.skipIf(not PY3, 'bytes is str in Python2')
def test_header_bytes_name(self):
headers = [(b'X-Price', '100')]
self.assertRaises(AssertionError, check_headers, headers)
@unittest.skipIf(not PY3, 'bytes is str in Python2')
def test_header_bytes_value(self):
headers = [('X-Price', b'100')]
self.assertRaises(AssertionError, check_headers, headers)
def test_header_non_latin1_value(self):
headers = [(str('X-Price'), '100€')]
self.assertRaises(AssertionError, check_headers, headers)
def test_header_non_latin1_name(self):
headers = [('X-€', str('foo'))]
self.assertRaises(AssertionError, check_headers, headers)
class TestCheckEnviron(unittest.TestCase):
def test_no_query_string(self):
environ = {
'REQUEST_METHOD': str('GET'),
'SERVER_NAME': str('localhost'),
'SERVER_PORT': str('80'),
'wsgi.version': (1, 0, 1),
'wsgi.input': StringIO('test'),
'wsgi.errors': StringIO(),
'wsgi.multithread': None,
'wsgi.multiprocess': None,
'wsgi.run_once': None,
'wsgi.url_scheme': 'http',
'PATH_INFO': str('/'),
}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
check_environ(environ)
self.assertEqual(len(w), 1, "We should have only one warning")
self.assertTrue(
"QUERY_STRING" in str(w[-1].message),
"The warning message should say something about QUERY_STRING")
def test_no_valid_request(self):
environ = {
'REQUEST_METHOD': str('PROPFIND'),
'SERVER_NAME': str('localhost'),
'SERVER_PORT': str('80'),
'wsgi.version': (1, 0, 1),
'wsgi.input': StringIO('test'),
'wsgi.errors': StringIO(),
'wsgi.multithread': None,
'wsgi.multiprocess': None,
'wsgi.run_once': None,
'wsgi.url_scheme': 'http',
'PATH_INFO': str('/'),
'QUERY_STRING': str(''),
}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
check_environ(environ)
self.assertEqual(len(w), 1, "We should have only one warning")
self.assertTrue(
"REQUEST_METHOD" in str(w[-1].message),
"The warning message should say something "
"about REQUEST_METHOD")
def test_handles_native_strings_in_variables(self):
# "native string" means unicode in py3, but bytes in py2
path = '/umläut'
if not PY3:
path = path.encode('utf-8')
environ = {
'REQUEST_METHOD': str('GET'),
'SERVER_NAME': str('localhost'),
'SERVER_PORT': str('80'),
'wsgi.version': (1, 0, 1),
'wsgi.input': StringIO('test'),
'wsgi.errors': StringIO(),
'wsgi.multithread': None,
'wsgi.multiprocess': None,
'wsgi.run_once': None,
'wsgi.url_scheme': 'http',
'PATH_INFO': path,
'QUERY_STRING': str(''),
}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
check_environ(environ)
self.assertEqual(0, len(w), "We should have no warning")
class TestIteratorWrapper(unittest.TestCase):
def test_close(self):
class MockIterator(object):
def __init__(self):
self.closed = False
def __iter__(self):
return self
def __next__(self):
return None
next = __next__
def close(self):
self.closed = True
mock = MockIterator()
wrapper = IteratorWrapper(mock, None)
wrapper.close()
self.assertTrue(mock.closed, "Original iterator has not been closed")
class TestWriteWrapper(unittest.TestCase):
def test_wrong_type(self):
write_wrapper = WriteWrapper(None)
self.assertRaises(AssertionError, write_wrapper, 'not a binary')
def test_normal(self):
class MockWriter(object):
def __init__(self):
self.written = []
def __call__(self, s):
self.written.append(s)
data = to_bytes('foo')
mock = MockWriter()
write_wrapper = WriteWrapper(mock)
write_wrapper(data)
self.assertEqual(
mock.written, [data],
"WriterWrapper should call original writer when data is binary "
"type")
class TestErrorWrapper(unittest.TestCase):
def test_dont_close(self):
error_wrapper = ErrorWrapper(None)
self.assertRaises(AssertionError, error_wrapper.close)
class FakeError(object):
def __init__(self):
self.written = []
self.flushed = False
def write(self, s):
self.written.append(s)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
self.flushed = True
def test_writelines(self):
fake_error = self.FakeError()
error_wrapper = ErrorWrapper(fake_error)
data = [to_bytes('a line'), to_bytes('another line')]
error_wrapper.writelines(data)
self.assertEqual(fake_error.written, data,
"ErrorWrapper should call original writer")
def test_flush(self):
fake_error = self.FakeError()
error_wrapper = ErrorWrapper(fake_error)
error_wrapper.flush()
self.assertTrue(
fake_error.flushed,
"ErrorWrapper should have called original wsgi_errors's flush")
|
py | b4176facd3f511c55c080463eeb061b9f487d7d5 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
class MultiHead(nn.Module):
def __init__(self, in_size, out_sizes, *args, **kwargs):
super(MultiHead, self).__init__(*args, **kwargs)
if isinstance(in_size, torch.Size):
            assert len(in_size) == 1, 'MultiHead expects 1d inputs, got {}'\
                .format(in_size)
in_size = in_size[0]
heads = [nn.Linear(in_size, out) for i, out in enumerate(out_sizes)]
# heads = [nn.Linear(in_size, 1 if out in [1, 2] else out) for i, out in enumerate(out_sizes)]
self.heads = nn.ModuleList(heads)
self.n_out = len(out_sizes)
def forward(self, input):
return [head(input) for head in self.heads]
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
def make_model(*blocks, seq=nn.Sequential):
blocks_list = []
for block in blocks:
if isinstance(block, nn.Module):
block = [block]
assert isinstance(block, list)
blocks_list += block
model = seq(*blocks_list)
model.n_out = blocks_list[-1].n_out
return model
def get_conv_out_size(in_size, kernel_size, padding, stride):
return [(d+padding - (kernel_size-1) + 1) // stride for d in in_size]
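# Hedged usage sketch (not part of the original file); the layer sizes below
# are arbitrary and only illustrate how make_model wires a body into MultiHead.
if __name__ == '__main__':
    body = nn.Sequential(Flatten(), nn.Linear(8, 16), nn.ReLU())
    model = make_model(body, MultiHead(16, [4, 2]))
    outputs = model(torch.randn(5, 8))
    # MultiHead returns one tensor per head: shapes (5, 4) and (5, 2) here.
    assert len(outputs) == model.n_out == 2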
|
py | b4177100f9f3763bb7ee3b94d880cf405cfe52c7 | """
Degeneracy maps
"""
from __future__ import absolute_import
#*****************************************************************************
# Sage: System for Algebra and Geometry Experimentation
#
# Copyright (C) 2005 William Stein <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from . import morphism
class DegeneracyMap(morphism.HeckeModuleMorphism_matrix):
"""
A degeneracy map between Hecke modules of different levels.
EXAMPLES:
We construct a number of degeneracy maps::
sage: M = ModularSymbols(33)
sage: d = M.degeneracy_map(11)
sage: d
Hecke module morphism degeneracy map corresponding to f(q) |--> f(q) defined by the matrix
[ 1 0 0]
[ 0 0 1]
[ 0 0 -1]
[ 0 1 -1]
[ 0 0 1]
[ 0 -1 1]
[-1 0 0]
[-1 0 0]
[-1 0 0]
Domain: Modular Symbols space of dimension 9 for Gamma_0(33) of weight ...
Codomain: Modular Symbols space of dimension 3 for Gamma_0(11) of weight ...
sage: d.t()
1
sage: d = M.degeneracy_map(11,3)
sage: d.t()
3
The parameter d must be a divisor of the quotient of the two levels::
sage: d = M.degeneracy_map(11,2)
Traceback (most recent call last):
...
ValueError: The level of self (=33) must be a divisor or multiple of level (=11), and t (=2) must be a divisor of the quotient.
Degeneracy maps can also go from lower level to higher level::
sage: M.degeneracy_map(66,2)
Hecke module morphism degeneracy map corresponding to f(q) |--> f(q^2) defined by the matrix
[ 2 0 0 0 0 0 1 0 0 0 1 -1 0 0 0 -1 1 0 0 0 0 0 0 0 -1]
[ 0 0 1 -1 0 -1 1 0 -1 2 0 0 0 -1 0 0 -1 1 2 -2 0 0 0 -1 1]
[ 0 0 1 0 0 0 0 0 1 0 0 0 1 0 0 0 -1 1 0 0 -1 1 0 0 0]
[ 0 0 0 0 0 0 0 0 0 2 -1 0 0 1 0 0 -1 1 0 0 1 0 -1 -1 1]
[ 0 -1 0 0 1 0 0 0 0 0 0 1 0 0 1 1 -1 0 0 -1 0 0 0 0 0]
[ 0 0 0 0 0 0 0 1 -1 0 0 2 -1 0 0 1 0 0 0 -1 0 -1 1 -1 1]
[ 0 0 0 0 1 -1 0 1 -1 0 0 0 0 0 -1 2 0 0 0 0 1 0 1 0 0]
[ 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 1 0 0]
[ 0 0 0 0 0 0 0 0 0 0 -1 0 0 0 0 0 0 0 0 1 1 1 0 0 0]
Domain: Modular Symbols space of dimension 9 for Gamma_0(33) of weight ...
Codomain: Modular Symbols space of dimension 25 for Gamma_0(66) of weight ...
"""
def __init__(self, matrix, domain, codomain, t):
r"""
Initialise a degeneracy map.
EXAMPLES::
sage: D = ModularSymbols(Gamma0(100)).degeneracy_map(2,5); D
Hecke module morphism degeneracy map corresponding to f(q) |--> f(q^5) defined by the matrix
31 x 1 dense matrix over Rational Field
Domain: Modular Symbols space of dimension 31 for Gamma_0(100) of weight ...
Codomain: Modular Symbols space of dimension 1 for Gamma_0(2) of weight ...
sage: D == loads(dumps(D))
True
"""
self.__t = t
H = domain.Hom(codomain)
if t == 1:
pow = ""
else:
pow = "^%s"%t
name = "degeneracy map corresponding to f(q) |--> f(q%s)"%(pow)
morphism.HeckeModuleMorphism_matrix.__init__(self, H, matrix, name)
def t(self):
"""
Return the divisor of the quotient of the two levels
associated to the degeneracy map.
EXAMPLES::
sage: M = ModularSymbols(33)
sage: d = M.degeneracy_map(11,3)
sage: d.t()
3
sage: d = M.degeneracy_map(11,1)
sage: d.t()
1
"""
return self.__t
|
py | b417710ed9938982a41450376d0598c198f9ad54 | from threading import current_thread
from getTweets import getTweets
from sentiment import getSentiment
from tweet_subroutines import *
count1 = 0
def tweets_handler(api, query, cur_date, prev_date, sentiments, all_locs):
    """
    Handle the operations on tweets: extract them from the twitter api,
    clean them, and compute sentiments from them.
    """
    global count1
# Calling subroutine to get tweet objects from specified range
tweets = getTweets(api, query, cur_date, prev_date)
count1 += 1
if count1 == 7:
print("Data Collected", flush=True)
print("Calculating Sentiments", flush=True)
# Calling subroutine to open up the tweet batches
tweets = open_tweet_obj(tweets)
# Calling subroutine to remove duplicate tweets, if given by twitter
tweets = remove_duplicate_tweets(tweets)
# Calling subroutine to extract tweet_text and loc of tweeter from tweet object, now tweets = {"text": ..,"loc":..}
tweets = extract_data(tweets)
# calling subroutine for removing promotional tweets
tweets = remove_promotional_tweets(tweets)
# calling subroutine cleaning the tweets
# tweets_text = tweet_cleaner(tweets)
cur_day_locations = make_loc_dict(tweets)
# calling subroutine for getting sentiment from the tweets
cur_day_sentiment = getSentiment(tweets, cur_day_locations)
# updating sentiments list
thread_no = current_thread().name
sentiments[int(thread_no) - 1] = cur_day_sentiment
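    # The code above assumes current_thread().name is the 1-based day index
    # ("1", "2", ...), so each worker writes its day's sentiment into its slot.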
all_loc = merge(all_locs, cur_day_locations)
|
py | b41771a946c85a4756d3b25bcbc20887573915ac | from dowel import logger, tabular
import numpy as np
import scipy.optimize
import tensorflow as tf
from garage.misc.overrides import overrides
from garage.tf.algos.batch_polopt import BatchPolopt
from garage.tf.misc import tensor_utils
from garage.tf.misc.tensor_utils import filter_valids
from garage.tf.misc.tensor_utils import filter_valids_dict
from garage.tf.misc.tensor_utils import flatten_batch
from garage.tf.misc.tensor_utils import flatten_batch_dict
from garage.tf.misc.tensor_utils import flatten_inputs
from garage.tf.misc.tensor_utils import graph_inputs
from garage.tf.optimizers import LbfgsOptimizer
class REPS(BatchPolopt):
"""Relative Entropy Policy Search.
References
----------
[1] J. Peters, K. Mulling, and Y. Altun, "Relative Entropy Policy Search,"
Artif. Intell., pp. 1607-1612, 2008.
Example:
$ python garage/examples/tf/reps_gym_cartpole.py
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.base.Policy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
        epsilon (float): Dual function parameter; bounds the relative-entropy
            (KL) step size.
l2_reg_dual (float): coefficient for dual func l2 regularization.
l2_reg_loss (float): coefficient for policy loss l2 regularization.
dual_optimizer (object): dual func optimizer.
dual_optimizer_args (dict): arguments of the dual optimizer.
name (str): Name of the algorithm.
"""
def __init__(self,
env_spec,
policy,
baseline,
max_path_length=500,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
epsilon=0.5,
l2_reg_dual=0.,
l2_reg_loss=0.,
optimizer=LbfgsOptimizer,
optimizer_args=dict(max_opt_itr=50),
dual_optimizer=scipy.optimize.fmin_l_bfgs_b,
dual_optimizer_args=dict(maxiter=50),
name='REPS'):
self.name = name
self._name_scope = tf.name_scope(self.name)
with self._name_scope:
self.optimizer = optimizer(**optimizer_args)
self.dual_optimizer = dual_optimizer
self.dual_optimizer_args = dual_optimizer_args
self.epsilon = float(epsilon)
self.l2_reg_dual = float(l2_reg_dual)
self.l2_reg_loss = float(l2_reg_loss)
super(REPS, self).__init__(
env_spec=env_spec,
policy=policy,
baseline=baseline,
max_path_length=max_path_length,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
fixed_horizon=fixed_horizon)
@overrides
def init_opt(self):
"""Initialize the optimization procedure."""
pol_loss_inputs, pol_opt_inputs, dual_opt_inputs = self._build_inputs()
self._policy_opt_inputs = pol_opt_inputs
self._dual_opt_inputs = dual_opt_inputs
pol_loss = self._build_policy_loss(pol_loss_inputs)
self.optimizer.update_opt(
loss=pol_loss,
target=self.policy,
inputs=flatten_inputs(self._policy_opt_inputs))
def __getstate__(self):
data = self.__dict__.copy()
del data['_name_scope']
del data['_policy_opt_inputs']
del data['_dual_opt_inputs']
del data['f_dual']
del data['f_dual_grad']
del data['f_policy_kl']
return data
def __setstate__(self, state):
self.__dict__ = state
self._name_scope = tf.name_scope(self.name)
self.init_opt()
@overrides
def get_itr_snapshot(self, itr):
"""Return the data should saved in the snapshot."""
return dict(
itr=itr,
policy=self.policy,
)
@overrides
def optimize_policy(self, itr, samples_data):
"""Perform the policy optimization."""
# Initial BFGS parameter values.
x0 = np.hstack([self.param_eta, self.param_v])
# Set parameter boundaries: \eta>=1e-12, v unrestricted.
bounds = [(-np.inf, np.inf) for _ in x0]
bounds[0] = (1e-12, np.inf)
# Optimize dual
eta_before = self.param_eta
logger.log('Computing dual before')
self.feat_diff = self._features(samples_data)
dual_opt_input_values = self._dual_opt_input_values(samples_data)
dual_before = self.f_dual(*dual_opt_input_values)
logger.log('Optimizing dual')
def eval_dual(x):
self.param_eta = x[0]
self.param_v = x[1:]
dual_opt_input_values = self._dual_opt_input_values(samples_data)
return self.f_dual(*dual_opt_input_values)
def eval_dual_grad(x):
self.param_eta = x[0]
self.param_v = x[1:]
dual_opt_input_values = self._dual_opt_input_values(samples_data)
grad = self.f_dual_grad(*dual_opt_input_values)
            eta_grad = float(grad[0])
v_grad = grad[1]
return np.hstack([eta_grad, v_grad])
params_ast, _, _ = self.dual_optimizer(
func=eval_dual,
x0=x0,
fprime=eval_dual_grad,
bounds=bounds,
**self.dual_optimizer_args)
logger.log('Computing dual after')
self.param_eta, self.param_v = params_ast[0], params_ast[1:]
dual_opt_input_values = self._dual_opt_input_values(samples_data)
dual_after = self.f_dual(*dual_opt_input_values)
# Optimize policy
policy_opt_input_values = self._policy_opt_input_values(samples_data)
logger.log('Computing policy loss before')
loss_before = self.optimizer.loss(policy_opt_input_values)
logger.log('Computing policy KL before')
policy_kl_before = self.f_policy_kl(*policy_opt_input_values)
logger.log('Optimizing policy')
self.optimizer.optimize(policy_opt_input_values)
logger.log('Computing policy KL')
policy_kl = self.f_policy_kl(*policy_opt_input_values)
logger.log('Computing policy loss after')
loss_after = self.optimizer.loss(policy_opt_input_values)
tabular.record('EtaBefore', eta_before)
tabular.record('EtaAfter', self.param_eta)
tabular.record('DualBefore', dual_before)
tabular.record('DualAfter', dual_after)
tabular.record('{}/LossBefore'.format(self.policy.name), loss_before)
tabular.record('{}/LossAfter'.format(self.policy.name), loss_after)
tabular.record('{}/dLoss'.format(self.policy.name),
loss_before - loss_after)
tabular.record('{}/KLBefore'.format(self.policy.name),
policy_kl_before)
tabular.record('{}/KL'.format(self.policy.name), policy_kl)
def _build_inputs(self):
"""Decalre graph inputs variables."""
observation_space = self.policy.observation_space
action_space = self.policy.action_space
policy_dist = self.policy.distribution
with tf.name_scope('inputs'):
obs_var = observation_space.to_tf_placeholder(
name='obs',
batch_dims=2) # yapf: disable
action_var = action_space.to_tf_placeholder(
name='action',
batch_dims=2) # yapf: disable
reward_var = tensor_utils.new_tensor(
name='reward',
ndim=2,
dtype=tf.float32) # yapf: disable
valid_var = tensor_utils.new_tensor(
name='valid',
ndim=2,
dtype=tf.float32) # yapf: disable
feat_diff = tensor_utils.new_tensor(
name='feat_diff',
ndim=2,
dtype=tf.float32) # yapf: disable
param_v = tensor_utils.new_tensor(
name='param_v',
ndim=1,
dtype=tf.float32) # yapf: disable
param_eta = tensor_utils.new_tensor(
name='param_eta',
ndim=0,
dtype=tf.float32) # yapf: disable
policy_state_info_vars = {
k: tf.compat.v1.placeholder(
tf.float32,
shape=[None] * 2 + list(shape),
name=k)
for k, shape in self.policy.state_info_specs
} # yapf: disable
policy_state_info_vars_list = [
policy_state_info_vars[k]
for k in self.policy.state_info_keys
] # yapf: disable
policy_old_dist_info_vars = {
k: tf.compat.v1.placeholder(
tf.float32,
shape=[None] * 2 + list(shape),
name='policy_old_%s' % k)
for k, shape in policy_dist.dist_info_specs
}
policy_old_dist_info_vars_list = [
policy_old_dist_info_vars[k]
for k in policy_dist.dist_info_keys
]
with tf.name_scope('flat'):
obs_flat = flatten_batch(obs_var, name='obs_flat')
action_flat = flatten_batch(action_var, name='action_flat')
reward_flat = flatten_batch(reward_var, name='reward_flat')
valid_flat = flatten_batch(valid_var, name='valid_flat')
feat_diff_flat = flatten_batch(
feat_diff,
name='feat_diff_flat') # yapf: disable
policy_state_info_vars_flat = flatten_batch_dict(
policy_state_info_vars,
name='policy_state_info_vars_flat') # yapf: disable
policy_old_dist_info_vars_flat = flatten_batch_dict(
policy_old_dist_info_vars,
name='policy_old_dist_info_vars_flat')
with tf.name_scope('valid'):
reward_valid = filter_valids(
reward_flat,
valid_flat,
name='reward_valid') # yapf: disable
action_valid = filter_valids(
action_flat,
valid_flat,
name='action_valid') # yapf: disable
policy_state_info_vars_valid = filter_valids_dict(
policy_state_info_vars_flat,
valid_flat,
name='policy_state_info_vars_valid')
policy_old_dist_info_vars_valid = filter_valids_dict(
policy_old_dist_info_vars_flat,
valid_flat,
name='policy_old_dist_info_vars_valid')
pol_flat = graph_inputs(
'PolicyLossInputsFlat',
obs_var=obs_flat,
action_var=action_flat,
reward_var=reward_flat,
valid_var=valid_flat,
feat_diff=feat_diff_flat,
policy_state_info_vars=policy_state_info_vars_flat,
policy_old_dist_info_vars=policy_old_dist_info_vars_flat,
)
pol_valid = graph_inputs(
'PolicyLossInputsValid',
reward_var=reward_valid,
action_var=action_valid,
policy_state_info_vars=policy_state_info_vars_valid,
policy_old_dist_info_vars=policy_old_dist_info_vars_valid,
)
policy_loss_inputs = graph_inputs(
'PolicyLossInputs',
obs_var=obs_var,
action_var=action_var,
reward_var=reward_var,
valid_var=valid_var,
feat_diff=feat_diff,
param_eta=param_eta,
param_v=param_v,
policy_state_info_vars=policy_state_info_vars,
policy_old_dist_info_vars=policy_old_dist_info_vars,
flat=pol_flat,
valid=pol_valid,
)
policy_opt_inputs = graph_inputs(
'PolicyOptInputs',
obs_var=obs_var,
action_var=action_var,
reward_var=reward_var,
valid_var=valid_var,
feat_diff=feat_diff,
param_eta=param_eta,
param_v=param_v,
policy_state_info_vars_list=policy_state_info_vars_list,
policy_old_dist_info_vars_list=policy_old_dist_info_vars_list,
)
dual_opt_inputs = graph_inputs(
'DualOptInputs',
reward_var=reward_var,
valid_var=valid_var,
feat_diff=feat_diff,
param_eta=param_eta,
param_v=param_v,
policy_state_info_vars_list=policy_state_info_vars_list,
policy_old_dist_info_vars_list=policy_old_dist_info_vars_list,
)
return policy_loss_inputs, policy_opt_inputs, dual_opt_inputs
def _build_policy_loss(self, i):
"""Initialize policy loss complie function based on inputs i."""
pol_dist = self.policy.distribution
is_recurrent = self.policy.recurrent
# Initialize dual params
self.param_eta = 15.
self.param_v = np.random.rand(
self.env_spec.observation_space.flat_dim * 2 + 4)
if is_recurrent:
raise NotImplementedError
policy_dist_info_flat = self.policy.dist_info_sym(
i.flat.obs_var,
i.flat.policy_state_info_vars,
name='policy_dist_info_flat')
policy_dist_info_valid = filter_valids_dict(
policy_dist_info_flat,
i.flat.valid_var,
name='policy_dist_info_valid')
with tf.name_scope('bellman_error'):
delta_v = i.valid.reward_var + tf.tensordot(
i.feat_diff, i.param_v, 1)
with tf.name_scope('policy_loss'):
ll = pol_dist.log_likelihood_sym(i.valid.action_var,
policy_dist_info_valid)
loss = -tf.reduce_mean(ll * tf.exp(
delta_v / i.param_eta - tf.reduce_max(delta_v / i.param_eta)))
reg_params = self.policy.get_params(regularizable=True)
loss += self.l2_reg_loss * tf.reduce_sum(
[tf.reduce_mean(tf.square(param))
for param in reg_params]) / len(reg_params)
with tf.name_scope('kl'):
kl = pol_dist.kl_sym(
i.valid.policy_old_dist_info_vars,
policy_dist_info_valid,
)
pol_mean_kl = tf.reduce_mean(kl)
with tf.name_scope('dual'):
dual_loss = i.param_eta * self.epsilon + i.param_eta * tf.math.log(
tf.reduce_mean(
tf.exp(delta_v / i.param_eta -
tf.reduce_max(delta_v / i.param_eta)))
) + i.param_eta * tf.reduce_max(delta_v / i.param_eta)
dual_loss += self.l2_reg_dual * (
tf.square(i.param_eta) + tf.square(1 / i.param_eta))
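            # The expression above is the standard REPS dual
            #   g(eta, v) = eta*epsilon + eta*log E[exp(delta_v/eta)]
            # with max(delta_v/eta) factored out of the exp/log for numerical
            # stability, plus an L2 penalty on eta and 1/eta (l2_reg_dual).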
dual_grad = tf.gradients(dual_loss, [i.param_eta, i.param_v])
self.f_dual = tensor_utils.compile_function(
flatten_inputs(self._dual_opt_inputs),
dual_loss,
log_name='f_dual')
self.f_dual_grad = tensor_utils.compile_function(
flatten_inputs(self._dual_opt_inputs),
dual_grad,
log_name='f_dual_grad')
self.f_policy_kl = tensor_utils.compile_function(
flatten_inputs(self._policy_opt_inputs),
pol_mean_kl,
log_name='f_policy_kl')
return loss
def _dual_opt_input_values(self, samples_data):
"""Update dual func optimize input values based on samples data."""
policy_state_info_list = [
samples_data['agent_infos'][k]
for k in self.policy.state_info_keys
] # yapf: disable
policy_old_dist_info_list = [
samples_data['agent_infos'][k]
for k in self.policy.distribution.dist_info_keys
]
dual_opt_input_values = self._dual_opt_inputs._replace(
reward_var=samples_data['rewards'],
valid_var=samples_data['valids'],
feat_diff=self.feat_diff,
param_eta=self.param_eta,
param_v=self.param_v,
policy_state_info_vars_list=policy_state_info_list,
policy_old_dist_info_vars_list=policy_old_dist_info_list,
)
return flatten_inputs(dual_opt_input_values)
def _policy_opt_input_values(self, samples_data):
"""Update policy optimize input values based on samples data."""
policy_state_info_list = [
samples_data['agent_infos'][k]
for k in self.policy.state_info_keys
] # yapf: disable
policy_old_dist_info_list = [
samples_data['agent_infos'][k]
for k in self.policy.distribution.dist_info_keys
]
# pylint: disable=locally-disabled, unexpected-keyword-arg
policy_opt_input_values = self._policy_opt_inputs._replace(
obs_var=samples_data['observations'],
action_var=samples_data['actions'],
reward_var=samples_data['rewards'],
valid_var=samples_data['valids'],
feat_diff=self.feat_diff,
param_eta=self.param_eta,
param_v=self.param_v,
policy_state_info_vars_list=policy_state_info_list,
policy_old_dist_info_vars_list=policy_old_dist_info_list,
)
return flatten_inputs(policy_opt_input_values)
def _features(self, samples_data):
"""Get valid view features based on samples data."""
paths = samples_data['paths']
feat_diff = []
for path in paths:
o = np.clip(path['observations'],
self.env_spec.observation_space.low,
self.env_spec.observation_space.high)
lr = len(path['rewards'])
al = np.arange(lr).reshape(-1, 1) / self.max_path_length
feats = np.concatenate(
[o, o**2, al, al**2, al**3,
np.ones((lr, 1))], axis=1)
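            # Each row of feats is [o_t, o_t**2, t/T, (t/T)**2, (t/T)**3, 1]
            # with T = max_path_length; the zero row appended below makes
            # feats[1:] - feats[:-1] cover the terminal transition as well.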
feats = np.vstack([feats, np.zeros(feats.shape[1])])
feat_diff.append(feats[1:] - feats[:-1])
return np.vstack(feat_diff)
|
py | b417726b70b05a7f0bac9fcb175d8c0dd38380ef | # Phase Correlation to Estimate Pose
import cv2
import numpy as np
import matplotlib.pyplot as plt  # matplotlib plotting helpers
import math
class imregpoc_nowindow:
def __init__(self,iref,icmp,*,threshold = 0.06, alpha=0.5, beta=0.8):
self.ref = iref.astype(np.float32)
self.cmp = icmp.astype(np.float32)
self.th = threshold
self.center = np.array(iref.shape)/2.0
self.alpha = alpha
self.beta = beta
self.param = [0,0,0,1]
self.peak = 0
self.affine = np.float32([1,0,0,0,1,0]).reshape(2,3)
self.perspective = np.float32([1,0,0,0,1,0,0,0,0]).reshape(3,3)
self.match()
def match(self):
height,width = self.ref.shape
self.hanw = np.ones((height,width),dtype='float32')
        # FFT (the "window" is all ones in this no-window variant)
G_a = np.fft.fft2(self.ref)
G_b = np.fft.fft2(self.cmp)
# 1.1: Frequency Whitening
self.LA = np.fft.fftshift(np.log(np.absolute(G_a)+1))
self.LB = np.fft.fftshift(np.log(np.absolute(G_b)+1))
# 1.2: Log polar Transformation
cx = self.center[1]
cy = self.center[0]
self.Mag = width/math.log(width)
self.LPA = cv2.logPolar(self.LA, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
self.LPB = cv2.logPolar(self.LB, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
# 1.3:filtering
LPmin = math.floor(self.Mag*math.log(self.alpha*width/2.0/math.pi))
LPmax = min(width, math.floor(self.Mag*math.log(width*self.beta/2)))
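        # LPmin/LPmax are the log-polar columns of the frequency radii
        # alpha*width/(2*pi) and beta*width/2; the mask below keeps only this
        # band before the rotation/scale phase correlation.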
assert LPmax > LPmin, 'Invalid condition!\n Enlarge lpmax tuning parameter or lpmin_tuning parameter'
Tile = np.repeat([0.0,1.0,0.0],[LPmin-1,LPmax-LPmin+1,width-LPmax])
self.Mask = np.tile(Tile,[height,1])
self.LPA_filt = self.LPA*self.Mask
self.LPB_filt = self.LPB*self.Mask
# 1.4: Phase Correlate to Get Rotation and Scaling
Diff,peak,self.r_rotatescale = self.PhaseCorrelation(self.LPA_filt,self.LPB_filt)
        theta1 = 2*math.pi * Diff[1] / height; # rad
        theta2 = theta1 + math.pi; # rad, theta ambiguity
invscale = math.exp(Diff[0]/self.Mag)
# 2.1: Correct rotation and scaling
b1 = self.Warp_4dof(self.cmp,[0,0,theta1,invscale])
b2 = self.Warp_4dof(self.cmp,[0,0,theta2,invscale])
# 2.2 : Translation estimation
diff1, peak1, self.r1 = self.PhaseCorrelation(self.ref,b1) #diff1, peak1 = PhaseCorrelation(a,b1)
diff2, peak2, self.r2 = self.PhaseCorrelation(self.ref,b2) #diff2, peak2 = PhaseCorrelation(a,b2)
# Use cv2.phaseCorrelate(a,b1) because it is much faster
# 2.3: Compare peaks and choose true rotational error
if peak1 > peak2:
Trans = diff1
peak = peak1
theta = -theta1
else:
Trans = diff2
peak = peak2
theta = -theta2
if theta > math.pi:
theta -= math.pi*2
elif theta < -math.pi:
theta += math.pi*2
# Results
self.param = [Trans[0],Trans[1],theta,1/invscale]
self.peak = peak
self.perspective = self.poc2warp(self.center,self.param)
self.affine = self.perspective[0:2,:]
def match_new(self, newImg):
self.cmp = newImg
height,width = self.cmp.shape
cy,cx = height/2,width/2
G_b = np.fft.fft2(self.cmp)
self.LB = np.fft.fftshift(np.log(np.absolute(G_b)+1))
self.LPB = cv2.logPolar(self.LB, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
self.LPB_filt = self.LPB*self.Mask
# 1.4: Phase Correlate to Get Rotation and Scaling
Diff,peak,self.r_rotatescale = self.PhaseCorrelation(self.LPA_filt,self.LPB_filt)
        theta1 = 2*math.pi * Diff[1] / height; # rad
        theta2 = theta1 + math.pi; # rad, theta ambiguity
invscale = math.exp(Diff[0]/self.Mag)
# 2.1: Correct rotation and scaling
b1 = self.Warp_4dof(self.cmp,[0,0,theta1,invscale])
b2 = self.Warp_4dof(self.cmp,[0,0,theta2,invscale])
# 2.2 : Translation estimation
diff1, peak1, self.r1 = self.PhaseCorrelation(self.ref,b1) #diff1, peak1 = PhaseCorrelation(a,b1)
diff2, peak2, self.r2 = self.PhaseCorrelation(self.ref,b2) #diff2, peak2 = PhaseCorrelation(a,b2)
# Use cv2.phaseCorrelate(a,b1) because it is much faster
# 2.3: Compare peaks and choose true rotational error
if peak1 > peak2:
Trans = diff1
peak = peak1
theta = -theta1
else:
Trans = diff2
peak = peak2
theta = -theta2
if theta > math.pi:
theta -= math.pi*2
elif theta < -math.pi:
theta += math.pi*2
# Results
self.param = [Trans[0],Trans[1],theta,1/invscale]
self.peak = peak
self.perspective = self.poc2warp(self.center,self.param)
self.affine = self.perspective[0:2,:]
def poc2warp(self,center,param):
cx,cy = center
dx,dy,theta,scale = param
cs = scale * math.cos(theta)
sn = scale * math.sin(theta)
Rot = np.float32([[cs, sn, 0],[-sn, cs,0],[0,0,1]])
center_Trans = np.float32([[1,0,cx],[0,1,cy],[0,0,1]])
center_iTrans = np.float32([[1,0,-cx],[0,1,-cy],[0,0,1]])
cRot = np.dot(np.dot(center_Trans,Rot),center_iTrans)
Trans = np.float32([[1,0,dx],[0,1,dy],[0,0,1]])
Affine = np.dot(cRot,Trans)
return Affine
    # Warp image based on poc parameter
def Warp_4dof(self,Img,param):
center = np.array(Img.shape)/2
rows,cols = Img.shape
Affine = self.poc2warp(center,param)
outImg = cv2.warpPerspective(Img, Affine, (cols,rows), cv2.INTER_LINEAR)
return outImg
# Get peak point
def CenterOfGravity(self,mat):
hei,wid = mat.shape
if hei != wid: # if mat size is not square, there must be something wrong
print("Skip subpixel estimation!")
return [0,0]
Tile=np.arange(wid,dtype=float)-(wid-1.0)/2.0
Tx = np.tile(Tile,[hei,1]) # Ty = Tx.T
Sum = np.sum(mat)
#print(mat)
Ax = np.sum(mat*Tx)/Sum
Ay = np.sum(mat*Tx.T)/Sum
return [Ay,Ax]
# Weighted Center Of Gravity
def WeightedCOG(self,mat):
if mat.size == 0:
print("Skip subpixel estimation!")
Res = [0,0]
else:
peak = mat.max()
newmat = mat*(mat>peak/10) # discard information of lower peak
Res = self.CenterOfGravity(newmat)
return Res
# Phase Correlation
def PhaseCorrelation(self, a, b):
height,width = a.shape
#dt = a.dtype # data type
# Windowing
# FFT
G_a = np.fft.fft2(a*self.hanw)
G_b = np.fft.fft2(b*self.hanw)
conj_b = np.ma.conjugate(G_b)
R = G_a*conj_b
R /= np.absolute(R)
r = np.fft.fftshift(np.fft.ifft2(R).real)
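        # r is the inverse FFT of the normalized cross-power spectrum
        # R = G_a*conj(G_b)/|G_a*conj(G_b)|; for a pure shift it is ideally an
        # impulse whose location gives the offset and whose height is the peak
        # used as the matching confidence.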
# Get result and Interpolation
DY,DX = np.unravel_index(r.argmax(), r.shape)
# Subpixel Accuracy
boxsize = 5
box = r[DY-int((boxsize-1)/2):DY+int((boxsize-1)/2)+1,DX-int((boxsize-1)/2):DX+int((boxsize-1)/2)+1] # x times x box
#TY,TX= CenterOfGravity(box)
TY,TX= self.WeightedCOG(box)
sDY = TY+DY
sDX = TX+DX
# Show the result
return [width/2-sDX,height/2-sDY],r[DY,DX],r
def MoveCenter(self,Affine,center,newcenter):
dx = newcenter[1] - center[1]
dy = newcenter[0] - center[0]
center_Trans = np.float32([[1,0,dx],[0,1,dy],[0,0,1]])
center_iTrans = np.float32([[1,0,-dx],[0,1,-dy],[0,0,1]])
newAffine = center_iTrans.dot( Affine.dot(center_Trans))
return newAffine
def getParam(self):
return self.param
def getPeak(self):
return self.peak
def getAffine(self):
return self.affine
def getPerspective(self):
return self.perspective
def showRotatePeak(self):
plt.imshow(self.r_rotatescale,vmin=self.r_rotatescale.min(),vmax=self.r_rotatescale.max(),cmap='gray')
plt.show()
def showTranslationPeak(self):
plt.subplot(211)
plt.imshow(self.r1,vmin=self.r1.min(),vmax=self.r1.max(),cmap='gray')
plt.subplot(212)
plt.imshow(self.r2,vmin=self.r2.min(),vmax=self.r2.max(),cmap='gray')
plt.show()
def showLPA(self):
plt.imshow(self.LPA,vmin=self.LPA.min(),vmax=self.LPA.max(),cmap='gray')
plt.show()
def showLPB(self):
plt.imshow(self.LPB,vmin=self.LPB.min(),vmax=self.LPB.max(),cmap='gray')
plt.show()
def showMAT(self,MAT):
plt.figure()
plt.imshow(MAT,vmin=MAT.min(),vmax=MAT.max(),cmap='gray')
plt.show()
def saveMat(self,MAT,name):
cv2.imwrite(name,cv2.normalize(MAT, MAT, 0, 255, cv2.NORM_MINMAX))
def isSucceed(self):
if self.peak > self.th:
return 1
return 0
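# Hedged usage sketch for the classes in this file (file names are
# placeholders, not part of the original module):
#   ref = cv2.imread('ref.png', cv2.IMREAD_GRAYSCALE)
#   cmp_ = cv2.imread('cmp.png', cv2.IMREAD_GRAYSCALE)
#   matcher = imregpoc_nowindow(ref, cmp_)
#   if matcher.isSucceed():
#       dx, dy, theta, scale = matcher.getParam()
#       aligned = cv2.warpPerspective(cmp_, matcher.getPerspective(),
#                                     (ref.shape[1], ref.shape[0]))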
class imregpoc_noLP:
def __init__(self,iref,icmp,*,threshold = 0.06, alpha=0.5, beta=100):
self.ref = iref.astype(np.float32)
self.cmp = icmp.astype(np.float32)
self.th = threshold
self.center = np.array(iref.shape)/2.0
self.alpha = alpha
self.beta = beta
self.param = [0,0,0,1]
self.peak = 0
self.affine = np.float32([1,0,0,0,1,0]).reshape(2,3)
self.perspective = np.float32([1,0,0,0,1,0,0,0,0]).reshape(3,3)
self.match()
def match(self):
height,width = self.ref.shape
self.hanw = cv2.createHanningWindow((height, width),cv2.CV_64F)
# Windowing and FFT
G_a = np.fft.fft2(self.ref*self.hanw)
G_b = np.fft.fft2(self.cmp*self.hanw)
# 1.1: Frequency Whitening
self.LA = np.fft.fftshift(np.log(np.absolute(G_a)+1))
self.LB = np.fft.fftshift(np.log(np.absolute(G_b)+1))
# 1.2: Log polar Transformation
cx = self.center[1]
cy = self.center[0]
self.Mag = width/math.log(width)
self.LPA = cv2.logPolar(self.LA, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
self.LPB = cv2.logPolar(self.LB, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
# 1.3:filtering
LPmin = math.floor(self.Mag*math.log(self.alpha*width/2.0/math.pi))
LPmax = min(width, math.floor(self.Mag*math.log(width*self.beta/2)))
assert LPmax > LPmin, 'Invalid condition!\n Enlarge lpmax tuning parameter or lpmin_tuning parameter'
Tile = np.repeat([0.0,1.0,0.0],[LPmin-1,LPmax-LPmin+1,width-LPmax])
self.Mask = np.tile(Tile,[height,1])
self.LPA_filt = self.LPA*self.Mask
self.LPB_filt = self.LPB*self.Mask
# 1.4: Phase Correlate to Get Rotation and Scaling
Diff,peak,self.r_rotatescale = self.PhaseCorrelation(self.LPA_filt,self.LPB_filt)
        theta1 = 2*math.pi * Diff[1] / height; # rad
        theta2 = theta1 + math.pi; # rad, theta ambiguity
invscale = math.exp(Diff[0]/self.Mag)
# 2.1: Correct rotation and scaling
b1 = self.Warp_4dof(self.cmp,[0,0,theta1,invscale])
b2 = self.Warp_4dof(self.cmp,[0,0,theta2,invscale])
# 2.2 : Translation estimation
diff1, peak1, self.r1 = self.PhaseCorrelation(self.ref,b1) #diff1, peak1 = PhaseCorrelation(a,b1)
diff2, peak2, self.r2 = self.PhaseCorrelation(self.ref,b2) #diff2, peak2 = PhaseCorrelation(a,b2)
# Use cv2.phaseCorrelate(a,b1) because it is much faster
# 2.3: Compare peaks and choose true rotational error
if peak1 > peak2:
Trans = diff1
peak = peak1
theta = -theta1
else:
Trans = diff2
peak = peak2
theta = -theta2
if theta > math.pi:
theta -= math.pi*2
elif theta < -math.pi:
theta += math.pi*2
# Results
self.param = [Trans[0],Trans[1],theta,1/invscale]
self.peak = peak
self.perspective = self.poc2warp(self.center,self.param)
self.affine = self.perspective[0:2,:]
def match_new(self, newImg):
self.cmp = newImg
height,width = self.cmp.shape
cy,cx = height/2,width/2
G_b = np.fft.fft2(self.cmp*self.hanw)
self.LB = np.fft.fftshift(np.log(np.absolute(G_b)+1))
self.LPB = cv2.logPolar(self.LB, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
self.LPB_filt = self.LPB*self.Mask
# 1.4: Phase Correlate to Get Rotation and Scaling
Diff,peak,self.r_rotatescale = self.PhaseCorrelation(self.LPA_filt,self.LPB_filt)
        theta1 = 2*math.pi * Diff[1] / height; # rad
        theta2 = theta1 + math.pi; # rad, theta ambiguity
invscale = math.exp(Diff[0]/self.Mag)
# 2.1: Correct rotation and scaling
b1 = self.Warp_4dof(self.cmp,[0,0,theta1,invscale])
b2 = self.Warp_4dof(self.cmp,[0,0,theta2,invscale])
# 2.2 : Translation estimation
diff1, peak1, self.r1 = self.PhaseCorrelation(self.ref,b1) #diff1, peak1 = PhaseCorrelation(a,b1)
diff2, peak2, self.r2 = self.PhaseCorrelation(self.ref,b2) #diff2, peak2 = PhaseCorrelation(a,b2)
# Use cv2.phaseCorrelate(a,b1) because it is much faster
# 2.3: Compare peaks and choose true rotational error
if peak1 > peak2:
Trans = diff1
peak = peak1
theta = -theta1
else:
Trans = diff2
peak = peak2
theta = -theta2
if theta > math.pi:
theta -= math.pi*2
elif theta < -math.pi:
theta += math.pi*2
# Results
self.param = [Trans[0],Trans[1],theta,1/invscale]
self.peak = peak
self.perspective = self.poc2warp(self.center,self.param)
self.affine = self.perspective[0:2,:]
def poc2warp(self,center,param):
cx,cy = center
dx,dy,theta,scale = param
cs = scale * math.cos(theta)
sn = scale * math.sin(theta)
Rot = np.float32([[cs, sn, 0],[-sn, cs,0],[0,0,1]])
center_Trans = np.float32([[1,0,cx],[0,1,cy],[0,0,1]])
center_iTrans = np.float32([[1,0,-cx],[0,1,-cy],[0,0,1]])
cRot = np.dot(np.dot(center_Trans,Rot),center_iTrans)
Trans = np.float32([[1,0,dx],[0,1,dy],[0,0,1]])
Affine = np.dot(cRot,Trans)
return Affine
    # Warp image based on poc parameter
def Warp_4dof(self,Img,param):
center = np.array(Img.shape)/2
rows,cols = Img.shape
Affine = self.poc2warp(center,param)
outImg = cv2.warpPerspective(Img, Affine, (cols,rows), cv2.INTER_LINEAR)
return outImg
# Get peak point
def CenterOfGravity(self,mat):
hei,wid = mat.shape
if hei != wid: # if mat size is not square, there must be something wrong
print("Skip subpixel estimation!")
return [0,0]
Tile=np.arange(wid,dtype=float)-(wid-1.0)/2.0
Tx = np.tile(Tile,[hei,1]) # Ty = Tx.T
Sum = np.sum(mat)
#print(mat)
Ax = np.sum(mat*Tx)/Sum
Ay = np.sum(mat*Tx.T)/Sum
return [Ay,Ax]
# Weighted Center Of Gravity
def WeightedCOG(self,mat):
if mat.size == 0:
print("Skip subpixel estimation!")
Res = [0,0]
else:
peak = mat.max()
newmat = mat*(mat>peak/10) # discard information of lower peak
Res = self.CenterOfGravity(newmat)
return Res
# Phase Correlation
def PhaseCorrelation(self, a, b):
height,width = a.shape
#dt = a.dtype # data type
# Windowing
# FFT
G_a = np.fft.fft2(a*self.hanw)
G_b = np.fft.fft2(b*self.hanw)
conj_b = np.ma.conjugate(G_b)
R = G_a*conj_b
R /= np.absolute(R)
r = np.fft.fftshift(np.fft.ifft2(R).real)
# Get result and Interpolation
DY,DX = np.unravel_index(r.argmax(), r.shape)
# Subpixel Accuracy
boxsize = 5
box = r[DY-int((boxsize-1)/2):DY+int((boxsize-1)/2)+1,DX-int((boxsize-1)/2):DX+int((boxsize-1)/2)+1] # x times x box
#TY,TX= CenterOfGravity(box)
TY,TX= self.WeightedCOG(box)
sDY = TY+DY
sDX = TX+DX
# Show the result
return [width/2-sDX,height/2-sDY],r[DY,DX],r
def MoveCenter(self,Affine,center,newcenter):
dx = newcenter[1] - center[1]
dy = newcenter[0] - center[0]
center_Trans = np.float32([[1,0,dx],[0,1,dy],[0,0,1]])
center_iTrans = np.float32([[1,0,-dx],[0,1,-dy],[0,0,1]])
newAffine = center_iTrans.dot( Affine.dot(center_Trans))
return newAffine
def getParam(self):
return self.param
def getPeak(self):
return self.peak
def getAffine(self):
return self.affine
def getPerspective(self):
return self.perspective
def showRotatePeak(self):
plt.imshow(self.r_rotatescale,vmin=self.r_rotatescale.min(),vmax=self.r_rotatescale.max(),cmap='gray')
plt.show()
def showTranslationPeak(self):
plt.subplot(211)
plt.imshow(self.r1,vmin=self.r1.min(),vmax=self.r1.max(),cmap='gray')
plt.subplot(212)
plt.imshow(self.r2,vmin=self.r2.min(),vmax=self.r2.max(),cmap='gray')
plt.show()
def showLPA(self):
plt.imshow(self.LPA,vmin=self.LPA.min(),vmax=self.LPA.max(),cmap='gray')
plt.show()
def showLPB(self):
plt.imshow(self.LPB,vmin=self.LPB.min(),vmax=self.LPB.max(),cmap='gray')
plt.show()
def showMAT(self,MAT):
plt.figure()
plt.imshow(MAT,vmin=MAT.min(),vmax=MAT.max(),cmap='gray')
plt.show()
def saveMat(self,MAT,name):
cv2.imwrite(name,cv2.normalize(MAT, MAT, 0, 255, cv2.NORM_MINMAX))
def isSucceed(self):
if self.peak > self.th:
return 1
return 0
class imregpoc_noWCOG:
def __init__(self,iref,icmp,*,threshold = 0.06, alpha=0.5, beta=0.8):
self.ref = iref.astype(np.float32)
self.cmp = icmp.astype(np.float32)
self.th = threshold
self.center = np.array(iref.shape)/2.0
self.alpha = alpha
self.beta = beta
self.param = [0,0,0,1]
self.peak = 0
self.affine = np.float32([1,0,0,0,1,0]).reshape(2,3)
self.perspective = np.float32([1,0,0,0,1,0,0,0,0]).reshape(3,3)
self.match()
def match(self):
height,width = self.ref.shape
self.hanw = cv2.createHanningWindow((height, width),cv2.CV_64F)
# Windowing and FFT
G_a = np.fft.fft2(self.ref*self.hanw)
G_b = np.fft.fft2(self.cmp*self.hanw)
# 1.1: Frequency Whitening
self.LA = np.fft.fftshift(np.log(np.absolute(G_a)+1))
self.LB = np.fft.fftshift(np.log(np.absolute(G_b)+1))
# 1.2: Log polar Transformation
cx = self.center[1]
cy = self.center[0]
self.Mag = width/math.log(width)
self.LPA = cv2.logPolar(self.LA, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
self.LPB = cv2.logPolar(self.LB, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
# 1.3:filtering
LPmin = math.floor(self.Mag*math.log(self.alpha*width/2.0/math.pi))
LPmax = min(width, math.floor(self.Mag*math.log(width*self.beta/2)))
assert LPmax > LPmin, 'Invalid condition!\n Enlarge lpmax tuning parameter or lpmin_tuning parameter'
Tile = np.repeat([0.0,1.0,0.0],[LPmin-1,LPmax-LPmin+1,width-LPmax])
self.Mask = np.tile(Tile,[height,1])
self.LPA_filt = self.LPA*self.Mask
self.LPB_filt = self.LPB*self.Mask
# 1.4: Phase Correlate to Get Rotation and Scaling
Diff,peak,self.r_rotatescale = self.PhaseCorrelation(self.LPA_filt,self.LPB_filt)
        theta1 = 2*math.pi * Diff[1] / height; # rad
        theta2 = theta1 + math.pi; # rad, theta ambiguity
invscale = math.exp(Diff[0]/self.Mag)
# 2.1: Correct rotation and scaling
b1 = self.Warp_4dof(self.cmp,[0,0,theta1,invscale])
b2 = self.Warp_4dof(self.cmp,[0,0,theta2,invscale])
# 2.2 : Translation estimation
diff1, peak1, self.r1 = self.PhaseCorrelation(self.ref,b1) #diff1, peak1 = PhaseCorrelation(a,b1)
diff2, peak2, self.r2 = self.PhaseCorrelation(self.ref,b2) #diff2, peak2 = PhaseCorrelation(a,b2)
# Use cv2.phaseCorrelate(a,b1) because it is much faster
# 2.3: Compare peaks and choose true rotational error
if peak1 > peak2:
Trans = diff1
peak = peak1
theta = -theta1
else:
Trans = diff2
peak = peak2
theta = -theta2
if theta > math.pi:
theta -= math.pi*2
elif theta < -math.pi:
theta += math.pi*2
# Results
self.param = [Trans[0],Trans[1],theta,1/invscale]
self.peak = peak
self.perspective = self.poc2warp(self.center,self.param)
self.affine = self.perspective[0:2,:]
def match_new(self, newImg):
self.cmp = newImg
height,width = self.cmp.shape
cy,cx = height/2,width/2
G_b = np.fft.fft2(self.cmp*self.hanw)
self.LB = np.fft.fftshift(np.log(np.absolute(G_b)+1))
self.LPB = cv2.logPolar(self.LB, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
self.LPB_filt = self.LPB*self.Mask
# 1.4: Phase Correlate to Get Rotation and Scaling
Diff,peak,self.r_rotatescale = self.PhaseCorrelation(self.LPA_filt,self.LPB_filt)
        theta1 = 2*math.pi * Diff[1] / height; # rad
        theta2 = theta1 + math.pi; # rad, theta ambiguity
invscale = math.exp(Diff[0]/self.Mag)
# 2.1: Correct rotation and scaling
b1 = self.Warp_4dof(self.cmp,[0,0,theta1,invscale])
b2 = self.Warp_4dof(self.cmp,[0,0,theta2,invscale])
# 2.2 : Translation estimation
diff1, peak1, self.r1 = self.PhaseCorrelation(self.ref,b1) #diff1, peak1 = PhaseCorrelation(a,b1)
diff2, peak2, self.r2 = self.PhaseCorrelation(self.ref,b2) #diff2, peak2 = PhaseCorrelation(a,b2)
# Use cv2.phaseCorrelate(a,b1) because it is much faster
# 2.3: Compare peaks and choose true rotational error
if peak1 > peak2:
Trans = diff1
peak = peak1
theta = -theta1
else:
Trans = diff2
peak = peak2
theta = -theta2
if theta > math.pi:
theta -= math.pi*2
elif theta < -math.pi:
theta += math.pi*2
# Results
self.param = [Trans[0],Trans[1],theta,1/invscale]
self.peak = peak
self.perspective = self.poc2warp(self.center,self.param)
self.affine = self.perspective[0:2,:]
def poc2warp(self,center,param):
cx,cy = center
dx,dy,theta,scale = param
cs = scale * math.cos(theta)
sn = scale * math.sin(theta)
Rot = np.float32([[cs, sn, 0],[-sn, cs,0],[0,0,1]])
center_Trans = np.float32([[1,0,cx],[0,1,cy],[0,0,1]])
center_iTrans = np.float32([[1,0,-cx],[0,1,-cy],[0,0,1]])
cRot = np.dot(np.dot(center_Trans,Rot),center_iTrans)
Trans = np.float32([[1,0,dx],[0,1,dy],[0,0,1]])
Affine = np.dot(cRot,Trans)
return Affine
    # Warp image based on poc parameter
def Warp_4dof(self,Img,param):
center = np.array(Img.shape)/2
rows,cols = Img.shape
Affine = self.poc2warp(center,param)
outImg = cv2.warpPerspective(Img, Affine, (cols,rows), cv2.INTER_LINEAR)
return outImg
# Get peak point
def CenterOfGravity(self,mat):
hei,wid = mat.shape
if hei != wid: # if mat size is not square, there must be something wrong
print("Skip subpixel estimation!")
return [0,0]
Tile=np.arange(wid,dtype=float)-(wid-1.0)/2.0
Tx = np.tile(Tile,[hei,1]) # Ty = Tx.T
Sum = np.sum(mat)
#print(mat)
Ax = np.sum(mat*Tx)/Sum
Ay = np.sum(mat*Tx.T)/Sum
return [Ay,Ax]
# Weighted Center Of Gravity
def WeightedCOG(self,mat):
if mat.size == 0:
print("Skip subpixel estimation!")
Res = [0,0]
else:
peak = mat.max()
            newmat = mat  # noWCOG variant: keep all values (no low-peak thresholding)
Res = self.CenterOfGravity(newmat)
return Res
# Phase Correlation
def PhaseCorrelation(self, a, b):
height,width = a.shape
#dt = a.dtype # data type
# Windowing
# FFT
G_a = np.fft.fft2(a*self.hanw)
G_b = np.fft.fft2(b*self.hanw)
conj_b = np.ma.conjugate(G_b)
R = G_a*conj_b
R /= np.absolute(R)
r = np.fft.fftshift(np.fft.ifft2(R).real)
# Get result and Interpolation
DY,DX = np.unravel_index(r.argmax(), r.shape)
# Subpixel Accuracy
boxsize = 5
box = r[DY-int((boxsize-1)/2):DY+int((boxsize-1)/2)+1,DX-int((boxsize-1)/2):DX+int((boxsize-1)/2)+1] # x times x box
#TY,TX= CenterOfGravity(box)
TY,TX= self.WeightedCOG(box)
sDY = TY+DY
sDX = TX+DX
# Show the result
return [width/2-sDX,height/2-sDY],r[DY,DX],r
def MoveCenter(self,Affine,center,newcenter):
dx = newcenter[1] - center[1]
dy = newcenter[0] - center[0]
center_Trans = np.float32([[1,0,dx],[0,1,dy],[0,0,1]])
center_iTrans = np.float32([[1,0,-dx],[0,1,-dy],[0,0,1]])
newAffine = center_iTrans.dot( Affine.dot(center_Trans))
return newAffine
def getParam(self):
return self.param
def getPeak(self):
return self.peak
def getAffine(self):
return self.affine
def getPerspective(self):
return self.perspective
def showRotatePeak(self):
plt.imshow(self.r_rotatescale,vmin=self.r_rotatescale.min(),vmax=self.r_rotatescale.max(),cmap='gray')
plt.show()
def showTranslationPeak(self):
plt.subplot(211)
plt.imshow(self.r1,vmin=self.r1.min(),vmax=self.r1.max(),cmap='gray')
plt.subplot(212)
plt.imshow(self.r2,vmin=self.r2.min(),vmax=self.r2.max(),cmap='gray')
plt.show()
def showLPA(self):
plt.imshow(self.LPA,vmin=self.LPA.min(),vmax=self.LPA.max(),cmap='gray')
plt.show()
def showLPB(self):
plt.imshow(self.LPB,vmin=self.LPB.min(),vmax=self.LPB.max(),cmap='gray')
plt.show()
def showMAT(self,MAT):
plt.figure()
plt.imshow(MAT,vmin=MAT.min(),vmax=MAT.max(),cmap='gray')
plt.show()
def saveMat(self,MAT,name):
cv2.imwrite(name,cv2.normalize(MAT, MAT, 0, 255, cv2.NORM_MINMAX))
def isSucceed(self):
if self.peak > self.th:
return 1
return 0
class imregpoc_largeM:
def __init__(self,iref,icmp,*,threshold = 0.06, alpha=0.5, beta=0.8):
self.ref = iref.astype(np.float32)
self.cmp = icmp.astype(np.float32)
self.th = threshold
self.center = np.array(iref.shape)/2.0
self.alpha = alpha
self.beta = beta
self.param = [0,0,0,1]
self.peak = 0
self.affine = np.float32([1,0,0,0,1,0]).reshape(2,3)
self.perspective = np.float32([1,0,0,0,1,0,0,0,0]).reshape(3,3)
self.match()
def match(self):
height,width = self.ref.shape
self.hanw = cv2.createHanningWindow((height, width),cv2.CV_64F)
# Windowing and FFT
G_a = np.fft.fft2(self.ref*self.hanw)
G_b = np.fft.fft2(self.cmp*self.hanw)
# 1.1: Frequency Whitening
self.LA = np.fft.fftshift(np.log(np.absolute(G_a)+1))
self.LB = np.fft.fftshift(np.log(np.absolute(G_b)+1))
# 1.2: Log polar Transformation
cx = self.center[1]
cy = self.center[0]
self.Mag = width/(math.log(width)-math.log(2)*0.5)
self.LPA = cv2.logPolar(self.LA, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
self.LPB = cv2.logPolar(self.LB, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
# 1.3:filtering
LPmin = math.floor(self.Mag*math.log(self.alpha*width/2.0/math.pi))
LPmax = min(width, math.floor(self.Mag*math.log(width*self.beta/2)))
assert LPmax > LPmin, 'Invalid condition!\n Enlarge lpmax tuning parameter or lpmin_tuning parameter'
Tile = np.repeat([0.0,1.0,0.0],[LPmin-1,LPmax-LPmin+1,width-LPmax])
self.Mask = np.tile(Tile,[height,1])
self.LPA_filt = self.LPA*self.Mask
self.LPB_filt = self.LPB*self.Mask
# 1.4: Phase Correlate to Get Rotation and Scaling
Diff,peak,self.r_rotatescale = self.PhaseCorrelation(self.LPA_filt,self.LPB_filt)
        theta1 = 2*math.pi * Diff[1] / height; # rad
        theta2 = theta1 + math.pi; # rad, theta ambiguity
invscale = math.exp(Diff[0]/self.Mag)
# 2.1: Correct rotation and scaling
b1 = self.Warp_4dof(self.cmp,[0,0,theta1,invscale])
b2 = self.Warp_4dof(self.cmp,[0,0,theta2,invscale])
# 2.2 : Translation estimation
diff1, peak1, self.r1 = self.PhaseCorrelation(self.ref,b1) #diff1, peak1 = PhaseCorrelation(a,b1)
diff2, peak2, self.r2 = self.PhaseCorrelation(self.ref,b2) #diff2, peak2 = PhaseCorrelation(a,b2)
# Use cv2.phaseCorrelate(a,b1) because it is much faster
# 2.3: Compare peaks and choose true rotational error
if peak1 > peak2:
Trans = diff1
peak = peak1
theta = -theta1
else:
Trans = diff2
peak = peak2
theta = -theta2
if theta > math.pi:
theta -= math.pi*2
elif theta < -math.pi:
theta += math.pi*2
# Results
self.param = [Trans[0],Trans[1],theta,1/invscale]
self.peak = peak
self.perspective = self.poc2warp(self.center,self.param)
self.affine = self.perspective[0:2,:]
def match_new(self, newImg):
self.cmp = newImg
height,width = self.cmp.shape
cy,cx = height/2,width/2
G_b = np.fft.fft2(self.cmp*self.hanw)
self.LB = np.fft.fftshift(np.log(np.absolute(G_b)+1))
self.LPB = cv2.logPolar(self.LB, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
self.LPB_filt = self.LPB*self.Mask
# 1.4: Phase Correlate to Get Rotation and Scaling
Diff,peak,self.r_rotatescale = self.PhaseCorrelation(self.LPA_filt,self.LPB_filt)
        theta1 = 2*math.pi * Diff[1] / height; # rad
        theta2 = theta1 + math.pi; # rad, theta ambiguity
invscale = math.exp(Diff[0]/self.Mag)
# 2.1: Correct rotation and scaling
b1 = self.Warp_4dof(self.cmp,[0,0,theta1,invscale])
b2 = self.Warp_4dof(self.cmp,[0,0,theta2,invscale])
# 2.2 : Translation estimation
diff1, peak1, self.r1 = self.PhaseCorrelation(self.ref,b1) #diff1, peak1 = PhaseCorrelation(a,b1)
diff2, peak2, self.r2 = self.PhaseCorrelation(self.ref,b2) #diff2, peak2 = PhaseCorrelation(a,b2)
# Use cv2.phaseCorrelate(a,b1) because it is much faster
# 2.3: Compare peaks and choose true rotational error
if peak1 > peak2:
Trans = diff1
peak = peak1
theta = -theta1
else:
Trans = diff2
peak = peak2
theta = -theta2
if theta > math.pi:
theta -= math.pi*2
elif theta < -math.pi:
theta += math.pi*2
# Results
self.param = [Trans[0],Trans[1],theta,1/invscale]
self.peak = peak
self.perspective = self.poc2warp(self.center,self.param)
self.affine = self.perspective[0:2,:]
def poc2warp(self,center,param):
cx,cy = center
dx,dy,theta,scale = param
cs = scale * math.cos(theta)
sn = scale * math.sin(theta)
Rot = np.float32([[cs, sn, 0],[-sn, cs,0],[0,0,1]])
center_Trans = np.float32([[1,0,cx],[0,1,cy],[0,0,1]])
center_iTrans = np.float32([[1,0,-cx],[0,1,-cy],[0,0,1]])
cRot = np.dot(np.dot(center_Trans,Rot),center_iTrans)
Trans = np.float32([[1,0,dx],[0,1,dy],[0,0,1]])
Affine = np.dot(cRot,Trans)
return Affine
    # Warp image based on poc parameter
def Warp_4dof(self,Img,param):
center = np.array(Img.shape)/2
rows,cols = Img.shape
Affine = self.poc2warp(center,param)
outImg = cv2.warpPerspective(Img, Affine, (cols,rows), cv2.INTER_LINEAR)
return outImg
# Get peak point
def CenterOfGravity(self,mat):
hei,wid = mat.shape
if hei != wid: # if mat size is not square, there must be something wrong
print("Skip subpixel estimation!")
return [0,0]
Tile=np.arange(wid,dtype=float)-(wid-1.0)/2.0
Tx = np.tile(Tile,[hei,1]) # Ty = Tx.T
Sum = np.sum(mat)
#print(mat)
Ax = np.sum(mat*Tx)/Sum
Ay = np.sum(mat*Tx.T)/Sum
return [Ay,Ax]
# Weighted Center Of Gravity
def WeightedCOG(self,mat):
if mat.size == 0:
print("Skip subpixel estimation!")
Res = [0,0]
else:
peak = mat.max()
newmat = mat*(mat>peak/10)# discard information of lower peak
Res = self.CenterOfGravity(newmat)
return Res
# Phase Correlation
def PhaseCorrelation(self, a, b):
height,width = a.shape
#dt = a.dtype # data type
# Windowing
# FFT
G_a = np.fft.fft2(a*self.hanw)
G_b = np.fft.fft2(b*self.hanw)
conj_b = np.ma.conjugate(G_b)
R = G_a*conj_b
R /= np.absolute(R)
r = np.fft.fftshift(np.fft.ifft2(R).real)
# Get result and Interpolation
DY,DX = np.unravel_index(r.argmax(), r.shape)
# Subpixel Accuracy
boxsize = 5
box = r[DY-int((boxsize-1)/2):DY+int((boxsize-1)/2)+1,DX-int((boxsize-1)/2):DX+int((boxsize-1)/2)+1] # x times x box
#TY,TX= CenterOfGravity(box)
TY,TX= self.WeightedCOG(box)
sDY = TY+DY
sDX = TX+DX
# Show the result
return [width/2-sDX,height/2-sDY],r[DY,DX],r
def MoveCenter(self,Affine,center,newcenter):
dx = newcenter[1] - center[1]
dy = newcenter[0] - center[0]
center_Trans = np.float32([[1,0,dx],[0,1,dy],[0,0,1]])
center_iTrans = np.float32([[1,0,-dx],[0,1,-dy],[0,0,1]])
newAffine = center_iTrans.dot( Affine.dot(center_Trans))
return newAffine
def getParam(self):
return self.param
def getPeak(self):
return self.peak
def getAffine(self):
return self.affine
def getPerspective(self):
return self.perspective
def showRotatePeak(self):
plt.imshow(self.r_rotatescale,vmin=self.r_rotatescale.min(),vmax=self.r_rotatescale.max(),cmap='gray')
plt.show()
def showTranslationPeak(self):
plt.subplot(211)
plt.imshow(self.r1,vmin=self.r1.min(),vmax=self.r1.max(),cmap='gray')
plt.subplot(212)
plt.imshow(self.r2,vmin=self.r2.min(),vmax=self.r2.max(),cmap='gray')
plt.show()
def showLPA(self):
plt.imshow(self.LPA,vmin=self.LPA.min(),vmax=self.LPA.max(),cmap='gray')
plt.show()
def showLPB(self):
plt.imshow(self.LPB,vmin=self.LPB.min(),vmax=self.LPB.max(),cmap='gray')
plt.show()
def showMAT(self,MAT):
plt.figure()
plt.imshow(MAT,vmin=MAT.min(),vmax=MAT.max(),cmap='gray')
plt.show()
def saveMat(self,MAT,name):
cv2.imwrite(name,cv2.normalize(MAT, MAT, 0, 255, cv2.NORM_MINMAX))
def isSucceed(self):
if self.peak > self.th:
return 1
return 0
class imregpoc_NoWhite:
def __init__(self,iref,icmp,*,threshold = 0.06, alpha=0.5, beta=0.8):
self.ref = iref.astype(np.float32)
self.cmp = icmp.astype(np.float32)
self.th = threshold
self.center = np.array(iref.shape)/2.0
self.alpha = alpha
self.beta = beta
self.param = [0,0,0,1]
self.peak = 0
self.affine = np.float32([1,0,0,0,1,0]).reshape(2,3)
self.perspective = np.float32([1,0,0,0,1,0,0,0,0]).reshape(3,3)
self.match()
def match(self):
height,width = self.ref.shape
self.hanw = cv2.createHanningWindow((height, width),cv2.CV_64F)
# Windowing and FFT
G_a = np.fft.fft2(self.ref*self.hanw)
G_b = np.fft.fft2(self.cmp*self.hanw)
        # 1.1: Amplitude spectrum (this variant skips the frequency-whitening step)
self.LA = np.fft.fftshift(np.absolute(G_a))
self.LB = np.fft.fftshift(np.absolute(G_b))
# 1.2: Log polar Transformation
cx = self.center[1]
cy = self.center[0]
self.Mag = width/math.log(width)
self.LPA = cv2.logPolar(self.LA, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
self.LPB = cv2.logPolar(self.LB, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
# 1.3:filtering
LPmin = math.floor(self.Mag*math.log(self.alpha*width/2.0/math.pi))
LPmax = min(width, math.floor(self.Mag*math.log(width*self.beta/2)))
        assert LPmax > LPmin, 'Invalid condition!\n Increase the beta tuning parameter or decrease the alpha tuning parameter'
Tile = np.repeat([0.0,1.0,0.0],[LPmin-1,LPmax-LPmin+1,width-LPmax])
self.Mask = np.tile(Tile,[height,1])
self.LPA_filt = self.LPA*self.Mask
self.LPB_filt = self.LPB*self.Mask
# 1.4: Phase Correlate to Get Rotation and Scaling
Diff,peak,self.r_rotatescale = self.PhaseCorrelation(self.LPA_filt,self.LPB_filt)
        theta1 = 2*math.pi * Diff[1] / height  # rotation estimate in radians
        theta2 = theta1 + math.pi  # radians; the log-polar estimate has a pi ambiguity
invscale = math.exp(Diff[0]/self.Mag)
# 2.1: Correct rotation and scaling
b1 = self.Warp_4dof(self.cmp,[0,0,theta1,invscale])
b2 = self.Warp_4dof(self.cmp,[0,0,theta2,invscale])
# 2.2 : Translation estimation
diff1, peak1, self.r1 = self.PhaseCorrelation(self.ref,b1) #diff1, peak1 = PhaseCorrelation(a,b1)
diff2, peak2, self.r2 = self.PhaseCorrelation(self.ref,b2) #diff2, peak2 = PhaseCorrelation(a,b2)
        # Note: cv2.phaseCorrelate(a, b1) could be used here instead and is much faster
# 2.3: Compare peaks and choose true rotational error
if peak1 > peak2:
Trans = diff1
peak = peak1
theta = -theta1
else:
Trans = diff2
peak = peak2
theta = -theta2
if theta > math.pi:
theta -= math.pi*2
elif theta < -math.pi:
theta += math.pi*2
# Results
self.param = [Trans[0],Trans[1],theta,1/invscale]
self.peak = peak
self.perspective = self.poc2warp(self.center,self.param)
self.affine = self.perspective[0:2,:]
def match_new(self, newImg):
self.cmp = newImg
height,width = self.cmp.shape
cy,cx = height/2,width/2
G_b = np.fft.fft2(self.cmp*self.hanw)
self.LB = np.fft.fftshift(np.absolute(G_b))
self.LPB = cv2.logPolar(self.LB, (cy, cx), self.Mag, flags=cv2.INTER_LINEAR+cv2.WARP_FILL_OUTLIERS)
self.LPB_filt = self.LPB*self.Mask
# 1.4: Phase Correlate to Get Rotation and Scaling
Diff,peak,self.r_rotatescale = self.PhaseCorrelation(self.LPA_filt,self.LPB_filt)
        theta1 = 2*math.pi * Diff[1] / height  # rotation estimate in radians
        theta2 = theta1 + math.pi  # radians; the log-polar estimate has a pi ambiguity
invscale = math.exp(Diff[0]/self.Mag)
# 2.1: Correct rotation and scaling
b1 = self.Warp_4dof(self.cmp,[0,0,theta1,invscale])
b2 = self.Warp_4dof(self.cmp,[0,0,theta2,invscale])
# 2.2 : Translation estimation
diff1, peak1, self.r1 = self.PhaseCorrelation(self.ref,b1) #diff1, peak1 = PhaseCorrelation(a,b1)
diff2, peak2, self.r2 = self.PhaseCorrelation(self.ref,b2) #diff2, peak2 = PhaseCorrelation(a,b2)
        # Note: cv2.phaseCorrelate(a, b1) could be used here instead and is much faster
# 2.3: Compare peaks and choose true rotational error
if peak1 > peak2:
Trans = diff1
peak = peak1
theta = -theta1
else:
Trans = diff2
peak = peak2
theta = -theta2
if theta > math.pi:
theta -= math.pi*2
elif theta < -math.pi:
theta += math.pi*2
# Results
self.param = [Trans[0],Trans[1],theta,1/invscale]
self.peak = peak
self.perspective = self.poc2warp(self.center,self.param)
self.affine = self.perspective[0:2,:]
def poc2warp(self,center,param):
cx,cy = center
dx,dy,theta,scale = param
cs = scale * math.cos(theta)
sn = scale * math.sin(theta)
Rot = np.float32([[cs, sn, 0],[-sn, cs,0],[0,0,1]])
center_Trans = np.float32([[1,0,cx],[0,1,cy],[0,0,1]])
center_iTrans = np.float32([[1,0,-cx],[0,1,-cy],[0,0,1]])
cRot = np.dot(np.dot(center_Trans,Rot),center_iTrans)
Trans = np.float32([[1,0,dx],[0,1,dy],[0,0,1]])
Affine = np.dot(cRot,Trans)
return Affine
    # Warp image based on POC parameter
def Warp_4dof(self,Img,param):
center = np.array(Img.shape)/2
rows,cols = Img.shape
Affine = self.poc2warp(center,param)
        outImg = cv2.warpPerspective(Img, Affine, (cols, rows), flags=cv2.INTER_LINEAR)
return outImg
# Get peak point
def CenterOfGravity(self,mat):
hei,wid = mat.shape
if hei != wid: # if mat size is not square, there must be something wrong
print("Skip subpixel estimation!")
return [0,0]
Tile=np.arange(wid,dtype=float)-(wid-1.0)/2.0
Tx = np.tile(Tile,[hei,1]) # Ty = Tx.T
Sum = np.sum(mat)
#print(mat)
Ax = np.sum(mat*Tx)/Sum
Ay = np.sum(mat*Tx.T)/Sum
return [Ay,Ax]
# Weighted Center Of Gravity
def WeightedCOG(self,mat):
if mat.size == 0:
print("Skip subpixel estimation!")
Res = [0,0]
else:
peak = mat.max()
newmat = mat*(mat>peak/10) # discard information of lower peak
Res = self.CenterOfGravity(newmat)
return Res
# Phase Correlation
def PhaseCorrelation(self, a, b):
height,width = a.shape
#dt = a.dtype # data type
        # Windowing and FFT
G_a = np.fft.fft2(a*self.hanw)
G_b = np.fft.fft2(b*self.hanw)
conj_b = np.ma.conjugate(G_b)
R = G_a*conj_b
R /= np.absolute(R)
r = np.fft.fftshift(np.fft.ifft2(R).real)
# Get result and Interpolation
DY,DX = np.unravel_index(r.argmax(), r.shape)
# Subpixel Accuracy
boxsize = 5
box = r[DY-int((boxsize-1)/2):DY+int((boxsize-1)/2)+1,DX-int((boxsize-1)/2):DX+int((boxsize-1)/2)+1] # x times x box
#TY,TX= CenterOfGravity(box)
TY,TX= self.WeightedCOG(box)
sDY = TY+DY
sDX = TX+DX
        # Return the estimated subpixel shift, the peak value, and the correlation surface
return [width/2-sDX,height/2-sDY],r[DY,DX],r
def MoveCenter(self,Affine,center,newcenter):
dx = newcenter[1] - center[1]
dy = newcenter[0] - center[0]
center_Trans = np.float32([[1,0,dx],[0,1,dy],[0,0,1]])
center_iTrans = np.float32([[1,0,-dx],[0,1,-dy],[0,0,1]])
newAffine = center_iTrans.dot( Affine.dot(center_Trans))
return newAffine
def getParam(self):
return self.param
def getPeak(self):
return self.peak
def getAffine(self):
return self.affine
def getPerspective(self):
return self.perspective
def showRotatePeak(self):
plt.imshow(self.r_rotatescale,vmin=self.r_rotatescale.min(),vmax=self.r_rotatescale.max(),cmap='gray')
plt.show()
def showTranslationPeak(self):
plt.subplot(211)
plt.imshow(self.r1,vmin=self.r1.min(),vmax=self.r1.max(),cmap='gray')
plt.subplot(212)
plt.imshow(self.r2,vmin=self.r2.min(),vmax=self.r2.max(),cmap='gray')
plt.show()
def showLPA(self):
plt.imshow(self.LPA,vmin=self.LPA.min(),vmax=self.LPA.max(),cmap='gray')
plt.show()
def showLPB(self):
plt.imshow(self.LPB,vmin=self.LPB.min(),vmax=self.LPB.max(),cmap='gray')
plt.show()
def showMAT(self,MAT):
plt.figure()
plt.imshow(MAT,vmin=MAT.min(),vmax=MAT.max(),cmap='gray')
plt.show()
def saveMat(self,MAT,name):
cv2.imwrite(name,cv2.normalize(MAT, MAT, 0, 255, cv2.NORM_MINMAX))
def isSucceed(self):
if self.peak > self.th:
return 1
return 0
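# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module).  It assumes two grayscale images of the same square size on disk;
# the file names below are placeholders.
if __name__ == '__main__':
    ref_img = cv2.imread('ref.png', 0)
    cmp_img = cv2.imread('cmp.png', 0)
    matcher = imregpoc_NoWhite(ref_img, cmp_img)
    if matcher.isSucceed():
        dx, dy, theta, scale = matcher.getParam()
        print('dx=%.2f dy=%.2f theta=%.4f rad scale=%.3f' % (dx, dy, theta, scale))
    else:
        print('peak %.3f below threshold %.3f; match rejected' % (matcher.getPeak(), matcher.th))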
|
py | b41772ba88d14bd218d0f7ff69049c96a086bb23 | from connector import create_connection
def insert_coord(data):
    # `data` is a space-separated report of the form:
    #   "<trackingID> <date> <time> <longitude> <latitude>"
    values = data.split(' ')
    tracking_id = values[0]
    names = get_names(tracking_id)
    # names[0][0] is the ownerID of the animal carrying this tracker
    insert_data(tracking_id, values[3], values[4], values[2], values[1], names[0][0])
def get_names(tracking_id):
cnx2 = create_connection()
cursor = cnx2.cursor()
query = ("Select ownerID from Animal where trackingID = %s")
cursor.execute(query, (tracking_id, ))
result = cursor.fetchall()
cursor.close()
cnx2.close()
return result
def insert_data(tracking_id, longitude, latitude, time, date, owner):
cnx2 = create_connection()
cursor = cnx2.cursor()
query = ("Insert into currentCoordinates (trackingID,longitude,latitude,time,date, username) values(%s, %s, %s,%s, %s, %s)")
cursor.execute(query, (tracking_id, longitude, latitude, time, date, str(owner)))
cnx2.commit()
cursor.close()
cnx2.close()
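# ---------------------------------------------------------------------------
# Illustrative example (added; the id and coordinates are made up): a report
# such as
#   insert_coord('42 2024-01-01 12:00:00 13.3888 52.5170')
# looks up the owner of tracker 42 in the Animal table and stores the fix in
# currentCoordinates as date=2024-01-01, time=12:00:00, longitude=13.3888,
# latitude=52.5170.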
|
py | b41774e9a652e06f11b1ab29a862360c18597fe6 | #!/usr/bin/env python
# coding=utf-8
"""Environment Information.
# Authors: MohammadHossein GohariNejad <[email protected]>
# License: BSD 3 clause
"""
import os
import signal
import sys
from time import gmtime,strftime
import rospy;
import tf;
from geometry_msgs.msg import *;
from nav_msgs.msg import *;
from std_msgs.msg import *;
import threading;
import math
robots_list=["robot0","robot1","robot2","robot3"];
info_list=[];
map_logger=None;
path_logger=None;
base_time=0;
def on_exit(*args):
    global path_logger
print ( "\n EXITING MESSAGE HANDLER")
if path_logger!=None :
path_logger.write("\n The Test has finished on "+strftime("%Y-%m-%d %H:%M:%S", gmtime()) + " GMT time \n")
path_logger.write("\n ======================== \n ======================== \n \n \n")
path_logger.close()
sys.exit(0)
class path_wrapper:
def __init__(self,robot_name_space,t_lock_path,subscribing_topic):
self.path_sub=rospy.Subscriber("/"+robot_name_space+"/"+subscribing_topic, Path, self.path_callback);
self.poses=[];
self.poses_lenght=0;
self.path_lenght=0.0;
self.t_lock_path=t_lock_path;
self.robot=robot_name_space;
def path_callback(self,input_path_data):
self.t_lock_path.acquire();
self.poses=list(input_path_data.poses);
self.t_lock_path.release();
    def path_lenght_calculator(self):
        # Accumulate the Euclidean length of newly received path segments
        # (sum of sqrt(dx^2 + dy^2) between consecutive poses, rounded to 2 decimals).
self.t_lock_path.acquire();
for i in range(self.poses_lenght,len(self.poses)):
if (i==0):continue
x=(self.poses[i].pose.position.x-self.poses[i-1].pose.position.x)*(self.poses[i].pose.position.x-self.poses[i-1].pose.position.x)
y=(self.poses[i].pose.position.y-self.poses[i-1].pose.position.y)*(self.poses[i].pose.position.y-self.poses[i-1].pose.position.y)
self.path_lenght+=float("{0:.2f}".format(math.sqrt(y+x)));
self.poses_lenght=len(self.poses);
self.t_lock_path.release();
return self.path_lenght;
def main():
global info_list;
global path_logger;
global map_logger;
global base_time;
global logfile;
global robots_list;
signal.signal(signal.SIGINT, on_exit)
signal.signal(signal.SIGTERM, on_exit)
rospy.init_node('path_node', anonymous=True)
debuger_mode=True;
if debuger_mode==True:
log_file=rospy.get_param("log_file",default="results")
log_folder=rospy.get_param("log_folder",default="map")
if not os.path.exists("/home/sosvr/communication_node_project/communication_node2/results_pack/"+log_folder):
os.makedirs("/home/sosvr/communication_node_project/communication_node2/results_pack/"+log_folder)
path_logger = open("/home/sosvr/communication_node_project/communication_node2/results_pack/"+log_folder+"/"+log_file+"_path.log", "w")
path_logger.write("\n \n \n ###################### \n ###################### \n")
path_logger.write("\n This is the result of test on "+strftime("%Y-%m-%d %H:%M:%S", gmtime()) + " GMT time \n")
for i in robots_list:
info_list.append(path_wrapper(i,threading.Lock(),"trajectory"));
rate = rospy.Rate(0.05)
base_time = 0;
while (not rospy.is_shutdown()) and base_time<1500:
if(base_time%100==0):print(str(base_time/100),"seconds");
if debuger_mode==True:
for i in info_list:
i.path_lenght_calculator();
path_logger.write("\n "+i.robot+","+str(i.path_lenght)+" ,"+str(int(base_time)));
base_time+=20;
rate.sleep();
print("finished");
rospy.spin()
main();
|
py | b41775f3c8b4182bb6c0bf2080e9f3f243b83244 | import dash
import dash_auth
import dash_core_components as dcc
import dash_html_components as html
import plotly
# Keep this out of source code repository - save in a file or a database
VALID_USERNAME_PASSWORD_PAIRS = {
'hello': 'world'
}
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
auth = dash_auth.BasicAuth(
app,
VALID_USERNAME_PASSWORD_PAIRS
)
app.layout = html.Div([
html.H1('Welcome to the app'),
html.H3('You are successfully authorized'),
dcc.Dropdown(
id='dropdown',
options=[{'label': i, 'value': i} for i in ['A', 'B']],
value='A'
),
dcc.Graph(id='graph')
], className='container')
@app.callback(
dash.dependencies.Output('graph', 'figure'),
[dash.dependencies.Input('dropdown', 'value')])
def update_graph(dropdown_value):
return {
'layout': {
'title': 'Graph of {}'.format(dropdown_value),
'margin': {
'l': 20,
'b': 20,
'r': 10,
't': 60
}
},
'data': [{'x': [1, 2, 3], 'y': [4, 1, 2]}]
}
if __name__ == '__main__':
app.run_server(debug=True)
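# ---------------------------------------------------------------------------
# Usage note (added for illustration): running this script serves the app at
# Dash's default local address http://127.0.0.1:8050/; the browser's HTTP
# Basic Auth prompt accepts the credentials defined above ('hello' / 'world').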
|
py | b4177667e982cfa4df308f6c4ceb3a3341b843a3 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['GalleryImageVersionArgs', 'GalleryImageVersion']
@pulumi.input_type
class GalleryImageVersionArgs:
def __init__(__self__, *,
gallery_image_name: pulumi.Input[str],
gallery_name: pulumi.Input[str],
publishing_profile: pulumi.Input['GalleryImageVersionPublishingProfileArgs'],
resource_group_name: pulumi.Input[str],
gallery_image_version_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a GalleryImageVersion resource.
:param pulumi.Input[str] gallery_image_name: The name of the gallery Image Definition in which the Image Version is to be created.
:param pulumi.Input[str] gallery_name: The name of the Shared Image Gallery in which the Image Definition resides.
:param pulumi.Input['GalleryImageVersionPublishingProfileArgs'] publishing_profile: The publishing profile of a gallery Image Version.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] gallery_image_version_name: The name of the gallery Image Version to be created. Needs to follow semantic version name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
pulumi.set(__self__, "gallery_image_name", gallery_image_name)
pulumi.set(__self__, "gallery_name", gallery_name)
pulumi.set(__self__, "publishing_profile", publishing_profile)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if gallery_image_version_name is not None:
pulumi.set(__self__, "gallery_image_version_name", gallery_image_version_name)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="galleryImageName")
def gallery_image_name(self) -> pulumi.Input[str]:
"""
The name of the gallery Image Definition in which the Image Version is to be created.
"""
return pulumi.get(self, "gallery_image_name")
@gallery_image_name.setter
def gallery_image_name(self, value: pulumi.Input[str]):
pulumi.set(self, "gallery_image_name", value)
@property
@pulumi.getter(name="galleryName")
def gallery_name(self) -> pulumi.Input[str]:
"""
The name of the Shared Image Gallery in which the Image Definition resides.
"""
return pulumi.get(self, "gallery_name")
@gallery_name.setter
def gallery_name(self, value: pulumi.Input[str]):
pulumi.set(self, "gallery_name", value)
@property
@pulumi.getter(name="publishingProfile")
def publishing_profile(self) -> pulumi.Input['GalleryImageVersionPublishingProfileArgs']:
"""
The publishing profile of a gallery Image Version.
"""
return pulumi.get(self, "publishing_profile")
@publishing_profile.setter
def publishing_profile(self, value: pulumi.Input['GalleryImageVersionPublishingProfileArgs']):
pulumi.set(self, "publishing_profile", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="galleryImageVersionName")
def gallery_image_version_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the gallery Image Version to be created. Needs to follow semantic version name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
"""
return pulumi.get(self, "gallery_image_version_name")
@gallery_image_version_name.setter
def gallery_image_version_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gallery_image_version_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class GalleryImageVersion(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
gallery_image_name: Optional[pulumi.Input[str]] = None,
gallery_image_version_name: Optional[pulumi.Input[str]] = None,
gallery_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
publishing_profile: Optional[pulumi.Input[pulumi.InputType['GalleryImageVersionPublishingProfileArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Specifies information about the gallery Image Version that you want to create or update.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] gallery_image_name: The name of the gallery Image Definition in which the Image Version is to be created.
:param pulumi.Input[str] gallery_image_version_name: The name of the gallery Image Version to be created. Needs to follow semantic version name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
:param pulumi.Input[str] gallery_name: The name of the Shared Image Gallery in which the Image Definition resides.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[pulumi.InputType['GalleryImageVersionPublishingProfileArgs']] publishing_profile: The publishing profile of a gallery Image Version.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: GalleryImageVersionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Specifies information about the gallery Image Version that you want to create or update.
:param str resource_name: The name of the resource.
:param GalleryImageVersionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GalleryImageVersionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
gallery_image_name: Optional[pulumi.Input[str]] = None,
gallery_image_version_name: Optional[pulumi.Input[str]] = None,
gallery_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
publishing_profile: Optional[pulumi.Input[pulumi.InputType['GalleryImageVersionPublishingProfileArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = GalleryImageVersionArgs.__new__(GalleryImageVersionArgs)
if gallery_image_name is None and not opts.urn:
raise TypeError("Missing required property 'gallery_image_name'")
__props__.__dict__["gallery_image_name"] = gallery_image_name
__props__.__dict__["gallery_image_version_name"] = gallery_image_version_name
if gallery_name is None and not opts.urn:
raise TypeError("Missing required property 'gallery_name'")
__props__.__dict__["gallery_name"] = gallery_name
__props__.__dict__["location"] = location
if publishing_profile is None and not opts.urn:
raise TypeError("Missing required property 'publishing_profile'")
__props__.__dict__["publishing_profile"] = publishing_profile
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["replication_status"] = None
__props__.__dict__["storage_profile"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/v20180601:GalleryImageVersion"), pulumi.Alias(type_="azure-native:compute:GalleryImageVersion"), pulumi.Alias(type_="azure-nextgen:compute:GalleryImageVersion"), pulumi.Alias(type_="azure-native:compute/v20190301:GalleryImageVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:GalleryImageVersion"), pulumi.Alias(type_="azure-native:compute/v20190701:GalleryImageVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:GalleryImageVersion"), pulumi.Alias(type_="azure-native:compute/v20191201:GalleryImageVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20191201:GalleryImageVersion"), pulumi.Alias(type_="azure-native:compute/v20200930:GalleryImageVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20200930:GalleryImageVersion")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(GalleryImageVersion, __self__).__init__(
'azure-native:compute/v20180601:GalleryImageVersion',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'GalleryImageVersion':
"""
Get an existing GalleryImageVersion resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = GalleryImageVersionArgs.__new__(GalleryImageVersionArgs)
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["publishing_profile"] = None
__props__.__dict__["replication_status"] = None
__props__.__dict__["storage_profile"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return GalleryImageVersion(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publishingProfile")
def publishing_profile(self) -> pulumi.Output['outputs.GalleryImageVersionPublishingProfileResponse']:
"""
The publishing profile of a gallery Image Version.
"""
return pulumi.get(self, "publishing_profile")
@property
@pulumi.getter(name="replicationStatus")
def replication_status(self) -> pulumi.Output['outputs.ReplicationStatusResponse']:
"""
This is the replication status of the gallery Image Version.
"""
return pulumi.get(self, "replication_status")
@property
@pulumi.getter(name="storageProfile")
def storage_profile(self) -> pulumi.Output['outputs.GalleryImageVersionStorageProfileResponse']:
"""
This is the storage profile of a gallery Image Version.
"""
return pulumi.get(self, "storage_profile")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
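# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for documentation; not generated code).
# The module path is inferred from the resource token
# 'azure-native:compute/v20180601:GalleryImageVersion'; the resource group,
# gallery and image names are placeholders, and the publishing-profile fields
# are only sketched - see ._inputs.GalleryImageVersionPublishingProfileArgs
# for the exact shape.
#
#   import pulumi_azure_native as azure_native
#
#   image_version = azure_native.compute.v20180601.GalleryImageVersion(
#       "exampleImageVersion",
#       resource_group_name="example-rg",
#       gallery_name="exampleGallery",
#       gallery_image_name="exampleImageDefinition",
#       gallery_image_version_name="1.0.0",
#       location="westus2",
#       publishing_profile=azure_native.compute.v20180601.GalleryImageVersionPublishingProfileArgs(
#           # ... source image reference and replication settings go here ...
#       ),
#       tags={"env": "dev"},
#   )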
|
py | b41776823f36bf9b2a9b13e51c39fa6836f3334a | #!/usr/bin/env python
# Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse
import os
import subprocess
import sys
from distutils.command.build_py import build_py
from distutils.core import setup
# We need to know the prefix for the installation
# so we can know where to get the library
parser = argparse.ArgumentParser()
parser.add_argument("--prefix", required=False)
parser.add_argument(
"--recurse", required=False, default=False, action="store_true"
)
args, _ = parser.parse_known_args()
class my_build_py(build_py):
def run(self):
if not self.dry_run:
self.mkpath(self.build_lib)
# Compile up our C header here and insert it as a string
# into legate_core_cffi.py so that it is installed with
# the python library directly
root_dir = os.path.dirname(os.path.realpath(__file__))
header_src = os.path.join(
root_dir, "src", "cunumeric", "cunumeric_c.h"
)
output_dir = os.path.join(root_dir, "cunumeric")
include_dir = os.path.join(args.prefix, "include")
header = subprocess.check_output(
[
os.getenv("CC", "gcc"),
"-E",
"-DLEGATE_USE_PYTHON_CFFI",
"-I" + str(include_dir),
"-P",
header_src,
]
).decode("utf-8")
libpath = os.path.join(args.prefix, "lib")
with open(os.path.join(output_dir, "install_info.py.in")) as f:
content = f.read()
content = content.format(
header=repr(header), libpath=repr(libpath)
)
with open(os.path.join(output_dir, "install_info.py"), "wb") as f:
f.write(content.encode("utf-8"))
build_py.run(self)
# If we haven't been called from install.py then do that first
if args.recurse:
# Remove the recurse argument from the list
sys.argv.remove("--recurse")
setup(
name="cunumeric",
version="22.03",
packages=[
"cunumeric",
"cunumeric.linalg",
"cunumeric.random",
"cunumeric._ufunc",
],
cmdclass={"build_py": my_build_py},
)
else:
with open("install.py") as f:
code = compile(f.read(), "install.py", "exec")
exec(code)
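# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original build script): the same
# "preprocess a C header into an importable Python string" idea in isolation.
# It assumes a C compiler is on PATH and that example.h exists; all names are
# placeholders.
#
#   import subprocess
#   header_text = subprocess.check_output(
#       ["gcc", "-E", "-P", "example.h"]
#   ).decode("utf-8")
#   with open("header_info.py", "w") as out:
#       out.write("header = %r\n" % header_text)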
|
py | b417775064388ac0cfcc6d1eddacec66eaf1b127 | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OrderNotCached
from ccxt.base.errors import CancelPending
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
class poloniex (Exchange):
def describe(self):
return self.deep_extend(super(poloniex, self).describe(), {
'id': 'poloniex',
'name': 'Poloniex',
'countries': ['US'],
'rateLimit': 1000, # up to 6 calls per second
'has': {
'createDepositAddress': True,
'fetchDepositAddress': True,
'CORS': False,
'editOrder': True,
'createMarketOrder': False,
'fetchOHLCV': True,
'fetchOrderTrades': True,
'fetchMyTrades': True,
'fetchOrder': 'emulated',
'fetchOrders': 'emulated',
'fetchOpenOrders': True,
'fetchClosedOrders': 'emulated',
'fetchTickers': True,
'fetchTradingFees': True,
'fetchCurrencies': True,
'withdraw': True,
},
'timeframes': {
'5m': 300,
'15m': 900,
'30m': 1800,
'2h': 7200,
'4h': 14400,
'1d': 86400,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766817-e9456312-5ee6-11e7-9b3c-b628ca5626a5.jpg',
'api': {
'public': 'https://poloniex.com/public',
'private': 'https://poloniex.com/tradingApi',
},
'www': 'https://poloniex.com',
'doc': [
'https://poloniex.com/support/api/',
'http://pastebin.com/dMX7mZE0',
],
'fees': 'https://poloniex.com/fees',
},
'api': {
'public': {
'get': [
'return24hVolume',
'returnChartData',
'returnCurrencies',
'returnLoanOrders',
'returnOrderBook',
'returnTicker',
'returnTradeHistory',
],
},
'private': {
'post': [
'buy',
'cancelLoanOffer',
'cancelOrder',
'closeMarginPosition',
'createLoanOffer',
'generateNewAddress',
'getMarginPosition',
'marginBuy',
'marginSell',
'moveOrder',
'returnActiveLoans',
'returnAvailableAccountBalances',
'returnBalances',
'returnCompleteBalances',
'returnDepositAddresses',
'returnDepositsWithdrawals',
'returnFeeInfo',
'returnLendingHistory',
'returnMarginAccountSummary',
'returnOpenLoanOffers',
'returnOpenOrders',
'returnOrderTrades',
'returnTradableBalances',
'returnTradeHistory',
'sell',
'toggleAutoRenew',
'transferBalance',
'withdraw',
],
},
},
'fees': {
'trading': {
'maker': 0.0015,
'taker': 0.0025,
},
'funding': {},
},
'limits': {
'amount': {
'min': 0.00000001,
'max': 1000000000,
},
'price': {
'min': 0.00000001,
'max': 1000000000,
},
'cost': {
'min': 0.00000000,
'max': 1000000000,
},
},
'precision': {
'amount': 8,
'price': 8,
},
'commonCurrencies': {
'AIR': 'AirCoin',
'APH': 'AphroditeCoin',
'BCC': 'BTCtalkcoin',
'BDG': 'Badgercoin',
'BTM': 'Bitmark',
'CON': 'Coino',
'GOLD': 'GoldEagles',
'GPUC': 'GPU',
'HOT': 'Hotcoin',
'ITC': 'Information Coin',
'PLX': 'ParallaxCoin',
'KEY': 'KEYCoin',
'STR': 'XLM',
'SOC': 'SOCC',
'XAP': 'API Coin',
},
'options': {
'limits': {
'cost': {
'min': {
'BTC': 0.0001,
'ETH': 0.0001,
'XMR': 0.0001,
'USDT': 1.0,
},
},
},
},
})
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
key = 'quote'
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * rate))
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.fee_to_precision(symbol, cost)),
}
def parse_ohlcv(self, ohlcv, market=None, timeframe='5m', since=None, limit=None):
return [
ohlcv['date'] * 1000,
ohlcv['open'],
ohlcv['high'],
ohlcv['low'],
ohlcv['close'],
ohlcv['quoteVolume'],
]
async def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
if since is None:
since = 0
request = {
'currencyPair': market['id'],
'period': self.timeframes[timeframe],
'start': int(since / 1000),
}
if limit is not None:
request['end'] = self.sum(request['start'], limit * self.timeframes[timeframe])
response = await self.publicGetReturnChartData(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_markets(self):
markets = await self.publicGetReturnTicker()
keys = list(markets.keys())
result = []
for p in range(0, len(keys)):
id = keys[p]
market = markets[id]
quote, base = id.split('_')
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
minCost = self.safe_float(self.options['limits']['cost']['min'], quote, 0.0)
result.append(self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'active': True,
'precision': {
'amount': 8,
'price': 8,
},
'limits': {
'amount': {
'min': 0.00000001,
'max': 1000000000,
},
'price': {
'min': 0.00000001,
'max': 1000000000,
},
'cost': {
'min': minCost,
'max': 1000000000,
},
},
'info': market,
}))
return result
async def fetch_balance(self, params={}):
await self.load_markets()
balances = await self.privatePostReturnCompleteBalances(self.extend({
'account': 'all',
}, params))
result = {'info': balances}
currencies = list(balances.keys())
for c in range(0, len(currencies)):
id = currencies[c]
balance = balances[id]
currency = self.common_currency_code(id)
account = {
'free': float(balance['available']),
'used': float(balance['onOrders']),
'total': 0.0,
}
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
async def fetch_trading_fees(self, params={}):
await self.load_markets()
fees = await self.privatePostReturnFeeInfo()
return {
'info': fees,
'maker': self.safe_float(fees, 'makerFee'),
'taker': self.safe_float(fees, 'takerFee'),
'withdraw': {},
'deposit': {},
}
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'currencyPair': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit # 100
response = await self.publicGetReturnOrderBook(self.extend(request, params))
orderbook = self.parse_order_book(response)
orderbook['nonce'] = self.safe_integer(response, 'sec')
return orderbook
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
open = None
change = None
average = None
last = self.safe_float(ticker, 'last')
relativeChange = self.safe_float(ticker, 'percentChange')
if relativeChange != -1:
open = last / self.sum(1, relativeChange)
change = last - open
average = self.sum(last, open) / 2
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high24hr'),
'low': self.safe_float(ticker, 'low24hr'),
'bid': self.safe_float(ticker, 'highestBid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'lowestAsk'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': relativeChange * 100,
'average': average,
'baseVolume': self.safe_float(ticker, 'quoteVolume'),
'quoteVolume': self.safe_float(ticker, 'baseVolume'),
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetReturnTicker(params)
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_currencies(self, params={}):
currencies = await self.publicGetReturnCurrencies(params)
ids = list(currencies.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
currency = currencies[id]
# todo: will need to rethink the fees
# to add support for multiple withdrawal/deposit methods and
# differentiated fees for each particular method
precision = 8 # default precision, todo: fix "magic constants"
code = self.common_currency_code(id)
active = (currency['delisted'] == 0) and not currency['disabled']
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': currency['name'],
'active': active,
'fee': self.safe_float(currency, 'txFee'), # todo: redesign
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': currency['txFee'],
'max': math.pow(10, precision),
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
tickers = await self.publicGetReturnTicker(params)
ticker = tickers[market['id']]
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
timestamp = self.parse8601(trade['date'])
symbol = None
base = None
quote = None
if (not market) and('currencyPair' in list(trade.keys())):
currencyPair = trade['currencyPair']
if currencyPair in self.markets_by_id:
market = self.markets_by_id[currencyPair]
else:
parts = currencyPair.split('_')
quote = parts[0]
base = parts[1]
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
base = market['base']
quote = market['quote']
side = trade['type']
fee = None
cost = self.safe_float(trade, 'total')
amount = self.safe_float(trade, 'amount')
if 'fee' in trade:
rate = self.safe_float(trade, 'fee')
feeCost = None
currency = None
if side == 'buy':
currency = base
feeCost = amount * rate
else:
currency = quote
if cost is not None:
feeCost = cost * rate
fee = {
'type': None,
'rate': rate,
'cost': feeCost,
'currency': currency,
}
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string(trade, 'tradeID'),
'order': self.safe_string(trade, 'orderNumber'),
'type': 'limit',
'side': side,
'price': self.safe_float(trade, 'rate'),
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
}
if since is not None:
request['start'] = int(since / 1000)
request['end'] = self.seconds() # last 50000 trades by default
trades = await self.publicGetReturnTradeHistory(self.extend(request, params))
return self.parse_trades(trades, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
pair = market['id'] if market else 'all'
request = {'currencyPair': pair}
if since is not None:
request['start'] = int(since / 1000)
request['end'] = self.seconds()
# limit is disabled(does not really work as expected)
if limit is not None:
request['limit'] = int(limit)
response = await self.privatePostReturnTradeHistory(self.extend(request, params))
result = []
if market is not None:
result = self.parse_trades(response, market)
else:
if response:
ids = list(response.keys())
for i in range(0, len(ids)):
id = ids[i]
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
trades = self.parse_trades(response[id], market)
for j in range(0, len(trades)):
result.append(trades[j])
return self.filter_by_since_limit(result, since, limit)
def parse_order(self, order, market=None):
timestamp = self.safe_integer(order, 'timestamp')
if not timestamp:
timestamp = self.parse8601(order['date'])
trades = None
if 'resultingTrades' in order:
trades = self.parse_trades(order['resultingTrades'], market)
symbol = None
if market:
symbol = market['symbol']
price = self.safe_float(order, 'price')
remaining = self.safe_float(order, 'amount')
amount = self.safe_float(order, 'startingAmount', remaining)
filled = None
cost = 0
if amount is not None:
if remaining is not None:
filled = amount - remaining
if price is not None:
cost = filled * price
if filled is None:
if trades is not None:
filled = 0
cost = 0
for i in range(0, len(trades)):
trade = trades[i]
tradeAmount = trade['amount']
tradePrice = trade['price']
filled = self.sum(filled, tradeAmount)
cost += tradePrice * tradeAmount
return {
'info': order,
'id': order['orderNumber'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': order['status'],
'symbol': symbol,
'type': order['type'],
'side': order['side'],
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': trades,
'fee': None,
}
def parse_open_orders(self, orders, market, result):
for i in range(0, len(orders)):
order = orders[i]
extended = self.extend(order, {
'status': 'open',
'type': 'limit',
'side': order['type'],
'price': order['rate'],
})
result.append(self.parse_order(extended, market))
return result
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
pair = market['id'] if market else 'all'
response = await self.privatePostReturnOpenOrders(self.extend({
'currencyPair': pair,
}))
openOrders = []
if market is not None:
openOrders = self.parse_open_orders(response, market, openOrders)
else:
marketIds = list(response.keys())
for i in range(0, len(marketIds)):
marketId = marketIds[i]
orders = response[marketId]
m = self.markets_by_id[marketId]
openOrders = self.parse_open_orders(orders, m, openOrders)
for j in range(0, len(openOrders)):
self.orders[openOrders[j]['id']] = openOrders[j]
openOrdersIndexedById = self.index_by(openOrders, 'id')
cachedOrderIds = list(self.orders.keys())
result = []
for k in range(0, len(cachedOrderIds)):
id = cachedOrderIds[k]
if id in openOrdersIndexedById:
self.orders[id] = self.extend(self.orders[id], openOrdersIndexedById[id])
else:
order = self.orders[id]
if order['status'] == 'open':
order = self.extend(order, {
'status': 'closed',
'cost': None,
'filled': order['amount'],
'remaining': 0.0,
})
if order['cost'] is None:
if order['filled'] is not None:
order['cost'] = order['filled'] * order['price']
self.orders[id] = order
order = self.orders[id]
if market is not None:
if order['symbol'] == symbol:
result.append(order)
else:
result.append(order)
return self.filter_by_since_limit(result, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
since = self.safe_value(params, 'since')
limit = self.safe_value(params, 'limit')
request = self.omit(params, ['since', 'limit'])
orders = await self.fetch_orders(symbol, since, limit, request)
for i in range(0, len(orders)):
if orders[i]['id'] == id:
return orders[i]
raise OrderNotCached(self.id + ' order id ' + str(id) + ' is not in "open" state and not found in cache')
def filter_orders_by_status(self, orders, status):
result = []
for i in range(0, len(orders)):
if orders[i]['status'] == status:
result.append(orders[i])
return result
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
orders = await self.fetch_orders(symbol, since, limit, params)
return self.filter_orders_by_status(orders, 'open')
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
orders = await self.fetch_orders(symbol, since, limit, params)
return self.filter_orders_by_status(orders, 'closed')
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
method = 'privatePost' + self.capitalize(side)
market = self.market(symbol)
price = float(price)
amount = float(amount)
response = await getattr(self, method)(self.extend({
'currencyPair': market['id'],
'rate': self.price_to_precision(symbol, price),
'amount': self.amount_to_precision(symbol, amount),
}, params))
timestamp = self.milliseconds()
order = self.parse_order(self.extend({
'timestamp': timestamp,
'status': 'open',
'type': type,
'side': side,
'price': price,
'amount': amount,
}, response), market)
id = order['id']
self.orders[id] = order
return self.extend({'info': response}, order)
async def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
price = float(price)
request = {
'orderNumber': id,
'rate': self.price_to_precision(symbol, price),
}
if amount is not None:
amount = float(amount)
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostMoveOrder(self.extend(request, params))
result = None
if id in self.orders:
self.orders[id]['status'] = 'canceled'
newid = response['orderNumber']
self.orders[newid] = self.extend(self.orders[id], {
'id': newid,
'price': price,
'status': 'open',
})
if amount is not None:
self.orders[newid]['amount'] = amount
result = self.extend(self.orders[newid], {'info': response})
else:
market = None
if symbol is not None:
market = self.market(symbol)
result = self.parse_order(response, market)
self.orders[result['id']] = result
return result
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
response = None
try:
response = await self.privatePostCancelOrder(self.extend({
'orderNumber': id,
}, params))
except Exception as e:
if isinstance(e, CancelPending):
# A request to cancel the order has been sent already.
# If we then attempt to cancel the order the second time
# before the first request is processed the exchange will
# raise a CancelPending exception. Poloniex won't show the
# order in the list of active(open) orders and the cached
# order will be marked as 'closed'(see #1801 for details).
# To avoid that we proactively mark the order as 'canceled'
# here. If for some reason the order does not get canceled
# and still appears in the active list then the order cache
# will eventually get back in sync on a call to `fetchOrder`.
if id in self.orders:
self.orders[id]['status'] = 'canceled'
raise e
if id in self.orders:
self.orders[id]['status'] = 'canceled'
return response
async def fetch_order_status(self, id, symbol=None):
await self.load_markets()
orders = await self.fetch_open_orders(symbol)
indexed = self.index_by(orders, 'id')
return 'open' if (id in list(indexed.keys())) else 'closed'
async def fetch_order_trades(self, id, symbol=None, params={}):
await self.load_markets()
trades = await self.privatePostReturnOrderTrades(self.extend({
'orderNumber': id,
}, params))
return self.parse_trades(trades)
async def create_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
response = await self.privatePostGenerateNewAddress({
'currency': currency['id'],
})
address = None
if response['success'] == 1:
address = self.safe_string(response, 'response')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': None,
'info': response,
}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
response = await self.privatePostReturnDepositAddresses()
currencyId = currency['id']
address = self.safe_string(response, currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': None,
'info': response,
}
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': amount,
'address': address,
}
if tag:
request['paymentId'] = tag
result = await self.privatePostWithdraw(self.extend(request, params))
return {
'info': result,
'id': result['response'],
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
query = self.extend({'command': path}, params)
if api == 'public':
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
query['nonce'] = self.nonce()
body = self.urlencode(query)
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Key': self.apiKey,
'Sign': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body):
response = None
try:
response = json.loads(body)
except Exception as e:
# syntax error, resort to default error handler
return
if 'error' in response:
message = response['error']
feedback = self.id + ' ' + self.json(response)
if message == 'Invalid order number, or you are not the person who placed the order.':
raise OrderNotFound(feedback)
elif message == 'Connection timed out. Please try again.':
raise RequestTimeout(feedback)
elif message == 'Internal error. Please try again.':
raise ExchangeNotAvailable(feedback)
elif message == 'Order not found, or you are not the person who placed it.':
raise OrderNotFound(feedback)
elif message == 'Invalid API key/secret pair.':
raise AuthenticationError(feedback)
elif message == 'Please do not make more than 8 API calls per second.':
raise DDoSProtection(feedback)
elif message.find('Total must be at least') >= 0:
raise InvalidOrder(feedback)
elif message.find('This account is frozen.') >= 0:
raise AccountSuspended(feedback)
elif message.find('Not enough') >= 0:
raise InsufficientFunds(feedback)
elif message.find('Nonce must be greater') >= 0:
raise InvalidNonce(feedback)
elif message.find('You have already called cancelOrder or moveOrder on self order.') >= 0:
raise CancelPending(feedback)
else:
raise ExchangeError(self.id + ' unknown error ' + self.json(response))
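# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for documentation; not part of the
# generated ccxt source).  fetch_ticker is a public endpoint, so no API keys
# are required; the symbol is just an example.
#
#   import asyncio
#   import ccxt.async_support as ccxt
#
#   async def demo():
#       exchange = ccxt.poloniex()
#       try:
#           ticker = await exchange.fetch_ticker('BTC/USDT')
#           print(ticker['symbol'], ticker['last'])
#       finally:
#           await exchange.close()
#
#   asyncio.get_event_loop().run_until_complete(demo())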
|
py | b4177754aef28b59565ac33a74702728faa5b3e3 | # -*- coding: utf-8 -*-
#
# Rayleigh documentation build configuration file, created by
# sphinx-quickstart on Sun May 5 22:54:33 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import cloud_sptheme as csp
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode']
extensions += ['numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Rayleigh'
copyright = u'2013, Sergey Karayev'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'cloud'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = { "roottarget": "index" }
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [csp.get_theme_dir()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Rayleighdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Rayleigh.tex', u'Rayleigh Documentation',
u'Sergey Karayev', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rayleigh', u'Rayleigh Documentation',
[u'Sergey Karayev'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Rayleigh', u'Rayleigh Documentation',
u'Sergey Karayev', 'Rayleigh', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
py | b41778693d6f77f42440d6a3bb4e46d6b1ef43a0 | import unittest
from django.utils.ipv6 import clean_ipv6_address, is_valid_ipv6_address
class TestUtilsIPv6(unittest.TestCase):
def test_validates_correct_plain_address(self):
self.assertTrue(is_valid_ipv6_address("fe80::223:6cff:fe8a:2e8a"))
self.assertTrue(is_valid_ipv6_address("2a02::223:6cff:fe8a:2e8a"))
self.assertTrue(is_valid_ipv6_address("1::2:3:4:5:6:7"))
self.assertTrue(is_valid_ipv6_address("::"))
self.assertTrue(is_valid_ipv6_address("::a"))
self.assertTrue(is_valid_ipv6_address("2::"))
def test_validates_correct_with_v4mapping(self):
self.assertTrue(is_valid_ipv6_address("::ffff:254.42.16.14"))
self.assertTrue(is_valid_ipv6_address("::ffff:0a0a:0a0a"))
def test_validates_incorrect_plain_address(self):
self.assertFalse(is_valid_ipv6_address("foo"))
self.assertFalse(is_valid_ipv6_address("127.0.0.1"))
self.assertFalse(is_valid_ipv6_address("12345::"))
self.assertFalse(is_valid_ipv6_address("1::2:3::4"))
self.assertFalse(is_valid_ipv6_address("1::zzz"))
self.assertFalse(is_valid_ipv6_address("1::2:3:4:5:6:7:8"))
self.assertFalse(is_valid_ipv6_address("1:2"))
self.assertFalse(is_valid_ipv6_address("1:::2"))
self.assertFalse(is_valid_ipv6_address("fe80::223: 6cff:fe8a:2e8a"))
self.assertFalse(is_valid_ipv6_address("2a02::223:6cff :fe8a:2e8a"))
def test_validates_incorrect_with_v4mapping(self):
self.assertFalse(is_valid_ipv6_address("::ffff:999.42.16.14"))
self.assertFalse(is_valid_ipv6_address("::ffff:zzzz:0a0a"))
# The ::1.2.3.4 format used to be valid but was deprecated
# in rfc4291 section 2.5.5.1
self.assertTrue(is_valid_ipv6_address("::254.42.16.14"))
self.assertTrue(is_valid_ipv6_address("::0a0a:0a0a"))
self.assertFalse(is_valid_ipv6_address("::999.42.16.14"))
self.assertFalse(is_valid_ipv6_address("::zzzz:0a0a"))
def test_cleans_plain_address(self):
self.assertEqual(clean_ipv6_address("DEAD::0:BEEF"), "dead::beef")
self.assertEqual(
clean_ipv6_address("2001:000:a:0000:0:fe:fe:beef"), "2001:0:a::fe:fe:beef"
)
self.assertEqual(
clean_ipv6_address("2001::a:0000:0:fe:fe:beef"), "2001:0:a::fe:fe:beef"
)
def test_cleans_with_v4_mapping(self):
self.assertEqual(clean_ipv6_address("::ffff:0a0a:0a0a"), "::ffff:10.10.10.10")
self.assertEqual(clean_ipv6_address("::ffff:1234:1234"), "::ffff:18.52.18.52")
self.assertEqual(clean_ipv6_address("::ffff:18.52.18.52"), "::ffff:18.52.18.52")
self.assertEqual(clean_ipv6_address("::ffff:0.52.18.52"), "::ffff:0.52.18.52")
self.assertEqual(clean_ipv6_address("::ffff:0.0.0.0"), "::ffff:0.0.0.0")
def test_unpacks_ipv4(self):
self.assertEqual(
clean_ipv6_address("::ffff:0a0a:0a0a", unpack_ipv4=True), "10.10.10.10"
)
self.assertEqual(
clean_ipv6_address("::ffff:1234:1234", unpack_ipv4=True), "18.52.18.52"
)
self.assertEqual(
clean_ipv6_address("::ffff:18.52.18.52", unpack_ipv4=True), "18.52.18.52"
)
|
py | b41778f8c180516d65cbd61e69647fa03d502da7 | import requests
from selenium import webdriver
from selenium.webdriver import ActionChains
from flask import Blueprint, current_app, request, jsonify
import time
import html.parser
import re
import os
import random
from app import db
bp = Blueprint('camping_data', __name__, url_prefix='/camping_data')
@bp.route('/crawling', methods=['GET'])
def get_result():
# form = request.form
# region = form['region_give']
# region = request.args.get('region_give')
lists = ["서울시", "부산시", "대구시", "인천시", "광주시", "대전시", "울산시", "세종시", "경기도", "강원도", "충청북도", "충청남도", "전라북도", "전라남도",
"경상북도", "경상남도", "제주도"]
db.campsite.remove({})
for region in lists:
url = "https://dapi.kakao.com/v2/local/search/keyword.json?query={} 캠핑장&size=9".format(region)
headers = {"Authorization": "KakaoAK " + current_app.config['REST_API']}
data = requests.get(url, headers=headers)
data = data.json()['documents']
documents = []
if data:
for d in data:
document = {
'region': region,
'campsite_name': re.sub('<[^>]*>', ' ', html.unescape(d['place_name'])),
'category' : d['category_name'],
'address' : d['address_name'],
'road_address' : d['road_address_name'],
'phone': d['phone'],
'link' : d['place_url'],
'x' : d['x'],
'y' : d['y']
}
documents.append(document)
print(documents)
for document in documents:
place_id = document['link'].split('/')[-1]
document['tag'] = []
document['image'] = ''
document['description'] = ''
URL_DETAILS_PAGE = "https://place.map.kakao.com/main/v/"
place_details = requests.get(URL_DETAILS_PAGE + place_id).json()
try:
tags = place_details['basicInfo']['metaKeywordList']
for tag in tags:
document['tag'].append(tag)
photo_list = place_details['photo']
time.sleep(0.1)
for p in photo_list['photoList'][0]['list']:
if 'daum' in p['orgurl'] or 'kakao' in p['orgurl']:
document['image'] = p['orgurl']
else:
document['image'] = '../static/assets/img/bg-showcase-' + str(random.randint(1, 5)) + '.jpg'
# document['image'] = photo_list['photoList'][0]['list'][0]['orgurl']
document['description'] = place_details['basicInfo']['introduction']
except:
pass
        # Skip empty regions; pymongo's insert_many() raises on an empty document list.
        if documents:
            db.campsite.insert_many(documents)
result = {'result': 'success'}
return jsonify(result)
@bp.route('/', methods=['GET'])
def list_result():
# form = request.form
# region = form['region_give']
region_give = request.args.get('region_give')
data = list(db.campsite.find({'region': region_give}, {'_id': False}))
result = {
'result': 'success',
'articles': data,
}
return jsonify(result) |
py | b4177a60c114ad8a861bfc3d42ad3e941fdb77fa | """
Tools for working with alpha shapes.
"""
__all__ = ['alphashape']
import itertools
import logging
from shapely.ops import unary_union, polygonize
from shapely.geometry import MultiPoint, MultiLineString
from scipy.spatial import Delaunay
import numpy as np
from typing import Union, Tuple, List
try:
import geopandas
USE_GP = True
except ImportError:
USE_GP = False
def circumcenter(points: Union[List[Tuple[float]], np.ndarray]) -> np.ndarray:
"""
Calculate the circumcenter of a set of points in barycentric coordinates.
Args:
points: An `N`x`K` array of points which define an (`N`-1) simplex in K
dimensional space. `N` and `K` must satisfy 1 <= `N` <= `K` and
`K` >= 1.
Returns:
The circumcenter of a set of points in barycentric coordinates.
"""
points = np.asarray(points)
num_rows, num_columns = points.shape
A = np.bmat([[2 * np.dot(points, points.T),
np.ones((num_rows, 1))],
[np.ones((1, num_rows)), np.zeros((1, 1))]])
b = np.hstack((np.sum(points * points, axis=1),
np.ones((1))))
return np.linalg.solve(A, b)[:-1]
def circumradius(points: Union[List[Tuple[float]], np.ndarray]) -> float:
"""
    Calculate the circumradius of a given set of points.
Args:
points: An `N`x`K` array of points which define an (`N`-1) simplex in K
dimensional space. `N` and `K` must satisfy 1 <= `N` <= `K` and
`K` >= 1.
Returns:
The circumradius of a given set of points.
"""
points = np.asarray(points)
return np.linalg.norm(points[0, :] - np.dot(circumcenter(points), points))
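# A minimal sketch (kept as comments, nothing is executed) of using the two
# helpers above directly; the triangle below is an illustrative assumption,
# not part of this module. For the right triangle (0, 0), (1, 0), (0, 1) the
# circumcenter is the midpoint of the hypotenuse and the circumradius is
# sqrt(2) / 2:
#
#     tri = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
#     bary = circumcenter(tri)       # barycentric weights, they sum to 1
#     center = np.dot(bary, tri)     # Cartesian center -> array([0.5, 0.5])
#     radius = circumradius(tri)     # -> ~0.7071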
def alphasimplices(points: Union[List[Tuple[float]], np.ndarray]) -> \
Union[List[Tuple[float]], np.ndarray]:
"""
Returns an iterator of simplices and their circumradii of the given set of
points.
Args:
points: An `N`x`M` array of points.
Yields:
A simplex, and its circumradius as a tuple.
"""
coords = np.asarray(points)
tri = Delaunay(coords)
for simplex in tri.simplices:
simplex_points = coords[simplex]
try:
yield simplex, circumradius(simplex_points)
except np.linalg.LinAlgError:
            logging.warning('Singular matrix. Likely caused by all points '
                            'lying in an N-1 space.')
def alphashape(points: Union[List[Tuple[float]], np.ndarray],
alpha: Union[None, float] = None):
"""
Compute the alpha shape (concave hull) of a set of points. If the number
of points in the input is three or less, the convex hull is returned to the
user. For two points, the convex hull collapses to a `LineString`; for one
point, a `Point`.
Args:
points (list or ``shapely.geometry.MultiPoint`` or \
``geopandas.GeoDataFrame``): an iterable container of points
alpha (float): alpha value
Returns:
``shapely.geometry.Polygon`` or ``shapely.geometry.LineString`` or
``shapely.geometry.Point`` or ``geopandas.GeoDataFrame``: \
the resulting geometry
"""
# If given a geodataframe, extract the geometry
if USE_GP and isinstance(points, geopandas.GeoDataFrame):
crs = points.crs
points = points['geometry']
else:
crs = None
# If given a triangle for input, or an alpha value of zero or less,
# return the convex hull.
if len(points) < 4 or (alpha is not None and not callable(
alpha) and alpha <= 0):
if not isinstance(points, MultiPoint):
points = MultiPoint(list(points))
result = points.convex_hull
if crs:
gdf = geopandas.GeoDataFrame(geopandas.GeoSeries(result)).rename(
columns={0: 'geometry'}).set_geometry('geometry')
gdf.crs = crs
return gdf
else:
return result
# Determine alpha parameter if one is not given
if alpha is None:
try:
from optimizealpha import optimizealpha
except ImportError:
from .optimizealpha import optimizealpha
alpha = optimizealpha(points)
# Convert the points to a numpy array
if USE_GP and isinstance(points, geopandas.geoseries.GeoSeries):
coords = np.array([point.coords[0] for point in points])
else:
coords = np.array(points)
# Create a set to hold unique edges of simplices that pass the radius
# filtering
edges = set()
    # Create a set to hold unique edges of perimeter simplices.
    # Whenever a simplex is found that passes the radius filter, its edges
    # are checked against the `edges` set. If an edge is not already there,
    # it is added to both the `edges` set and the `perimeter_edges` set. If
    # it is already there, it is removed from the `perimeter_edges` set if
    # present. This exploits the fact that an interior edge is shared by
    # exactly two simplices while a perimeter edge belongs to only one, so
    # any edge encountered twice cannot lie on the perimeter.
perimeter_edges = set()
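    # Worked illustration of the bookkeeping above (assumed 2-D input with
    # sorted vertex indices; nothing here is executed): if simplices
    # (0, 1, 2) and (1, 2, 3) both pass the radius filter, the first adds
    # (0, 1), (0, 2) and (1, 2) to both sets; the second re-encounters
    # (1, 2), which is therefore dropped from `perimeter_edges`, leaving the
    # four outer edges (0, 1), (0, 2), (1, 3) and (2, 3) as the boundary.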
for point_indices, circumradius in alphasimplices(coords):
if callable(alpha):
resolved_alpha = alpha(point_indices, circumradius)
else:
resolved_alpha = alpha
# Radius filter
if circumradius < 1.0 / resolved_alpha:
for edge in itertools.combinations(
point_indices, r=coords.shape[-1]):
if all([e not in edges for e in itertools.combinations(
edge, r=len(edge))]):
edges.add(edge)
perimeter_edges.add(edge)
else:
perimeter_edges -= set(itertools.combinations(
edge, r=len(edge)))
if coords.shape[-1] > 3:
return perimeter_edges
elif coords.shape[-1] == 3:
import trimesh
result = trimesh.Trimesh(vertices=coords, faces=list(perimeter_edges))
trimesh.repair.fix_normals(result)
return result
# Create the resulting polygon from the edge points
m = MultiLineString([coords[np.array(edge)] for edge in perimeter_edges])
triangles = list(polygonize(m))
result = unary_union(triangles)
# Convert to pandas geodataframe object if that is what was an input
if crs:
gdf = geopandas.GeoDataFrame(geopandas.GeoSeries(result)).rename(
columns={0: 'geometry'}).set_geometry('geometry')
gdf.crs = crs
return gdf
else:
return result
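# A minimal usage sketch, kept as a comment so nothing runs at import time;
# the point set and alpha value are illustrative assumptions:
#
#     pts = [(0., 0.), (0., 1.), (1., 1.), (1., 0.),
#            (0.5, 0.25), (0.5, 0.75), (0.25, 0.5), (0.75, 0.5)]
#     hull = alphashape(pts, alpha=2.0)   # concave hull as a shapely Polygon
#     cvx = alphashape(pts, alpha=0.)     # alpha <= 0 falls back to the convex hull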
|
py | b4177b0753fdff63ed8d815351b71896214c18a6 | from decimal import Decimal
class Parameter:
host = "host"
region = "region"
endpoint_name = "endpoint_name"
endpoint_config_name = "endpoint_config_name"
model_name = "model_name"
instance_type = "instance_type"
initial_instance_count = "initial_instance_count"
ramp_start_tps = "ramp_start_tps"
ramp_minutes = "ramp_minutes"
steady_state_tps = "steady_state_tps"
steady_state_minutes = "steady_state_minutes"
class SageMaker:
SAFETY_FACTOR = Decimal("0.5")
class AwsRegion:
US_EAST_2 = "us-east-2" # US East (Ohio)
US_EAST_1 = "us-east-1" # US East (N. Virginia)
US_WEST_1 = "us-west-1" # US West (N. California)
US_WEST_2 = "us-west-2" # US West (Oregon)
AF_SOUTH_1 = "af-south-1" # Africa (Cape Town)
AP_EAST_1 = "ap-east-1" # Asia Pacific (Hong Kong)
AP_SOUTH_1 = "ap-south-1" # Asia Pacific (Mumbai)
AP_NORTHEAST_3 = "ap-northeast-3" # Asia Pacific (Osaka)
AP_NORTHEAST_2 = "ap-northeast-2" # Asia Pacific (Seoul)
AP_SOUTHEAST_1 = "ap-southeast-1" # Asia Pacific (Singapore)
AP_SOUTHEAST_2 = "ap-southeast-2" # Asia Pacific (Sydney)
AP_NORTHEAST_1 = "ap-northeast-1" # Asia Pacific (Tokyo)
CA_CENTRAL_1 = "ca-central-1" # Canada (Central)
EU_CENTRAL_1 = "eu-central-1" # Europe (Frankfurt)
EU_WEST_1 = "eu-west-1" # Europe (Ireland)
EU_WEST_2 = "eu-west-2" # Europe (London)
EU_SOUTH_1 = "eu-south-1" # Europe (Milan)
EU_WEST_3 = "eu-west-3" # Europe (Paris)
EU_NORTH_1 = "eu-north-1" # Europe (Stockholm)
ME_SOUTH_1 = "me-south-1" # Middle East (Bahrain)
SA_EAST_1 = "sa-east-1" # South America (São Paulo)
|
py | b4177b7b1828700b92c141b2a6c716193ef8235f | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ssl_ssh_profile
short_description: Configure SSL/SSH protocol options in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify firewall feature and ssl_ssh_profile category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
firewall_ssl_ssh_profile:
description:
- Configure SSL/SSH protocol options.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
caname:
description:
- CA certificate used by SSL Inspection. Source vpn.certificate.local.name.
comment:
description:
- Optional comments.
ftps:
description:
- Configure FTPS options.
suboptions:
allow-invalid-server-cert:
description:
- When enabled, allows SSL sessions whose server certificate validation failed.
choices:
- enable
- disable
client-cert-request:
description:
- Action based on client certificate request.
choices:
- bypass
- inspect
- block
ports:
description:
- Ports to use for scanning (1 - 65535, default = 443).
status:
description:
- Configure protocol inspection status.
choices:
- disable
- deep-inspection
unsupported-ssl:
description:
- Action based on the SSL encryption used being unsupported.
choices:
- bypass
- inspect
- block
untrusted-cert:
description:
- Allow, ignore, or block the untrusted SSL session server certificate.
choices:
- allow
- block
- ignore
https:
description:
- Configure HTTPS options.
suboptions:
allow-invalid-server-cert:
description:
- When enabled, allows SSL sessions whose server certificate validation failed.
choices:
- enable
- disable
client-cert-request:
description:
- Action based on client certificate request.
choices:
- bypass
- inspect
- block
ports:
description:
- Ports to use for scanning (1 - 65535, default = 443).
status:
description:
- Configure protocol inspection status.
choices:
- disable
- certificate-inspection
- deep-inspection
unsupported-ssl:
description:
- Action based on the SSL encryption used being unsupported.
choices:
- bypass
- inspect
- block
untrusted-cert:
description:
- Allow, ignore, or block the untrusted SSL session server certificate.
choices:
- allow
- block
- ignore
imaps:
description:
- Configure IMAPS options.
suboptions:
allow-invalid-server-cert:
description:
- When enabled, allows SSL sessions whose server certificate validation failed.
choices:
- enable
- disable
client-cert-request:
description:
- Action based on client certificate request.
choices:
- bypass
- inspect
- block
ports:
description:
- Ports to use for scanning (1 - 65535, default = 443).
status:
description:
- Configure protocol inspection status.
choices:
- disable
- deep-inspection
unsupported-ssl:
description:
- Action based on the SSL encryption used being unsupported.
choices:
- bypass
- inspect
- block
untrusted-cert:
description:
- Allow, ignore, or block the untrusted SSL session server certificate.
choices:
- allow
- block
- ignore
mapi-over-https:
description:
- Enable/disable inspection of MAPI over HTTPS.
choices:
- enable
- disable
name:
description:
- Name.
required: true
pop3s:
description:
- Configure POP3S options.
suboptions:
allow-invalid-server-cert:
description:
- When enabled, allows SSL sessions whose server certificate validation failed.
choices:
- enable
- disable
client-cert-request:
description:
- Action based on client certificate request.
choices:
- bypass
- inspect
- block
ports:
description:
- Ports to use for scanning (1 - 65535, default = 443).
status:
description:
- Configure protocol inspection status.
choices:
- disable
- deep-inspection
unsupported-ssl:
description:
- Action based on the SSL encryption used being unsupported.
choices:
- bypass
- inspect
- block
untrusted-cert:
description:
- Allow, ignore, or block the untrusted SSL session server certificate.
choices:
- allow
- block
- ignore
rpc-over-https:
description:
- Enable/disable inspection of RPC over HTTPS.
choices:
- enable
- disable
server-cert:
description:
- Certificate used by SSL Inspection to replace server certificate. Source vpn.certificate.local.name.
server-cert-mode:
description:
- Re-sign or replace the server's certificate.
choices:
- re-sign
- replace
smtps:
description:
- Configure SMTPS options.
suboptions:
allow-invalid-server-cert:
description:
- When enabled, allows SSL sessions whose server certificate validation failed.
choices:
- enable
- disable
client-cert-request:
description:
- Action based on client certificate request.
choices:
- bypass
- inspect
- block
ports:
description:
- Ports to use for scanning (1 - 65535, default = 443).
status:
description:
- Configure protocol inspection status.
choices:
- disable
- deep-inspection
unsupported-ssl:
description:
- Action based on the SSL encryption used being unsupported.
choices:
- bypass
- inspect
- block
untrusted-cert:
description:
- Allow, ignore, or block the untrusted SSL session server certificate.
choices:
- allow
- block
- ignore
ssh:
description:
- Configure SSH options.
suboptions:
inspect-all:
description:
- Level of SSL inspection.
choices:
- disable
- deep-inspection
ports:
description:
- Ports to use for scanning (1 - 65535, default = 443).
ssh-algorithm:
description:
- Relative strength of encryption algorithms accepted during negotiation.
choices:
- compatible
- high-encryption
ssh-policy-check:
description:
- Enable/disable SSH policy check.
choices:
- disable
- enable
ssh-tun-policy-check:
description:
- Enable/disable SSH tunnel policy check.
choices:
- disable
- enable
status:
description:
- Configure protocol inspection status.
choices:
- disable
- deep-inspection
unsupported-version:
description:
- Action based on SSH version being unsupported.
choices:
- bypass
- block
ssl:
description:
- Configure SSL options.
suboptions:
allow-invalid-server-cert:
description:
- When enabled, allows SSL sessions whose server certificate validation failed.
choices:
- enable
- disable
client-cert-request:
description:
- Action based on client certificate request.
choices:
- bypass
- inspect
- block
inspect-all:
description:
- Level of SSL inspection.
choices:
- disable
- certificate-inspection
- deep-inspection
unsupported-ssl:
description:
- Action based on the SSL encryption used being unsupported.
choices:
- bypass
- inspect
- block
untrusted-cert:
description:
- Allow, ignore, or block the untrusted SSL session server certificate.
choices:
- allow
- block
- ignore
ssl-anomalies-log:
description:
- Enable/disable logging SSL anomalies.
choices:
- disable
- enable
ssl-exempt:
description:
- Servers to exempt from SSL inspection.
suboptions:
address:
description:
- IPv4 address object. Source firewall.address.name firewall.addrgrp.name.
address6:
description:
- IPv6 address object. Source firewall.address6.name firewall.addrgrp6.name.
fortiguard-category:
description:
- FortiGuard category ID.
id:
description:
- ID number.
required: true
regex:
description:
- Exempt servers by regular expression.
type:
description:
- Type of address object (IPv4 or IPv6) or FortiGuard category.
choices:
- fortiguard-category
- address
- address6
- wildcard-fqdn
- regex
wildcard-fqdn:
description:
- Exempt servers by wildcard FQDN. Source firewall.wildcard-fqdn.custom.name firewall.wildcard-fqdn.group.name.
ssl-exemptions-log:
description:
- Enable/disable logging SSL exemptions.
choices:
- disable
- enable
ssl-server:
description:
- SSL servers.
suboptions:
ftps-client-cert-request:
description:
- Action based on client certificate request during the FTPS handshake.
choices:
- bypass
- inspect
- block
https-client-cert-request:
description:
- Action based on client certificate request during the HTTPS handshake.
choices:
- bypass
- inspect
- block
id:
description:
- SSL server ID.
required: true
imaps-client-cert-request:
description:
- Action based on client certificate request during the IMAPS handshake.
choices:
- bypass
- inspect
- block
ip:
description:
- IPv4 address of the SSL server.
pop3s-client-cert-request:
description:
- Action based on client certificate request during the POP3S handshake.
choices:
- bypass
- inspect
- block
smtps-client-cert-request:
description:
- Action based on client certificate request during the SMTPS handshake.
choices:
- bypass
- inspect
- block
ssl-other-client-cert-request:
description:
- Action based on client certificate request during an SSL protocol handshake.
choices:
- bypass
- inspect
- block
untrusted-caname:
description:
- Untrusted CA certificate used by SSL Inspection. Source vpn.certificate.local.name.
use-ssl-server:
description:
- Enable/disable the use of SSL server table for SSL offloading.
choices:
- disable
- enable
whitelist:
description:
- Enable/disable exempting servers by FortiGuard whitelist.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure SSL/SSH protocol options.
fortios_firewall_ssl_ssh_profile:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
firewall_ssl_ssh_profile:
state: "present"
caname: "<your_own_value> (source vpn.certificate.local.name)"
comment: "Optional comments."
ftps:
allow-invalid-server-cert: "enable"
client-cert-request: "bypass"
ports: "8"
status: "disable"
unsupported-ssl: "bypass"
untrusted-cert: "allow"
https:
allow-invalid-server-cert: "enable"
client-cert-request: "bypass"
ports: "15"
status: "disable"
unsupported-ssl: "bypass"
untrusted-cert: "allow"
imaps:
allow-invalid-server-cert: "enable"
client-cert-request: "bypass"
ports: "22"
status: "disable"
unsupported-ssl: "bypass"
untrusted-cert: "allow"
mapi-over-https: "enable"
name: "default_name_27"
pop3s:
allow-invalid-server-cert: "enable"
client-cert-request: "bypass"
ports: "31"
status: "disable"
unsupported-ssl: "bypass"
untrusted-cert: "allow"
rpc-over-https: "enable"
server-cert: "<your_own_value> (source vpn.certificate.local.name)"
server-cert-mode: "re-sign"
smtps:
allow-invalid-server-cert: "enable"
client-cert-request: "bypass"
ports: "41"
status: "disable"
unsupported-ssl: "bypass"
untrusted-cert: "allow"
ssh:
inspect-all: "disable"
ports: "47"
ssh-algorithm: "compatible"
ssh-policy-check: "disable"
ssh-tun-policy-check: "disable"
status: "disable"
unsupported-version: "bypass"
ssl:
allow-invalid-server-cert: "enable"
client-cert-request: "bypass"
inspect-all: "disable"
unsupported-ssl: "bypass"
untrusted-cert: "allow"
ssl-anomalies-log: "disable"
ssl-exempt:
-
address: "<your_own_value> (source firewall.address.name firewall.addrgrp.name)"
address6: "<your_own_value> (source firewall.address6.name firewall.addrgrp6.name)"
fortiguard-category: "63"
id: "64"
regex: "<your_own_value>"
type: "fortiguard-category"
wildcard-fqdn: "<your_own_value> (source firewall.wildcard-fqdn.custom.name firewall.wildcard-fqdn.group.name)"
ssl-exemptions-log: "disable"
ssl-server:
-
ftps-client-cert-request: "bypass"
https-client-cert-request: "bypass"
id: "72"
imaps-client-cert-request: "bypass"
ip: "<your_own_value>"
pop3s-client-cert-request: "bypass"
smtps-client-cert-request: "bypass"
ssl-other-client-cert-request: "bypass"
untrusted-caname: "<your_own_value> (source vpn.certificate.local.name)"
use-ssl-server: "disable"
whitelist: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_ssl_ssh_profile_data(json):
option_list = ['caname', 'comment', 'ftps',
'https', 'imaps', 'mapi-over-https',
'name', 'pop3s', 'rpc-over-https',
'server-cert', 'server-cert-mode', 'smtps',
'ssh', 'ssl', 'ssl-anomalies-log',
'ssl-exempt', 'ssl-exemptions-log', 'ssl-server',
'untrusted-caname', 'use-ssl-server', 'whitelist']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def firewall_ssl_ssh_profile(data, fos):
vdom = data['vdom']
firewall_ssl_ssh_profile_data = data['firewall_ssl_ssh_profile']
filtered_data = filter_firewall_ssl_ssh_profile_data(firewall_ssl_ssh_profile_data)
if firewall_ssl_ssh_profile_data['state'] == "present":
return fos.set('firewall',
'ssl-ssh-profile',
data=filtered_data,
vdom=vdom)
elif firewall_ssl_ssh_profile_data['state'] == "absent":
return fos.delete('firewall',
'ssl-ssh-profile',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
login(data, fos)
if data['firewall_ssl_ssh_profile']:
resp = firewall_ssl_ssh_profile(data, fos)
fos.logout()
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"firewall_ssl_ssh_profile": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"caname": {"required": False, "type": "str"},
"comment": {"required": False, "type": "str"},
"ftps": {"required": False, "type": "dict",
"options": {
"allow-invalid-server-cert": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"client-cert-request": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"ports": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["disable", "deep-inspection"]},
"unsupported-ssl": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"untrusted-cert": {"required": False, "type": "str",
"choices": ["allow", "block", "ignore"]}
}},
"https": {"required": False, "type": "dict",
"options": {
"allow-invalid-server-cert": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"client-cert-request": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"ports": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["disable", "certificate-inspection", "deep-inspection"]},
"unsupported-ssl": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"untrusted-cert": {"required": False, "type": "str",
"choices": ["allow", "block", "ignore"]}
}},
"imaps": {"required": False, "type": "dict",
"options": {
"allow-invalid-server-cert": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"client-cert-request": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"ports": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["disable", "deep-inspection"]},
"unsupported-ssl": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"untrusted-cert": {"required": False, "type": "str",
"choices": ["allow", "block", "ignore"]}
}},
"mapi-over-https": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"name": {"required": True, "type": "str"},
"pop3s": {"required": False, "type": "dict",
"options": {
"allow-invalid-server-cert": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"client-cert-request": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"ports": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["disable", "deep-inspection"]},
"unsupported-ssl": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"untrusted-cert": {"required": False, "type": "str",
"choices": ["allow", "block", "ignore"]}
}},
"rpc-over-https": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"server-cert": {"required": False, "type": "str"},
"server-cert-mode": {"required": False, "type": "str",
"choices": ["re-sign", "replace"]},
"smtps": {"required": False, "type": "dict",
"options": {
"allow-invalid-server-cert": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"client-cert-request": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"ports": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["disable", "deep-inspection"]},
"unsupported-ssl": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"untrusted-cert": {"required": False, "type": "str",
"choices": ["allow", "block", "ignore"]}
}},
"ssh": {"required": False, "type": "dict",
"options": {
"inspect-all": {"required": False, "type": "str",
"choices": ["disable", "deep-inspection"]},
"ports": {"required": False, "type": "int"},
"ssh-algorithm": {"required": False, "type": "str",
"choices": ["compatible", "high-encryption"]},
"ssh-policy-check": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"ssh-tun-policy-check": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"status": {"required": False, "type": "str",
"choices": ["disable", "deep-inspection"]},
"unsupported-version": {"required": False, "type": "str",
"choices": ["bypass", "block"]}
}},
"ssl": {"required": False, "type": "dict",
"options": {
"allow-invalid-server-cert": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"client-cert-request": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"inspect-all": {"required": False, "type": "str",
"choices": ["disable", "certificate-inspection", "deep-inspection"]},
"unsupported-ssl": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"untrusted-cert": {"required": False, "type": "str",
"choices": ["allow", "block", "ignore"]}
}},
"ssl-anomalies-log": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"ssl-exempt": {"required": False, "type": "list",
"options": {
"address": {"required": False, "type": "str"},
"address6": {"required": False, "type": "str"},
"fortiguard-category": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"regex": {"required": False, "type": "str"},
"type": {"required": False, "type": "str",
"choices": ["fortiguard-category", "address", "address6",
"wildcard-fqdn", "regex"]},
"wildcard-fqdn": {"required": False, "type": "str"}
}},
"ssl-exemptions-log": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"ssl-server": {"required": False, "type": "list",
"options": {
"ftps-client-cert-request": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"https-client-cert-request": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"id": {"required": True, "type": "int"},
"imaps-client-cert-request": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"ip": {"required": False, "type": "str"},
"pop3s-client-cert-request": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"smtps-client-cert-request": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]},
"ssl-other-client-cert-request": {"required": False, "type": "str",
"choices": ["bypass", "inspect", "block"]}
}},
"untrusted-caname": {"required": False, "type": "str"},
"use-ssl-server": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"whitelist": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
py | b4177ba1129f7fa1ecc4d76c1ea41b2970c55389 | from geoscript.render.image import Image
class PNG(Image):
"""
Renderer that produces a PNG image.
"""
def __init__(self):
Image.__init__(self, 'png')
|
py | b4177c10c267c7724e45eacb2ee48ca378489480 | """
Module to read MODFLOW 6 binary grid files (*.grb) that define the model
grid binary output files. The module contains the MfGrdFile class that can
be accessed by the user.
"""
import numpy as np
import collections
from ..utils.utils_def import FlopyBinaryData
from ..utils.reference import SpatialReference, SpatialReferenceUnstructured
class MfGrdFile(FlopyBinaryData):
"""
The MfGrdFile class.
Parameters
----------
filename : str
Name of the MODFLOW 6 binary grid file
precision : string
'single' or 'double'. Default is 'double'.
verbose : bool
Write information to the screen. Default is False.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
The MfGrdFile class provides simple ways to retrieve data from binary
MODFLOW 6 binary grid files (.grb). The binary grid file contains data
that can be used for post processing MODFLOW 6 model results.
Examples
--------
>>> import flopy
>>> gobj = flopy.utils.MfGrdFile('test.dis.grb')
"""
def __init__(self, filename, precision='double', verbose=False):
"""
Class constructor.
"""
# Call base class init
super(MfGrdFile, self).__init__()
# set attributes
self.set_float(precision=precision)
self.verbose = verbose
self._initial_len = 50
self._recorddict = collections.OrderedDict()
self._datadict = collections.OrderedDict()
self._recordkeys = []
if self.verbose:
print('\nProcessing binary grid file: {}'.format(filename))
# open the grb file
self.file = open(filename, 'rb')
# grid type
line = self.read_text(self._initial_len).strip()
t = line.split()
self._grid = t[1]
# version
line = self.read_text(self._initial_len).strip()
t = line.split()
self._version = t[1]
        # number of text records (ntxt)
line = self.read_text(self._initial_len).strip()
t = line.split()
self._ntxt = int(t[1])
# length of text
line = self.read_text(self._initial_len).strip()
t = line.split()
self._lentxt = int(t[1])
# read text strings
for idx in range(self._ntxt):
line = self.read_text(self._lentxt).strip()
t = line.split()
key = t[0]
dt = t[1]
if dt == 'INTEGER':
dtype = np.int32
elif dt == 'SINGLE':
dtype = np.float32
elif dt == 'DOUBLE':
dtype = np.float64
else:
dtype = None
nd = int(t[3])
if nd > 0:
shp = [int(v) for v in t[4:]]
shp = tuple(shp[::-1])
else:
shp = (0,)
self._recorddict[key] = (dtype, nd, shp)
self._recordkeys.append(key)
if self.verbose:
s = ''
if nd > 0:
s = shp
msg = ' File contains data for {} '.format(key) + \
'with shape {}'.format(s)
print(msg)
if self.verbose:
msg = 'Attempting to read {} '.format(self._ntxt) + \
'records from {}'.format(filename)
print(msg)
for key in self._recordkeys:
if self.verbose:
msg = ' Reading {}'.format(key)
print(msg)
dt, nd, shp = self._recorddict[key]
# read array data
if nd > 0:
count = 1
for v in shp:
count *= v
v = self.read_record(count=count, dtype=dt)
# read variable data
else:
if dt == np.int32:
v = self.read_integer()
elif dt == np.float32:
v = self.read_real()
elif dt == np.float64:
v = self.read_real()
self._datadict[key] = v
if self.verbose:
if nd == 0:
msg = ' {} = {}'.format(key, v)
print(msg)
else:
msg = ' {}: '.format(key) + \
'min = {} max = {}'.format(v.min(), v.max())
print(msg)
# set the spatial reference
self.sr = self._set_spatialreference()
# close the grb file
self.file.close()
def _set_spatialreference(self):
"""
Define structured or unstructured spatial reference based on
MODFLOW 6 discretization type.
Returns
-------
sr : SpatialReference
"""
sr = None
try:
if self._grid == 'DISV' or self._grid == 'DISU':
try:
iverts, verts = self.get_verts()
vertc = self.get_centroids()
xc = vertc[:, 0]
yc = vertc[:, 1]
sr = SpatialReferenceUnstructured(xc, yc, verts, iverts,
[xc.shape[0]])
except:
msg = 'could not set spatial reference for ' + \
'{} discretization '.format(self._grid) + \
'defined in {}'.format(self.file.name)
print(msg)
elif self._grid == 'DIS':
delr, delc = self._datadict['DELR'], self._datadict['DELC']
xorigin, yorigin, rot = self._datadict['XORIGIN'], \
self._datadict['YORIGIN'], \
self._datadict['ANGROT']
sr = SpatialReference(delr=delr, delc=delc,
xll=xorigin, yll=yorigin, rotation=rot)
except:
print('could not set spatial reference for {}'.format(
self.file.name))
return sr
def get_spatialreference(self):
"""
Get the SpatialReference based on the MODFLOW 6 discretization type
Returns
-------
sr : SpatialReference
Examples
--------
>>> import flopy
>>> gobj = flopy.utils.MfGrdFile('test.dis.grb')
>>> sr = gobj.get_spatialreference()
"""
return self.sr
def get_centroids(self):
"""
Get the centroids for a MODFLOW 6 GWF model that uses the DIS,
DISV, or DISU discretization.
Returns
-------
vertc : np.ndarray
Array with x, y pairs of the centroid for every model cell
Examples
--------
>>> import flopy
>>> gobj = flopy.utils.MfGrdFile('test.dis.grb')
>>> vertc = gobj.get_centroids()
"""
try:
if self._grid in ['DISV', 'DISU']:
x = self._datadict['CELLX']
y = self._datadict['CELLY']
elif self._grid == 'DIS':
nlay = self._datadict['NLAY']
x = np.tile(self.sr.xcentergrid.flatten(), nlay)
y = np.tile(self.sr.ycentergrid.flatten(), nlay)
return np.column_stack((x, y))
except:
msg = 'could not return centroids' + \
' for {}'.format(self.file.name)
raise KeyError(msg)
def get_verts(self):
"""
Get a list of the vertices that define each model cell and the x, y
pair for each vertex.
Returns
-------
iverts : list of lists
List with lists containing the vertex indices for each model cell.
verts : np.ndarray
Array with x, y pairs for every vertex used to define the model.
Examples
--------
>>> import flopy
>>> gobj = flopy.utils.MfGrdFile('test.dis.grb')
>>> iverts, verts = gobj.get_verts()
"""
if self._grid == 'DISV':
try:
iverts = []
iavert = self._datadict['IAVERT']
javert = self._datadict['JAVERT']
shpvert = self._recorddict['VERTICES'][2]
for ivert in range(self._datadict['NCPL']):
i0 = iavert[ivert] - 1
i1 = iavert[ivert + 1] - 1
iverts.append((javert[i0:i1] - 1).tolist())
if self.verbose:
msg = 'returning vertices for {}'.format(self.file.name)
print(msg)
return iverts, self._datadict['VERTICES'].reshape(shpvert)
except:
msg = 'could not return vertices for ' + \
'{}'.format(self.file.name)
raise KeyError(msg)
elif self._grid == 'DISU':
try:
iverts = []
iavert = self._datadict['IAVERT']
javert = self._datadict['JAVERT']
shpvert = self._recorddict['VERTICES'][2]
for ivert in range(self._datadict['NODES']):
i0 = iavert[ivert] - 1
i1 = iavert[ivert + 1] - 1
iverts.append((javert[i0:i1] - 1).tolist())
if self.verbose:
msg = 'returning vertices for {}'.format(self.file.name)
print(msg)
return iverts, self._datadict['VERTICES'].reshape(shpvert)
except:
msg = 'could not return vertices for {}'.format(self.file.name)
raise KeyError(msg)
elif self._grid == 'DIS':
try:
nlay, nrow, ncol = self._datadict['NLAY'], \
self._datadict['NROW'], \
self._datadict['NCOL']
iv = 0
verts = []
iverts = []
for k in range(nlay):
for i in range(nrow):
for j in range(ncol):
ivlist = []
v = self.sr.get_vertices(i, j)
for (x, y) in v:
verts.append((x, y))
ivlist.append(iv)
iv += 1
iverts.append(ivlist)
verts = np.array(verts)
return iverts, verts
except:
msg = 'could not return vertices for {}'.format(self.file.name)
raise KeyError(msg)
return
|
py | b4177c66063976e4ee206298bc2ee0dd0dbe3ddf | import unittest
import bill_info
# parsing various kinds of action text to extract metadata and establish state
def parse_bill_action(line, state, bill_id, title):
return bill_info.parse_bill_action({"text": line}, state, bill_id, title)
class BillActions(unittest.TestCase):
def test_veto(self):
bill_id = "hjres64-111"
title = "Making further continuing appropriations for fiscal year 2010, and for other purposes."
state = "PASSED:BILL"
line = "Vetoed by President."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vetoed")
self.assertEqual(new_state, "PROV_KILL:VETO")
def test_pocket_veto(self):
bill_id = "hr2415-106"
title = "United Nations Reform Act of 1999"
state = "PASSED:BILL"
line = "Pocket Vetoed by President."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vetoed")
self.assertEqual(new_action['pocket'], "1")
self.assertEqual(new_state, "VETOED:POCKET")
def test_reported_from_committee(self):
bill_id = "s968-112"
title = "A bill to prevent online threats to economic creativity and theft of intellectual property, and for other purposes."
state = "REFERRED"
line = "Committee on the Judiciary. Ordered to be reported with an amendment in the nature of a substitute favorably."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], 'calendar')
# self.assertEqual(new_action['committee'], "Committee on the Judiciary")
self.assertEqual(new_state, "REPORTED")
def test_added_to_calendar(self):
bill_id = "s968-112"
title = "A bill to prevent online threats to economic creativity and theft of intellectual property, and for other purposes."
state = "REPORTED"
line = "Placed on Senate Legislative Calendar under General Orders. Calendar No. 70."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], 'calendar')
self.assertEqual(new_action['calendar'], "Senate Legislative")
self.assertEqual(new_action['under'], "General Orders")
self.assertEqual(new_action['number'], "70")
self.assertEqual(new_state, None)
def test_enacted_as_public_law(self):
bill_id = "hr3590-111"
title = "An act entitled The Patient Protection and Affordable Care Act."
state = "ENACTED:SIGNED"
line = "Became Public Law No: 111-148."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "enacted")
self.assertEqual(new_action['congress'], "111")
self.assertEqual(new_action['number'], "148")
self.assertEqual(new_action['law'], "public")
def test_cleared_for_whitehouse(self):
bill_id = "hr3590-111"
title = "An act entitled The Patient Protection and Affordable Care Act."
state = "PASSED:BILL"
line = "Cleared for White House."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
# should not be marked as presented to president, since it hasn't been yet
# self.assertEqual(new_action['type'], 'action')
def test_presented_to_president(self):
bill_id = "hr3590-111"
title = "An act entitled The Patient Protection and Affordable Care Act."
state = "PASSED:BILL"
line = "Presented to President."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], 'topresident')
def test_signed_by_president(self):
bill_id = "hr3590-111"
title = "An act entitled The Patient Protection and Affordable Care Act."
state = "PASSED:BILL"
line = "Signed by President."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], 'signed')
# voting tests
def test_vote_normal_roll(self):
bill_id = "hr3590-111"
title = "An act entitled The Patient Protection and Affordable Care Act."
state = "INTRODUCED"
line = "On motion to suspend the rules and pass the bill Agreed to by the Yeas and Nays: (2/3 required): 416 - 0 (Roll no. 768)."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "vote")
self.assertEqual(new_action['where'], "h")
self.assertEqual(new_action['how'], "roll")
self.assertEqual(new_action['result'], "pass")
self.assertEqual(new_action['roll'], "768")
self.assertEqual(new_state, "PASS_OVER:HOUSE")
def test_vote_normal_roll_second(self):
bill_id = "hr3590-111"
title = "An act entitled The Patient Protection and Affordable Care Act."
state = "PASS_OVER:HOUSE"
line = "Passed Senate with an amendment and an amendment to the Title by Yea-Nay Vote. 60 - 39. Record Vote Number: 396."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "vote2")
self.assertEqual(new_action['where'], "s")
self.assertEqual(new_action['how'], "roll")
self.assertEqual(new_action['result'], "pass")
self.assertEqual(new_action['roll'], "396")
self.assertEqual(new_state, "PASS_BACK:SENATE")
def test_cloture_vote_verbose(self):
bill_id = "s1982-113"
title = "Comprehensive Veterans Health and Benefits and Military Retirement Pay Restoration Act of 2014"
line = "Cloture motion on the motion to proceed to the measure invoked in Senate by Yea-Nay Vote. 99 - 0. Record Vote Number: 44."
state = "REPORTED"
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote-aux")
self.assertEqual(new_action['vote_type'], "cloture")
self.assertEqual(new_action['where'], "s")
self.assertEqual(new_action['how'], "roll")
self.assertEqual(new_action['result'], "pass")
self.assertEqual(new_action['roll'], "44")
self.assertEqual(new_state, None)
def test_vote_roll_pingpong(self):
bill_id = "hr3590-111"
title = "An act entitled The Patient Protection and Affordable Care Act."
state = "PASS_BACK:SENATE"
line = "On motion that the House agree to the Senate amendments Agreed to by recorded vote: 219 - 212 (Roll no. 165)."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['roll'], "165")
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "pingpong")
self.assertEqual(new_action['where'], "h")
self.assertEqual(new_action['how'], "roll")
self.assertEqual(new_action['result'], "pass")
def test_vote_cloture(self):
bill_id = "hr3590-111"
title = "An act entitled The Patient Protection and Affordable Care Act."
state = "PASS_OVER:HOUSE" # should not change
line = "Cloture on the motion to proceed to the bill invoked in Senate by Yea-Nay Vote. 60 - 39. Record Vote Number: 353."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['roll'], "353")
self.assertEqual(new_action['type'], "vote-aux")
self.assertEqual(new_action['vote_type'], "cloture")
self.assertEqual(new_action['where'], "s")
self.assertEqual(new_action['how'], "roll")
self.assertEqual(new_action['result'], "pass")
self.assertEqual(new_state, None) # unchanged
def test_vote_cloture_2(self):
bill_id = "hr3590-111"
title = "An act entitled The Patient Protection and Affordable Care Act."
state = "PASS_OVER:HOUSE" # should not change
line = "Cloture invoked in Senate by Yea-Nay Vote. 60 - 39. Record Vote Number: 395."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['roll'], "395")
self.assertEqual(new_action['type'], "vote-aux")
self.assertEqual(new_action['vote_type'], "cloture")
self.assertEqual(new_action['where'], "s")
self.assertEqual(new_action['how'], "roll")
self.assertEqual(new_action['result'], "pass")
self.assertEqual(new_state, None) # unchanged
# not sure whether to include votes that are on process, not passage or cloture
# def test_vote_process_voice_senate(self):
# bill_id = "hr3590-111"
# title = "An act entitled The Patient Protection and Affordable Care Act."
# state = "PASS_OVER:HOUSE" # should not change
# line = "Motion to proceed to consideration of measure agreed to in Senate by Unanimous Consent."
# new_action, new_state = parse_bill_action(line, state, bill_id, title)
# self.assertEqual(new_action['type'], 'vote')
# self.assertEqual(new_action['vote_type'], 'other')
# self.assertEqual(new_action['how'], 'Unanimous Consent')
# self.assertEqual(new_action['where'], 's')
# self.assertEqual(new_action['result'], 'pass')
# self.assertEqual(new_state, None)
# def test_vote_commit_roll_failure(self):
# bill_id = "hr3590-111"
# title = "An act entitled The Patient Protection and Affordable Care Act."
# state = "PASS_OVER:HOUSE" # should not change
# line = "Motion by Senator McCain to commit to Senate Committee on Finance under the order of 12/2/2009, not having achieved 60 votes in the affirmative, the motion was rejected in Senate by Yea-Nay Vote. 42 - 58. Record Vote Number: 358."
# new_action, new_state = parse_bill_action(line, state, bill_id, title)
# self.assertEqual(new_action['type'], 'vote')
# self.assertEqual(new_action['vote_type'], 'other')
# self.assertEqual(new_action['how'], 'roll')
# self.assertEqual(new_action['where'], 's')
# self.assertEqual(new_action['result'], 'fail')
# self.assertEqual(new_action['roll'], "358")
# self.assertEqual(new_state, None)
# def test_vote_motion_conference(self):
# bill_id = "hr3630-112"
# title = "A bill to extend the payroll tax holiday, unemployment compensation, Medicare physician payment, provide for the consideration of the Keystone XL pipeline, and for other purposes."
# state = "PASS_BACK:SENATE"
# line = "On motion that the House disagree to the Senate amendments, and request a conference Agreed to by the Yeas and Nays: 229 - 193 (Roll no. 946)."
# new_action, new_state = parse_bill_action(line, state, bill_id, title)
# self.assertEqual(new_action['type'], 'vote')
# self.assertEqual(new_action['vote_type'], 'other')
# self.assertEqual(new_action['how'], 'roll')
# self.assertEqual(new_action['where'], 'h')
# self.assertEqual(new_action['result'], 'pass')
# self.assertEqual(new_action['roll'], "946")
# self.assertEqual(new_state, None)
# def test_vote_motion_instruct_conferees(self):
# bill_id = "hr3630-112"
# title = "A bill to extend the payroll tax holiday, unemployment compensation, Medicare physician payment, provide for the consideration of the Keystone XL pipeline, and for other purposes."
# state = "PASS_BACK:SENATE"
# line = "On motion that the House instruct conferees Agreed to by the Yeas and Nays: 397 - 16 (Roll no. 9)."
# new_action, new_state = parse_bill_action(line, state, bill_id, title)
# self.assertEqual(new_action['type'], 'vote')
# self.assertEqual(new_action['vote_type'], 'other')
# self.assertEqual(new_action['how'], 'roll')
# self.assertEqual(new_action['where'], 'h')
# self.assertEqual(new_action['result'], 'pass')
# self.assertEqual(new_action['roll'], "9")
# self.assertEqual(new_state, None)
def test_vote_conference_report_house_pass(self):
bill_id = "hr3630-112"
title = "A bill to extend the payroll tax holiday, unemployment compensation, Medicare physician payment, provide for the consideration of the Keystone XL pipeline, and for other purposes."
state = "PASS_BACK:SENATE"
line = "On agreeing to the conference report Agreed to by the Yeas and Nays: 293 - 132 (Roll no. 72)."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], 'vote')
self.assertEqual(new_action['vote_type'], 'conference')
self.assertEqual(new_action['how'], 'roll')
self.assertEqual(new_action['where'], 'h')
self.assertEqual(new_action['result'], 'pass')
self.assertEqual(new_action['roll'], "72")
self.assertEqual(new_state, 'CONFERENCE:PASSED:HOUSE')
def test_vote_conference_report_senate_pass(self):
bill_id = "hr3630-112"
title = "A bill to extend the payroll tax holiday, unemployment compensation, Medicare physician payment, provide for the consideration of the Keystone XL pipeline, and for other purposes."
state = "CONFERENCE:PASSED:HOUSE"
line = "Senate agreed to conference report by Yea-Nay Vote. 60 - 36. Record Vote Number: 22."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], 'vote')
self.assertEqual(new_action['vote_type'], 'conference')
self.assertEqual(new_action['how'], 'roll')
self.assertEqual(new_action['where'], 's')
self.assertEqual(new_action['result'], 'pass')
self.assertEqual(new_action['roll'], "22")
self.assertEqual(new_state, 'PASSED:BILL')
def test_vote_veto_override_fail(self):
bill_id = "hjres64-111"
title = "Making further continuing appropriations for fiscal year 2010, and for other purposes."
state = "PROV_KILL:VETO"
line = "On passage, the objections of the President to the contrary notwithstanding Failed by the Yeas and Nays: (2/3 required): 143 - 245, 1 Present (Roll no. 2).On passage, the objections of the President to the contrary notwithstanding Failed by the Yeas and Nays: (2/3 required): 143 - 245, 1 Present (Roll no. 2)."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "override")
self.assertEqual(new_action['where'], "h")
self.assertEqual(new_action["result"], "fail")
self.assertEqual(new_action["how"], "roll")
self.assertEqual(new_action["roll"], "2")
self.assertEqual(new_state, "VETOED:OVERRIDE_FAIL_ORIGINATING:HOUSE")
def test_veto_override_success_once(self):
bill_id = "hr6331-110"
title = "Medicare Improvements for Patients and Providers Act of 2008"
state = "PROV_KILL:VETO"
line = "Two-thirds of the Members present having voted in the affirmative the bill is passed, Passed by the Yeas and Nays: (2/3 required): 383 - 41 (Roll no. 491)."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "override")
self.assertEqual(new_action['where'], "h")
self.assertEqual(new_action["result"], "pass")
self.assertEqual(new_action["how"], "roll")
self.assertEqual(new_action["roll"], "491")
self.assertEqual(new_state, "VETOED:OVERRIDE_PASS_OVER:HOUSE")
def test_veto_override_success_twice(self):
bill_id = "hr6331-110"
title = "Medicare Improvements for Patients and Providers Act of 2008"
state = "VETOED:OVERRIDE_PASS_OVER:HOUSE"
line = "Passed Senate over veto by Yea-Nay Vote. 70 - 26. Record Vote Number: 177."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "override")
self.assertEqual(new_action['where'], "s")
self.assertEqual(new_action["result"], "pass")
self.assertEqual(new_action["how"], "roll")
self.assertEqual(new_action["roll"], "177")
# self.assertEqual(new_state, "VETOED:OVERRIDE_COMPLETE:SENATE")
# Fictional bill, no constitutional amendment passed by both Houses
# in the THOMAS era (1973-present).
# The 26th was passed by Congress in 1971, 27th passed by Congress in 1789.
# The line here is taken from hjres10-109, when the House passed a
# flag burning amendment. (A separate version later failed the Senate by one vote.)
def test_passed_constitutional_amendment(self):
bill_id = "sjres64-1000"
title = "Proposing an amendment to the Constitution of the United States authorizing the Congress to prohibit the physical desecration of the flag of the United States."
state = "PASS_OVER:SENATE"
line = "On passage Passed by the Yeas and Nays: (2/3 required): 286 - 130 (Roll no. 296)."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "vote2")
self.assertEqual(new_action['where'], "h")
self.assertEqual(new_action["result"], "pass")
self.assertEqual(new_action["how"], "roll")
self.assertEqual(new_action["roll"], "296")
self.assertEqual(new_state, "PASSED:CONSTAMEND")
def test_passed_concurrent_resolution(self):
bill_id = "hconres74-112"
title = "Providing for a joint session of Congress to receive a message from the President."
state = "PASS_OVER:HOUSE"
line = "Received in the Senate, considered, and agreed to without amendment by Unanimous Consent."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "vote2")
self.assertEqual(new_action['where'], "s")
self.assertEqual(new_action["result"], "pass")
self.assertEqual(new_action["how"], "by Unanimous Consent")
self.assertEqual(new_state, "PASSED:CONCURRENTRES")
def test_passed_simple_resolution_house(self):
bill_id = "hres9-112"
title = "Instructing certain committees to report legislation replacing the job-killing health care law."
state = "REPORTED"
line = "On agreeing to the resolution, as amended Agreed to by the Yeas and Nays: 253 - 175 (Roll no. 16)."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "vote")
self.assertEqual(new_action['where'], "h")
self.assertEqual(new_action["result"], "pass")
self.assertEqual(new_action["how"], "roll")
self.assertEqual(new_action['roll'], "16")
self.assertEqual(new_state, "PASSED:SIMPLERES")
def test_passed_simple_resolution_senate(self):
bill_id = "sres484-112"
title = "A resolution designating June 7, 2012, as \"National Hunger Awareness Day\"."
state = "REPORTED"
line = "Submitted in the Senate, considered, and agreed to without amendment and with a preamble by Unanimous Consent."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "vote")
self.assertEqual(new_action['where'], "s")
self.assertEqual(new_action["result"], "pass")
self.assertEqual(new_action["how"], "by Unanimous Consent")
self.assertEqual(new_state, "PASSED:SIMPLERES")
def test_failed_simple_resolution_senate(self):
bill_id = "sres5-113"
title = "A resolution amending the Standing Rules of the Senate to provide for cloture to be invoked with less than a three-fifths majority after additional debate."
state = "INTRODUCED"
line = "Disagreed to in Senate by Voice Vote."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "vote")
self.assertEqual(new_action['where'], "s")
self.assertEqual(new_action["result"], "fail")
self.assertEqual(new_action["how"], "by Voice Vote")
self.assertEqual(new_state, "FAIL:ORIGINATING:SENATE")
def test_failed_suspension_vote(self):
bill_id = "hr1954-112"
title = "To implement the President's request to increase the statutory limit on the public debt."
state = "REFERRED"
line = "On motion to suspend the rules and pass the bill Failed by the Yeas and Nays: (2/3 required): 97 - 318, 7 Present (Roll no. 379)."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "vote")
self.assertEqual(new_action['where'], "h")
self.assertEqual(new_action["result"], "fail")
self.assertEqual(new_action["how"], "roll")
self.assertEqual(new_action['roll'], "379")
self.assertEqual(new_state, "PROV_KILL:SUSPENSIONFAILED")
def test_passed_by_special_rule(self):
bill_id = "hres240-109"
title = "Amending the Rules of the House of Representatives to reinstate certain provisions of the rules relating to procedures of the Committee on Standards of Official Conduct to the form in which those provisions existed at the close of the 108th Congress."
state = "INTRODUCED"
line = "Passed House pursuant to H. Res. 241."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "vote")
self.assertEqual(new_action['vote_type'], "vote")
self.assertEqual(new_action['where'], "h")
self.assertEqual(new_action["result"], "pass")
self.assertEqual(new_action["how"], "by special rule")
self.assertEqual(new_state, "PASSED:SIMPLERES")
self.assertEqual(new_action['bill_ids'], ["hres241-109"])
def test_referral_committee(self):
bill_id = "hr547-113"
title = "To provide for the establishment of a border protection strategy for the international land borders of the United States, to address the ecological and environmental impacts of border security infrastructure, measures, and activities along the international land borders of the United States, and for other purposes."
state = "INTRODUCED"
line = "Referred to the Committee on Homeland Security, and in addition to the Committees on Armed Services, Agriculture, and Natural Resources, for a period to be subsequently determined by the Speaker, in each case for consideration of such provisions as fall within the jurisdiction of the committee concerned."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "referral")
self.assertEqual(new_state, "REFERRED")
def test_referral_subcommittee(self):
bill_id = "hr547-113"
title = "To provide for the establishment of a border protection strategy for the international land borders of the United States, to address the ecological and environmental impacts of border security infrastructure, measures, and activities along the international land borders of the United States, and for other purposes."
state = "INTRODUCED"
line = "Referred to the Subcommittee Indian and Alaska Native Affairs."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "referral")
self.assertEqual(new_state, "REFERRED")
def test_hearings_held(self):
bill_id = "s54-113"
title = "A bill to increase public safety by punishing and deterring firearms trafficking."
state = "REFERRED"
line = "Committee on the Judiciary Subcommittee on the Constitution, Civil Rights and Human Rights. Hearings held."
new_action, new_state = parse_bill_action(line, state, bill_id, title)
self.assertEqual(new_action['type'], "hearings")
# self.assertEqual(new_action['committees'], "Committee on the Judiciary Subcommittee on the Constitution, Civil Rights and Human Rights")
self.assertEqual(new_state, None) # did not change state
|
py | b4177d043257f366000def63f49b15f3f30370c5 | # Import the device driver.
import pycuda.driver as drv
# Determine which GPU in the system is available for work.
import pycuda.autoinit
# Directive for the nVidia compiler (nvcc).
from pycuda.compiler import SourceModule
import numpy
# Build a 5 x 5 matrix whose values are chosen at random.
a = numpy.random.randn(5, 5)
# The GPU supports only single precision.
a = a.astype(numpy.float32)
# Allocate memory on the device for the matrix.
a_gpu = drv.mem_alloc(a.nbytes)
# Copy the matrix to the device.
drv.memcpy_htod(a_gpu, a)
# Build the executable module.
mod = SourceModule("""
__global__ void doubles_matrix(float *a){
int idx = threadIdx.x + threadIdx.y*5;  // row stride is 5 for the 5-column matrix
a[idx] *= 2;}
""")
# Get a handle to the kernel function.
func = mod.get_function("doubles_matrix")
# Launch the kernel.
func(a_gpu, block=(5, 5, 1))
# Allocate host memory for the copied-back matrix.
a_doubled = numpy.empty_like(a)
# Copy the new matrix from the device back to host memory.
drv.memcpy_dtoh(a_doubled, a_gpu)
# Display the result.
print("ORIGINAL MATRIX")
print(a)
print("DOUBLED MATRIX AFTER PyCUDA EXECUTION")
print(a_doubled)
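# A hedged alternative sketch (not part of the original example): pycuda.gpuarray can
# express the same element-wise doubling without writing a kernel by hand.
import pycuda.gpuarray as gpuarray
a_gpu2 = gpuarray.to_gpu(a)       # copy the host array to the device
a_doubled2 = (2 * a_gpu2).get()   # element-wise multiply on the GPU, then copy back
print("DOUBLED MATRIX VIA gpuarray")
print(a_doubled2)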
|
py | b4177d4b0a31864d9bda9df129eb5503ce1a5961 | #!/usr/bin/env python
import argparse
def parse_args():
parser = argparse.ArgumentParser("Produces BED file of bam where read depth is at or above a threshold")
parser.add_argument('--input_fasta', '-i', dest='input_fasta', required=True, type=str,
help='Input fasta file')
parser.add_argument('--output_prefix', '-o', dest='output_prefix', default="output_", type=str,
help="Output will be of form ${OUTPUT_PREFIX}${IDX}${OUTPUT_SUFFIX}")
parser.add_argument('--output_suffix', '-O', dest='output_suffix', default=".fasta", type=str,
help="Output will be of form ${OUTPUT_PREFIX}${IDX}${OUTPUT_SUFFIX}")
parser.add_argument('--minimum_lines', '-l', dest='minimum_lines', required=True, type=int,
help='Minimum number of fasta lines to keep per file')
parser.add_argument('--index_format', '-d', dest='index_format', default="%03d",
help='Format string for chunk index')
args = parser.parse_args()
return args
def main():
# prep
args = parse_args()
with open(args.input_fasta, 'r') as infile:
current_line = 0
current_idx = 0
current_outfile = None
try:
for line in infile:
if current_outfile is None or (line.startswith(">") and current_line >= args.minimum_lines):
if current_outfile is not None:
current_outfile.close()
outfile_idx = args.index_format % current_idx
outfilename = args.output_prefix + outfile_idx + args.output_suffix
print("Writing to file {} after writing {} lines".format(outfilename, current_line))
current_outfile = open(outfilename, 'w')
current_line = 0
current_idx += 1
current_outfile.write(line)
current_line += 1
finally:
if current_outfile is not None: current_outfile.close()
if __name__ == "__main__":
main()
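# Hedged usage illustration (the script and file names below are placeholders):
#   python split_fasta.py --input_fasta reads.fasta --minimum_lines 1000 \
#       --output_prefix chunk_ --output_suffix .fasta --index_format %03d
# This writes chunk_000.fasta, chunk_001.fasta, ..., each starting at a '>' record
# boundary and holding at least 1000 lines (the final chunk may hold fewer).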
|
py | b4177f9fc853331d7c9af7e4e51a8f55f1c914ed | """
Linear search is a very simple search algorithm. In this type of search, a sequential search is made over all items one by one. Every item is checked and if a match is found then that particular item is returned, otherwise the search continues till the end of the data collection.
"""
a = [1, 5, 6, 7, 8, 9, 0, 'john', 56, 74, 456, 3, 6, 42, 53]
def maf_linear(a, x):
"""
Step 1: Set i to 1
Step 2: if i > n then go to step 7
Step 3: if A[i] = x then go to step 6
Step 4: Set i to i + 1
Step 5: Go to Step 2
Step 6: Print Element x Found at index i and go to step 8
Step 7: Print element not found
Step 8: Exit
"""
e = "Item Not Found"
for i in a:
if i == x:
print('Item found at index:', a.index(i))
return a.index(i)
elif i != a:
print(e)
v = 'john'
normal = maf_linear(a, v)
print("Results from for Loop", normal)
def maf_lin_com(a, x):
""" comprehensive linear search"""
e = "Item Not Found"
    return [idx for idx, item in enumerate(a) if item == x]
print("Results from List Comprehension:", maf_lin_com(a, v) ) |
py | b41781bf824f10cd4f0744e62313db51c54a5d29 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import traceback
import oneflow.python.framework.session_context as session_ctx
import oneflow.python.framework.attr_util as attr_util
import oneflow._oneflow_internal.oneflow.core.job.job_conf as job_conf_cfg
from contextlib import contextmanager
from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate
import oneflow._oneflow_internal
@oneflow_export("experimental.scope.config")
def api_scope_config(**kwargs):
name2default = session_ctx.GetDefaultSession().scope_attr_name2default_val
def SetScopeProto(scope_proto):
for attr_name, py_value in kwargs.items():
assert attr_name in name2default
attr_util.SetAttrValue(
scope_proto.mutable_attr_name2attr_value()[attr_name],
py_value,
name2default[attr_name],
)
sess = session_ctx.GetDefaultSession()
scope = MakeScope(
lambda old_scope, builder: builder.BuildScopeByProtoSetter(
old_scope, SetScopeProto
)
)
return ScopeContext(scope)
@oneflow_export("current_scope")
def api_current_scope():
r""" Return current scope
"""
return oneflow._oneflow_internal.GetCurrentScope()
@oneflow_export("scope.current_scope")
@oneflow_deprecate()
def deprecated_current_scope(*args, **kwargs):
print(
"WARNING:",
"oneflow.scope.current_scope",
"will be removed in the future, use {} instead.".format(
"oneflow.current_scope"
),
)
print(traceback.format_stack()[-2])
return api_current_scope(*args, **kwargs)
def MakeScope(build_func):
scope = None
old_scope = oneflow._oneflow_internal.GetCurrentScope()
assert old_scope is not None
def BuildScope(builder):
nonlocal scope
scope = build_func(old_scope, builder)
assert scope is not None
oneflow._oneflow_internal.deprecated.LogicalRun(BuildScope)
return scope
def MakeInitialScope(job_conf, device_tag, machine_device_ids, hierarchy, is_mirrored):
scope = None
def BuildInitialScope(builder):
nonlocal scope
session_id = session_ctx.GetDefaultSession().id
scope = builder.BuildInitialScope(
session_id, job_conf, device_tag, machine_device_ids, hierarchy, is_mirrored
)
oneflow._oneflow_internal.deprecated.LogicalRun(BuildInitialScope)
return scope
def InitScopeStack():
job_conf = job_conf_cfg.JobConfigProto()
job_conf.mutable_predict_conf()
job_conf.set_job_name("")
scope = MakeInitialScope(job_conf, "cpu", ["0:0"], None, is_mirrored=False)
oneflow._oneflow_internal.InitGlobalScopeStack(scope)
@contextmanager
def ScopeContext(scope):
old_scope = oneflow._oneflow_internal.GetCurrentScope()
oneflow._oneflow_internal.GlobalScopeStackPush(scope)
try:
yield
finally:
assert oneflow._oneflow_internal.GetCurrentScope() is scope
oneflow._oneflow_internal.GlobalScopeStackPop()
assert oneflow._oneflow_internal.GetCurrentScope() is old_scope
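# A hypothetical usage sketch (illustration only): api_scope_config is exported as
# "experimental.scope.config", so user code would enter the returned ScopeContext with
# a `with` block. The attribute name below is a placeholder; valid names come from the
# default session's scope_attr_name2default_val.
#
#   import oneflow as flow
#   with flow.experimental.scope.config(some_scope_attr=1):
#       ...  # ops built here see the modified scope attributes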
|
py | b41782578043c2c5b9a5a12711b99321d2b54cd3 | #!/usr/bin/python3
# Creates DNS zone files for all of the domains of all of the mail users
# and mail aliases and restarts nsd.
########################################################################
import sys, os, os.path, urllib.parse, datetime, re, hashlib, base64
import ipaddress
import rtyaml
import dns.resolver
from mailconfig import get_mail_domains
from utils import shell, load_env_vars_from_file, safe_domain_name, sort_domains
def get_dns_domains(env):
# Add all domain names in use by email users and mail aliases and ensure
# PRIMARY_HOSTNAME is in the list.
domains = set()
domains |= get_mail_domains(env)
domains.add(env['PRIMARY_HOSTNAME'])
return domains
def get_dns_zones(env):
# What domains should we create DNS zones for? Never create a zone for
# a domain & a subdomain of that domain.
domains = get_dns_domains(env)
# Exclude domains that are subdomains of other domains we know. Proceed
# by looking at shorter domains first.
zone_domains = set()
for domain in sorted(domains, key=lambda d : len(d)):
for d in zone_domains:
if domain.endswith("." + d):
# We found a parent domain already in the list.
break
else:
# 'break' did not occur: there is no parent domain.
zone_domains.add(domain)
# Make a nice and safe filename for each domain.
zonefiles = []
for domain in zone_domains:
zonefiles.append([domain, safe_domain_name(domain) + ".txt"])
# Sort the list so that the order is nice and so that nsd.conf has a
# stable order so we don't rewrite the file & restart the service
# meaninglessly.
zone_order = sort_domains([ zone[0] for zone in zonefiles ], env)
zonefiles.sort(key = lambda zone : zone_order.index(zone[0]) )
return zonefiles
def do_dns_update(env, force=False):
# What domains (and their zone filenames) should we build?
domains = get_dns_domains(env)
zonefiles = get_dns_zones(env)
# Custom records to add to zones.
additional_records = list(get_custom_dns_config(env))
# Write zone files.
os.makedirs('/etc/nsd/zones', exist_ok=True)
updated_domains = []
for i, (domain, zonefile) in enumerate(zonefiles):
# Build the records to put in the zone.
records = build_zone(domain, domains, additional_records, env)
# See if the zone has changed, and if so update the serial number
# and write the zone file.
if not write_nsd_zone(domain, "/etc/nsd/zones/" + zonefile, records, env, force):
# Zone was not updated. There were no changes.
continue
# If this is a .justtesting.email domain, then post the update.
try:
justtestingdotemail(domain, records)
except:
# Hmm. Might be a network issue. If we stop now, will we end
# up in an inconsistent state? Let's just continue.
pass
# Mark that we just updated this domain.
updated_domains.append(domain)
# Sign the zone.
#
# Every time we sign the zone we get a new result, which means
# we can't sign a zone without bumping the zone's serial number.
# Thus we only sign a zone if write_nsd_zone returned True
# indicating the zone changed, and thus it got a new serial number.
# write_nsd_zone is smart enough to check if a zone's signature
# is nearing expiration and if so it'll bump the serial number
# and return True so we get a chance to re-sign it.
sign_zone(domain, zonefile, env)
# Now that all zones are signed (some might not have changed and so didn't
# just get signed now, but were before) update the zone filename so nsd.conf
# uses the signed file.
for i in range(len(zonefiles)):
zonefiles[i][1] += ".signed"
# Write the main nsd.conf file.
if write_nsd_conf(zonefiles, additional_records, env):
# Make sure updated_domains contains *something* if we wrote an updated
# nsd.conf so that we know to restart nsd.
if len(updated_domains) == 0:
updated_domains.append("DNS configuration")
# Kick nsd if anything changed.
if len(updated_domains) > 0:
shell('check_call', ["/usr/sbin/service", "nsd", "restart"])
# Write the OpenDKIM configuration tables.
if write_opendkim_tables(domains, env):
# Settings changed. Kick opendkim.
shell('check_call', ["/usr/sbin/service", "opendkim", "restart"])
if len(updated_domains) == 0:
        # If OpenDKIM's configuration was the only thing that changed, still report it.
updated_domains.append("OpenDKIM configuration")
if len(updated_domains) == 0:
# if nothing was updated (except maybe OpenDKIM's files), don't show any output
return ""
else:
return "updated DNS: " + ",".join(updated_domains) + "\n"
########################################################################
def build_zone(domain, all_domains, additional_records, env, is_zone=True):
records = []
# For top-level zones, define the authoritative name servers.
#
# Normally we are our own nameservers. Some TLDs require two distinct IP addresses,
# so we allow the user to override the second nameserver definition so that
# secondary DNS can be set up elsewhere.
#
# 'False' in the tuple indicates these records would not be used if the zone
# is managed outside of the box.
if is_zone:
# Obligatory definition of ns1.PRIMARY_HOSTNAME.
records.append((None, "NS", "ns1.%s." % env["PRIMARY_HOSTNAME"], False))
# Define ns2.PRIMARY_HOSTNAME or whatever the user overrides.
secondary_ns = get_secondary_dns(additional_records) or ("ns2." + env["PRIMARY_HOSTNAME"])
records.append((None, "NS", secondary_ns+'.', False))
# In PRIMARY_HOSTNAME...
if domain == env["PRIMARY_HOSTNAME"]:
# Define ns1 and ns2.
# 'False' in the tuple indicates these records would not be used if the zone
# is managed outside of the box.
records.append(("ns1", "A", env["PUBLIC_IP"], False))
records.append(("ns2", "A", env["PUBLIC_IP"], False))
if env.get('PUBLIC_IPV6'):
records.append(("ns1", "AAAA", env["PUBLIC_IPV6"], False))
records.append(("ns2", "AAAA", env["PUBLIC_IPV6"], False))
# Set the A/AAAA records. Do this early for the PRIMARY_HOSTNAME so that the user cannot override them
# and we can provide different explanatory text.
records.append((None, "A", env["PUBLIC_IP"], "Required. Sets the IP address of the box."))
if env.get("PUBLIC_IPV6"): records.append((None, "AAAA", env["PUBLIC_IPV6"], "Required. Sets the IPv6 address of the box."))
# Add a DANE TLSA record for SMTP.
records.append(("_25._tcp", "TLSA", build_tlsa_record(env), "Recommended when DNSSEC is enabled. Advertises to mail servers connecting to the box that mandatory encryption should be used."))
# Add a SSHFP records to help SSH key validation. One per available SSH key on this system.
for value in build_sshfp_records():
records.append((None, "SSHFP", value, "Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh."))
# The MX record says where email for the domain should be delivered: Here!
records.append((None, "MX", "10 %s." % env["PRIMARY_HOSTNAME"], "Required. Specifies the hostname (and priority) of the machine that handles @%s mail." % domain))
# Add DNS records for any subdomains of this domain. We should not have a zone for
# both a domain and one of its subdomains.
subdomains = [d for d in all_domains if d.endswith("." + domain)]
for subdomain in subdomains:
subdomain_qname = subdomain[0:-len("." + domain)]
subzone = build_zone(subdomain, [], additional_records, env, is_zone=False)
for child_qname, child_rtype, child_value, child_explanation in subzone:
if child_qname == None:
child_qname = subdomain_qname
else:
child_qname += "." + subdomain_qname
records.append((child_qname, child_rtype, child_value, child_explanation))
has_rec_base = list(records) # clone current state
def has_rec(qname, rtype, prefix=None):
for rec in has_rec_base:
if rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)):
return True
return False
# The user may set other records that don't conflict with our settings.
# Don't put any TXT records above this line, or it'll prevent any custom TXT records.
for qname, rtype, value in filter_custom_records(domain, additional_records):
# Don't allow custom records for record types that override anything above.
# But allow multiple custom records for the same rtype --- see how has_rec_base is used.
if has_rec(qname, rtype): continue
# The "local" keyword on A/AAAA records are short-hand for our own IP.
# This also flags for web configuration that the user wants a website here.
if rtype == "A" and value == "local":
value = env["PUBLIC_IP"]
if rtype == "AAAA" and value == "local":
if "PUBLIC_IPV6" in env:
value = env["PUBLIC_IPV6"]
else:
continue
records.append((qname, rtype, value, "(Set by user.)"))
# Add defaults if not overridden by the user's custom settings (and not otherwise configured).
# Any "CNAME" record on the qname overrides A and AAAA.
has_rec_base = records
defaults = [
(None, "A", env["PUBLIC_IP"], "Required. May have a different value. Sets the IP address that %s resolves to for web hosting and other services besides mail. The A record must be present but its value does not affect mail delivery." % domain),
("www", "A", env["PUBLIC_IP"], "Optional. Sets the IP address that www.%s resolves to, e.g. for web hosting." % domain),
(None, "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that %s resolves to, e.g. for web hosting. (It is not necessary for receiving mail on this domain.)" % domain),
("www", "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that www.%s resolves to, e.g. for web hosting." % domain),
]
for qname, rtype, value, explanation in defaults:
if value is None or value.strip() == "": continue # skip IPV6 if not set
if not is_zone and qname == "www": continue # don't create any default 'www' subdomains on what are themselves subdomains
        # Set the default record, but only if:
        # (1) there is not a user-set record of the same type already,
        # (2) there is not a CNAME record already, since you can't set both and who knows what takes precedence, and
        # (3) there is not an A record already (if this is an A record this is a dup of (1), and if this is an AAAA record then don't set a default AAAA record if the user sets a custom A record, since the default wouldn't make sense and it should not resolve if the user doesn't provide a new AAAA record)
if not has_rec(qname, rtype) and not has_rec(qname, "CNAME") and not has_rec(qname, "A"):
records.append((qname, rtype, value, explanation))
# SPF record: Permit the box ('mx', see above) to send mail on behalf of
# the domain, and no one else.
# Skip if the user has set a custom SPF record.
if not has_rec(None, "TXT", prefix="v=spf1 "):
records.append((None, "TXT", 'v=spf1 mx -all', "Recommended. Specifies that only the box is permitted to send @%s mail." % domain))
# Append the DKIM TXT record to the zone as generated by OpenDKIM.
# Skip if the user has set a DKIM record already.
opendkim_record_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.txt')
with open(opendkim_record_file) as orf:
m = re.match(r'(\S+)\s+IN\s+TXT\s+\( "([^"]+)"\s+"([^"]+)"\s*\)', orf.read(), re.S)
val = m.group(2) + m.group(3)
if not has_rec(m.group(1), "TXT", prefix="v=DKIM1; "):
records.append((m.group(1), "TXT", val, "Recommended. Provides a way for recipients to verify that this machine sent @%s mail." % domain))
# Append a DMARC record.
# Skip if the user has set a DMARC record already.
if not has_rec("_dmarc", "TXT", prefix="v=DMARC1; "):
records.append(("_dmarc", "TXT", 'v=DMARC1; p=quarantine', "Optional. Specifies that mail that does not originate from the box but claims to be from @%s is suspect and should be quarantined by the recipient's mail system." % domain))
# For any subdomain with an A record but no SPF or DMARC record, add strict policy records.
all_resolvable_qnames = set(r[0] for r in records if r[1] in ("A", "AAAA"))
for qname in all_resolvable_qnames:
if not has_rec(qname, "TXT", prefix="v=spf1 "):
records.append((qname, "TXT", 'v=spf1 a mx -all', "Prevents unauthorized use of this domain name for outbound mail by requiring outbound mail to originate from the indicated host(s)."))
dmarc_qname = "_dmarc" + ("" if qname is None else "." + qname)
if not has_rec(dmarc_qname, "TXT", prefix="v=DMARC1; "):
records.append((dmarc_qname, "TXT", 'v=DMARC1; p=reject', "Prevents unauthorized use of this domain name for outbound mail by requiring a valid DKIM signature."))
# Sort the records. The None records *must* go first in the nsd zone file. Otherwise it doesn't matter.
records.sort(key = lambda rec : list(reversed(rec[0].split(".")) if rec[0] is not None else ""))
return records
########################################################################
def build_tlsa_record(env):
# A DANE TLSA record in DNS specifies that connections on a port
# must use TLS and the certificate must match a particular certificate.
#
# Thanks to http://blog.huque.com/2012/10/dnssec-and-certificates.html
# for explaining all of this!
# Get the hex SHA256 of the DER-encoded server certificate:
certder = shell("check_output", [
"/usr/bin/openssl",
"x509",
"-in", os.path.join(env["STORAGE_ROOT"], "ssl", "ssl_certificate.pem"),
"-outform", "DER"
],
return_bytes=True)
certhash = hashlib.sha256(certder).hexdigest()
# Specify the TLSA parameters:
# 3: This is the certificate that the client should trust. No CA is needed.
# 0: The whole certificate is matched.
# 1: The certificate is SHA256'd here.
return "3 0 1 " + certhash
def build_sshfp_records():
# The SSHFP record is a way for us to embed this server's SSH public
# key fingerprint into the DNS so that remote hosts have an out-of-band
# method to confirm the fingerprint. See RFC 4255 and RFC 6594. This
# depends on DNSSEC.
#
# On the client side, set SSH's VerifyHostKeyDNS option to 'ask' to
# include this info in the key verification prompt or 'yes' to trust
# the SSHFP record.
#
    # See https://github.com/xelerance/sshfp for inspiration.
algorithm_number = {
"ssh-rsa": 1,
"ssh-dss": 2,
"ecdsa-sha2-nistp256": 3,
}
# Get our local fingerprints by running ssh-keyscan. The output looks
# like the known_hosts file: hostname, keytype, fingerprint. The order
    # of the output is arbitrary, so sort it to prevent spurious updates
# to the zone file (that trigger bumping the serial number).
keys = shell("check_output", ["ssh-keyscan", "localhost"])
for key in sorted(keys.split("\n")):
if key.strip() == "" or key[0] == "#": continue
try:
host, keytype, pubkey = key.split(" ")
yield "%d %d ( %s )" % (
algorithm_number[keytype],
2, # specifies we are using SHA-256 on next line
hashlib.sha256(base64.b64decode(pubkey)).hexdigest().upper(),
)
except:
# Lots of things can go wrong. Don't let it disturb the DNS
# zone.
pass
########################################################################
def write_nsd_zone(domain, zonefile, records, env, force):
# On the $ORIGIN line, there's typically a ';' comment at the end explaining
# what the $ORIGIN line does. Any further data after the domain confuses
# ldns-signzone, however. It used to say '; default zone domain'.
# The SOA contact address for all of the domains on this system is hostmaster
# @ the PRIMARY_HOSTNAME. Hopefully that's legit.
# For the refresh through TTL fields, a good reference is:
# http://www.peerwisdom.org/2013/05/15/dns-understanding-the-soa-record/
zone = """
$ORIGIN {domain}.
$TTL 1800 ; default time to live
@ IN SOA ns1.{primary_domain}. hostmaster.{primary_domain}. (
__SERIAL__ ; serial number
7200 ; Refresh (secondary nameserver update interval)
1800 ; Retry (when refresh fails, how often to try again)
1209600 ; Expire (when refresh fails, how long secondary nameserver will keep records around anyway)
1800 ; Negative TTL (how long negative responses are cached)
)
"""
# Replace replacement strings.
zone = zone.format(domain=domain, primary_domain=env["PRIMARY_HOSTNAME"])
# Add records.
for subdomain, querytype, value, explanation in records:
if subdomain:
zone += subdomain
zone += "\tIN\t" + querytype + "\t"
if querytype == "TXT":
value = value.replace('\\', '\\\\') # escape backslashes
value = value.replace('"', '\\"') # escape quotes
value = '"' + value + '"' # wrap in quotes
zone += value + "\n"
# DNSSEC requires re-signing a zone periodically. That requires
# bumping the serial number even if no other records have changed.
# We don't see the DNSSEC records yet, so we have to figure out
# if a re-signing is necessary so we can prematurely bump the
# serial number.
force_bump = False
if not os.path.exists(zonefile + ".signed"):
# No signed file yet. Shouldn't normally happen unless a box
# is going from not using DNSSEC to using DNSSEC.
force_bump = True
else:
# We've signed the domain. Check if we are close to the expiration
# time of the signature. If so, we'll force a bump of the serial
# number so we can re-sign it.
with open(zonefile + ".signed") as f:
signed_zone = f.read()
expiration_times = re.findall(r"\sRRSIG\s+SOA\s+\d+\s+\d+\s\d+\s+(\d{14})", signed_zone)
if len(expiration_times) == 0:
# weird
force_bump = True
else:
# All of the times should be the same, but if not choose the soonest.
expiration_time = min(expiration_times)
expiration_time = datetime.datetime.strptime(expiration_time, "%Y%m%d%H%M%S")
if expiration_time - datetime.datetime.now() < datetime.timedelta(days=3):
# We're within three days of the expiration, so bump serial & resign.
force_bump = True
# Set the serial number.
serial = datetime.datetime.now().strftime("%Y%m%d00")
if os.path.exists(zonefile):
# If the zone already exists, is different, and has a later serial number,
# increment the number.
with open(zonefile) as f:
existing_zone = f.read()
m = re.search(r"(\d+)\s*;\s*serial number", existing_zone)
if m:
# Clear out the serial number in the existing zone file for the
# purposes of seeing if anything *else* in the zone has changed.
existing_serial = m.group(1)
existing_zone = existing_zone.replace(m.group(0), "__SERIAL__ ; serial number")
# If the existing zone is the same as the new zone (modulo the serial number),
# there is no need to update the file. Unless we're forcing a bump.
if zone == existing_zone and not force_bump and not force:
return False
# If the existing serial is not less than a serial number
# based on the current date plus 00, increment it. Otherwise,
# the serial number is less than our desired new serial number
# so we'll use the desired new number.
if existing_serial >= serial:
serial = str(int(existing_serial) + 1)
zone = zone.replace("__SERIAL__", serial)
# Write the zone file.
with open(zonefile, "w") as f:
f.write(zone)
return True # file is updated
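# Worked example of the serial logic above: the first write on 2015-03-01 yields serial
# 2015030100; if the zone content changes again the same day, the existing serial
# (2015030100) is >= the freshly computed one, so it is incremented to 2015030101.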
########################################################################
def write_nsd_conf(zonefiles, additional_records, env):
# Write the list of zones to a configuration file.
nsd_conf_file = "/etc/nsd/zones.conf"
nsdconf = ""
# Append the zones.
for domain, zonefile in zonefiles:
nsdconf += """
zone:
name: %s
zonefile: %s
""" % (domain, zonefile)
# If a custom secondary nameserver has been set, allow zone transfers
# and notifies to that nameserver.
if get_secondary_dns(additional_records):
# Get the IP address of the nameserver by resolving it.
hostname = get_secondary_dns(additional_records)
resolver = dns.resolver.get_default_resolver()
response = dns.resolver.query(hostname+'.', "A")
ipaddr = str(response[0])
nsdconf += """\tnotify: %s NOKEY
provide-xfr: %s NOKEY
""" % (ipaddr, ipaddr)
# Check if the file is changing. If it isn't changing,
# return False to flag that no change was made.
if os.path.exists(nsd_conf_file):
with open(nsd_conf_file) as f:
if f.read() == nsdconf:
return False
# Write out new contents and return True to signal that
# configuration changed.
with open(nsd_conf_file, "w") as f:
f.write(nsdconf)
return True
########################################################################
def dnssec_choose_algo(domain, env):
if '.' in domain and domain.rsplit('.')[-1] in \
("email", "guide", "fund"):
# At GoDaddy, RSASHA256 is the only algorithm supported
# for .email and .guide.
# A variety of algorithms are supported for .fund. This
# is preferred.
return "RSASHA256"
# For any domain we were able to sign before, don't change the algorithm
# on existing users. We'll probably want to migrate to SHA256 later.
return "RSASHA1-NSEC3-SHA1"
def sign_zone(domain, zonefile, env):
algo = dnssec_choose_algo(domain, env)
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % algo))
# In order to use the same keys for all domains, we have to generate
# a new .key file with a DNSSEC record for the specific domain. We
# can reuse the same key, but it won't validate without a DNSSEC
# record specifically for the domain.
#
# Copy the .key and .private files to /tmp to patch them up.
#
# Use os.umask and open().write() to securely create a copy that only
# we (root) can read.
files_to_kill = []
for key in ("KSK", "ZSK"):
if dnssec_keys.get(key, "").strip() == "": raise Exception("DNSSEC is not properly set up.")
oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys[key])
newkeyfn = '/tmp/' + dnssec_keys[key].replace("_domain_", domain)
dnssec_keys[key] = newkeyfn
for ext in (".private", ".key"):
if not os.path.exists(oldkeyfn + ext): raise Exception("DNSSEC is not properly set up.")
with open(oldkeyfn + ext, "r") as fr:
keydata = fr.read()
keydata = keydata.replace("_domain_", domain) # trick ldns-signkey into letting our generic key be used by this zone
fn = newkeyfn + ext
prev_umask = os.umask(0o77) # ensure written file is not world-readable
try:
with open(fn, "w") as fw:
fw.write(keydata)
finally:
os.umask(prev_umask) # other files we write should be world-readable
files_to_kill.append(fn)
# Do the signing.
expiry_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y%m%d")
shell('check_call', ["/usr/bin/ldns-signzone",
# expire the zone after 30 days
"-e", expiry_date,
# use NSEC3
"-n",
# zonefile to sign
"/etc/nsd/zones/" + zonefile,
# keys to sign with (order doesn't matter -- it'll figure it out)
dnssec_keys["KSK"],
dnssec_keys["ZSK"],
])
# Create a DS record based on the patched-up key files. The DS record is specific to the
# zone being signed, so we can't use the .ds files generated when we created the keys.
# The DS record points to the KSK only. Write this next to the zone file so we can
# get it later to give to the user with instructions on what to do with it.
#
# We want to be able to validate DS records too, but multiple forms may be valid depending
# on the digest type. So we'll write all (both) valid records. Only one DS record should
    # actually be deployed. Preferably the first.
with open("/etc/nsd/zones/" + zonefile + ".ds", "w") as f:
for digest_type in ('2', '1'):
rr_ds = shell('check_output', ["/usr/bin/ldns-key2ds",
"-n", # output to stdout
"-" + digest_type, # 1=SHA1, 2=SHA256
dnssec_keys["KSK"] + ".key"
])
f.write(rr_ds)
# Remove our temporary file.
for fn in files_to_kill:
os.unlink(fn)
########################################################################
def write_opendkim_tables(domains, env):
# Append a record to OpenDKIM's KeyTable and SigningTable for each domain
# that we send mail from (zones and all subdomains).
opendkim_key_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private')
if not os.path.exists(opendkim_key_file):
# Looks like OpenDKIM is not installed.
return False
config = {
# The SigningTable maps email addresses to a key in the KeyTable that
# specifies signing information for matching email addresses. Here we
# map each domain to a same-named key.
#
# Elsewhere we set the DMARC policy for each domain such that mail claiming
# to be From: the domain must be signed with a DKIM key on the same domain.
# So we must have a separate KeyTable entry for each domain.
"SigningTable":
"".join(
"*@{domain} {domain}\n".format(domain=domain)
for domain in domains
),
# The KeyTable specifies the signing domain, the DKIM selector, and the
# path to the private key to use for signing some mail. Per DMARC, the
# signing domain must match the sender's From: domain.
"KeyTable":
"".join(
"{domain} {domain}:mail:{key_file}\n".format(domain=domain, key_file=opendkim_key_file)
for domain in domains
),
}
did_update = False
for filename, content in config.items():
# Don't write the file if it doesn't need an update.
if os.path.exists("/etc/opendkim/" + filename):
with open("/etc/opendkim/" + filename) as f:
if f.read() == content:
continue
# The contents needs to change.
with open("/etc/opendkim/" + filename, "w") as f:
f.write(content)
did_update = True
# Return whether the files changed. If they didn't change, there's
# no need to kick the opendkim process.
return did_update
########################################################################
def get_custom_dns_config(env):
try:
custom_dns = rtyaml.load(open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
if not isinstance(custom_dns, dict): raise ValueError() # caught below
except:
return [ ]
for qname, value in custom_dns.items():
# Short form. Mapping a domain name to a string is short-hand
# for creating A records.
if isinstance(value, str):
values = [("A", value)]
# A mapping creates multiple records.
elif isinstance(value, dict):
values = value.items()
# No other type of data is allowed.
else:
raise ValueError()
for rtype, value2 in values:
if isinstance(value2, str):
yield (qname, rtype, value2)
elif isinstance(value2, list):
for value3 in value2:
yield (qname, rtype, value3)
# No other type of data is allowed.
else:
raise ValueError()
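# Illustrative shapes accepted above (documentation only; names and addresses are placeholders):
#
#   example.com: 192.0.2.1            # short form: string value -> a single A record
#   www.example.com:
#     A: 192.0.2.1                    # mapping form: one entry per record type
#     TXT:
#       - "v=spf1 mx -all"            # a list value yields multiple records of that type
#       - "another TXT value"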
def filter_custom_records(domain, custom_dns_iter):
for qname, rtype, value in custom_dns_iter:
# We don't count the secondary nameserver config (if present) as a record - that would just be
# confusing to users. Instead it is accessed/manipulated directly via (get/set)_custom_dns_config.
if qname == "_secondary_nameserver": continue
# Is this record for the domain or one of its subdomains?
# If `domain` is None, return records for all domains.
if domain is not None and qname != domain and not qname.endswith("." + domain): continue
# Turn the fully qualified domain name in the YAML file into
# our short form (None => domain, or a relative QNAME) if
# domain is not None.
if domain is not None:
if qname == domain:
qname = None
else:
qname = qname[0:len(qname)-len("." + domain)]
yield (qname, rtype, value)
def write_custom_dns_config(config, env):
# We get a list of (qname, rtype, value) triples. Convert this into a
# nice dictionary format for storage on disk.
from collections import OrderedDict
config = list(config)
dns = OrderedDict()
seen_qnames = set()
# Process the qnames in the order we see them.
for qname in [rec[0] for rec in config]:
if qname in seen_qnames: continue
seen_qnames.add(qname)
records = [(rec[1], rec[2]) for rec in config if rec[0] == qname]
if len(records) == 1 and records[0][0] == "A":
dns[qname] = records[0][1]
else:
dns[qname] = OrderedDict()
seen_rtypes = set()
# Process the rtypes in the order we see them.
for rtype in [rec[0] for rec in records]:
if rtype in seen_rtypes: continue
seen_rtypes.add(rtype)
values = [rec[1] for rec in records if rec[0] == rtype]
if len(values) == 1:
values = values[0]
dns[qname][rtype] = values
# Write.
config_yaml = rtyaml.dump(dns)
with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), "w") as f:
f.write(config_yaml)
def set_custom_dns_record(qname, rtype, value, action, env):
# validate qname
for zone, fn in get_dns_zones(env):
# It must match a zone apex or be a subdomain of a zone
# that we are otherwise hosting.
if qname == zone or qname.endswith("."+zone):
break
else:
# No match.
if qname != "_secondary_nameserver":
raise ValueError("%s is not a domain name or a subdomain of a domain name managed by this box." % qname)
# validate rtype
rtype = rtype.upper()
if value is not None and qname != "_secondary_nameserver":
if rtype in ("A", "AAAA"):
if value != "local": # "local" is a special flag for us
v = ipaddress.ip_address(value) # raises a ValueError if there's a problem
if rtype == "A" and not isinstance(v, ipaddress.IPv4Address): raise ValueError("That's an IPv6 address.")
if rtype == "AAAA" and not isinstance(v, ipaddress.IPv6Address): raise ValueError("That's an IPv4 address.")
elif rtype in ("CNAME", "TXT", "SRV", "MX"):
# anything goes
pass
else:
raise ValueError("Unknown record type '%s'." % rtype)
# load existing config
config = list(get_custom_dns_config(env))
# update
newconfig = []
made_change = False
needs_add = True
for _qname, _rtype, _value in config:
if action == "add":
if (_qname, _rtype, _value) == (qname, rtype, value):
# Record already exists. Bail.
return False
elif action == "set":
if (_qname, _rtype) == (qname, rtype):
if _value == value:
# Flag that the record already exists, don't
# need to add it.
needs_add = False
else:
# Drop any other values for this (qname, rtype).
made_change = True
continue
elif action == "remove":
if (_qname, _rtype, _value) == (qname, rtype, value):
# Drop this record.
made_change = True
continue
if value == None and (_qname, _rtype) == (qname, rtype):
# Drop all qname-rtype records.
made_change = True
continue
else:
raise ValueError("Invalid action: " + action)
# Preserve this record.
newconfig.append((_qname, _rtype, _value))
if action in ("add", "set") and needs_add and value is not None:
newconfig.append((qname, rtype, value))
made_change = True
if made_change:
# serialize & save
write_custom_dns_config(newconfig, env)
return made_change
########################################################################
def get_secondary_dns(custom_dns):
for qname, rtype, value in custom_dns:
if qname == "_secondary_nameserver":
return value
return None
def set_secondary_dns(hostname, env):
if hostname in (None, ""):
# Clear.
set_custom_dns_record("_secondary_nameserver", "A", None, "set", env)
else:
# Validate.
hostname = hostname.strip().lower()
resolver = dns.resolver.get_default_resolver()
try:
response = dns.resolver.query(hostname, "A")
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
raise ValueError("Could not resolve the IP address of %s." % hostname)
# Set.
set_custom_dns_record("_secondary_nameserver", "A", hostname, "set", env)
# Apply.
return do_dns_update(env)
########################################################################
def justtestingdotemail(domain, records):
# If the domain is a subdomain of justtesting.email, which we own,
# automatically populate the zone where it is set up on dns4e.com.
# Ideally if dns4e.com supported NS records we would just have it
# delegate DNS to us, but instead we will populate the whole zone.
import subprocess, json, urllib.parse
if not domain.endswith(".justtesting.email"):
return
for subdomain, querytype, value, explanation in records:
if querytype in ("NS",): continue
if subdomain in ("www", "ns1", "ns2"): continue # don't do unnecessary things
if subdomain == None:
subdomain = domain
else:
subdomain = subdomain + "." + domain
if querytype == "TXT":
# nsd requires parentheses around txt records with multiple parts,
# but DNS4E requires there be no parentheses; also it goes into
# nsd with a newline and a tab, which we replace with a space here
value = re.sub("^\s*\(\s*([\w\W]*)\)", r"\1", value)
value = re.sub("\s+", " ", value)
else:
continue
print("Updating DNS for %s/%s..." % (subdomain, querytype))
resp = json.loads(subprocess.check_output([
"curl",
"-s",
"https://api.dns4e.com/v7/%s/%s" % (urllib.parse.quote(subdomain), querytype.lower()),
"--user", "2ddbd8e88ed1495fa0ec:A97TDJV26CVUJS6hqAs0CKnhj4HvjTM7MwAAg8xb",
"--data", "record=%s" % urllib.parse.quote(value),
]).decode("utf8"))
print("\t...", resp.get("message", "?"))
########################################################################
def build_recommended_dns(env):
ret = []
domains = get_dns_domains(env)
zonefiles = get_dns_zones(env)
additional_records = list(get_custom_dns_config(env))
for domain, zonefile in zonefiles:
records = build_zone(domain, domains, additional_records, env)
    # remove records that we don't display
records = [r for r in records if r[3] is not False]
    # put Required at the top, then Recommended, then everything else
records.sort(key = lambda r : 0 if r[3].startswith("Required.") else (1 if r[3].startswith("Recommended.") else 2))
# expand qnames
for i in range(len(records)):
if records[i][0] == None:
qname = domain
else:
qname = records[i][0] + "." + domain
records[i] = {
"qname": qname,
"rtype": records[i][1],
"value": records[i][2],
"explanation": records[i][3],
}
# return
ret.append((domain, records))
return ret
if __name__ == "__main__":
from utils import load_environment
env = load_environment()
if sys.argv[-1] == "--lint":
write_custom_dns_config(get_custom_dns_config(env), env)
else:
for zone, records in build_recommended_dns(env):
for record in records:
print("; " + record['explanation'])
print(record['qname'], record['rtype'], record['value'], sep="\t")
print()
|
py | b417826b8e7e11d9e462bf9b061f592724d7b7ec | import antlr3
import testbase
import unittest
class t011lexer(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def lexerClass(self, base):
class TLexer(base):
def emitErrorMessage(self, msg):
# report errors to /dev/null
pass
def reportError(self, re):
# no error recovery yet, just crash!
raise re
return TLexer
def testValid(self):
stream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
lexer = self.getLexer(stream)
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.IDENTIFIER)
self.assertEqual(token.start, 0)
self.assertEqual(token.stop, 5)
self.assertEqual(token.text, 'foobar')
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.WS)
self.assertEqual(token.start, 6)
self.assertEqual(token.stop, 6)
self.assertEqual(token.text, ' ')
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.IDENTIFIER)
self.assertEqual(token.start, 7)
self.assertEqual(token.stop, 11)
self.assertEqual(token.text, '_Ab98')
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.WS)
self.assertEqual(token.start, 12)
self.assertEqual(token.stop, 14)
self.assertEqual(token.text, ' \n ')
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.IDENTIFIER)
self.assertEqual(token.start, 15)
self.assertEqual(token.stop, 20)
self.assertEqual(token.text, 'A12sdf')
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.EOF)
def testMalformedInput(self):
stream = antlr3.StringStream('a-b')
lexer = self.getLexer(stream)
lexer.nextToken()
try:
token = lexer.nextToken()
self.fail(token)
except antlr3.NoViableAltException as exc:
self.assertEqual(exc.unexpectedType, '-')
self.assertEqual(exc.charPositionInLine, 1)
self.assertEqual(exc.line, 1)
if __name__ == '__main__':
unittest.main()
|
py | b41782b81ebe33c904b6add06c059b4a7efa13a7 | from flask import (Flask, send_from_directory, make_response, render_template)
app = Flask(__name__, template_folder='',static_url_path='', static_folder='')
@app.route('/')
def index():
response = make_response(render_template('static/html/index.html'))
return response
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080) |
py | b41782e6379a46d294a60a2382f1d1a33649880b | #!d:\workspaces\program workspace\python\project\python_gmail_je\venv\scripts\python.exe
# $Id: rst2odt_prepstyles.py 8346 2019-08-26 12:11:32Z milde $
# Author: Dave Kuhlman <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
# Author: Michael Schutte <[email protected]>
from __future__ import print_function
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print(__doc__, file=sys.stderr)
print("Usage: %s STYLE_FILE.odt\n" % sys.argv[0], file=sys.stderr)
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
|
py | b4178566f27d748da7842c6f2cb3ac66c434182d | from .urdk import cUrdk
|
py | b41785ce16c110faffe4f4e18f034c6283baa219 | '''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from .show import show, show_dispatch
|
py | b417864d8ee88c2841124f9d6d4dc3bad0778239 | from tkinter import *
from constantes import *
import random
class Jogo(object):
"""
    Class that organizes the game elements
"""
def __init__(self):
        # Create the main game container.
self.root = Tk()
self.root.geometry('%ix%i'%(LARGURA, ALTURA))
self.root.resizable(False, False)
self.root.title('Joguinho Besta')
        # And a frame to hold the canvas.
self.frame=Frame(bg="black")
self.frame.pack()
        # Create the game screen (canvas).
self.canvas = Canvas(self.frame, bg="black",width=CANVAS_L,height=CANVAS_A, cursor = 'target')
self.canvas.pack()
        # And add a button to start the game.
self.começar = Button(self.root, text = 'START')
self.começar.pack()
#self.canvas.create_polygon((100, 200), (150, 250), (250, 250), (300, 200), (300, 100), (250, 50), (150, 50), (100, 100), fill = 'white')
self.novoJogo()
self.root.mainloop()
def novoJogo(self):
"""
        Create the elements of a new game
"""
self.canvas.create_rectangle((CANVAS_L//2, 350), (CANVAS_L//2 + 100, 370), fill = 'green')
        # Create the game ball.
raio = 30
p = (100, 200)
self.canvas.create_oval(p[0],p[1], p[0] + raio, p[1] + raio, fill='red', outline='white')
        # Create an arc inside the ball.
#self.canvas.create_arc(p[0], p[1], p[0] + raio, p[1] + raio, fill = 'orange', start = 60)#, extent = 90)
        # List of rectangles.
self.r = []
        # And finally the several rows of rectangles.
        l, c, e = 5, 8, 2 # rows, columns and spacing
        b, h, y0 = 48, 20, 50 # width, height and initial y position of the rectangles
for i in range(l):
cor = random.choice(['green', 'orange', 'white', 'lightgray', 'yellow', 'purple'])
for j in range(c):
self.canvas.create_rectangle(b*j+(j+1)*e, i*h+(i+1)*e + y0,b*j+(j+1)*e + b, i*h+(i+1)*e + y0 + h, fill = cor)
self.canvas.create_text(CANVAS_L/2, CANVAS_A/2, text = 'OLA COLEGA!', fill = 'white')
if __name__ == '__main__':
Jogo()
|
py | b417865afece2a42ec25ed952909a190aa583728 | # DROP TABLES
songplay_table_drop = "DROP TABLE IF EXISTS songplays"
user_table_drop = "DROP TABLE IF EXISTS users"
song_table_drop = "DROP TABLE IF EXISTS songs"
artist_table_drop = "DROP TABLE IF EXISTS artists"
time_table_drop = "DROP TABLE IF EXISTS time"
# CREATE TABLES
songplay_table_create = ("""CREATE TABLE IF NOT EXISTS songplays
(songplay_id SERIAL PRIMARY KEY,
start_time bigint NOT NULL,
user_id int NOT NULL,
level varchar NOT NULL,
song_id varchar,
artist_id varchar,
session_id int NOT NULL,
location varchar NOT NULL,
user_agent varchar NOT NULL)
""")
user_table_create = ("""CREATE TABLE IF NOT EXISTS users (
user_id int PRIMARY KEY,
first_name varchar NOT NULL,
last_name varchar NOT NULL,
gender varchar NOT NULL,
level varchar NOT NULL)
""")
song_table_create = ("""CREATE TABLE IF NOT EXISTS songs (
song_id varchar PRIMARY KEY,
title varchar NOT NULL,
artist_id varchar NOT NULL,
year int,
duration float)
""")
artist_table_create = ("""CREATE TABLE IF NOT EXISTS artists (
artist_id varchar PRIMARY KEY,
name varchar NOT NULL,
location varchar,
latitude float,
longitude float)
""")
time_table_create = ("""CREATE TABLE IF NOT EXISTS time (
start_time bigint NOT NULL,
hour int NOT NULL,
day int NOT NULL,
week int NOT NULL,
month int NOT NULL,
year int NOT NULL,
weekday int NOT NULL)
""")
# INSERT RECORDS
songplay_table_insert = ("""INSERT INTO songplays
(start_time, user_id, level, song_id, artist_id, session_id, location, user_agent)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
user_table_insert = ("""INSERT INTO users
(user_id, first_name, last_name, gender, level)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT(user_id) DO UPDATE SET level = excluded.level
""")
song_table_insert = ("""INSERT INTO songs
(song_id, title, artist_id, year, duration)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
artist_table_insert = ("""INSERT INTO artists
(artist_id, name, location, latitude, longitude)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
time_table_insert = ("""INSERT INTO time
(start_time, hour, day, week, month, year, weekday)
VALUES (%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
# FIND SONGS
song_select = ("""SELECT song_id, artists.artist_id
FROM songs JOIN artists ON songs.artist_id=artists.artist_id
WHERE title = %s AND name = %s AND duration = %s
""")
# QUERY LISTS
create_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop] |