ext (stringclasses, 9 values) | sha (stringlengths, 40-40) | content (stringlengths, 3-1.04M) |
---|---|---|
py | b415dd3cd64198d39b4c94f69f64f9c6e11761d6 | DEFAULT_COMMIT_MESSAGE = 'BOJ #[NO]'
DEFAULT_DIR_NAME = '[NO]'
DEFAULT_POLL = 600
DEFAULT_SOURCE_NAME = '[NO]'
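# '[NO]' and '[TITLE]' in these templates are replaced with the problem id and title by Option.replace_msg below.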
class Option:
def __init__(self, option):
self.option = option
def commit_message(self, problem):
if not 'commit_message' in self.option:
return self.replace_msg(DEFAULT_COMMIT_MESSAGE, problem)
return self.replace_msg(self.option['commit_message'], problem)
def source_tree(self, problem, repo_name):
if not 'source_tree' in self.option:
if self.mkdir():
return '%s/%s' % (repo_name, self.dir_name(problem))
return '%s' % repo_name
if self.option['source_tree'][-1] == '/':
if self.mkdir():
return '%s%s' % (self.option['source_tree'], self.dir_name(problem))
return '%s' % self.option['source_tree'][:-1]
if self.mkdir():
return '%s/%s' % (self.option['source_tree'], self.dir_name(problem))
return '%s' % self.option['source_tree']
def dir_name(self, problem):
if not 'dir_name' in self.option:
return self.replace_msg(DEFAULT_DIR_NAME, problem)
return self.replace_msg(self.option['dir_name'], problem)
def mkdir(self):
if not 'mkdir' in self.option:
return True
return self.option['mkdir']
def private(self):
if not 'private' in self.option:
return False
return self.option['private']
def poll(self):
if not 'poll' in self.option:
return DEFAULT_POLL
return self.option['poll']
def source_name(self, problem):
if not 'source_name' in self.option:
return self.replace_msg(DEFAULT_SOURCE_NAME, problem)
return self.replace_msg(self.option['source_name'], problem)
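# Returns (filter_configured, language_matches); (False, None) when no 'lang' option is set.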
def lang(self, problem):
if not 'lang' in self.option:
return False, None
if problem['language'] != self.option['lang']:
return True, False
return True, True
def replace_msg(self, msg, problem):
msg = msg.replace('[NO]', problem['problem_id'])
msg = msg.replace('[TITLE]', problem['problem_title'])
return msg
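# Maps a BOJ language name to a file extension; returns (True, 'Unknown language') for unknown languages, else (False, extension).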
def get_ext(self, language):
extensions = {
'C': '.c',
'C++': '.cpp',
'C++11': '.cpp',
'C++14': '.cpp',
'C++17': '.cpp',
'Java': '.java',
'Java (OpenJDK)': '.java',
'C11': '.c',
'Python 2': '.py',
'Python 3': '.py',
'PyPy2': '.py',
'PyPy3': '.py',
'Ruby2.5': '.rb',
'Kotlin': '.kt',
'Swift': '.swift',
'C# 6.0': '.cs',
'Text': '.txt',
'node.js': '.js',
'Go': '.go',
'F#': '.fs',
'PHP': '.php',
'Pascal': '.pas',
'Lua': '.lua',
'Perl': '.pl',
'Objective-C': '.m',
'Objective-C++': '.mm',
'C (Clang)': '.c',
'C++11 (Clang)': '.cpp',
'C++14 (Clang)': '.cpp',
'C++17 (Clang)': '.cpp',
'Golfscript': '.gs',
'Bash': '.sh',
'Fortran': '.f95',
'Scheme': '.scm',
'Ada': '.ada',
'awk': '.awk',
'OCaml': '.ml',
'Brainfuck': '.bf',
'Whitespace': '.ws',
'Tcl': '.tcl',
'Assembly (32bit)': '.asm',
'D': '.d',
'Clojure': '.clj',
'Rhino': '.js',
'Cobol': '.cob',
'SpiderMonkey': '.js',
'Pike': '.pike',
'sed': '.sed',
'Rust': '.rs',
'Boo': '.boo',
'Intercal': '.i',
'bc': '.bc',
'Nemerle': '.n',
'Cobra': '.cobra',
'Algol 68': '.a68',
'Befunge': '.bf',
'Haxe': '.hx',
'LOLCODE': '.lol',
'VB.NET 4.0': '.vb',
'아희': '.aheui'
}
if not language in extensions:
return True, 'Unknown language'
return False, extensions[language]
|
py | b415de25a7d2e5e6bc0ba9f822bce88bdbd52c68 | import os
import re
from collections import OrderedDict
from time import sleep
import requests
import toml
from src.common.utils import title_to_page_name
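# One-off scraper: parse item rows from a saved donjon HTML table, fetch each item's description
# from the donjon rpc endpoint, and write one TOML file per magic item.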
os.makedirs("../data/dnd/equipment/magic-items", exist_ok=True)
with open("magic_items.html") as f:
for line in f.readlines():
m = re.match(r"<tr><td>(.*?)</td><td>(.*?)</td><td>(.*?)</td><td>(.*?)</td><td>(.*?)</td><td>(.*?)</td></tr>", line)
name = m.group(1)
filename = title_to_page_name(name) + ".toml"
filepath = os.path.join("../data/dnd/equipment/magic-items", filename)
print(name)
if os.path.isfile(filepath):
continue
# Get values
d = OrderedDict([
("name", name),
("type", m.group(2)),
("rarity", m.group(3)),
("attunement", True if m.group(4) else False),
("notes", m.group(5)),
("source", m.group(6))
])
# Get description
r = requests.get("https://donjon.bin.sh/5e/magic_items/rpc.cgi?name=" + name)
card = r.json()["card"]
m = re.match(r'<div><h2>.*?</h2>\n<p class="type">.*?</p>\n<div class="description">(.*?)</div>', card)
if m is None:
if "<p>No description available.</p>" in card:
d["description"] = "No description available."
else:
print(card)
break
else:
d["description"] = m.group(1)
# Save file
with open(filepath, 'w') as out_f:
toml.dump(d, out_f)
sleep(1)
|
py | b415deaaa964e60cd67ea963f7a1b858fd5c467d | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio module that stores and registers persistent identifiers."""
import os
from setuptools import find_packages, setup
readme = open('README.rst').read()
history = open('CHANGES.rst').read()
tests_require = [
'attrs>=17.4.0', # once pytest is upgraded this can be removed
'SQLAlchemy-Continuum>=1.2.1',
'check-manifest>=0.25',
'coverage>=4.0',
'isort>=4.3.0',
'invenio-admin>=1.0.0',
'Flask-Menu>=0.5.1',
'invenio-access>=1.0.0',
'invenio-accounts>=1.0.0',
'mock>=1.3.0',
'pydocstyle>=1.0.0',
'pytest-cov>=1.8.0',
'pytest-pep8>=1.0.6',
'pytest>=3.8.0,<5.0.0',
]
extras_require = {
':python_version<"3.4"': ['enum34>=1.1.6'],
'admin': [
'Flask-Admin>=1.3.0',
],
'datacite': [
'datacite>=0.1.0'
],
'mysql': [
'invenio-db[mysql]>=1.0.0',
],
'postgresql': [
'invenio-db[postgresql]>=1.0.0',
],
'sqlite': [
'invenio-db>=1.0.0',
],
'docs': [
'Sphinx>=1.8.5',
],
'tests': tests_require,
}
extras_require['all'] = []
for name, reqs in extras_require.items():
if name in ('mysql', 'postgresql', 'sqlite') \
or name.startswith(':'):
continue
extras_require['all'].extend(reqs)
setup_requires = [
'Babel>=1.3',
'pytest-runner>=2.7.1',
]
install_requires = [
'Flask-BabelEx>=0.9.3',
'Flask>=0.11.1',
]
packages = find_packages()
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('invenio_pidstore', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='invenio-pidstore',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
keywords='invenio identifier DOI',
license='MIT',
author='CERN',
author_email='[email protected]',
url='https://github.com/inveniosoftware/invenio-pidstore',
packages=packages,
zip_safe=False,
include_package_data=True,
platforms='any',
entry_points={
'invenio_db.alembic': [
'invenio_pidstore = invenio_pidstore:alembic',
],
'invenio_db.models': [
'invenio_pidstore = invenio_pidstore.models',
],
'invenio_base.apps': [
'invenio_pidstore = invenio_pidstore:InvenioPIDStore',
],
'invenio_base.api_apps': [
'invenio_pidstore = invenio_pidstore:InvenioPIDStore',
],
'invenio_pidstore.minters': [
'recid = invenio_pidstore.minters:recid_minter',
],
'invenio_pidstore.fetchers': [
'recid = invenio_pidstore.fetchers:recid_fetcher',
],
'invenio_admin.views': [
'invenio_pidstore_pid = invenio_pidstore.admin:pid_adminview',
]
},
extras_require=extras_require,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Development Status :: 5 - Production/Stable',
],
)
|
py | b415decf746b64b634e31d704f949079e02ed350 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Test client vfs."""
import io
import logging
import os
import shutil
import stat
from unittest import mock
from absl import app
from absl.testing import absltest
import psutil
# pylint: disable=unused-import,g-bad-import-order
from grr_response_client import client_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr_response_client import vfs
from grr_response_client.vfs_handlers import files
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.util import temp
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
# pylint: mode=test
class VFSTest(vfs_test_lib.VfsTestCase, test_lib.GRRBaseTest):
"""Test the client VFS switch."""
def GetNumbers(self):
"""Generate a test string."""
result = b""
for i in range(1, 1001):
result += "{}\n".format(i).encode("ascii")
return result
def TestFileHandling(self, fd):
"""Test the file like object behaviour."""
original_string = self.GetNumbers()
self.assertEqual(fd.size, len(original_string))
fd.Seek(0)
self.assertEqual(fd.Read(100), original_string[0:100])
self.assertEqual(fd.Tell(), 100)
fd.Seek(-10, 1)
self.assertEqual(fd.Tell(), 90)
self.assertEqual(fd.Read(10), original_string[90:100])
fd.Seek(0, 2)
self.assertEqual(fd.Tell(), len(original_string))
self.assertEqual(fd.Read(10), b"")
self.assertEqual(fd.Tell(), len(original_string))
# Raise if we try to list the contents of a file object.
self.assertRaises(IOError, lambda: list(fd.ListFiles()))
def testRegularFile(self):
"""Test our ability to read regular files."""
path = os.path.join(self.base_path, "morenumbers.txt")
pathspec = rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.OS)
fd = vfs.VFSOpen(pathspec)
self.TestFileHandling(fd)
def testOpenFilehandles(self):
"""Test that file handles are cached."""
current_process = psutil.Process(os.getpid())
num_open_files = len(current_process.open_files())
path = os.path.join(self.base_path, "morenumbers.txt")
fds = []
for _ in range(100):
fd = vfs.VFSOpen(
rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.OS))
self.assertEqual(fd.read(20), b"1\n2\n3\n4\n5\n6\n7\n8\n9\n10")
fds.append(fd)
# This should not create any new file handles.
self.assertLess(len(current_process.open_files()) - num_open_files, 5)
def testFileCasing(self):
"""Test our ability to read the correct casing from filesystem."""
try:
os.lstat(os.path.join(self.base_path, "nUmBeRs.txt"))
os.lstat(os.path.join(self.base_path, "nuMbErs.txt"))
# If we reached this point we are on a case insensitive file system
# and the tests below do not make any sense.
logging.warning("Case insensitive file system detected. Skipping test.")
return
except (IOError, OSError):
pass
# Create 2 files with names that differ only in casing.
with utils.TempDirectory() as temp_dir:
path1 = os.path.join(temp_dir, "numbers.txt")
shutil.copy(os.path.join(self.base_path, "numbers.txt"), path1)
path2 = os.path.join(temp_dir, "numbers.TXT")
shutil.copy(os.path.join(self.base_path, "numbers.txt.ver2"), path2)
fd = vfs.VFSOpen(
rdf_paths.PathSpec(
path=path1, pathtype=rdf_paths.PathSpec.PathType.OS))
self.assertEqual(fd.pathspec.Basename(), "numbers.txt")
fd = vfs.VFSOpen(
rdf_paths.PathSpec(
path=path2, pathtype=rdf_paths.PathSpec.PathType.OS))
self.assertEqual(fd.pathspec.Basename(), "numbers.TXT")
path = os.path.join(self.base_path, "Numbers.txt")
fd = vfs.VFSOpen(
rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.OS))
read_path = fd.pathspec.Basename()
# The exact file now is non-deterministic but should be either of the two:
if read_path != "numbers.txt" and read_path != "numbers.TXT":
raise RuntimeError("read path is %s" % read_path)
# Ensure that the produced pathspec specified no case folding:
s = fd.Stat()
self.assertEqual(s.pathspec.path_options,
rdf_paths.PathSpec.Options.CASE_LITERAL)
# Case folding will only occur when requested - this should raise because
# we have the CASE_LITERAL option:
pathspec = rdf_paths.PathSpec(
path=path,
pathtype=rdf_paths.PathSpec.PathType.OS,
path_options=rdf_paths.PathSpec.Options.CASE_LITERAL)
self.assertRaises(IOError, vfs.VFSOpen, pathspec)
def testTSKFile(self):
"""Test our ability to read from image files."""
path = os.path.join(self.base_path, "test_img.dd")
path2 = "Test Directory/numbers.txt"
p2 = rdf_paths.PathSpec(
path=path2, pathtype=rdf_paths.PathSpec.PathType.TSK)
p1 = rdf_paths.PathSpec(path=path, pathtype=rdf_paths.PathSpec.PathType.OS)
p1.Append(p2)
fd = vfs.VFSOpen(p1)
self.TestFileHandling(fd)
def testTSKBTime(self):
pathspec = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "ntfs_img.dd"),
pathtype=rdf_paths.PathSpec.PathType.OS,
offset=63 * 512,
nested_path=rdf_paths.PathSpec(
path="/Test Directory/notes.txt",
pathtype=rdf_paths.PathSpec.PathType.TSK))
fd = vfs.VFSOpen(pathspec)
st = fd.Stat()
self.assertEqual(str(st.st_btime), "2011-12-17 00:14:37")
def testTSKFileInode(self):
"""Test opening a file through an indirect pathspec."""
pathspec = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test_img.dd"),
pathtype=rdf_paths.PathSpec.PathType.OS)
pathspec.Append(
pathtype=rdf_paths.PathSpec.PathType.TSK,
inode=12,
path="/Test Directory")
pathspec.Append(
pathtype=rdf_paths.PathSpec.PathType.TSK, path="numbers.txt")
fd = vfs.VFSOpen(pathspec)
# Check that the new pathspec is correctly reduced to two components.
self.assertEqual(
fd.pathspec.first.path,
utils.NormalizePath(os.path.join(self.base_path, "test_img.dd")))
self.assertEqual(fd.pathspec[1].path, "/Test Directory/numbers.txt")
# And the correct inode is placed in the final branch.
self.assertEqual(fd.Stat().pathspec.nested_path.inode, 15)
self.TestFileHandling(fd)
def testTSKFileCasing(self):
"""Test our ability to read the correct casing from image."""
path = os.path.join(self.base_path, "test_img.dd")
path2 = os.path.join("test directory", "NuMbErS.TxT")
ps2 = rdf_paths.PathSpec(
path=path2, pathtype=rdf_paths.PathSpec.PathType.TSK)
ps = rdf_paths.PathSpec(path=path, pathtype=rdf_paths.PathSpec.PathType.OS)
ps.Append(ps2)
fd = vfs.VFSOpen(ps)
# This fixes Windows paths.
path = path.replace("\\", "/")
# The pathspec should have 2 components.
self.assertEqual(fd.pathspec.first.path, utils.NormalizePath(path))
self.assertEqual(fd.pathspec.first.pathtype, rdf_paths.PathSpec.PathType.OS)
nested = fd.pathspec.last
self.assertEqual(nested.path, u"/Test Directory/numbers.txt")
self.assertEqual(nested.pathtype, rdf_paths.PathSpec.PathType.TSK)
def testTSKInodeHandling(self):
"""Test that we can open files by inode."""
path = os.path.join(self.base_path, "ntfs_img.dd")
ps2 = rdf_paths.PathSpec(
inode=65,
ntfs_type=128,
path="/this/will/be/ignored",
pathtype=rdf_paths.PathSpec.PathType.TSK)
ps = rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.OS, offset=63 * 512)
ps.Append(ps2)
fd = vfs.VFSOpen(ps)
self.assertEqual(fd.Read(100), b"Hello world\n")
ps2 = rdf_paths.PathSpec(
inode=65,
ntfs_type=128,
ntfs_id=4,
pathtype=rdf_paths.PathSpec.PathType.TSK)
ps = rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.OS, offset=63 * 512)
ps.Append(ps2)
fd = vfs.VFSOpen(ps)
self.assertEqual(fd.read(100), b"I am a real ADS\n")
# Make sure the size is correct:
self.assertEqual(fd.Stat().st_size, len(b"I am a real ADS\n"))
def testTSKNTFSHandling(self):
"""Test that TSK can correctly encode NTFS features."""
path = os.path.join(self.base_path, "ntfs_img.dd")
path2 = "test directory"
ps2 = rdf_paths.PathSpec(
path=path2, pathtype=rdf_paths.PathSpec.PathType.TSK)
ps = rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.OS, offset=63 * 512)
ps.Append(ps2)
fd = vfs.VFSOpen(ps)
listing = []
pathspecs = []
for f in fd.ListFiles():
# Make sure the CASE_LITERAL option is set for all drivers so we can just
# resend this proto back.
self.assertEqual(f.pathspec.path_options,
rdf_paths.PathSpec.Options.CASE_LITERAL)
pathspec = f.pathspec.nested_path
self.assertEqual(pathspec.path_options,
rdf_paths.PathSpec.Options.CASE_LITERAL)
pathspecs.append(f.pathspec)
listing.append((pathspec.inode, pathspec.ntfs_type, pathspec.ntfs_id))
# The tsk_fs_attr_type enum:
tsk_fs_attr_type = rdf_paths.PathSpec.tsk_fs_attr_type
ref = [(65, tsk_fs_attr_type.TSK_FS_ATTR_TYPE_DEFAULT, 0),
(65, tsk_fs_attr_type.TSK_FS_ATTR_TYPE_NTFS_DATA, 4),
(66, tsk_fs_attr_type.TSK_FS_ATTR_TYPE_DEFAULT, 0),
(67, tsk_fs_attr_type.TSK_FS_ATTR_TYPE_DEFAULT, 0)]
# Make sure that the ADS is recovered.
self.assertEqual(listing, ref)
# Try to read the main file
self.assertEqual(pathspecs[0].nested_path.path, "/Test Directory/notes.txt")
fd = vfs.VFSOpen(pathspecs[0])
self.assertEqual(fd.read(1000), b"Hello world\n")
s = fd.Stat()
self.assertEqual(s.pathspec.nested_path.inode, 65)
self.assertEqual(s.pathspec.nested_path.ntfs_type, 1)
self.assertEqual(s.pathspec.nested_path.ntfs_id, 0)
# Check that the name of the ads is consistent.
self.assertEqual(pathspecs[1].nested_path.path, "/Test Directory/notes.txt")
self.assertEqual(pathspecs[1].nested_path.stream_name, "ads")
# Check that the ADS name is encoded correctly in the AFF4 URN for this
# file.
aff4_urn = pathspecs[1].AFF4Path(rdf_client.ClientURN("C.1234567812345678"))
self.assertEqual(aff4_urn.Basename(), "notes.txt:ads")
fd = vfs.VFSOpen(pathspecs[1])
self.assertEqual(fd.read(1000), b"I am a real ADS\n")
# Test that the stat contains the inode:
s = fd.Stat()
self.assertEqual(s.pathspec.nested_path.inode, 65)
self.assertEqual(s.pathspec.nested_path.ntfs_type, 128)
self.assertEqual(s.pathspec.nested_path.ntfs_id, 4)
def testNTFSProgressCallback(self):
self.progress_counter = 0
def Progress():
self.progress_counter += 1
path = os.path.join(self.base_path, "ntfs_img.dd")
path2 = "test directory"
ps2 = rdf_paths.PathSpec(
path=path2, pathtype=rdf_paths.PathSpec.PathType.TSK)
ps = rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.OS, offset=63 * 512)
ps.Append(ps2)
vfs.VFSOpen(ps, progress_callback=Progress)
self.assertGreater(self.progress_counter, 0)
def testUnicodeFile(self):
"""Test ability to read unicode files from images."""
path = os.path.join(self.base_path, "test_img.dd")
path2 = os.path.join(u"ืืืื ืื ืฉ ืืงืื", u"ืืืื.txt")
ps2 = rdf_paths.PathSpec(
path=path2, pathtype=rdf_paths.PathSpec.PathType.TSK)
ps = rdf_paths.PathSpec(path=path, pathtype=rdf_paths.PathSpec.PathType.OS)
ps.Append(ps2)
fd = vfs.VFSOpen(ps)
self.TestFileHandling(fd)
def testListDirectory(self):
"""Test our ability to list a directory."""
directory = vfs.VFSOpen(
rdf_paths.PathSpec(
path=self.base_path, pathtype=rdf_paths.PathSpec.PathType.OS))
self.CheckDirectoryListing(directory, "morenumbers.txt")
def testTSKListDirectory(self):
"""Test directory listing in sleuthkit."""
path = os.path.join(self.base_path, u"test_img.dd")
ps2 = rdf_paths.PathSpec(
path=u"ๅ
ฅไนก้ไฟ ๆตทๅคๆฅ่ๅซๆ ท่ฟๆณ", pathtype=rdf_paths.PathSpec.PathType.TSK)
ps = rdf_paths.PathSpec(path=path, pathtype=rdf_paths.PathSpec.PathType.OS)
ps.Append(ps2)
directory = vfs.VFSOpen(ps)
self.CheckDirectoryListing(directory, u"入乡随俗.txt")
def testRecursiveImages(self):
"""Test directory listing in sleuthkit."""
p3 = rdf_paths.PathSpec(
path="/home/a.txt", pathtype=rdf_paths.PathSpec.PathType.TSK)
p2 = rdf_paths.PathSpec(
path="/home/image2.img", pathtype=rdf_paths.PathSpec.PathType.TSK)
p1 = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test_img.dd"),
pathtype=rdf_paths.PathSpec.PathType.OS)
p2.Append(p3)
p1.Append(p2)
f = vfs.VFSOpen(p1)
self.assertEqual(f.read(3), b"yay")
def testGuessPathSpec(self):
"""Test that we can guess a pathspec from a path."""
path = os.path.join(self.base_path, "test_img.dd", "home/image2.img",
"home/a.txt")
pathspec = rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.OS)
fd = vfs.VFSOpen(pathspec)
self.assertEqual(fd.read(3), b"yay")
def testFileNotFound(self):
"""Test that we raise an IOError for file not found."""
path = os.path.join(self.base_path, "test_img.dd", "home/image2.img",
"home/nosuchfile.txt")
pathspec = rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.OS)
self.assertRaises(IOError, vfs.VFSOpen, pathspec)
def testGuessPathSpecPartial(self):
"""Test that we can guess a pathspec from a partial pathspec."""
path = os.path.join(self.base_path, "test_img.dd")
pathspec = rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.OS)
pathspec.nested_path.path = "/home/image2.img/home/a.txt"
pathspec.nested_path.pathtype = rdf_paths.PathSpec.PathType.TSK
fd = vfs.VFSOpen(pathspec)
self.assertEqual(fd.read(3), b"yay")
# Open as a directory
pathspec.nested_path.path = "/home/image2.img/home/"
fd = vfs.VFSOpen(pathspec)
names = []
for s in fd.ListFiles():
# Make sure that the stat pathspec is correct - it should be 3 levels
# deep.
self.assertEqual(s.pathspec.nested_path.path, "/home/image2.img")
names.append(s.pathspec.nested_path.nested_path.path)
self.assertIn("home/a.txt", names)
def testRegistryListing(self):
"""Test our ability to list registry keys."""
reg = rdf_paths.PathSpec.PathType.REGISTRY
with vfs_test_lib.VFSOverrider(reg, vfs_test_lib.FakeRegistryVFSHandler):
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.REGISTRY,
path=("/HKEY_USERS/S-1-5-20/Software/Microsoft"
"/Windows/CurrentVersion/Run"))
expected_names = {"MctAdmin": stat.S_IFDIR, "Sidebar": stat.S_IFDIR}
expected_data = [
u"%ProgramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun",
u"%TEMP%\\Sidebar.exe"
]
for f in vfs.VFSOpen(pathspec).ListFiles():
base, name = os.path.split(f.pathspec.CollapsePath())
self.assertEqual(base, pathspec.CollapsePath())
self.assertIn(name, expected_names)
self.assertIn(f.registry_data.GetValue(), expected_data)
def CheckDirectoryListing(self, directory, test_file):
"""Check that the directory listing is sensible."""
found = False
for f in directory.ListFiles():
# TSK makes virtual files with $ in front of them.
path = f.pathspec.Basename()
if path.startswith("$"):
continue
# Check the time is reasonable
self.assertGreater(f.st_mtime, 10000000)
self.assertGreater(f.st_atime, 10000000)
self.assertGreater(f.st_ctime, 10000000)
if path == test_file:
found = True
# Make sure it's a regular file with the right size
self.assertTrue(stat.S_ISREG(int(f.st_mode)))
self.assertEqual(f.st_size, 3893)
self.assertEqual(found, True)
# Raise if we try to read the contents of a directory object.
self.assertRaises(IOError, directory.Read, 5)
def testVFSVirtualRoot(self):
# Let's open a file in the virtual root.
os_root = "os:%s" % self.base_path
with test_lib.ConfigOverrider({"Client.vfs_virtualroots": [os_root]}):
# We need to reset the vfs._VFS_VIRTUALROOTS too.
vfs.Init()
fd = vfs.VFSOpen(
rdf_paths.PathSpec(
path="/morenumbers.txt", pathtype=rdf_paths.PathSpec.PathType.OS))
data = fd.read(10)
self.assertEqual(data, b"1\n2\n3\n4\n5\n")
# This should also work with TSK.
tsk_root = "tsk:%s" % os.path.join(self.base_path, "test_img.dd")
with test_lib.ConfigOverrider({"Client.vfs_virtualroots": [tsk_root]}):
vfs.Init()
image_file_ps = rdf_paths.PathSpec(
path=u"ืืืื ืื ืฉ ืืงืื/ืืืื.txt",
pathtype=rdf_paths.PathSpec.PathType.TSK)
fd = vfs.VFSOpen(image_file_ps)
data = fd.read(10)
self.assertEqual(data, b"1\n2\n3\n4\n5\n")
# This should not influence vfs handlers other than OS and TSK.
reg_type = rdf_paths.PathSpec.PathType.REGISTRY
os_handler = vfs.VFS_HANDLERS[rdf_paths.PathSpec.PathType.OS]
with vfs_test_lib.VFSOverrider(reg_type, os_handler):
with self.assertRaises(IOError):
image_file_ps.pathtype = reg_type
vfs.VFSOpen(image_file_ps)
def testFileSizeOverride(self):
# We assume /dev/null exists and has a 0 size.
fname = "/dev/null"
try:
st = os.stat(fname)
except OSError:
self.skipTest("%s not accessible." % fname)
if st.st_size != 0:
self.skipTest("%s doesn't have 0 size." % fname)
pathspec = rdf_paths.PathSpec(
path=fname, pathtype="OS", file_size_override=100000000)
fd = vfs.VFSOpen(pathspec)
self.assertEqual(fd.size, 100000000)
def testNTFSFile(self):
pathspec = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "ntfs.img"),
pathtype=rdf_paths.PathSpec.PathType.OS,
path_options=rdf_paths.PathSpec.Options.CASE_LITERAL,
nested_path=rdf_paths.PathSpec(
path="numbers.txt", pathtype=rdf_paths.PathSpec.PathType.NTFS))
fd = vfs.VFSOpen(pathspec)
self.TestFileHandling(fd)
class VFSMultiOpenTest(absltest.TestCase):
_VFS_OVERRIDER = vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
files.File)
def setUp(self):
self._VFS_OVERRIDER.Start()
self.addCleanup(self._VFS_OVERRIDER.Stop)
def testMultipleFiles(self):
with temp.AutoTempDirPath(remove_non_empty=True) as tempdir:
foo_path = os.path.join(tempdir, "foo")
bar_path = os.path.join(tempdir, "bar")
baz_path = os.path.join(tempdir, "baz")
self._Touch(foo_path, b"FOO")
self._Touch(bar_path, b"BAR")
self._Touch(baz_path, b"BAZ")
foo_pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=foo_path)
bar_pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=bar_path)
baz_pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=baz_path)
pathspecs = [foo_pathspec, bar_pathspec, baz_pathspec]
with vfs.VFSMultiOpen(pathspecs) as filedescs:
self.assertLen(filedescs, 3)
self.assertEqual(filedescs[0].Read(), b"FOO")
self.assertEqual(filedescs[1].Read(), b"BAR")
self.assertEqual(filedescs[2].Read(), b"BAZ")
files.FlushHandleCache()
def testProgressCallback(self):
with temp.AutoTempFilePath() as temppath:
self._Touch(temppath, b"QUUX")
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS, path=temppath)
func = mock.MagicMock()
with vfs.VFSMultiOpen([pathspec], progress_callback=func) as filedescs:
self.assertLen(filedescs, 1)
self.assertEqual(filedescs[0].Read(), b"QUUX")
self.assertTrue(func.called)
files.FlushHandleCache()
def _Touch(self, filepath, content=b""):
with io.open(filepath, mode="wb") as filedesc:
filedesc.write(content)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
py | b415dfc014ddf2aa9b02adf9b90453e4cbbbfcf7 | from BinarySearchTree import BinarySearchTree
tree = BinarySearchTree()
tree.insert(10)
tree.insert(20)
tree.insert(15)
tree.insert(-1)
tree.insert(-2)
tree.insert(-4)
tree.inOrderTraversal()
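# In-order traversal visits the values in sorted order: -4, -2, -1, 10, 15, 20 (and again without 10 after the removal below).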
print('\n')
tree.remove(10)
tree.inOrderTraversal()
|
py | b415e0255c1a22d9c72f0eb361656912bbcf1baf | from WH_Utils import Prospect, Company
from WH_Utils.Connectors.Coresignal import coresignal_to_company, coresingal_to_prospect
from WH_Utils.Utils.test_utils import CS_auth_dict
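# Smoke tests: fetch one known Coresignal member and company record and check that the connectors return WH_Utils objects.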
class TestCoresignalConnectors:
prospect_coresignal_id = "64362271" #mcclain
company_coresignal_id = "33158580" #wealthawk
def test_prospect(self):
cs_prospect = coresingal_to_prospect(id=TestCoresignalConnectors.prospect_coresignal_id, auth_dict=CS_auth_dict, company_id=TestCoresignalConnectors.company_coresignal_id)
assert isinstance(cs_prospect, Prospect)
def test_company(self):
cs_company = coresignal_to_company(id=TestCoresignalConnectors.company_coresignal_id, auth_dict=CS_auth_dict)
assert isinstance(cs_company, Company)
|
py | b415e0c7e64625fa211d6ba0058e0a2b6b9f0db0 | import requests
import json
class IFSC(object):
""" A client for accessing the Razorpay API. """
"""
Initializes the IFSC client
sets the BASE_URL
:returns IFSC object
"""
def __init__(self):
self.BASE_URL = 'https://ifsc.razorpay.com/'
with open("../IFSC.json") as json_file:
self.bankdata = json.loads(json_file.read())
pass
"""
validate
:returns True if the code is valid
"""
def validate(self, code: str):
if len(code) != 11:
return False
if code[4] != '0':
return False
_bankcode = code[0:4].upper()
_branchcode = code[5:].upper()
if not _bankcode in self.bankdata:
return False
_banklist = set(self.bankdata[_bankcode])
if _branchcode.isdigit():
return int(_branchcode) in _banklist
return _branchcode in _banklist
"""
Fetches details for given code
:returns response from razorpay api for ifsc
:raises ValueError for invalid data
"""
def fetch_details(self, code: str):
_final_URL = self.BASE_URL + code
if not self.validate(code):
raise ValueError(f'provided code is invalid')
headers = {
'Content-Type': 'application/json',
}
#https://nedbatchelder.com/blog/200711/rethrowing_exceptions_in_python.html ( gives a full stack trace for exception)
try:
response = requests.get(_final_URL, headers=headers)
except Exception as e:
import sys
raise sys.exc_info()[1]
return response.json()
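# Example usage (hypothetical IFSC code; requires ../IFSC.json to be present):
# client = IFSC()
# client.validate("KKBK0000261") # -> True/False based on the local bank data
# client.fetch_details("KKBK0000261") # -> dict returned by the Razorpay IFSC API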
|
py | b415e14100dfe1c903eb71eec161af1e79714ddf | #
# Parse tree nodes for expressions
#
from __future__ import absolute_import
import cython
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
StringEncoding=object, operator=object, local_errors=object, report_error=object,
Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
list_type=object, tuple_type=object, set_type=object, dict_type=object,
unicode_type=object, str_type=object, bytes_type=object, type_type=object,
Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object,
bytearray_type=object, slice_type=object, _py_int_types=object,
IS_PYTHON3=cython.bint)
import sys
import copy
import os.path
import operator
from .Errors import (
error, warning, InternalError, CompileError, report_error, local_errors)
from .Code import UtilityCode, TempitaUtilityCode
from . import StringEncoding
from . import Naming
from . import Nodes
from .Nodes import Node, utility_code_for_imports, analyse_type_annotation
from . import PyrexTypes
from .PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type
from . import TypeSlots
from .Builtin import list_type, tuple_type, set_type, dict_type, type_type, \
unicode_type, str_type, bytes_type, bytearray_type, basestring_type, slice_type
from . import Builtin
from . import Symtab
from .. import Utils
from .Annotate import AnnotationItem
from . import Future
from ..Debugging import print_call_chain
from .DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
from .Pythran import to_pythran, is_pythran_supported_type, is_pythran_supported_operation_type, \
is_pythran_expr, pythran_func_type, pythran_binop_type, pythran_unaryop_type, has_np_pythran, \
pythran_indexing_code, pythran_indexing_type, is_pythran_supported_node_or_none, pythran_type
from .PyrexTypes import PythranExpr
try:
from __builtin__ import basestring
except ImportError:
# Python 3
basestring = str
any_string_type = (bytes, str)
else:
# Python 2
any_string_type = (bytes, unicode)
if sys.version_info[0] >= 3:
IS_PYTHON3 = True
_py_int_types = int
else:
IS_PYTHON3 = False
_py_int_types = (int, long)
class NotConstant(object):
_obj = None
def __new__(cls):
if NotConstant._obj is None:
NotConstant._obj = super(NotConstant, cls).__new__(cls)
return NotConstant._obj
def __repr__(self):
return "<NOT CONSTANT>"
not_a_constant = NotConstant()
constant_value_not_set = object()
# error messages when coercing from key[0] to key[1]
coercion_error_dict = {
# string related errors
(unicode_type, str_type): ("Cannot convert Unicode string to 'str' implicitly."
" This is not portable and requires explicit encoding."),
(unicode_type, bytes_type): "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
(unicode_type, PyrexTypes.c_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_const_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_const_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(bytes_type, unicode_type): "Cannot convert 'bytes' object to unicode implicitly, decoding required",
(bytes_type, str_type): "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
(bytes_type, basestring_type): ("Cannot convert 'bytes' object to basestring implicitly."
" This is not portable to Py3."),
(bytes_type, PyrexTypes.c_py_unicode_ptr_type): "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
(bytes_type, PyrexTypes.c_const_py_unicode_ptr_type): (
"Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'."),
(basestring_type, bytes_type): "Cannot convert 'basestring' object to bytes implicitly. This is not portable.",
(str_type, unicode_type): ("str objects do not support coercion to unicode,"
" use a unicode string literal instead (u'')"),
(str_type, bytes_type): "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
(str_type, PyrexTypes.c_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_const_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_const_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_py_unicode_ptr_type): "'str' objects do not support coercion to C types (use 'unicode'?).",
(str_type, PyrexTypes.c_const_py_unicode_ptr_type): (
"'str' objects do not support coercion to C types (use 'unicode'?)."),
(PyrexTypes.c_char_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
(PyrexTypes.c_const_char_ptr_type, unicode_type): (
"Cannot convert 'char*' to unicode implicitly, decoding required"),
(PyrexTypes.c_uchar_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
(PyrexTypes.c_const_uchar_ptr_type, unicode_type): (
"Cannot convert 'char*' to unicode implicitly, decoding required"),
}
def find_coercion_error(type_tuple, default, env):
err = coercion_error_dict.get(type_tuple)
if err is None:
return default
elif (env.directives['c_string_encoding'] and
any(t in type_tuple for t in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_uchar_ptr_type,
PyrexTypes.c_const_char_ptr_type, PyrexTypes.c_const_uchar_ptr_type))):
if type_tuple[1].is_pyobject:
return default
elif env.directives['c_string_encoding'] in ('ascii', 'default'):
return default
else:
return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name
else:
return err
def default_str_type(env):
return {
'bytes': bytes_type,
'bytearray': bytearray_type,
'str': str_type,
'unicode': unicode_type
}.get(env.directives['c_string_type'])
def check_negative_indices(*nodes):
"""
Raise a warning on nodes that are known to have negative numeric values.
Used to find (potential) bugs inside of "wraparound=False" sections.
"""
for node in nodes:
if node is None or (
not isinstance(node.constant_result, _py_int_types) and
not isinstance(node.constant_result, float)):
continue
if node.constant_result < 0:
warning(node.pos,
"the result of using negative indices inside of "
"code sections marked as 'wraparound=False' is "
"undefined", level=1)
def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None):
if not seq_node.is_sequence_constructor:
if seq_type is None:
seq_type = seq_node.infer_type(env)
if seq_type is tuple_type:
# tuples are immutable => we can safely follow assignments
if seq_node.cf_state and len(seq_node.cf_state) == 1:
try:
seq_node = seq_node.cf_state[0].rhs
except AttributeError:
pass
if seq_node is not None and seq_node.is_sequence_constructor:
if index_node is not None and index_node.has_constant_result():
try:
item = seq_node.args[index_node.constant_result]
except (ValueError, TypeError, IndexError):
pass
else:
return item.infer_type(env)
# if we're lucky, all items have the same type
item_types = set([item.infer_type(env) for item in seq_node.args])
if len(item_types) == 1:
return item_types.pop()
return None
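# Returns the C++ catch-body that translates the active C++ exception into a Python exception.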
def get_exception_handler(exception_value):
if exception_value is None:
return "__Pyx_CppExn2PyErr();"
elif exception_value.type.is_pyobject:
return 'try { throw; } catch(const std::exception& exn) { PyErr_SetString(%s, exn.what()); } catch(...) { PyErr_SetNone(%s); }' % (
exception_value.entry.cname,
exception_value.entry.cname)
else:
return '%s(); if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError , "Error converting c++ exception.");' % exception_value.entry.cname
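# Wraps 'inside' in a C++ try/catch that converts any thrown exception into a Python error and jumps to the error label.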
def translate_cpp_exception(code, pos, inside, exception_value, nogil):
raise_py_exception = get_exception_handler(exception_value)
code.putln("try {")
code.putln("%s" % inside)
code.putln("} catch(...) {")
if nogil:
code.put_ensure_gil(declare_gilstate=True)
code.putln(raise_py_exception)
if nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(pos))
code.putln("}")
# Used to handle the case where an lvalue expression and an overloaded assignment
# both have an exception declaration.
def translate_double_cpp_exception(code, pos, lhs_type, lhs_code, rhs_code,
lhs_exc_val, assign_exc_val, nogil):
handle_lhs_exc = get_exception_handler(lhs_exc_val)
handle_assignment_exc = get_exception_handler(assign_exc_val)
code.putln("try {")
code.putln(lhs_type.declaration_code("__pyx_local_lvalue = %s;" % lhs_code))
code.putln("try {")
code.putln("__pyx_local_lvalue = %s;" % rhs_code)
# Catch any exception from the overloaded assignment.
code.putln("} catch(...) {")
if nogil:
code.put_ensure_gil(declare_gilstate=True)
code.putln(handle_assignment_exc)
if nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(pos))
code.putln("}")
# Catch any exception from evaluating lhs.
code.putln("} catch(...) {")
if nogil:
code.put_ensure_gil(declare_gilstate=True)
code.putln(handle_lhs_exc)
if nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(pos))
code.putln('}')
class ExprNode(Node):
# subexprs [string] Class var holding names of subexpr node attrs
# type PyrexType Type of the result
# result_code string Code fragment
# result_ctype string C type of result_code if different from type
# is_temp boolean Result is in a temporary variable
# is_sequence_constructor
# boolean Is a list or tuple constructor expression
# is_starred boolean Is a starred expression (e.g. '*a')
# saved_subexpr_nodes
# [ExprNode or [ExprNode or None] or None]
# Cached result of subexpr_nodes()
# use_managed_ref boolean use ref-counted temps/assignments/etc.
# result_is_used boolean indicates that the result will be dropped and the
# is_numpy_attribute boolean Is a Numpy module attribute
# result_code/temp_result can safely be set to None
# annotation ExprNode or None PEP526 annotation for names or expressions
result_ctype = None
type = None
annotation = None
temp_code = None
old_temp = None # error checker for multiple frees etc.
use_managed_ref = True # can be set by optimisation transforms
result_is_used = True
is_numpy_attribute = False
# The Analyse Expressions phase for expressions is split
# into two sub-phases:
#
# Analyse Types
# Determines the result type of the expression based
# on the types of its sub-expressions, and inserts
# coercion nodes into the expression tree where needed.
# Marks nodes which will need to have temporary variables
# allocated.
#
# Allocate Temps
# Allocates temporary variables where needed, and fills
# in the result_code field of each node.
#
# ExprNode provides some convenience routines which
# perform both of the above phases. These should only
# be called from statement nodes, and only when no
# coercion nodes need to be added around the expression
# being analysed. In that case, the above two phases
# should be invoked separately.
#
# Framework code in ExprNode provides much of the common
# processing for the various phases. It makes use of the
# 'subexprs' class attribute of ExprNodes, which should
# contain a list of the names of attributes which can
# hold sub-nodes or sequences of sub-nodes.
#
# The framework makes use of a number of abstract methods.
# Their responsibilities are as follows.
#
# Declaration Analysis phase
#
# analyse_target_declaration
# Called during the Analyse Declarations phase to analyse
# the LHS of an assignment or argument of a del statement.
# Nodes which cannot be the LHS of an assignment need not
# implement it.
#
# Expression Analysis phase
#
# analyse_types
# - Call analyse_types on all sub-expressions.
# - Check operand types, and wrap coercion nodes around
# sub-expressions where needed.
# - Set the type of this node.
# - If a temporary variable will be required for the
# result, set the is_temp flag of this node.
#
# analyse_target_types
# Called during the Analyse Types phase to analyse
# the LHS of an assignment or argument of a del
# statement. Similar responsibilities to analyse_types.
#
# target_code
# Called by the default implementation of allocate_target_temps.
# Should return a C lvalue for assigning to the node. The default
# implementation calls calculate_result_code.
#
# check_const
# - Check that this node and its subnodes form a
# legal constant expression. If so, do nothing,
# otherwise call not_const.
#
# The default implementation of check_const
# assumes that the expression is not constant.
#
# check_const_addr
# - Same as check_const, except check that the
# expression is a C lvalue whose address is
# constant. Otherwise, call addr_not_const.
#
# The default implementation of calc_const_addr
# assumes that the expression is not a constant
# lvalue.
#
# Code Generation phase
#
# generate_evaluation_code
# - Call generate_evaluation_code for sub-expressions.
# - Perform the functions of generate_result_code
# (see below).
# - If result is temporary, call generate_disposal_code
# on all sub-expressions.
#
# A default implementation of generate_evaluation_code
# is provided which uses the following abstract methods:
#
# generate_result_code
# - Generate any C statements necessary to calculate
# the result of this node from the results of its
# sub-expressions.
#
# calculate_result_code
# - Should return a C code fragment evaluating to the
# result. This is only called when the result is not
# a temporary.
#
# generate_assignment_code
# Called on the LHS of an assignment.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the assignment.
# - If the assignment absorbed a reference, call
# generate_post_assignment_code on the RHS,
# otherwise call generate_disposal_code on it.
#
# generate_deletion_code
# Called on an argument of a del statement.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the deletion.
# - Call generate_disposal_code on all sub-expressions.
#
#
is_sequence_constructor = False
is_dict_literal = False
is_set_literal = False
is_string_literal = False
is_attribute = False
is_subscript = False
is_slice = False
is_buffer_access = False
is_memview_index = False
is_memview_slice = False
is_memview_broadcast = False
is_memview_copy_assignment = False
saved_subexpr_nodes = None
is_temp = False
is_target = False
is_starred = False
constant_result = constant_value_not_set
child_attrs = property(fget=operator.attrgetter('subexprs'))
def not_implemented(self, method_name):
print_call_chain(method_name, "not implemented") ###
raise InternalError(
"%s.%s not implemented" %
(self.__class__.__name__, method_name))
def is_lvalue(self):
return 0
def is_addressable(self):
return self.is_lvalue() and not self.type.is_memoryviewslice
def is_ephemeral(self):
# An ephemeral node is one whose result is in
# a Python temporary and we suspect there are no
# other references to it. Certain operations are
# disallowed on such values, since they are
# likely to result in a dangling pointer.
return self.type.is_pyobject and self.is_temp
def subexpr_nodes(self):
# Extract a list of subexpression nodes based
# on the contents of the subexprs class attribute.
nodes = []
for name in self.subexprs:
item = getattr(self, name)
if item is not None:
if type(item) is list:
nodes.extend(item)
else:
nodes.append(item)
return nodes
def result(self):
if self.is_temp:
#if not self.temp_code:
# pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
# raise RuntimeError("temp result name not set in %s at %r" % (
# self.__class__.__name__, pos))
return self.temp_code
else:
return self.calculate_result_code()
def pythran_result(self, type_=None):
if is_pythran_supported_node_or_none(self):
return to_pythran(self)
assert(type_ is not None)
return to_pythran(self, type_)
def is_c_result_required(self):
"""
Subtypes may return False here if result temp allocation can be skipped.
"""
return True
def result_as(self, type = None):
# Return the result code cast to the specified C type.
if (self.is_temp and self.type.is_pyobject and
type != py_object_type):
# Allocated temporaries are always PyObject *, which may not
# reflect the actual type (e.g. an extension type)
return typecast(type, py_object_type, self.result())
return typecast(type, self.ctype(), self.result())
def py_result(self):
# Return the result code cast to PyObject *.
return self.result_as(py_object_type)
def ctype(self):
# Return the native C type of the result (i.e. the
# C type of the result_code expression).
return self.result_ctype or self.type
def get_constant_c_result_code(self):
# Return the constant value of this node as a result code
# string, or None if the node is not constant. This method
# can be called when the constant result code is required
# before the code generation phase.
#
# The return value is a string that can represent a simple C
# value, a constant C name or a constant C expression. If the
# node type depends on Python code, this must return None.
return None
def calculate_constant_result(self):
# Calculate the constant compile time result value of this
# expression and store it in ``self.constant_result``. Does
# nothing by default, thus leaving ``self.constant_result``
# unknown. If valid, the result can be an arbitrary Python
# value.
#
# This must only be called when it is assured that all
# sub-expressions have a valid constant_result value. The
# ConstantFolding transform will do this.
pass
def has_constant_result(self):
return self.constant_result is not constant_value_not_set and \
self.constant_result is not not_a_constant
def compile_time_value(self, denv):
# Return value of compile-time expression, or report error.
error(self.pos, "Invalid compile-time expression")
def compile_time_value_error(self, e):
error(self.pos, "Error in compile-time expression: %s: %s" % (
e.__class__.__name__, e))
# ------------- Declaration Analysis ----------------
def analyse_target_declaration(self, env):
error(self.pos, "Cannot assign to or delete this")
# ------------- Expression Analysis ----------------
def analyse_const_expression(self, env):
# Called during the analyse_declarations phase of a
# constant expression. Analyses the expression's type,
# checks whether it is a legal const expression,
# and determines its value.
node = self.analyse_types(env)
node.check_const()
return node
def analyse_expressions(self, env):
# Convenience routine performing both the Type
# Analysis and Temp Allocation phases for a whole
# expression.
return self.analyse_types(env)
def analyse_target_expression(self, env, rhs):
# Convenience routine performing both the Type
# Analysis and Temp Allocation phases for the LHS of
# an assignment.
return self.analyse_target_types(env)
def analyse_boolean_expression(self, env):
# Analyse expression and coerce to a boolean.
node = self.analyse_types(env)
bool = node.coerce_to_boolean(env)
return bool
def analyse_temp_boolean_expression(self, env):
# Analyse boolean expression and coerce result into
# a temporary. This is used when a branch is to be
# performed on the result and we won't have an
# opportunity to ensure disposal code is executed
# afterwards. By forcing the result into a temporary,
# we ensure that all disposal has been done by the
# time we get the result.
node = self.analyse_types(env)
return node.coerce_to_boolean(env).coerce_to_simple(env)
# --------------- Type Inference -----------------
def type_dependencies(self, env):
# Returns the list of entries whose types must be determined
# before the type of self can be inferred.
if hasattr(self, 'type') and self.type is not None:
return ()
return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())
def infer_type(self, env):
# Attempt to deduce the type of self.
# Differs from analyse_types as it avoids unnecessary
# analysis of subexpressions, but can assume everything
# in self.type_dependencies() has been resolved.
if hasattr(self, 'type') and self.type is not None:
return self.type
elif hasattr(self, 'entry') and self.entry is not None:
return self.entry.type
else:
self.not_implemented("infer_type")
def nonlocally_immutable(self):
# Returns whether this variable is a safe reference, i.e.
# can't be modified as part of globals or closures.
return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction
def inferable_item_node(self, index=0):
"""
Return a node that represents the (type) result of an indexing operation,
e.g. for tuple unpacking or iteration.
"""
return IndexNode(self.pos, base=self, index=IntNode(
self.pos, value=str(index), constant_result=index, type=PyrexTypes.c_py_ssize_t_type))
# --------------- Type Analysis ------------------
def analyse_as_module(self, env):
# If this node can be interpreted as a reference to a
# cimported module, return its scope, else None.
return None
def analyse_as_type(self, env):
# If this node can be interpreted as a reference to a
# type, return that type, else None.
return None
def analyse_as_extension_type(self, env):
# If this node can be interpreted as a reference to an
# extension type or builtin type, return its type, else None.
return None
def analyse_types(self, env):
self.not_implemented("analyse_types")
def analyse_target_types(self, env):
return self.analyse_types(env)
def nogil_check(self, env):
# By default, any expression based on Python objects is
# prevented in nogil environments. Subtypes must override
# this if they can work without the GIL.
if self.type and self.type.is_pyobject:
self.gil_error()
def gil_assignment_check(self, env):
if env.nogil and self.type.is_pyobject:
error(self.pos, "Assignment of Python object not allowed without gil")
def check_const(self):
self.not_const()
return False
def not_const(self):
error(self.pos, "Not allowed in a constant expression")
def check_const_addr(self):
self.addr_not_const()
return False
def addr_not_const(self):
error(self.pos, "Address is not constant")
# ----------------- Result Allocation -----------------
def result_in_temp(self):
# Return true if result is in a temporary owned by
# this node or one of its subexpressions. Overridden
# by certain nodes which can share the result of
# a subnode.
return self.is_temp
def target_code(self):
# Return code fragment for use as LHS of a C assignment.
return self.calculate_result_code()
def calculate_result_code(self):
self.not_implemented("calculate_result_code")
# def release_target_temp(self, env):
# # Release temporaries used by LHS of an assignment.
# self.release_subexpr_temps(env)
def allocate_temp_result(self, code):
if self.temp_code:
raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
type = self.type
if not type.is_void:
if type.is_pyobject:
type = PyrexTypes.py_object_type
elif not (self.result_is_used or type.is_memoryviewslice or self.is_c_result_required()):
self.temp_code = None
return
self.temp_code = code.funcstate.allocate_temp(
type, manage_ref=self.use_managed_ref)
else:
self.temp_code = None
def release_temp_result(self, code):
if not self.temp_code:
if not self.result_is_used:
# not used anyway, so ignore if not set up
return
pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
if self.old_temp:
raise RuntimeError("temp %s released multiple times in %s at %r" % (
self.old_temp, self.__class__.__name__, pos))
else:
raise RuntimeError("no temp, but release requested in %s at %r" % (
self.__class__.__name__, pos))
code.funcstate.release_temp(self.temp_code)
self.old_temp = self.temp_code
self.temp_code = None
# ---------------- Code Generation -----------------
def make_owned_reference(self, code):
"""
If result is a pyobject, make sure we own a reference to it.
If the result is in a temp, it is already a new reference.
"""
if self.type.is_pyobject and not self.result_in_temp():
code.put_incref(self.result(), self.ctype())
def make_owned_memoryviewslice(self, code):
"""
Make sure we own the reference to this memoryview slice.
"""
if not self.result_in_temp():
code.put_incref_memoryviewslice(self.result(),
have_gil=self.in_nogil_context)
def generate_evaluation_code(self, code):
# Generate code to evaluate this node and
# its sub-expressions, and dispose of any
# temporary results of its sub-expressions.
self.generate_subexpr_evaluation_code(code)
code.mark_pos(self.pos)
if self.is_temp:
self.allocate_temp_result(code)
self.generate_result_code(code)
if self.is_temp and not (self.type.is_string or self.type.is_pyunicode_ptr):
# If we are temp we do not need to wait until this node is disposed
# before disposing children.
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
def generate_subexpr_evaluation_code(self, code):
for node in self.subexpr_nodes():
node.generate_evaluation_code(code)
def generate_result_code(self, code):
self.not_implemented("generate_result_code")
def generate_disposal_code(self, code):
if self.is_temp:
if self.type.is_string or self.type.is_pyunicode_ptr:
# postponed from self.generate_evaluation_code()
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
if self.result():
if self.type.is_pyobject:
code.put_decref_clear(self.result(), self.ctype())
elif self.type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(
self.result(), have_gil=not self.in_nogil_context)
code.putln("%s.memview = NULL;" % self.result())
code.putln("%s.data = NULL;" % self.result())
else:
# Already done if self.is_temp
self.generate_subexpr_disposal_code(code)
def generate_subexpr_disposal_code(self, code):
# Generate code to dispose of temporary results
# of all sub-expressions.
for node in self.subexpr_nodes():
node.generate_disposal_code(code)
def generate_post_assignment_code(self, code):
if self.is_temp:
if self.type.is_string or self.type.is_pyunicode_ptr:
# postponed from self.generate_evaluation_code()
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
elif self.type.is_pyobject:
code.putln("%s = 0;" % self.result())
elif self.type.is_memoryviewslice:
code.putln("%s.memview = NULL;" % self.result())
code.putln("%s.data = NULL;" % self.result())
else:
self.generate_subexpr_disposal_code(code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
# Stub method for nodes which are not legal as
# the LHS of an assignment. An error will have
# been reported earlier.
pass
def generate_deletion_code(self, code, ignore_nonexisting=False):
# Stub method for nodes that are not legal as
# the argument of a del statement. An error
# will have been reported earlier.
pass
def free_temps(self, code):
if self.is_temp:
if not self.type.is_void:
self.release_temp_result(code)
else:
self.free_subexpr_temps(code)
def free_subexpr_temps(self, code):
for sub in self.subexpr_nodes():
sub.free_temps(code)
def generate_function_definitions(self, env, code):
pass
# ---------------- Annotation ---------------------
def annotate(self, code):
for node in self.subexpr_nodes():
node.annotate(code)
# ----------------- Coercion ----------------------
def coerce_to(self, dst_type, env):
# Coerce the result so that it can be assigned to
# something of type dst_type. If processing is necessary,
# wraps this node in a coercion node and returns that.
# Otherwise, returns this node unchanged.
#
# This method is called during the analyse_expressions
# phase of the src_node's processing.
#
# Note that subclasses that override this (especially
# ConstNodes) must not (re-)set their own .type attribute
# here. Since expression nodes may turn up in different
# places in the tree (e.g. inside of CloneNodes in cascaded
# assignments), this method must return a new node instance
# if it changes the type.
#
src = self
src_type = self.type
if self.check_for_coercion_error(dst_type, env):
return self
used_as_reference = dst_type.is_reference
if used_as_reference and not src_type.is_reference:
dst_type = dst_type.ref_base_type
if src_type.is_const:
src_type = src_type.const_base_type
if src_type.is_fused or dst_type.is_fused:
# See if we are coercing a fused function to a pointer to a
# specialized function
if (src_type.is_cfunction and not dst_type.is_fused and
dst_type.is_ptr and dst_type.base_type.is_cfunction):
dst_type = dst_type.base_type
for signature in src_type.get_all_specialized_function_types():
if signature.same_as(dst_type):
src.type = signature
src.entry = src.type.entry
src.entry.used = True
return self
if src_type.is_fused:
error(self.pos, "Type is not specialized")
else:
error(self.pos, "Cannot coerce to a type that is not specialized")
self.type = error_type
return self
if self.coercion_type is not None:
# This is purely for error checking purposes!
node = NameNode(self.pos, name='', type=self.coercion_type)
node.coerce_to(dst_type, env)
if dst_type.is_memoryviewslice:
from . import MemoryView
if not src.type.is_memoryviewslice:
if src.type.is_pyobject:
src = CoerceToMemViewSliceNode(src, dst_type, env)
elif src.type.is_array:
src = CythonArrayNode.from_carray(src, env).coerce_to(dst_type, env)
elif not src_type.is_error:
error(self.pos,
"Cannot convert '%s' to memoryviewslice" % (src_type,))
elif not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
copying=self.is_memview_copy_assignment):
if src.type.dtype.same_as(dst_type.dtype):
msg = "Memoryview '%s' not conformable to memoryview '%s'."
tup = src.type, dst_type
else:
msg = "Different base types for memoryviews (%s, %s)"
tup = src.type.dtype, dst_type.dtype
error(self.pos, msg % tup)
elif dst_type.is_pyobject:
if not src.type.is_pyobject:
if dst_type is bytes_type and src.type.is_int:
src = CoerceIntToBytesNode(src, env)
else:
src = CoerceToPyTypeNode(src, env, type=dst_type)
if not src.type.subtype_of(dst_type):
if src.constant_result is not None:
src = PyTypeTestNode(src, dst_type, env)
elif is_pythran_expr(dst_type) and is_pythran_supported_type(src.type):
# We let the compiler decide whether this is valid
return src
elif is_pythran_expr(src.type):
if is_pythran_supported_type(dst_type):
# Match the case where a pythran expr is assigned to a value, or vice versa.
# We let the C++ compiler decide whether this is valid or not!
return src
# Else, we need to convert the Pythran expression to a Python object
src = CoerceToPyTypeNode(src, env, type=dst_type)
elif src.type.is_pyobject:
if used_as_reference and dst_type.is_cpp_class:
warning(
self.pos,
"Cannot pass Python object as C++ data structure reference (%s &), will pass by copy." % dst_type)
src = CoerceFromPyTypeNode(dst_type, src, env)
elif (dst_type.is_complex
and src_type != dst_type
and dst_type.assignable_from(src_type)):
src = CoerceToComplexNode(src, dst_type, env)
else: # neither src nor dst are py types
# Added the string comparison, since for c types that
# is enough, but Cython gets confused when the types are
# in different pxi files.
# TODO: Remove this hack and require shared declarations.
if not (src.type == dst_type or str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
self.fail_assignment(dst_type)
return src
def fail_assignment(self, dst_type):
error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))
def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
if fail and not default:
default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
message = find_coercion_error((self.type, dst_type), default, env)
if message is not None:
error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
return True
if fail:
self.fail_assignment(dst_type)
return True
return False
def coerce_to_pyobject(self, env):
return self.coerce_to(PyrexTypes.py_object_type, env)
def coerce_to_boolean(self, env):
# Coerce result to something acceptable as
# a boolean value.
# if it's constant, calculate the result now
if self.has_constant_result():
bool_value = bool(self.constant_result)
return BoolNode(self.pos, value=bool_value,
constant_result=bool_value)
type = self.type
if type.is_enum or type.is_error:
return self
elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
return CoerceToBooleanNode(self, env)
elif type.is_cpp_class:
return SimpleCallNode(
self.pos,
function=AttributeNode(
self.pos, obj=self, attribute='operator bool'),
args=[]).analyse_types(env)
elif type.is_ctuple:
bool_value = len(type.components) == 0
return BoolNode(self.pos, value=bool_value,
constant_result=bool_value)
else:
error(self.pos, "Type '%s' not acceptable as a boolean" % type)
return self
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.type.is_int:
return self
else:
return self.coerce_to(PyrexTypes.c_long_type, env)
def coerce_to_temp(self, env):
# Ensure that the result is in a temporary.
if self.result_in_temp():
return self
else:
return CoerceToTempNode(self, env)
def coerce_to_simple(self, env):
# Ensure that the result is simple (see is_simple).
if self.is_simple():
return self
else:
return self.coerce_to_temp(env)
def is_simple(self):
# A node is simple if its result is something that can
# be referred to without performing any operations, e.g.
# a constant, local var, C global var, struct member
# reference, or temporary.
return self.result_in_temp()
def may_be_none(self):
if self.type and not (self.type.is_pyobject or
self.type.is_memoryviewslice):
return False
if self.has_constant_result():
return self.constant_result is not None
return True
def as_cython_attribute(self):
return None
def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
# Wraps the node in a NoneCheckNode if it is not known to be
# not-None (e.g. because it is a Python literal).
if self.may_be_none():
return NoneCheckNode(self, error, message, format_args)
else:
return self
@classmethod
def from_node(cls, node, **kwargs):
"""Instantiate this node class from another node, properly
copying over all attributes that one would forget otherwise.
"""
attributes = "cf_state cf_maybe_null cf_is_null constant_result".split()
for attr_name in attributes:
if attr_name in kwargs:
continue
try:
value = getattr(node, attr_name)
except AttributeError:
pass
else:
kwargs[attr_name] = value
return cls(node.pos, **kwargs)
class AtomicExprNode(ExprNode):
# Abstract base class for expression nodes which have
# no sub-expressions.
subexprs = []
# Override to optimize -- we know we have no children
def generate_subexpr_evaluation_code(self, code):
pass
def generate_subexpr_disposal_code(self, code):
pass
class PyConstNode(AtomicExprNode):
# Abstract base class for constant Python values.
is_literal = 1
type = py_object_type
def is_simple(self):
return 1
def may_be_none(self):
return False
def analyse_types(self, env):
return self
def calculate_result_code(self):
return self.value
def generate_result_code(self, code):
pass
class NoneNode(PyConstNode):
# The constant value None
is_none = 1
value = "Py_None"
constant_result = None
nogil_check = None
def compile_time_value(self, denv):
return None
def may_be_none(self):
return True
class EllipsisNode(PyConstNode):
# '...' in a subscript list.
value = "Py_Ellipsis"
constant_result = Ellipsis
def compile_time_value(self, denv):
return Ellipsis
class ConstNode(AtomicExprNode):
# Abstract base type for literal constant nodes.
#
# value string C code fragment
is_literal = 1
nogil_check = None
def is_simple(self):
return 1
def nonlocally_immutable(self):
return 1
def may_be_none(self):
return False
def analyse_types(self, env):
return self # Types are held in class variables
def check_const(self):
return True
def get_constant_c_result_code(self):
return self.calculate_result_code()
def calculate_result_code(self):
return str(self.value)
def generate_result_code(self, code):
pass
class BoolNode(ConstNode):
type = PyrexTypes.c_bint_type
# The constant value True or False
def calculate_constant_result(self):
self.constant_result = self.value
def compile_time_value(self, denv):
return self.value
def calculate_result_code(self):
if self.type.is_pyobject:
return self.value and 'Py_True' or 'Py_False'
else:
return str(int(self.value))
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject and self.type.is_int:
return BoolNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=Builtin.bool_type)
if dst_type.is_int and self.type.is_pyobject:
return BoolNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=PyrexTypes.c_bint_type)
return ConstNode.coerce_to(self, dst_type, env)
class NullNode(ConstNode):
type = PyrexTypes.c_null_ptr_type
value = "NULL"
constant_result = 0
def get_constant_c_result_code(self):
return self.value
class CharNode(ConstNode):
type = PyrexTypes.c_char_type
def calculate_constant_result(self):
self.constant_result = ord(self.value)
def compile_time_value(self, denv):
return ord(self.value)
def calculate_result_code(self):
return "'%s'" % StringEncoding.escape_char(self.value)
class IntNode(ConstNode):
# unsigned "" or "U"
# longness "" or "L" or "LL"
# is_c_literal True/False/None creator considers this a C integer literal
unsigned = ""
longness = ""
is_c_literal = None # unknown
def __init__(self, pos, **kwds):
ExprNode.__init__(self, pos, **kwds)
if 'type' not in kwds:
self.type = self.find_suitable_type_for_value()
def find_suitable_type_for_value(self):
if self.constant_result is constant_value_not_set:
try:
self.calculate_constant_result()
except ValueError:
pass
# we ignore 'is_c_literal = True' and instead map signed 32bit
# integers as C long values
if self.is_c_literal or \
not self.has_constant_result() or \
self.unsigned or self.longness == 'LL':
# clearly a C literal
rank = (self.longness == 'LL') and 2 or 1
suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
if self.type:
suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
else:
# C literal or Python literal - split at 32bit boundary
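# Illustrative split (follows directly from the range check below):
#     100          -> c_long_type (fits in signed 32 bits)
#     3000000000   -> py_object_type (too large, needs a Python int)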
if -2**31 <= self.constant_result < 2**31:
if self.type and self.type.is_int:
suitable_type = self.type
else:
suitable_type = PyrexTypes.c_long_type
else:
suitable_type = PyrexTypes.py_object_type
return suitable_type
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
elif dst_type.is_float:
if self.has_constant_result():
return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
constant_result=float(self.constant_result))
else:
return FloatNode(self.pos, value=self.value, type=dst_type,
constant_result=not_a_constant)
if dst_type.is_numeric and not dst_type.is_complex:
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
type=dst_type, is_c_literal=True,
unsigned=self.unsigned, longness=self.longness)
return node
elif dst_type.is_pyobject:
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
type=PyrexTypes.py_object_type, is_c_literal=False,
unsigned=self.unsigned, longness=self.longness)
else:
# FIXME: not setting the type here to keep it working with
# complex numbers. Should they be special cased?
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
unsigned=self.unsigned, longness=self.longness)
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return ConstNode.coerce_to(node, dst_type, env)
def coerce_to_boolean(self, env):
return IntNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=PyrexTypes.c_bint_type,
unsigned=self.unsigned, longness=self.longness)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
# pre-allocate a Python version of the number
plain_integer_string = str(Utils.str_to_number(self.value))
self.result_code = code.get_py_int(plain_integer_string, self.longness)
else:
self.result_code = self.get_constant_c_result_code()
def get_constant_c_result_code(self):
unsigned, longness = self.unsigned, self.longness
literal = self.value_as_c_integer_string()
if not (unsigned or longness) and self.type.is_int and literal[0] == '-' and literal[1] != '0':
# negative decimal literal => guess longness from type to prevent wrap-around
if self.type.rank >= PyrexTypes.c_longlong_type.rank:
longness = 'LL'
elif self.type.rank >= PyrexTypes.c_long_type.rank:
longness = 'L'
return literal + unsigned + longness
def value_as_c_integer_string(self):
value = self.value
if len(value) <= 2:
# too short to go wrong (and simplifies code below)
return value
neg_sign = ''
if value[0] == '-':
neg_sign = '-'
value = value[1:]
if value[0] == '0':
literal_type = value[1] # 0'o' - 0'b' - 0'x'
# 0x123 hex literals and 0123 octal literals work nicely in C
# but C-incompatible Py3 oct/bin notations need conversion
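# Illustrative conversions performed by the branches below:
#     '-0x10' -> '-16',  '0o123' -> '0123',  '0b101' -> '5'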
if neg_sign and literal_type in 'oOxX0123456789' and value[2:].isdigit():
# negative hex/octal literal => prevent C compiler from using
# unsigned integer types by converting to decimal (see C standard 6.4.4.1)
value = str(Utils.str_to_number(value))
elif literal_type in 'oO':
value = '0' + value[2:] # '0o123' => '0123'
elif literal_type in 'bB':
value = str(int(value[2:], 2))
elif value.isdigit() and not self.unsigned and not self.longness:
if not neg_sign:
# C compilers do not consider unsigned types for decimal literals,
# but they do for hex (see C standard 6.4.4.1)
value = '0x%X' % int(value)
return neg_sign + value
def calculate_result_code(self):
return self.result_code
def calculate_constant_result(self):
self.constant_result = Utils.str_to_number(self.value)
def compile_time_value(self, denv):
return Utils.str_to_number(self.value)
class FloatNode(ConstNode):
type = PyrexTypes.c_double_type
def calculate_constant_result(self):
self.constant_result = float(self.value)
def compile_time_value(self, denv):
return float(self.value)
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject and self.type.is_float:
return FloatNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=Builtin.float_type)
if dst_type.is_float and self.type.is_pyobject:
return FloatNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=dst_type)
return ConstNode.coerce_to(self, dst_type, env)
def calculate_result_code(self):
return self.result_code
def get_constant_c_result_code(self):
strval = self.value
assert isinstance(strval, basestring)
cmpval = repr(float(strval))
if cmpval == 'nan':
return "(Py_HUGE_VAL * 0)"
elif cmpval == 'inf':
return "Py_HUGE_VAL"
elif cmpval == '-inf':
return "(-Py_HUGE_VAL)"
else:
return strval
def generate_evaluation_code(self, code):
c_value = self.get_constant_c_result_code()
if self.type.is_pyobject:
self.result_code = code.get_py_float(self.value, c_value)
else:
self.result_code = c_value
def _analyse_name_as_type(name, pos, env):
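# Descriptive note: interpret 'name' as a Cython type. First try a plain basic
# type name; otherwise fall back to analysing a throw-away "sizeof(<name>)"
# fragment and reusing the argument type found there.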
type = PyrexTypes.parse_basic_type(name)
if type is not None:
return type
from .TreeFragment import TreeFragment
with local_errors(ignore=True):
pos = (pos[0], pos[1], pos[2]-7)
try:
declaration = TreeFragment(u"sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos)
except CompileError:
pass
else:
sizeof_node = declaration.root.stats[0].expr
if isinstance(sizeof_node, SizeofTypeNode):
sizeof_node = sizeof_node.analyse_types(env)
if isinstance(sizeof_node, SizeofTypeNode):
return sizeof_node.arg_type
return None
class BytesNode(ConstNode):
# A char* or bytes literal
#
# value BytesLiteral
is_string_literal = True
# start off as Python 'bytes' to support len() in O(1)
type = bytes_type
def calculate_constant_result(self):
self.constant_result = self.value
def as_sliced_node(self, start, stop, step=None):
value = StringEncoding.bytes_literal(self.value[start:stop:step], self.value.encoding)
return BytesNode(self.pos, value=value, constant_result=value)
def compile_time_value(self, denv):
return self.value.byteencode()
def analyse_as_type(self, env):
return _analyse_name_as_type(self.value.decode('ISO8859-1'), self.pos, env)
def can_coerce_to_char_literal(self):
return len(self.value) == 1
def coerce_to_boolean(self, env):
# This is special because testing a C char* for truth directly
# would yield the wrong result.
bool_value = bool(self.value)
return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
def coerce_to(self, dst_type, env):
if self.type == dst_type:
return self
if dst_type.is_int:
if not self.can_coerce_to_char_literal():
error(self.pos, "Only single-character string literals can be coerced into ints.")
return self
if dst_type.is_unicode_char:
error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
return self
return CharNode(self.pos, value=self.value,
constant_result=ord(self.value))
node = BytesNode(self.pos, value=self.value, constant_result=self.constant_result)
if dst_type.is_pyobject:
if dst_type in (py_object_type, Builtin.bytes_type):
node.type = Builtin.bytes_type
else:
self.check_for_coercion_error(dst_type, env, fail=True)
return node
elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
node.type = dst_type
return node
elif dst_type in (PyrexTypes.c_uchar_ptr_type, PyrexTypes.c_const_uchar_ptr_type, PyrexTypes.c_void_ptr_type):
node.type = (PyrexTypes.c_const_char_ptr_type if dst_type == PyrexTypes.c_const_uchar_ptr_type
else PyrexTypes.c_char_ptr_type)
return CastNode(node, dst_type)
elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
node.type = dst_type
return node
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return ConstNode.coerce_to(node, dst_type, env)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
result = code.get_py_string_const(self.value)
elif self.type.is_const:
result = code.get_string_const(self.value)
else:
# not const => use plain C string literal and cast to mutable type
literal = self.value.as_c_string_literal()
# C++ may require a cast
result = typecast(self.type, PyrexTypes.c_void_ptr_type, literal)
self.result_code = result
def get_constant_c_result_code(self):
return None # FIXME
def calculate_result_code(self):
return self.result_code
class UnicodeNode(ConstNode):
# A Py_UNICODE* or unicode literal
#
# value EncodedString
# bytes_value BytesLiteral the literal parsed as bytes string
# ('-3' unicode literals only)
is_string_literal = True
bytes_value = None
type = unicode_type
def calculate_constant_result(self):
self.constant_result = self.value
def analyse_as_type(self, env):
return _analyse_name_as_type(self.value, self.pos, env)
def as_sliced_node(self, start, stop, step=None):
if StringEncoding.string_contains_surrogates(self.value[:stop]):
# this is unsafe as it may give different results
# in different runtimes
return None
value = StringEncoding.EncodedString(self.value[start:stop:step])
value.encoding = self.value.encoding
if self.bytes_value is not None:
bytes_value = StringEncoding.bytes_literal(
self.bytes_value[start:stop:step], self.bytes_value.encoding)
else:
bytes_value = None
return UnicodeNode(
self.pos, value=value, bytes_value=bytes_value,
constant_result=value)
def coerce_to(self, dst_type, env):
if dst_type is self.type:
pass
elif dst_type.is_unicode_char:
if not self.can_coerce_to_char_literal():
error(self.pos,
"Only single-character Unicode string literals or "
"surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
return self
int_value = ord(self.value)
return IntNode(self.pos, type=dst_type, value=str(int_value),
constant_result=int_value)
elif not dst_type.is_pyobject:
if dst_type.is_string and self.bytes_value is not None:
# special case: '-3' enforced unicode literal used in a
# C char* context
return BytesNode(self.pos, value=self.bytes_value
).coerce_to(dst_type, env)
if dst_type.is_pyunicode_ptr:
node = UnicodeNode(self.pos, value=self.value)
node.type = dst_type
return node
error(self.pos,
"Unicode literals do not support coercion to C types other "
"than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* "
"(for strings).")
elif dst_type not in (py_object_type, Builtin.basestring_type):
self.check_for_coercion_error(dst_type, env, fail=True)
return self
def can_coerce_to_char_literal(self):
return len(self.value) == 1
## or (len(self.value) == 2
## and (0xD800 <= self.value[0] <= 0xDBFF)
## and (0xDC00 <= self.value[1] <= 0xDFFF))
def coerce_to_boolean(self, env):
bool_value = bool(self.value)
return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
def contains_surrogates(self):
return StringEncoding.string_contains_surrogates(self.value)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
if self.contains_surrogates():
# surrogates are not really portable and cannot be
# decoded by the UTF-8 codec in Py3.3
self.result_code = code.get_py_const(py_object_type, 'ustring')
data_cname = code.get_pyunicode_ptr_const(self.value)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
code.putln(
"%s = PyUnicode_FromUnicode(%s, (sizeof(%s) / sizeof(Py_UNICODE))-1); %s" % (
self.result_code,
data_cname,
data_cname,
code.error_goto_if_null(self.result_code, self.pos)))
code.put_error_if_neg(
self.pos, "__Pyx_PyUnicode_READY(%s)" % self.result_code)
else:
self.result_code = code.get_py_string_const(self.value)
else:
self.result_code = code.get_pyunicode_ptr_const(self.value)
def calculate_result_code(self):
return self.result_code
def compile_time_value(self, env):
return self.value
class StringNode(PyConstNode):
# A Python str object, i.e. a byte string in Python 2.x and a
# unicode string in Python 3.x
#
# value BytesLiteral (or EncodedString with ASCII content)
# unicode_value EncodedString or None
# is_identifier boolean
type = str_type
is_string_literal = True
is_identifier = None
unicode_value = None
def calculate_constant_result(self):
if self.unicode_value is not None:
# only the Unicode value is portable across Py2/3
self.constant_result = self.unicode_value
def analyse_as_type(self, env):
return _analyse_name_as_type(self.unicode_value or self.value.decode('ISO8859-1'), self.pos, env)
def as_sliced_node(self, start, stop, step=None):
value = type(self.value)(self.value[start:stop:step])
value.encoding = self.value.encoding
if self.unicode_value is not None:
if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
# this is unsafe as it may give different results in different runtimes
return None
unicode_value = StringEncoding.EncodedString(
self.unicode_value[start:stop:step])
else:
unicode_value = None
return StringNode(
self.pos, value=value, unicode_value=unicode_value,
constant_result=value, is_identifier=self.is_identifier)
def coerce_to(self, dst_type, env):
if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
# if dst_type is Builtin.bytes_type:
# # special case: bytes = 'str literal'
# return BytesNode(self.pos, value=self.value)
if not dst_type.is_pyobject:
return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
if dst_type is not Builtin.basestring_type:
self.check_for_coercion_error(dst_type, env, fail=True)
return self
def can_coerce_to_char_literal(self):
return not self.is_identifier and len(self.value) == 1
def generate_evaluation_code(self, code):
self.result_code = code.get_py_string_const(
self.value, identifier=self.is_identifier, is_str=True,
unicode_value=self.unicode_value)
def get_constant_c_result_code(self):
return None
def calculate_result_code(self):
return self.result_code
def compile_time_value(self, env):
if self.value.is_unicode:
return self.value
if not IS_PYTHON3:
# use plain str/bytes object in Py2
return self.value.byteencode()
# in Py3, always return a Unicode string
if self.unicode_value is not None:
return self.unicode_value
return self.value.decode('iso8859-1')
class IdentifierStringNode(StringNode):
# A special str value that represents an identifier (bytes in Py2,
# unicode in Py3).
is_identifier = True
class ImagNode(AtomicExprNode):
# Imaginary number literal
#
# value string imaginary part (float value)
type = PyrexTypes.c_double_complex_type
def calculate_constant_result(self):
self.constant_result = complex(0.0, float(self.value))
def compile_time_value(self, denv):
return complex(0.0, float(self.value))
def analyse_types(self, env):
self.type.create_declaration_utility_code(env)
return self
def may_be_none(self):
return False
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
node = ImagNode(self.pos, value=self.value)
if dst_type.is_pyobject:
node.is_temp = 1
node.type = Builtin.complex_type
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return AtomicExprNode.coerce_to(node, dst_type, env)
gil_message = "Constructing complex number"
def calculate_result_code(self):
if self.type.is_pyobject:
return self.result()
else:
return "%s(0, %r)" % (self.type.from_parts, float(self.value))
def generate_result_code(self, code):
if self.type.is_pyobject:
code.putln(
"%s = PyComplex_FromDoubles(0.0, %r); %s" % (
self.result(),
float(self.value),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class NewExprNode(AtomicExprNode):
# C++ new statement
#
# cppclass node c++ class to create
type = None
def infer_type(self, env):
type = self.cppclass.analyse_as_type(env)
if type is None or not type.is_cpp_class:
error(self.pos, "new operator can only be applied to a C++ class")
self.type = error_type
return
self.cpp_check(env)
constructor = type.scope.lookup(u'<init>')
if constructor is None:
func_type = PyrexTypes.CFuncType(
type, [], exception_check='+', nogil=True)
type.scope.declare_cfunction(u'<init>', func_type, self.pos)
constructor = type.scope.lookup(u'<init>')
self.class_type = type
self.entry = constructor
self.type = constructor.type
return self.type
def analyse_types(self, env):
if self.type is None:
self.infer_type(env)
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
pass
def calculate_result_code(self):
return "new " + self.class_type.empty_declaration_code()
class NameNode(AtomicExprNode):
# Reference to a local or global variable name.
#
# name string Python name of the variable
# entry Entry Symbol table entry
# type_entry Entry For extension type names, the original type entry
# cf_is_null boolean Is uninitialized before this node
# cf_maybe_null boolean Maybe uninitialized before this node
# allow_null boolean Don't raise UnboundLocalError
# nogil boolean Whether it is used in a nogil context
is_name = True
is_cython_module = False
cython_attribute = None
lhs_of_first_assignment = False # TODO: remove me
is_used_as_rvalue = 0
entry = None
type_entry = None
cf_maybe_null = True
cf_is_null = False
allow_null = False
nogil = False
inferred_type = None
def as_cython_attribute(self):
return self.cython_attribute
def type_dependencies(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
if self.entry is not None and self.entry.type.is_unspecified:
return (self,)
else:
return ()
def infer_type(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
if self.entry is None or self.entry.type is unspecified_type:
if self.inferred_type is not None:
return self.inferred_type
return py_object_type
elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
self.name == self.entry.type.name:
# Unfortunately the type attribute of type objects
# is used for the pointer to the type they represent.
return type_type
elif self.entry.type.is_cfunction:
if self.entry.scope.is_builtin_scope:
# special case: optimised builtin functions must be treated as Python objects
return py_object_type
else:
# special case: referring to a C function must return its pointer
return PyrexTypes.CPtrType(self.entry.type)
else:
# If entry is inferred as pyobject it's safe to use local
# NameNode's inferred_type.
if self.entry.type.is_pyobject and self.inferred_type:
# Overflow may happen if integer
if not (self.inferred_type.is_int and self.entry.might_overflow):
return self.inferred_type
return self.entry.type
def compile_time_value(self, denv):
try:
return denv.lookup(self.name)
except KeyError:
error(self.pos, "Compile-time name '%s' not defined" % self.name)
def get_constant_c_result_code(self):
if not self.entry or self.entry.type.is_pyobject:
return None
return self.entry.cname
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a builtin
# C function with a Python equivalent, manufacture a NameNode
# referring to the Python builtin.
#print "NameNode.coerce_to:", self.name, dst_type ###
if dst_type is py_object_type:
entry = self.entry
if entry and entry.is_cfunction:
var_entry = entry.as_variable
if var_entry:
if var_entry.is_builtin and var_entry.is_const:
var_entry = env.declare_builtin(var_entry.name, self.pos)
node = NameNode(self.pos, name = self.name)
node.entry = var_entry
node.analyse_rvalue_entry(env)
return node
return super(NameNode, self).coerce_to(dst_type, env)
def declare_from_annotation(self, env, as_target=False):
"""Implements PEP 526 annotation typing in a fairly relaxed way.
Annotations are ignored for global variables, Python class attributes and already declared variables.
String literals are allowed and ignored.
The ambiguous Python types 'int' and 'long' are ignored and the 'cython.int' form must be used instead.
"""
if not env.directives['annotation_typing']:
return
if env.is_module_scope or env.is_py_class_scope:
# annotations never create global cdef names and Python classes don't support them anyway
return
name = self.name
if self.entry or env.lookup_here(name) is not None:
# already declared => ignore annotation
return
annotation = self.annotation
if annotation.is_string_literal:
# name: "description" => not a type, but still a declared variable or attribute
atype = None
else:
_, atype = analyse_type_annotation(annotation, env)
if atype is None:
atype = unspecified_type if as_target and env.directives['infer_types'] != False else py_object_type
self.entry = env.declare_var(name, atype, self.pos, is_cdef=not as_target)
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module.
# Returns the module scope, or None.
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.as_module:
return entry.as_module
return None
def analyse_as_type(self, env):
if self.cython_attribute:
type = PyrexTypes.parse_basic_type(self.cython_attribute)
else:
type = PyrexTypes.parse_basic_type(self.name)
if type:
return type
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.is_type:
return entry.type
else:
return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type.
# Returns the extension type, or None.
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.is_type:
if entry.type.is_extension_type or entry.type.is_builtin_type:
return entry.type
return None
def analyse_target_declaration(self, env):
if not self.entry:
self.entry = env.lookup_here(self.name)
if not self.entry and self.annotation is not None:
# name : type = ...
self.declare_from_annotation(env, as_target=True)
if not self.entry:
if env.directives['warn.undeclared']:
warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
if env.directives['infer_types'] != False:
type = unspecified_type
else:
type = py_object_type
self.entry = env.declare_var(self.name, type, self.pos)
if self.entry.is_declared_generic:
self.result_ctype = py_object_type
if self.entry.as_module:
# cimported modules namespace can shadow actual variables
self.entry.is_variable = 1
def analyse_types(self, env):
self.initialized_check = env.directives['initializedcheck']
entry = self.entry
if entry is None:
entry = env.lookup(self.name)
if not entry:
entry = env.declare_builtin(self.name, self.pos)
if entry and entry.is_builtin and entry.is_const:
self.is_literal = True
if not entry:
self.type = PyrexTypes.error_type
return self
self.entry = entry
entry.used = 1
if entry.type.is_buffer:
from . import Buffer
Buffer.used_buffer_aux_vars(entry)
self.analyse_rvalue_entry(env)
return self
def analyse_target_types(self, env):
self.analyse_entry(env, is_target=True)
entry = self.entry
if entry.is_cfunction and entry.as_variable:
# FIXME: unify "is_overridable" flags below
if (entry.is_overridable or entry.type.is_overridable) or not self.is_lvalue() and entry.fused_cfunction:
# We need this for assigning to cpdef names and for the fused 'def' TreeFragment
entry = self.entry = entry.as_variable
self.type = entry.type
if self.type.is_const:
error(self.pos, "Assignment to const '%s'" % self.name)
if self.type.is_reference:
error(self.pos, "Assignment to reference '%s'" % self.name)
if not self.is_lvalue():
error(self.pos, "Assignment to non-lvalue '%s'" % self.name)
self.type = PyrexTypes.error_type
entry.used = 1
if entry.type.is_buffer:
from . import Buffer
Buffer.used_buffer_aux_vars(entry)
return self
def analyse_rvalue_entry(self, env):
#print "NameNode.analyse_rvalue_entry:", self.name ###
#print "Entry:", self.entry.__dict__ ###
self.analyse_entry(env)
entry = self.entry
if entry.is_declared_generic:
self.result_ctype = py_object_type
if entry.is_pyglobal or entry.is_builtin:
if entry.is_builtin and entry.is_const:
self.is_temp = 0
else:
self.is_temp = 1
self.is_used_as_rvalue = 1
elif entry.type.is_memoryviewslice:
self.is_temp = False
self.is_used_as_rvalue = True
self.use_managed_ref = True
return self
def nogil_check(self, env):
self.nogil = True
if self.is_used_as_rvalue:
entry = self.entry
if entry.is_builtin:
if not entry.is_const: # cached builtins are ok
self.gil_error()
elif entry.is_pyglobal:
self.gil_error()
gil_message = "Accessing Python global or builtin"
def analyse_entry(self, env, is_target=False):
#print "NameNode.analyse_entry:", self.name ###
self.check_identifier_kind()
entry = self.entry
type = entry.type
if (not is_target and type.is_pyobject and self.inferred_type and
self.inferred_type.is_builtin_type):
# assume that type inference is smarter than the static entry
type = self.inferred_type
self.type = type
def check_identifier_kind(self):
# Check that this is an appropriate kind of name for use in an
# expression. Also finds the variable entry associated with
# an extension type.
entry = self.entry
if entry.is_type and entry.type.is_extension_type:
self.type_entry = entry
if entry.is_type and entry.type.is_enum:
py_entry = Symtab.Entry(self.name, None, py_object_type)
py_entry.is_pyglobal = True
py_entry.scope = self.entry.scope
self.entry = py_entry
elif not (entry.is_const or entry.is_variable or
entry.is_builtin or entry.is_cfunction or
entry.is_cpp_class):
if self.entry.as_variable:
self.entry = self.entry.as_variable
elif not self.is_cython_module:
error(self.pos, "'%s' is not a constant, variable or function identifier" % self.name)
def is_cimported_module_without_shadow(self, env):
if self.is_cython_module or self.cython_attribute:
return False
entry = self.entry or env.lookup(self.name)
return entry.as_module and not entry.is_variable
def is_simple(self):
# If it's not a C variable, it'll be in a temp.
return 1
def may_be_none(self):
if self.cf_state and self.type and (self.type.is_pyobject or
self.type.is_memoryviewslice):
# guard against infinite recursion on self-dependencies
if getattr(self, '_none_checking', False):
# self-dependency - either this node receives a None
# value from *another* node, or it can not reference
# None at this point => safe to assume "not None"
return False
self._none_checking = True
# evaluate control flow state to see if there were any
# potential None values assigned to the node so far
may_be_none = False
for assignment in self.cf_state:
if assignment.rhs.may_be_none():
may_be_none = True
break
del self._none_checking
return may_be_none
return super(NameNode, self).may_be_none()
def nonlocally_immutable(self):
if ExprNode.nonlocally_immutable(self):
return True
entry = self.entry
if not entry or entry.in_closure:
return False
return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly
def calculate_target_results(self, env):
pass
def check_const(self):
entry = self.entry
if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
self.not_const()
return False
return True
def check_const_addr(self):
entry = self.entry
if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
self.addr_not_const()
return False
return True
def is_lvalue(self):
return (
self.entry.is_variable and
not self.entry.is_readonly
) or (
self.entry.is_cfunction and
self.entry.is_overridable
)
def is_addressable(self):
return self.entry.is_variable and not self.type.is_memoryviewslice
def is_ephemeral(self):
# Name nodes are never ephemeral, even if the
# result is in a temporary.
return 0
def calculate_result_code(self):
entry = self.entry
if not entry:
return "<error>" # There was an error earlier
return entry.cname
def generate_result_code(self, code):
assert hasattr(self, 'entry')
entry = self.entry
if entry is None:
return # There was an error earlier
if entry.is_builtin and entry.is_const:
return # Lookup already cached
elif entry.is_pyclass_attr:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
if entry.is_builtin:
namespace = Naming.builtins_cname
else: # entry.is_pyglobal
namespace = entry.scope.namespace_cname
if not self.cf_is_null:
code.putln(
'%s = PyObject_GetItem(%s, %s);' % (
self.result(),
namespace,
interned_cname))
code.putln('if (unlikely(!%s)) {' % self.result())
code.putln('PyErr_Clear();')
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetModuleGlobalName(%s);' % (
self.result(),
interned_cname))
if not self.cf_is_null:
code.putln("}")
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.py_result())
elif entry.is_builtin and not entry.scope.is_module_scope:
# known builtin
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetBuiltinName(%s); %s' % (
self.result(),
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif entry.is_pyglobal or (entry.is_builtin and entry.scope.is_module_scope):
# name in class body, global name or unknown builtin
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
if entry.scope.is_module_scope:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetModuleGlobalName(%s); %s' % (
self.result(),
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
else:
# FIXME: is_pyglobal is also used for class namespace
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetNameInClass(%s, %s); %s' % (
self.result(),
entry.scope.namespace_cname,
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
# Raise UnboundLocalError for objects and memoryviewslices
raise_unbound = (
(self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
null_code = entry.type.check_for_null_code(entry.cname)
memslice_check = entry.type.is_memoryviewslice and self.initialized_check
if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check):
code.put_error_if_unbound(self.pos, entry, self.in_nogil_context)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
#print "NameNode.generate_assignment_code:", self.name ###
entry = self.entry
if entry is None:
return # There was an error earlier
if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
and not self.lhs_of_first_assignment and not rhs.in_module_scope):
error(self.pos, "Literal list must be assigned to pointer at time of declaration")
# is_pyglobal seems to be True for module-level globals only.
# We use this to access class->tp_dict if necessary.
if entry.is_pyglobal:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
namespace = self.entry.scope.namespace_cname
if entry.is_member:
# if the entry is a member we have to cheat: SetAttr does not work
# on types, so we create a descriptor which is then added to tp_dict
setter = 'PyDict_SetItem'
namespace = '%s->tp_dict' % namespace
elif entry.scope.is_module_scope:
setter = 'PyDict_SetItem'
namespace = Naming.moddict_cname
elif entry.is_pyclass_attr:
code.globalstate.use_utility_code(UtilityCode.load_cached("SetNameInClass", "ObjectHandling.c"))
setter = '__Pyx_SetNameInClass'
else:
assert False, repr(entry)
code.put_error_if_neg(
self.pos,
'%s(%s, %s, %s)' % (
setter,
namespace,
interned_cname,
rhs.py_result()))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating disposal code for %s" % rhs)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
if entry.is_member:
# in Py2.6+, we need to invalidate the method cache
code.putln("PyType_Modified(%s);" %
entry.scope.parent_type.typeptr_cname)
else:
if self.type.is_memoryviewslice:
self.generate_acquire_memoryviewslice(rhs, code)
elif self.type.is_buffer:
# Generate code for doing the buffer release/acquisition.
# This might raise an exception in which case the assignment (done
# below) will not happen.
#
# The reason this is not in a typetest-like node is because the
# variables that the acquired buffer info is stored to is allocated
# per entry and coupled with it.
self.generate_acquire_buffer(rhs, code)
assigned = False
if self.type.is_pyobject:
#print "NameNode.generate_assignment_code: to", self.name ###
#print "...from", rhs ###
#print "...LHS type", self.type, "ctype", self.ctype() ###
#print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
if self.use_managed_ref:
rhs.make_owned_reference(code)
is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
if is_external_ref:
if not self.cf_is_null:
if self.cf_maybe_null:
code.put_xgotref(self.py_result())
else:
code.put_gotref(self.py_result())
assigned = True
if entry.is_cglobal:
code.put_decref_set(
self.result(), rhs.result_as(self.ctype()))
else:
if not self.cf_is_null:
if self.cf_maybe_null:
code.put_xdecref_set(
self.result(), rhs.result_as(self.ctype()))
else:
code.put_decref_set(
self.result(), rhs.result_as(self.ctype()))
else:
assigned = False
if is_external_ref:
code.put_giveref(rhs.py_result())
if not self.type.is_memoryviewslice:
if not assigned:
if overloaded_assignment:
result = rhs.result()
if exception_check == '+':
translate_cpp_exception(code, self.pos, '%s = %s;' % (self.result(), result), exception_value, self.in_nogil_context)
else:
code.putln('%s = %s;' % (self.result(), result))
else:
result = rhs.result_as(self.ctype())
code.putln('%s = %s;' % (self.result(), result))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating post-assignment code for %s" % rhs)
rhs.generate_post_assignment_code(code)
elif rhs.result_in_temp():
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
def generate_acquire_memoryviewslice(self, rhs, code):
"""
Slices, coercions from objects, return values, etc. are new references.
We have a borrowed reference in the case of dst = src.
"""
from . import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=self.result(),
lhs_type=self.type,
lhs_pos=self.pos,
rhs=rhs,
code=code,
have_gil=not self.in_nogil_context,
first_assignment=self.cf_is_null)
def generate_acquire_buffer(self, rhs, code):
# rhstmp is only used in case the rhs is a complicated expression leading to
# the object, to avoid repeating the same C expression for every reference
# to the rhs. It does NOT hold a reference.
pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
if pretty_rhs:
rhstmp = rhs.result_as(self.ctype())
else:
rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
from . import Buffer
Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
is_initialized=not self.lhs_of_first_assignment,
pos=self.pos, code=code)
if not pretty_rhs:
code.putln("%s = 0;" % rhstmp)
code.funcstate.release_temp(rhstmp)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if self.entry is None:
return # There was an error earlier
elif self.entry.is_pyclass_attr:
namespace = self.entry.scope.namespace_cname
interned_cname = code.intern_identifier(self.entry.name)
if ignore_nonexisting:
key_error_code = 'PyErr_Clear(); else'
else:
# minor hack: fake a NameError on KeyError
key_error_code = (
'{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' %
self.entry.name)
code.putln(
'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {'
' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s'
' %s '
'}' % (namespace, interned_cname,
key_error_code,
code.error_goto(self.pos)))
elif self.entry.is_pyglobal:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
interned_cname = code.intern_identifier(self.entry.name)
del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
Naming.module_cname, interned_cname)
if ignore_nonexisting:
code.putln(
'if (unlikely(%s < 0)) {'
' if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s '
'}' % (del_code, code.error_goto(self.pos)))
else:
code.put_error_if_neg(self.pos, del_code)
elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice:
if not self.cf_is_null:
if self.cf_maybe_null and not ignore_nonexisting:
code.put_error_if_unbound(self.pos, self.entry)
if self.entry.type.is_pyobject:
if self.entry.in_closure:
# generator
if ignore_nonexisting and self.cf_maybe_null:
code.put_xgotref(self.result())
else:
code.put_gotref(self.result())
if ignore_nonexisting and self.cf_maybe_null:
code.put_xdecref(self.result(), self.ctype())
else:
code.put_decref(self.result(), self.ctype())
code.putln('%s = NULL;' % self.result())
else:
code.put_xdecref_memoryviewslice(self.entry.cname,
have_gil=not self.nogil)
else:
error(self.pos, "Deletion of C names not supported")
def annotate(self, code):
if hasattr(self, 'is_called') and self.is_called:
pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
if self.type.is_pyobject:
style, text = 'py_call', 'python function (%s)'
else:
style, text = 'c_call', 'c function (%s)'
code.annotate(pos, AnnotationItem(style, text % self.type, size=len(self.name)))
class BackquoteNode(ExprNode):
# `expr`
#
# arg ExprNode
type = py_object_type
subexprs = ['arg']
def analyse_types(self, env):
self.arg = self.arg.analyse_types(env)
self.arg = self.arg.coerce_to_pyobject(env)
self.is_temp = 1
return self
gil_message = "Backquote expression"
def calculate_constant_result(self):
self.constant_result = repr(self.arg.constant_result)
def generate_result_code(self, code):
code.putln(
"%s = PyObject_Repr(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class ImportNode(ExprNode):
# Used as part of import statement implementation.
# Implements result =
# __import__(module_name, globals(), None, name_list, level)
#
# module_name StringNode dotted name of module. Empty module
# name means importing the parent package according
# to level
# name_list ListNode or None list of names to be imported
# level int relative import level:
# -1: attempt both relative import and absolute import;
# 0: absolute import;
# >0: the number of parent directories to search
# relative to the current module.
# None: decide the level according to language level and
# directives
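#
# Roughly, based on the description above (illustrative, not verbatim output):
#     import pkg.mod      =>  __import__('pkg.mod', globals(), None, None, level)
#     from . import name  =>  __import__('', globals(), None, ['name'], 1)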
type = py_object_type
subexprs = ['module_name', 'name_list']
def analyse_types(self, env):
if self.level is None:
if (env.directives['py2_import'] or
Future.absolute_import not in env.global_scope().context.future_directives):
self.level = -1
else:
self.level = 0
module_name = self.module_name.analyse_types(env)
self.module_name = module_name.coerce_to_pyobject(env)
if self.name_list:
name_list = self.name_list.analyse_types(env)
self.name_list = name_list.coerce_to_pyobject(env)
self.is_temp = 1
return self
gil_message = "Python import"
def generate_result_code(self, code):
if self.name_list:
name_list_code = self.name_list.py_result()
else:
name_list_code = "0"
code.globalstate.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
import_code = "__Pyx_Import(%s, %s, %d)" % (
self.module_name.py_result(),
name_list_code,
self.level)
if (self.level <= 0 and
self.module_name.is_string_literal and
self.module_name.value in utility_code_for_imports):
helper_func, code_name, code_file = utility_code_for_imports[self.module_name.value]
code.globalstate.use_utility_code(UtilityCode.load_cached(code_name, code_file))
import_code = '%s(%s)' % (helper_func, import_code)
code.putln("%s = %s; %s" % (
self.result(),
import_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class IteratorNode(ExprNode):
# Used as part of for statement implementation.
#
# Implements result = iter(sequence)
#
# sequence ExprNode
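#
# Descriptive note: exact list/tuple sequences are iterated by index with a
# counter below instead of going through PyObject_GetIter()/tp_iternext.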
type = py_object_type
iter_func_ptr = None
counter_cname = None
cpp_iterator_cname = None
reversed = False # currently only used for list/tuple types (see Optimize.py)
is_async = False
subexprs = ['sequence']
def analyse_types(self, env):
self.sequence = self.sequence.analyse_types(env)
if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
not self.sequence.type.is_string:
# C array iteration will be transformed later on
self.type = self.sequence.type
elif self.sequence.type.is_cpp_class:
self.analyse_cpp_types(env)
else:
self.sequence = self.sequence.coerce_to_pyobject(env)
if self.sequence.type in (list_type, tuple_type):
self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
self.is_temp = 1
return self
gil_message = "Iterating over Python object"
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
def type_dependencies(self, env):
return self.sequence.type_dependencies(env)
def infer_type(self, env):
sequence_type = self.sequence.infer_type(env)
if sequence_type.is_array or sequence_type.is_ptr:
return sequence_type
elif sequence_type.is_cpp_class:
begin = sequence_type.scope.lookup("begin")
if begin is not None:
return begin.type.return_type
elif sequence_type.is_pyobject:
return sequence_type
return py_object_type
def analyse_cpp_types(self, env):
sequence_type = self.sequence.type
if sequence_type.is_ptr:
sequence_type = sequence_type.base_type
begin = sequence_type.scope.lookup("begin")
end = sequence_type.scope.lookup("end")
if (begin is None
or not begin.type.is_cfunction
or begin.type.args):
error(self.pos, "missing begin() on %s" % self.sequence.type)
self.type = error_type
return
if (end is None
or not end.type.is_cfunction
or end.type.args):
error(self.pos, "missing end() on %s" % self.sequence.type)
self.type = error_type
return
iter_type = begin.type.return_type
if iter_type.is_cpp_class:
if env.lookup_operator_for_types(
self.pos,
"!=",
[iter_type, end.type.return_type]) is None:
error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
self.type = iter_type
elif iter_type.is_ptr:
if not (iter_type == end.type.return_type):
error(self.pos, "incompatible types for begin() and end()")
self.type = iter_type
else:
error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type)
self.type = error_type
return
def generate_result_code(self, code):
sequence_type = self.sequence.type
if sequence_type.is_cpp_class:
if self.sequence.is_name:
# safe: C++ won't allow you to reassign to class references
begin_func = "%s.begin" % self.sequence.result()
else:
sequence_type = PyrexTypes.c_ptr_type(sequence_type)
self.cpp_iterator_cname = code.funcstate.allocate_temp(sequence_type, manage_ref=False)
code.putln("%s = &%s;" % (self.cpp_iterator_cname, self.sequence.result()))
begin_func = "%s->begin" % self.cpp_iterator_cname
# TODO: Limit scope.
code.putln("%s = %s();" % (self.result(), begin_func))
return
if sequence_type.is_array or sequence_type.is_ptr:
raise InternalError("for in carray slice not transformed")
is_builtin_sequence = sequence_type in (list_type, tuple_type)
if not is_builtin_sequence:
# reversed() not currently optimised (see Optimize.py)
assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
self.may_be_a_sequence = not sequence_type.is_builtin_type
if self.may_be_a_sequence:
code.putln(
"if (likely(PyList_CheckExact(%s)) || PyTuple_CheckExact(%s)) {" % (
self.sequence.py_result(),
self.sequence.py_result()))
if is_builtin_sequence or self.may_be_a_sequence:
self.counter_cname = code.funcstate.allocate_temp(
PyrexTypes.c_py_ssize_t_type, manage_ref=False)
if self.reversed:
if sequence_type is list_type:
init_value = 'PyList_GET_SIZE(%s) - 1' % self.result()
else:
init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result()
else:
init_value = '0'
code.putln("%s = %s; __Pyx_INCREF(%s); %s = %s;" % (
self.result(),
self.sequence.py_result(),
self.result(),
self.counter_cname,
init_value))
if not is_builtin_sequence:
self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
if self.may_be_a_sequence:
code.putln("%s = NULL;" % self.iter_func_ptr)
code.putln("} else {")
code.put("%s = -1; " % self.counter_cname)
code.putln("%s = PyObject_GetIter(%s); %s" % (
self.result(),
self.sequence.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
# PyObject_GetIter() fails if "tp_iternext" is not set, but the check below
# makes it visible to the C compiler that the pointer really isn't NULL, so that
# it can distinguish between the special cases and the generic case
code.putln("%s = Py_TYPE(%s)->tp_iternext; %s" % (
self.iter_func_ptr, self.py_result(),
code.error_goto_if_null(self.iter_func_ptr, self.pos)))
if self.may_be_a_sequence:
code.putln("}")
def generate_next_sequence_item(self, test_name, result_name, code):
assert self.counter_cname, "internal error: counter_cname temp not prepared"
final_size = 'Py%s_GET_SIZE(%s)' % (test_name, self.py_result())
if self.sequence.is_sequence_constructor:
item_count = len(self.sequence.args)
if self.sequence.mult_factor is None:
final_size = item_count
elif isinstance(self.sequence.mult_factor.constant_result, _py_int_types):
final_size = item_count * self.sequence.mult_factor.constant_result
code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size))
if self.reversed:
inc_dec = '--'
else:
inc_dec = '++'
code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS")
code.putln(
"%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % (
result_name,
test_name,
self.py_result(),
self.counter_cname,
result_name,
self.counter_cname,
inc_dec,
# use the error label to avoid C compiler warnings if we only use it below
code.error_goto_if_neg('0', self.pos)
))
code.putln("#else")
code.putln(
"%s = PySequence_ITEM(%s, %s); %s%s; %s" % (
result_name,
self.py_result(),
self.counter_cname,
self.counter_cname,
inc_dec,
code.error_goto_if_null(result_name, self.pos)))
code.put_gotref(result_name)
code.putln("#endif")
def generate_iter_next_result_code(self, result_name, code):
sequence_type = self.sequence.type
if self.reversed:
code.putln("if (%s < 0) break;" % self.counter_cname)
if sequence_type.is_cpp_class:
if self.cpp_iterator_cname:
end_func = "%s->end" % self.cpp_iterator_cname
else:
end_func = "%s.end" % self.sequence.result()
# TODO: Cache end() call?
code.putln("if (!(%s != %s())) break;" % (
self.result(),
end_func))
code.putln("%s = *%s;" % (
result_name,
self.result()))
code.putln("++%s;" % self.result())
return
elif sequence_type is list_type:
self.generate_next_sequence_item('List', result_name, code)
return
elif sequence_type is tuple_type:
self.generate_next_sequence_item('Tuple', result_name, code)
return
if self.may_be_a_sequence:
code.putln("if (likely(!%s)) {" % self.iter_func_ptr)
code.putln("if (likely(PyList_CheckExact(%s))) {" % self.py_result())
self.generate_next_sequence_item('List', result_name, code)
code.putln("} else {")
self.generate_next_sequence_item('Tuple', result_name, code)
code.putln("}")
code.put("} else ")
code.putln("{")
code.putln(
"%s = %s(%s);" % (
result_name,
self.iter_func_ptr,
self.py_result()))
code.putln("if (unlikely(!%s)) {" % result_name)
code.putln("PyObject* exc_type = PyErr_Occurred();")
code.putln("if (exc_type) {")
code.putln("if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
code.putln("else %s" % code.error_goto(self.pos))
code.putln("}")
code.putln("break;")
code.putln("}")
code.put_gotref(result_name)
code.putln("}")
def free_temps(self, code):
if self.counter_cname:
code.funcstate.release_temp(self.counter_cname)
if self.iter_func_ptr:
code.funcstate.release_temp(self.iter_func_ptr)
self.iter_func_ptr = None
if self.cpp_iterator_cname:
code.funcstate.release_temp(self.cpp_iterator_cname)
ExprNode.free_temps(self, code)
class NextNode(AtomicExprNode):
# Used as part of for statement implementation.
# Implements result = next(iterator)
# Created during analyse_types phase.
# The iterator is not owned by this node.
#
# iterator IteratorNode
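# For illustration: in a plain "for item in seq: ..." loop, each iteration's
# item is produced by a NextNode wrapping the loop's IteratorNode, i.e. this
# node stands for the implicit next(iterator) call.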
def __init__(self, iterator):
AtomicExprNode.__init__(self, iterator.pos)
self.iterator = iterator
def nogil_check(self, env):
# ignore - errors (if any) are already handled by IteratorNode
pass
def type_dependencies(self, env):
return self.iterator.type_dependencies(env)
def infer_type(self, env, iterator_type=None):
if iterator_type is None:
iterator_type = self.iterator.infer_type(env)
if iterator_type.is_ptr or iterator_type.is_array:
return iterator_type.base_type
elif iterator_type.is_cpp_class:
item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type
if item_type.is_reference:
item_type = item_type.ref_base_type
if item_type.is_const:
item_type = item_type.const_base_type
return item_type
else:
# Avoid duplication of complicated logic.
fake_index_node = IndexNode(
self.pos,
base=self.iterator.sequence,
index=IntNode(self.pos, value='PY_SSIZE_T_MAX',
type=PyrexTypes.c_py_ssize_t_type))
return fake_index_node.infer_type(env)
def analyse_types(self, env):
self.type = self.infer_type(env, self.iterator.type)
self.is_temp = 1
return self
def generate_result_code(self, code):
self.iterator.generate_iter_next_result_code(self.result(), code)
class AsyncIteratorNode(ExprNode):
# Used as part of 'async for' statement implementation.
#
# Implements result = sequence.__aiter__()
#
# sequence ExprNode
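# For illustration: in "async for x in obj: ...", this node represents the
# initial obj.__aiter__() call; the per-item __anext__() calls are handled
# by AsyncNextNode below.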
subexprs = ['sequence']
is_async = True
type = py_object_type
is_temp = 1
def infer_type(self, env):
return py_object_type
def analyse_types(self, env):
self.sequence = self.sequence.analyse_types(env)
if not self.sequence.type.is_pyobject:
error(self.pos, "async for loops not allowed on C/C++ types")
self.sequence = self.sequence.coerce_to_pyobject(env)
return self
def generate_result_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c"))
code.putln("%s = __Pyx_Coroutine_GetAsyncIter(%s); %s" % (
self.result(),
self.sequence.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class AsyncNextNode(AtomicExprNode):
# Used as part of 'async for' statement implementation.
# Implements result = iterator.__anext__()
# Created during analyse_types phase.
# The iterator is not owned by this node.
#
# iterator IteratorNode
type = py_object_type
is_temp = 1
def __init__(self, iterator):
AtomicExprNode.__init__(self, iterator.pos)
self.iterator = iterator
def infer_type(self, env):
return py_object_type
def analyse_types(self, env):
return self
def generate_result_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c"))
code.putln("%s = __Pyx_Coroutine_AsyncIterNext(%s); %s" % (
self.result(),
self.iterator.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class WithExitCallNode(ExprNode):
# The __exit__() call of a 'with' statement. Used in both the
# except and finally clauses.
# with_stat WithStatNode the surrounding 'with' statement
# args TupleNode or ResultStatNode the exception info tuple
# await_expr AwaitExprNode the await expression of an 'async with' statement
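# For illustration: in "with cm as x: ...", this node emits the __exit__()
# call with the exception info tuple as 'args'; the boolean result computed
# via __Pyx_PyObject_IsTrue() below decides whether a pending exception is
# suppressed, following normal 'with' statement semantics.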
subexprs = ['args', 'await_expr']
test_if_run = True
await_expr = None
def analyse_types(self, env):
self.args = self.args.analyse_types(env)
if self.await_expr:
self.await_expr = self.await_expr.analyse_types(env)
self.type = PyrexTypes.c_bint_type
self.is_temp = True
return self
def generate_evaluation_code(self, code):
if self.test_if_run:
# call only if it was not already called (and decref-cleared)
code.putln("if (%s) {" % self.with_stat.exit_var)
self.args.generate_evaluation_code(code)
result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
code.mark_pos(self.pos)
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % (
result_var,
self.with_stat.exit_var,
self.args.result()))
code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
self.args.generate_disposal_code(code)
self.args.free_temps(code)
code.putln(code.error_goto_if_null(result_var, self.pos))
code.put_gotref(result_var)
if self.await_expr:
# FIXME: result_var temp currently leaks into the closure
self.await_expr.generate_evaluation_code(code, source_cname=result_var, decref_source=True)
code.putln("%s = %s;" % (result_var, self.await_expr.py_result()))
self.await_expr.generate_post_assignment_code(code)
self.await_expr.free_temps(code)
if self.result_is_used:
self.allocate_temp_result(code)
code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
code.put_decref_clear(result_var, type=py_object_type)
if self.result_is_used:
code.put_error_if_neg(self.pos, self.result())
code.funcstate.release_temp(result_var)
if self.test_if_run:
code.putln("}")
class ExcValueNode(AtomicExprNode):
# Node created during analyse_types phase
# of an ExceptClauseNode to fetch the current
# exception value.
type = py_object_type
def __init__(self, pos):
ExprNode.__init__(self, pos)
def set_var(self, var):
self.var = var
def calculate_result_code(self):
return self.var
def generate_result_code(self, code):
pass
def analyse_types(self, env):
return self
class TempNode(ExprNode):
# Node created during analyse_types phase
# of some nodes to hold a temporary value.
#
# Note: One must call "allocate" and "release" on
# the node during code generation to get/release the temp.
# This is because the temp result is often used outside of
# the regular cycle.
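# Usage sketch (hypothetical caller code, for illustration only):
#     tmp = TempNode(pos, some_type, env)
#     tmp.allocate(code)      # before emitting code that uses tmp.result()
#     ...                     # emit code referring to tmp.result()
#     tmp.release(code)       # once the temp value is no longer needed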
subexprs = []
def __init__(self, pos, type, env=None):
ExprNode.__init__(self, pos)
self.type = type
if type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
def analyse_types(self, env):
return self
def analyse_target_declaration(self, env):
pass
def generate_result_code(self, code):
pass
def allocate(self, code):
self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)
def release(self, code):
code.funcstate.release_temp(self.temp_cname)
self.temp_cname = None
def result(self):
try:
return self.temp_cname
except:
assert False, "Remember to call allocate/release on TempNode"
raise
# Do not participate in normal temp alloc/dealloc:
def allocate_temp_result(self, code):
pass
def release_temp_result(self, code):
pass
class PyTempNode(TempNode):
# TempNode holding a Python value.
def __init__(self, pos, env):
TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
subexprs = []
def __init__(self, pos, type=None, cname=None):
ExprNode.__init__(self, pos, type=type)
if cname is not None:
self.cname = cname
def analyse_types(self, env):
return self
def set_cname(self, cname):
self.cname = cname
def result(self):
return self.cname
def generate_result_code(self, code):
pass
#-------------------------------------------------------------------
#
# F-strings
#
#-------------------------------------------------------------------
class JoinedStrNode(ExprNode):
# F-strings
#
# values [UnicodeNode|FormattedValueNode] Substrings of the f-string
#
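# For illustration: an f-string like f"x={value}!" is represented with
# values roughly equal to [UnicodeNode("x="), FormattedValueNode(value),
# UnicodeNode("!")], joined at runtime via __Pyx_PyUnicode_Join() below.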
type = unicode_type
is_temp = True
subexprs = ['values']
def analyse_types(self, env):
self.values = [v.analyse_types(env).coerce_to_pyobject(env) for v in self.values]
return self
def may_be_none(self):
# PyUnicode_Join() always returns a Unicode string or raises an exception
return False
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
num_items = len(self.values)
list_var = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
ulength_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
max_char_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ucs4_type, manage_ref=False)
code.putln('%s = PyTuple_New(%s); %s' % (
list_var,
num_items,
code.error_goto_if_null(list_var, self.pos)))
code.put_gotref(list_var)
code.putln("%s = 0;" % ulength_var)
code.putln("%s = 127;" % max_char_var) # at least ASCII character range
for i, node in enumerate(self.values):
node.generate_evaluation_code(code)
node.make_owned_reference(code)
ulength = "__Pyx_PyUnicode_GET_LENGTH(%s)" % node.py_result()
max_char_value = "__Pyx_PyUnicode_MAX_CHAR_VALUE(%s)" % node.py_result()
is_ascii = False
if isinstance(node, UnicodeNode):
try:
# most strings will be ASCII or at least Latin-1
node.value.encode('iso8859-1')
max_char_value = '255'
node.value.encode('us-ascii')
is_ascii = True
except UnicodeEncodeError:
if max_char_value != '255':
# not ISO8859-1 => check BMP limit
max_char = max(map(ord, node.value))
if max_char < 0xD800:
# BMP-only, no surrogate pairs used
max_char_value = '65535'
ulength = str(len(node.value))
elif max_char >= 65536:
# clearly outside of BMP, and not on a 16-bit Unicode system
max_char_value = '1114111'
ulength = str(len(node.value))
else:
# not really worth implementing a check for surrogate pairs here
# drawback: C code can differ when generating on Py2 with 2-byte Unicode
pass
else:
ulength = str(len(node.value))
elif isinstance(node, FormattedValueNode) and node.value.type.is_numeric:
is_ascii = True # formatted C numbers are always ASCII
if not is_ascii:
code.putln("%s = (%s > %s) ? %s : %s;" % (
max_char_var, max_char_value, max_char_var, max_char_value, max_char_var))
code.putln("%s += %s;" % (ulength_var, ulength))
code.put_giveref(node.py_result())
code.putln('PyTuple_SET_ITEM(%s, %s, %s);' % (list_var, i, node.py_result()))
node.generate_post_assignment_code(code)
node.free_temps(code)
code.mark_pos(self.pos)
self.allocate_temp_result(code)
code.globalstate.use_utility_code(UtilityCode.load_cached("JoinPyUnicode", "StringTools.c"))
code.putln('%s = __Pyx_PyUnicode_Join(%s, %d, %s, %s); %s' % (
self.result(),
list_var,
num_items,
ulength_var,
max_char_var,
code.error_goto_if_null(self.py_result(), self.pos)))
code.put_gotref(self.py_result())
code.put_decref_clear(list_var, py_object_type)
code.funcstate.release_temp(list_var)
code.funcstate.release_temp(ulength_var)
code.funcstate.release_temp(max_char_var)
class FormattedValueNode(ExprNode):
# {}-delimited portions of an f-string
#
# value ExprNode The expression itself
# conversion_char str or None Type conversion (!s, !r, !a, or none)
# format_spec JoinedStrNode or None Format string passed to __format__
# c_format_spec str or None If not None, formatting can be done at the C level
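# For illustration: in f"{obj!r:>10}", 'value' is obj, 'conversion_char' is
# 'r' and 'format_spec' holds ">10"; a numeric C value with a simple spec,
# e.g. f"{c_double_value:.2f}", can instead use the c_format_spec fast path.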
subexprs = ['value', 'format_spec']
type = unicode_type
is_temp = True
c_format_spec = None
find_conversion_func = {
's': 'PyObject_Str',
'r': 'PyObject_Repr',
'a': 'PyObject_ASCII', # NOTE: mapped to PyObject_Repr() in Py2
}.get
def may_be_none(self):
# PyObject_Format() always returns a Unicode string or raises an exception
return False
def analyse_types(self, env):
self.value = self.value.analyse_types(env)
if not self.format_spec or self.format_spec.is_string_literal:
c_format_spec = self.format_spec.value if self.format_spec else self.value.type.default_format_spec
if self.value.type.can_coerce_to_pystring(env, format_spec=c_format_spec):
self.c_format_spec = c_format_spec
if self.format_spec:
self.format_spec = self.format_spec.analyse_types(env).coerce_to_pyobject(env)
if self.c_format_spec is None:
self.value = self.value.coerce_to_pyobject(env)
if not self.format_spec and (not self.conversion_char or self.conversion_char == 's'):
if self.value.type is unicode_type and not self.value.may_be_none():
# value is definitely a unicode string and we don't apply any special formatting
return self.value
return self
def generate_result_code(self, code):
if self.c_format_spec is not None and not self.value.type.is_pyobject:
convert_func_call = self.value.type.convert_to_pystring(
self.value.result(), code, self.c_format_spec)
code.putln("%s = %s; %s" % (
self.result(),
convert_func_call,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
return
value_result = self.value.py_result()
value_is_unicode = self.value.type is unicode_type and not self.value.may_be_none()
if self.format_spec:
format_func = '__Pyx_PyObject_Format'
format_spec = self.format_spec.py_result()
else:
# common case: expect simple Unicode pass-through if no format spec
format_func = '__Pyx_PyObject_FormatSimple'
# passing a Unicode format string in Py2 forces PyObject_Format() to also return a Unicode string
format_spec = Naming.empty_unicode
conversion_char = self.conversion_char
if conversion_char == 's' and value_is_unicode:
# no need to pipe unicode strings through str()
conversion_char = None
if conversion_char:
fn = self.find_conversion_func(conversion_char)
assert fn is not None, "invalid conversion character found: '%s'" % conversion_char
value_result = '%s(%s)' % (fn, value_result)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectFormatAndDecref", "StringTools.c"))
format_func += 'AndDecref'
elif self.format_spec:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectFormat", "StringTools.c"))
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectFormatSimple", "StringTools.c"))
code.putln("%s = %s(%s, %s); %s" % (
self.result(),
format_func,
value_result,
format_spec,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
#-------------------------------------------------------------------
#
# Parallel nodes (cython.parallel.thread(savailable|id))
#
#-------------------------------------------------------------------
class ParallelThreadsAvailableNode(AtomicExprNode):
"""
Note: this is disabled and not a valid directive at this moment
Implements cython.parallel.threadsavailable(). If we are called from the
sequential part of the application, we need to call omp_get_max_threads(),
and in the parallel part we can just call omp_get_num_threads()
"""
type = PyrexTypes.c_int_type
def analyse_types(self, env):
self.is_temp = True
# env.add_include_file("omp.h")
return self
def generate_result_code(self, code):
code.putln("#ifdef _OPENMP")
code.putln("if (omp_in_parallel()) %s = omp_get_max_threads();" %
self.temp_code)
code.putln("else %s = omp_get_num_threads();" % self.temp_code)
code.putln("#else")
code.putln("%s = 1;" % self.temp_code)
code.putln("#endif")
def result(self):
return self.temp_code
class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
"""
Implements cython.parallel.threadid()
"""
type = PyrexTypes.c_int_type
def analyse_types(self, env):
self.is_temp = True
# env.add_include_file("omp.h")
return self
def generate_result_code(self, code):
code.putln("#ifdef _OPENMP")
code.putln("%s = omp_get_thread_num();" % self.temp_code)
code.putln("#else")
code.putln("%s = 0;" % self.temp_code)
code.putln("#endif")
def result(self):
return self.temp_code
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
class _IndexingBaseNode(ExprNode):
# Base class for indexing nodes.
#
# base ExprNode the value being indexed
def is_ephemeral(self):
# in most cases, indexing will return a safe reference to an object in a container,
# so we consider the result safe if the base object is
return self.base.is_ephemeral() or self.base.type in (
basestring_type, str_type, bytes_type, bytearray_type, unicode_type)
def check_const_addr(self):
return self.base.check_const_addr() and self.index.check_const()
def is_lvalue(self):
# NOTE: references currently have both is_reference and is_ptr
# set. Since pointers and references have different lvalue
# rules, we must be careful to separate the two.
if self.type.is_reference:
if self.type.ref_base_type.is_array:
# fixed-sized arrays aren't l-values
return False
elif self.type.is_ptr:
# non-const pointers can always be reassigned
return True
# Just about everything else returned by the index operator
# can be an lvalue.
return True
class IndexNode(_IndexingBaseNode):
# Sequence indexing.
#
# base ExprNode
# index ExprNode
# type_indices [PyrexType]
#
# is_fused_index boolean Whether the index is used to specialize a
# c(p)def function
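# For illustration: ordinary subscription such as obj[i] or d[key] is
# analysed here and may be replaced by a specialized node (BufferIndexNode,
# MemoryViewSliceNode, ...), while fused_func[some_type] uses the index to
# pick a concrete specialization of a fused c(p)def function (see
# parse_indexed_fused_cdef below).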
subexprs = ['base', 'index']
type_indices = None
is_subscript = True
is_fused_index = False
def calculate_constant_result(self):
self.constant_result = self.base.constant_result[self.index.constant_result]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
index = self.index.compile_time_value(denv)
try:
return base[index]
except Exception as e:
self.compile_time_value_error(e)
def is_simple(self):
base = self.base
return (base.is_simple() and self.index.is_simple()
and base.type and (base.type.is_ptr or base.type.is_array))
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if isinstance(self.index, SliceNode):
# slicing!
if base_type in (bytes_type, bytearray_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def analyse_target_declaration(self, env):
pass
def analyse_as_type(self, env):
base_type = self.base.analyse_as_type(env)
if base_type and not base_type.is_pyobject:
if base_type.is_cpp_class:
if isinstance(self.index, TupleNode):
template_values = self.index.args
else:
template_values = [self.index]
type_node = Nodes.TemplatedTypeNode(
pos=self.pos,
positional_args=template_values,
keyword_args=None)
return type_node.analyse(env, base_type=base_type)
else:
index = self.index.compile_time_value(env)
if index is not None:
return PyrexTypes.CArrayType(base_type, int(index))
error(self.pos, "Array size must be a compile time constant")
return None
def type_dependencies(self, env):
return self.base.type_dependencies(env) + self.index.type_dependencies(env)
def infer_type(self, env):
base_type = self.base.infer_type(env)
if self.index.is_slice:
# slicing!
if base_type.is_string:
# sliced C strings must coerce to Python
return bytes_type
elif base_type.is_pyunicode_ptr:
# sliced Py_UNICODE* strings must coerce to Python
return unicode_type
elif base_type in (unicode_type, bytes_type, str_type,
bytearray_type, list_type, tuple_type):
# slicing these returns the same type
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
index_type = self.index.infer_type(env)
if index_type and index_type.is_int or isinstance(self.index, IntNode):
# indexing!
if base_type is unicode_type:
# Py_UCS4 will automatically coerce to a unicode string
# if required, so this is safe. We only infer Py_UCS4
# when the index is a C integer type. Otherwise, we may
# need to use normal Python item access, in which case
# it's faster to return the one-char unicode string than
# to receive it, throw it away, and potentially rebuild it
# on a subsequent PyObject coercion.
return PyrexTypes.c_py_ucs4_type
elif base_type is str_type:
# always returns str - Py2: bytes, Py3: unicode
return base_type
elif base_type is bytearray_type:
return PyrexTypes.c_uchar_type
elif isinstance(self.base, BytesNode):
#if env.global_scope().context.language_level >= 3:
# # inferring 'char' can be made to work in Python 3 mode
# return PyrexTypes.c_char_type
# Py2/3 return different types on indexing bytes objects
return py_object_type
elif base_type in (tuple_type, list_type):
# if base is a literal, take a look at its values
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
if item_type is not None:
return item_type
elif base_type.is_ptr or base_type.is_array:
return base_type.base_type
elif base_type.is_ctuple and isinstance(self.index, IntNode):
if self.index.has_constant_result():
index = self.index.constant_result
if index < 0:
index += base_type.size
if 0 <= index < base_type.size:
return base_type.components[index]
if base_type.is_cpp_class:
class FakeOperand:
def __init__(self, **kwds):
self.__dict__.update(kwds)
operands = [
FakeOperand(pos=self.pos, type=base_type),
FakeOperand(pos=self.pos, type=index_type),
]
index_func = env.lookup_operator('[]', operands)
if index_func is not None:
return index_func.type.return_type
# may be slicing or indexing, we don't know
if base_type in (unicode_type, str_type):
# these types always return their own type on Python indexing/slicing
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
def analyse_types(self, env):
return self.analyse_base_and_index_types(env, getting=True)
def analyse_target_types(self, env):
node = self.analyse_base_and_index_types(env, setting=True)
if node.type.is_const:
error(self.pos, "Assignment to const dereference")
if node is self and not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
return node
def analyse_base_and_index_types(self, env, getting=False, setting=False,
analyse_base=True):
# Note: This might be cleaned up by having IndexNode
# parsed in a saner way and only construct the tuple if
# needed.
if analyse_base:
self.base = self.base.analyse_types(env)
if self.base.type.is_error:
# Do not visit child tree if base is undeclared to avoid confusing
# error messages
self.type = PyrexTypes.error_type
return self
is_slice = self.index.is_slice
if not env.directives['wraparound']:
if is_slice:
check_negative_indices(self.index.start, self.index.stop)
else:
check_negative_indices(self.index)
# Potentially overflowing index value.
if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
self.index = self.index.coerce_to_pyobject(env)
is_memslice = self.base.type.is_memoryviewslice
# Handle the case where base is a literal char* (and we expect a string, not an int)
if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
self.base = self.base.coerce_to_pyobject(env)
replacement_node = self.analyse_as_buffer_operation(env, getting)
if replacement_node is not None:
return replacement_node
self.nogil = env.nogil
base_type = self.base.type
if not base_type.is_cfunction:
self.index = self.index.analyse_types(env)
self.original_index_type = self.index.type
if base_type.is_unicode_char:
# we infer Py_UNICODE/Py_UCS4 for unicode strings in some
# cases, but indexing must still work for them
if setting:
warning(self.pos, "cannot assign to Unicode string index", level=1)
elif self.index.constant_result in (0, -1):
# uchar[0] => uchar
return self.base
self.base = self.base.coerce_to_pyobject(env)
base_type = self.base.type
if base_type.is_pyobject:
return self.analyse_as_pyobject(env, is_slice, getting, setting)
elif base_type.is_ptr or base_type.is_array:
return self.analyse_as_c_array(env, is_slice)
elif base_type.is_cpp_class:
return self.analyse_as_cpp(env, setting)
elif base_type.is_cfunction:
return self.analyse_as_c_function(env)
elif base_type.is_ctuple:
return self.analyse_as_c_tuple(env, getting, setting)
else:
error(self.pos,
"Attempting to index non-array type '%s'" %
base_type)
self.type = PyrexTypes.error_type
return self
def analyse_as_pyobject(self, env, is_slice, getting, setting):
base_type = self.base.type
if self.index.type.is_unicode_char and base_type is not dict_type:
# TODO: eventually fold into case below and remove warning, once people have adapted their code
warning(self.pos,
"Item lookup of unicode character codes now always converts to a Unicode string. "
"Use an explicit C integer cast to get back the previous integer lookup behaviour.", level=1)
self.index = self.index.coerce_to_pyobject(env)
self.is_temp = 1
elif self.index.type.is_int and base_type is not dict_type:
if (getting
and (base_type in (list_type, tuple_type, bytearray_type))
and (not self.index.type.signed
or not env.directives['wraparound']
or (isinstance(self.index, IntNode) and
self.index.has_constant_result() and self.index.constant_result >= 0))
and not env.directives['boundscheck']):
self.is_temp = 0
else:
self.is_temp = 1
self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
self.original_index_type.create_to_py_utility_code(env)
else:
self.index = self.index.coerce_to_pyobject(env)
self.is_temp = 1
if self.index.type.is_int and base_type is unicode_type:
# Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
# if required, so this is fast and safe
self.type = PyrexTypes.c_py_ucs4_type
elif self.index.type.is_int and base_type is bytearray_type:
if setting:
self.type = PyrexTypes.c_uchar_type
else:
# not using 'uchar' to enable fast and safe error reporting as '-1'
self.type = PyrexTypes.c_int_type
elif is_slice and base_type in (bytes_type, bytearray_type, str_type, unicode_type, list_type, tuple_type):
self.type = base_type
else:
item_type = None
if base_type in (list_type, tuple_type) and self.index.type.is_int:
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
if item_type is None:
item_type = py_object_type
self.type = item_type
if base_type in (list_type, tuple_type, dict_type):
# do the None check explicitly (not in a helper) to allow optimising it away
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
self.wrap_in_nonecheck_node(env, getting)
return self
def analyse_as_c_array(self, env, is_slice):
base_type = self.base.type
self.type = base_type.base_type
if is_slice:
self.type = base_type
elif self.index.type.is_pyobject:
self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
elif not self.index.type.is_int:
error(self.pos, "Invalid index type '%s'" % self.index.type)
return self
def analyse_as_cpp(self, env, setting):
base_type = self.base.type
function = env.lookup_operator("[]", [self.base, self.index])
if function is None:
error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return self
func_type = function.type
if func_type.is_ptr:
func_type = func_type.base_type
self.exception_check = func_type.exception_check
self.exception_value = func_type.exception_value
if self.exception_check:
if not setting:
self.is_temp = True
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
self.index = self.index.coerce_to(func_type.args[0].type, env)
self.type = func_type.return_type
if setting and not func_type.return_type.is_reference:
error(self.pos, "Can't set non-reference result '%s'" % self.type)
return self
def analyse_as_c_function(self, env):
base_type = self.base.type
if base_type.is_fused:
self.parse_indexed_fused_cdef(env)
else:
self.type_indices = self.parse_index_as_types(env)
self.index = None # FIXME: use a dedicated Node class instead of generic IndexNode
if base_type.templates is None:
error(self.pos, "Can only parameterize template functions.")
self.type = error_type
elif self.type_indices is None:
# Error recorded earlier.
self.type = error_type
elif len(base_type.templates) != len(self.type_indices):
error(self.pos, "Wrong number of template arguments: expected %s, got %s" % (
(len(base_type.templates), len(self.type_indices))))
self.type = error_type
else:
self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices)))
# FIXME: use a dedicated Node class instead of generic IndexNode
return self
def analyse_as_c_tuple(self, env, getting, setting):
base_type = self.base.type
if isinstance(self.index, IntNode) and self.index.has_constant_result():
index = self.index.constant_result
if -base_type.size <= index < base_type.size:
if index < 0:
index += base_type.size
self.type = base_type.components[index]
else:
error(self.pos,
"Index %s out of bounds for '%s'" %
(index, base_type))
self.type = PyrexTypes.error_type
return self
else:
self.base = self.base.coerce_to_pyobject(env)
return self.analyse_base_and_index_types(env, getting=getting, setting=setting, analyse_base=False)
def analyse_as_buffer_operation(self, env, getting):
"""
Analyse buffer indexing and memoryview indexing/slicing
"""
if isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
base_type = self.base.type
replacement_node = None
if base_type.is_memoryviewslice:
# memoryviewslice indexing or slicing
from . import MemoryView
have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim)
if have_slices:
replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=self.base)
else:
replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=self.base)
elif base_type.is_buffer or base_type.is_pythran_expr:
if base_type.is_pythran_expr or len(indices) == base_type.ndim:
# Buffer indexing
is_buffer_access = True
indices = [index.analyse_types(env) for index in indices]
if base_type.is_pythran_expr:
do_replacement = all(index.type.is_int or index.is_slice or index.type.is_pythran_expr for index in indices)
if do_replacement:
for i,index in enumerate(indices):
if index.is_slice:
index = SliceIntNode(index.pos, start=index.start, stop=index.stop, step=index.step)
index = index.analyse_types(env)
indices[i] = index
else:
do_replacement = all(index.type.is_int for index in indices)
if do_replacement:
replacement_node = BufferIndexNode(self.pos, indices=indices, base=self.base)
# On cloning, indices is cloned. Otherwise, unpack index into indices.
assert not isinstance(self.index, CloneNode)
if replacement_node is not None:
replacement_node = replacement_node.analyse_types(env, getting)
return replacement_node
def wrap_in_nonecheck_node(self, env, getting):
if not env.directives['nonecheck'] or not self.base.may_be_none():
return
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
def parse_index_as_types(self, env, required=True):
if isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
type_indices = []
for index in indices:
type_indices.append(index.analyse_as_type(env))
if type_indices[-1] is None:
if required:
error(index.pos, "not parsable as a type")
return None
return type_indices
def parse_indexed_fused_cdef(self, env):
"""
Interpret fused_cdef_func[specific_type1, ...]
Note that if this method is called, we are an indexed cdef function
with fused argument types, and this IndexNode will be replaced by the
NameNode with specific entry just after analysis of expressions by
AnalyseExpressionsTransform.
"""
self.type = PyrexTypes.error_type
self.is_fused_index = True
base_type = self.base.type
positions = []
if self.index.is_name or self.index.is_attribute:
positions.append(self.index.pos)
elif isinstance(self.index, TupleNode):
for arg in self.index.args:
positions.append(arg.pos)
specific_types = self.parse_index_as_types(env, required=False)
if specific_types is None:
self.index = self.index.analyse_types(env)
if not self.base.entry.as_variable:
error(self.pos, "Can only index fused functions with types")
else:
# A cpdef function indexed with Python objects
self.base.entry = self.entry = self.base.entry.as_variable
self.base.type = self.type = self.entry.type
self.base.is_temp = True
self.is_temp = True
self.entry.used = True
self.is_fused_index = False
return
for i, type in enumerate(specific_types):
specific_types[i] = type.specialize_fused(env)
fused_types = base_type.get_fused_types()
if len(specific_types) > len(fused_types):
return error(self.pos, "Too many types specified")
elif len(specific_types) < len(fused_types):
t = fused_types[len(specific_types)]
return error(self.pos, "Not enough types specified to specialize "
"the function, %s is still fused" % t)
# See if our index types form valid specializations
for pos, specific_type, fused_type in zip(positions,
specific_types,
fused_types):
if not any([specific_type.same_as(t) for t in fused_type.types]):
return error(pos, "Type not in fused type")
if specific_type is None or specific_type.is_error:
return
fused_to_specific = dict(zip(fused_types, specific_types))
type = base_type.specialize(fused_to_specific)
if type.is_fused:
# Only partially specific, this is invalid
error(self.pos,
"Index operation makes function only partially specific")
else:
# Fully specific, find the signature with the specialized entry
for signature in self.base.type.get_all_specialized_function_types():
if type.same_as(signature):
self.type = signature
if self.base.is_attribute:
# Pretend to be a normal attribute, for cdef extension
# methods
self.entry = signature.entry
self.is_attribute = True
self.obj = self.base.obj
self.type.entry.used = True
self.base.type = signature
self.base.entry = signature.entry
break
else:
# This is a bug
raise InternalError("Couldn't find the right signature")
gil_message = "Indexing Python object"
def calculate_result_code(self):
if self.base.type in (list_type, tuple_type, bytearray_type):
if self.base.type is list_type:
index_code = "PyList_GET_ITEM(%s, %s)"
elif self.base.type is tuple_type:
index_code = "PyTuple_GET_ITEM(%s, %s)"
elif self.base.type is bytearray_type:
index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))"
else:
assert False, "unexpected base type in indexing: %s" % self.base.type
elif self.base.type.is_cfunction:
return "%s<%s>" % (
self.base.result(),
",".join([param.empty_declaration_code() for param in self.type_indices]))
elif self.base.type.is_ctuple:
index = self.index.constant_result
if index < 0:
index += self.base.type.size
return "%s.f%s" % (self.base.result(), index)
else:
if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
error(self.pos, "Invalid use of pointer slice")
return
index_code = "(%s[%s])"
return index_code % (self.base.result(), self.index.result())
def extra_index_params(self, code):
if self.index.type.is_int:
is_list = self.base.type is list_type
wraparound = (
bool(code.globalstate.directives['wraparound']) and
self.original_index_type.signed and
not (isinstance(self.index.constant_result, _py_int_types)
and self.index.constant_result >= 0))
boundscheck = bool(code.globalstate.directives['boundscheck'])
return ", %s, %d, %s, %d, %d, %d" % (
self.original_index_type.empty_declaration_code(),
self.original_index_type.signed and 1 or 0,
self.original_index_type.to_py_function,
is_list, wraparound, boundscheck)
else:
return ""
def generate_result_code(self, code):
if not self.is_temp:
# all handled in self.calculate_result_code()
return
if self.type.is_pyobject:
error_value = 'NULL'
if self.index.type.is_int:
if self.base.type is list_type:
function = "__Pyx_GetItemInt_List"
elif self.base.type is tuple_type:
function = "__Pyx_GetItemInt_Tuple"
else:
function = "__Pyx_GetItemInt"
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
else:
if self.base.type is dict_type:
function = "__Pyx_PyDict_GetItem"
code.globalstate.use_utility_code(
UtilityCode.load_cached("DictGetItem", "ObjectHandling.c"))
else:
function = "PyObject_GetItem"
elif self.type.is_unicode_char and self.base.type is unicode_type:
assert self.index.type.is_int
function = "__Pyx_GetItemInt_Unicode"
error_value = '(Py_UCS4)-1'
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c"))
elif self.base.type is bytearray_type:
assert self.index.type.is_int
assert self.type.is_int
function = "__Pyx_GetItemInt_ByteArray"
error_value = '-1'
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c"))
elif not (self.base.type.is_cpp_class and self.exception_check):
assert False, "unexpected type %s and base type %s for indexing" % (
self.type, self.base.type)
if self.index.type.is_int:
index_code = self.index.result()
else:
index_code = self.index.py_result()
if self.base.type.is_cpp_class and self.exception_check:
translate_cpp_exception(code, self.pos,
"%s = %s[%s];" % (self.result(), self.base.result(),
self.index.result()),
self.exception_value, self.in_nogil_context)
else:
error_check = '!%s' if error_value == 'NULL' else '%%s == %s' % error_value
code.putln(
"%s = %s(%s, %s%s); %s" % (
self.result(),
function,
self.base.py_result(),
index_code,
self.extra_index_params(code),
code.error_goto_if(error_check % self.result(), self.pos)))
if self.type.is_pyobject:
code.put_gotref(self.py_result())
def generate_setitem_code(self, value_code, code):
if self.index.type.is_int:
if self.base.type is bytearray_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c"))
function = "__Pyx_SetItemInt_ByteArray"
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("SetItemInt", "ObjectHandling.c"))
function = "__Pyx_SetItemInt"
index_code = self.index.result()
else:
index_code = self.index.py_result()
if self.base.type is dict_type:
function = "PyDict_SetItem"
# It would seem that we could specialize lists/tuples, but that
# shouldn't happen here.
# Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as
# index instead of an object, and bad conversion here would give
# the wrong exception. Also, tuples are supposed to be immutable,
# and raise a TypeError when trying to set their entries
# (PyTuple_SetItem() is for creating new tuples from scratch).
else:
function = "PyObject_SetItem"
code.putln(code.error_goto_if_neg(
"%s(%s, %s, %s%s)" % (
function,
self.base.py_result(),
index_code,
value_code,
self.extra_index_params(code)),
self.pos))
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
self.generate_subexpr_evaluation_code(code)
if self.type.is_pyobject:
self.generate_setitem_code(rhs.py_result(), code)
elif self.base.type is bytearray_type:
value_code = self._check_byte_value(code, rhs)
self.generate_setitem_code(value_code, code)
elif self.base.type.is_cpp_class and self.exception_check and self.exception_check == '+':
if overloaded_assignment and exception_check and \
self.exception_value != exception_value:
# Handle the case that both the index operator and the assignment
# operator have a c++ exception handler and they are not the same.
translate_double_cpp_exception(code, self.pos, self.type,
self.result(), rhs.result(), self.exception_value,
exception_value, self.in_nogil_context)
else:
# Handle the case that only the index operator has a
# c++ exception handler, or that
# both exception handlers are the same.
translate_cpp_exception(code, self.pos,
"%s = %s;" % (self.result(), rhs.result()),
self.exception_value, self.in_nogil_context)
else:
code.putln(
"%s = %s;" % (self.result(), rhs.result()))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def _check_byte_value(self, code, rhs):
# TODO: should we do this generally on downcasts, or just here?
assert rhs.type.is_int, repr(rhs.type)
value_code = rhs.result()
if rhs.has_constant_result():
if 0 <= rhs.constant_result < 256:
return value_code
needs_cast = True # make at least the C compiler happy
warning(rhs.pos,
"value outside of range(0, 256)"
" when assigning to byte: %s" % rhs.constant_result,
level=1)
else:
needs_cast = rhs.type != PyrexTypes.c_uchar_type
if not self.nogil:
conditions = []
if rhs.is_literal or rhs.type.signed:
conditions.append('%s < 0' % value_code)
if (rhs.is_literal or not
(rhs.is_temp and rhs.type in (
PyrexTypes.c_uchar_type, PyrexTypes.c_char_type,
PyrexTypes.c_schar_type))):
conditions.append('%s > 255' % value_code)
if conditions:
code.putln("if (unlikely(%s)) {" % ' || '.join(conditions))
code.putln(
'PyErr_SetString(PyExc_ValueError,'
' "byte must be in range(0, 256)"); %s' %
code.error_goto(self.pos))
code.putln("}")
if needs_cast:
value_code = '((unsigned char)%s)' % value_code
return value_code
def generate_deletion_code(self, code, ignore_nonexisting=False):
self.generate_subexpr_evaluation_code(code)
#if self.type.is_pyobject:
if self.index.type.is_int:
function = "__Pyx_DelItemInt"
index_code = self.index.result()
code.globalstate.use_utility_code(
UtilityCode.load_cached("DelItemInt", "ObjectHandling.c"))
else:
index_code = self.index.py_result()
if self.base.type is dict_type:
function = "PyDict_DelItem"
else:
function = "PyObject_DelItem"
code.putln(code.error_goto_if_neg(
"%s(%s, %s%s)" % (
function,
self.base.py_result(),
index_code,
self.extra_index_params(code)),
self.pos))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
class BufferIndexNode(_IndexingBaseNode):
"""
Indexing of buffers and memoryviews. This node is created during type
analysis from IndexNode and replaces it.
Attributes:
base - base node being indexed
indices - list of indexing expressions
"""
subexprs = ['base', 'indices']
is_buffer_access = True
# Whether we're assigning to a buffer (in that case it needs to be writable)
writable_needed = False
def analyse_target_types(self, env):
self.analyse_types(env, getting=False)
def analyse_types(self, env, getting=True):
"""
Analyse types for buffer indexing only. Overridden by memoryview
indexing and slicing subclasses
"""
# self.indices are already analyzed
if not self.base.is_name and not is_pythran_expr(self.base.type):
error(self.pos, "Can only index buffer variables")
self.type = error_type
return self
if not getting:
if not self.base.entry.type.writable:
error(self.pos, "Writing to readonly buffer")
else:
self.writable_needed = True
if self.base.type.is_buffer:
self.base.entry.buffer_aux.writable_needed = True
self.none_error_message = "'NoneType' object is not subscriptable"
self.analyse_buffer_index(env, getting)
self.wrap_in_nonecheck_node(env)
return self
def analyse_buffer_index(self, env, getting):
if is_pythran_expr(self.base.type):
self.type = PythranExpr(pythran_indexing_type(self.base.type, self.indices))
else:
self.base = self.base.coerce_to_simple(env)
self.type = self.base.type.dtype
self.buffer_type = self.base.type
if getting and (self.type.is_pyobject or self.type.is_pythran_expr):
self.is_temp = True
def analyse_assignment(self, rhs):
"""
Called by IndexNode when this node is assigned to,
with the rhs of the assignment
"""
def wrap_in_nonecheck_node(self, env):
if not env.directives['nonecheck'] or not self.base.may_be_none():
return
self.base = self.base.as_none_safe_node(self.none_error_message)
def nogil_check(self, env):
if self.is_buffer_access or self.is_memview_index:
if env.directives['boundscheck']:
warning(self.pos, "Use boundscheck(False) for faster access",
level=1)
if self.type.is_pyobject:
error(self.pos, "Cannot access buffer with object dtype without gil")
self.type = error_type
def calculate_result_code(self):
return "(*%s)" % self.buffer_ptr_code
def buffer_entry(self):
base = self.base
if self.base.is_nonecheck:
base = base.arg
return base.type.get_entry(base)
def get_index_in_temp(self, code, ivar):
ret = code.funcstate.allocate_temp(
PyrexTypes.widest_numeric_type(
ivar.type,
PyrexTypes.c_ssize_t_type if ivar.type.signed else PyrexTypes.c_size_t_type),
manage_ref=False)
code.putln("%s = %s;" % (ret, ivar.result()))
return ret
def buffer_lookup_code(self, code):
"""
ndarray[1, 2, 3] and memslice[1, 2, 3]
"""
# Assign indices to temps of at least (s)size_t to allow further index calculations.
index_temps = [self.get_index_in_temp(code,ivar) for ivar in self.indices]
# Generate buffer access code using these temps
from . import Buffer
buffer_entry = self.buffer_entry()
if buffer_entry.type.is_buffer:
negative_indices = buffer_entry.type.negative_indices
else:
negative_indices = Buffer.buffer_defaults['negative_indices']
return buffer_entry, Buffer.put_buffer_lookup_code(
entry=buffer_entry,
index_signeds=[ivar.type.signed for ivar in self.indices],
index_cnames=index_temps,
directives=code.globalstate.directives,
pos=self.pos, code=code,
negative_indices=negative_indices,
in_nogil_context=self.in_nogil_context)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
self.generate_subexpr_evaluation_code(code)
self.generate_buffer_setitem_code(rhs, code)
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def generate_buffer_setitem_code(self, rhs, code, op=""):
base_type = self.base.type
if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type):
obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False)
# We have got to do this because we have to declare pythran objects
# at the beginning of the functions.
# Indeed, Cython uses "goto" statement for error management, and
# RAII doesn't work with that kind of construction.
# Moreover, the way Pythran expressions are implemented, they don't
# support move assignment easily.
# Thus, we explicitly destroy and then placement-new the objects in
# this case.
code.putln("__Pyx_call_destructor(%s);" % obj)
code.putln("new (&%s) decltype(%s){%s};" % (obj, obj, self.base.pythran_result()))
code.putln("%s%s %s= %s;" % (
obj,
pythran_indexing_code(self.indices),
op,
rhs.pythran_result()))
return
# Used from generate_assignment_code and InPlaceAssignmentNode
buffer_entry, ptrexpr = self.buffer_lookup_code(code)
if self.buffer_type.dtype.is_pyobject:
# Must manage refcounts. Decref what is already there
# and incref what we put in.
ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
manage_ref=False)
rhs_code = rhs.result()
code.putln("%s = %s;" % (ptr, ptrexpr))
code.put_gotref("*%s" % ptr)
code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % (
rhs_code, ptr))
code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
code.put_giveref("*%s" % ptr)
code.funcstate.release_temp(ptr)
else:
# Simple case
code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
def generate_result_code(self, code):
if is_pythran_expr(self.base.type):
res = self.result()
code.putln("__Pyx_call_destructor(%s);" % res)
code.putln("new (&%s) decltype(%s){%s%s};" % (
res,
res,
self.base.pythran_result(),
pythran_indexing_code(self.indices)))
return
buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
if self.type.is_pyobject:
# is_temp is True, so must pull out value and incref it.
# NOTE: object temporary results for nodes are declared
# as PyObject *, so we need a cast
code.putln("%s = (PyObject *) *%s;" % (self.result(), self.buffer_ptr_code))
code.putln("__Pyx_INCREF((PyObject*)%s);" % self.result())
class MemoryViewIndexNode(BufferIndexNode):
is_memview_index = True
is_buffer_access = False
warned_untyped_idx = False
def analyse_types(self, env, getting=True):
# memoryviewslice indexing or slicing
from . import MemoryView
self.is_pythran_mode = has_np_pythran(env)
indices = self.indices
have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim)
self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim)
axes = []
index_type = PyrexTypes.c_py_ssize_t_type
new_indices = []
if len(indices) - len(newaxes) > self.base.type.ndim:
self.type = error_type
error(indices[self.base.type.ndim].pos,
"Too many indices specified for type %s" % self.base.type)
return self
axis_idx = 0
for i, index in enumerate(indices[:]):
index = index.analyse_types(env)
if index.is_none:
self.is_memview_slice = True
new_indices.append(index)
axes.append(('direct', 'strided'))
continue
access, packing = self.base.type.axes[axis_idx]
axis_idx += 1
if index.is_slice:
self.is_memview_slice = True
if index.step.is_none:
axes.append((access, packing))
else:
axes.append((access, 'strided'))
# Coerce start, stop and step to temps of the right type
for attr in ('start', 'stop', 'step'):
value = getattr(index, attr)
if not value.is_none:
value = value.coerce_to(index_type, env)
#value = value.coerce_to_temp(env)
setattr(index, attr, value)
new_indices.append(value)
elif index.type.is_int or index.type.is_pyobject:
if index.type.is_pyobject and not self.warned_untyped_idx:
warning(index.pos, "Index should be typed for more efficient access", level=2)
MemoryViewIndexNode.warned_untyped_idx = True
self.is_memview_index = True
index = index.coerce_to(index_type, env)
indices[i] = index
new_indices.append(index)
else:
self.type = error_type
error(index.pos, "Invalid index for memoryview specified, type %s" % index.type)
return self
### FIXME: replace by MemoryViewSliceNode if is_memview_slice ?
self.is_memview_index = self.is_memview_index and not self.is_memview_slice
self.indices = new_indices
# All indices with all start/stop/step for slices.
# We need to keep this around.
self.original_indices = indices
self.nogil = env.nogil
self.analyse_operation(env, getting, axes)
self.wrap_in_nonecheck_node(env)
return self
def analyse_operation(self, env, getting, axes):
self.none_error_message = "Cannot index None memoryview slice"
self.analyse_buffer_index(env, getting)
def analyse_broadcast_operation(self, rhs):
"""
Support broadcasting for slice assignment.
E.g.
m_2d[...] = m_1d # or,
m_1d[...] = m_2d # if the leading dimension has extent 1
"""
if self.type.is_memoryviewslice:
lhs = self
if lhs.is_memview_broadcast or rhs.is_memview_broadcast:
lhs.is_memview_broadcast = True
rhs.is_memview_broadcast = True
def analyse_as_memview_scalar_assignment(self, rhs):
lhs = self.analyse_assignment(rhs)
if lhs:
rhs.is_memview_copy_assignment = lhs.is_memview_copy_assignment
return lhs
return self
class MemoryViewSliceNode(MemoryViewIndexNode):
is_memview_slice = True
# No-op slicing operation, this node will be replaced
is_ellipsis_noop = False
is_memview_scalar_assignment = False
is_memview_index = False
is_memview_broadcast = False
def analyse_ellipsis_noop(self, env, getting):
"""Slicing operations needing no evaluation, i.e. m[...] or m[:, :]"""
### FIXME: replace directly
self.is_ellipsis_noop = all(
index.is_slice and index.start.is_none and index.stop.is_none and index.step.is_none
for index in self.indices)
if self.is_ellipsis_noop:
self.type = self.base.type
def analyse_operation(self, env, getting, axes):
from . import MemoryView
if not getting:
self.is_memview_broadcast = True
self.none_error_message = "Cannot assign to None memoryview slice"
else:
self.none_error_message = "Cannot slice None memoryview slice"
self.analyse_ellipsis_noop(env, getting)
if self.is_ellipsis_noop:
return
self.index = None
self.is_temp = True
self.use_managed_ref = True
if not MemoryView.validate_axes(self.pos, axes):
self.type = error_type
return
self.type = PyrexTypes.MemoryViewSliceType(self.base.type.dtype, axes)
if not (self.base.is_simple() or self.base.result_in_temp()):
self.base = self.base.coerce_to_temp(env)
def analyse_assignment(self, rhs):
if not rhs.type.is_memoryviewslice and (
self.type.dtype.assignable_from(rhs.type) or
rhs.type.is_pyobject):
# scalar assignment
return MemoryCopyScalar(self.pos, self)
else:
return MemoryCopySlice(self.pos, self)
def is_simple(self):
if self.is_ellipsis_noop:
# TODO: fix SimpleCallNode.is_simple()
return self.base.is_simple() or self.base.result_in_temp()
return self.result_in_temp()
def calculate_result_code(self):
"""This is called in case this is a no-op slicing node"""
return self.base.result()
def generate_result_code(self, code):
if self.is_ellipsis_noop:
return ### FIXME: remove
buffer_entry = self.buffer_entry()
have_gil = not self.in_nogil_context
# TODO Mark: this is insane, do it better
have_slices = False
it = iter(self.indices)
for index in self.original_indices:
if index.is_slice:
have_slices = True
if not index.start.is_none:
index.start = next(it)
if not index.stop.is_none:
index.stop = next(it)
if not index.step.is_none:
index.step = next(it)
else:
next(it)
assert not list(it)
buffer_entry.generate_buffer_slice_code(
code, self.original_indices, self.result(),
have_gil=have_gil, have_slices=have_slices,
directives=code.globalstate.directives)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
if self.is_ellipsis_noop:
self.generate_subexpr_evaluation_code(code)
else:
self.generate_evaluation_code(code)
if self.is_memview_scalar_assignment:
self.generate_memoryviewslice_assign_scalar_code(rhs, code)
else:
self.generate_memoryviewslice_setslice_code(rhs, code)
if self.is_ellipsis_noop:
self.generate_subexpr_disposal_code(code)
else:
self.generate_disposal_code(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
class MemoryCopyNode(ExprNode):
"""
Wraps a memoryview slice for slice assignment.
dst: destination memoryview slice
"""
subexprs = ['dst']
def __init__(self, pos, dst):
super(MemoryCopyNode, self).__init__(pos)
self.dst = dst
self.type = dst.type
def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
self.dst.generate_evaluation_code(code)
self._generate_assignment_code(rhs, code)
self.dst.generate_disposal_code(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
class MemoryCopySlice(MemoryCopyNode):
"""
Copy the contents of slice src to slice dst. Does not support indirect
slices.
memslice1[...] = memslice2
memslice1[:] = memslice2
"""
is_memview_copy_assignment = True
copy_slice_cname = "__pyx_memoryview_copy_contents"
def _generate_assignment_code(self, src, code):
dst = self.dst
src.type.assert_direct_dims(src.pos)
dst.type.assert_direct_dims(dst.pos)
code.putln(code.error_goto_if_neg(
"%s(%s, %s, %d, %d, %d)" % (self.copy_slice_cname,
src.result(), dst.result(),
src.type.ndim, dst.type.ndim,
dst.type.dtype.is_pyobject),
dst.pos))
class MemoryCopyScalar(MemoryCopyNode):
"""
Assign a scalar to a slice. dst must be simple; the scalar will be
coerced to the slice's dtype, not merely to something assignable.
memslice1[...] = 0.0
memslice1[:] = 0.0
"""
def __init__(self, pos, dst):
super(MemoryCopyScalar, self).__init__(pos, dst)
self.type = dst.type.dtype
def _generate_assignment_code(self, scalar, code):
from . import MemoryView
self.dst.type.assert_direct_dims(self.dst.pos)
dtype = self.dst.type.dtype
type_decl = dtype.declaration_code("")
slice_decl = self.dst.type.declaration_code("")
code.begin_block()
code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result()))
if self.dst.result_in_temp() or self.dst.is_simple():
dst_temp = self.dst.result()
else:
code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, self.dst.result()))
dst_temp = "__pyx_temp_slice"
slice_iter_obj = MemoryView.slice_iter(self.dst.type, dst_temp,
self.dst.type.ndim, code)
p = slice_iter_obj.start_loops()
if dtype.is_pyobject:
code.putln("Py_DECREF(*(PyObject **) %s);" % p)
code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p))
if dtype.is_pyobject:
code.putln("Py_INCREF(__pyx_temp_scalar);")
slice_iter_obj.end_loops()
code.end_block()
class SliceIndexNode(ExprNode):
# 2-element slice indexing
#
# base ExprNode
# start ExprNode or None
# stop ExprNode or None
# slice ExprNode or None constant slice object
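# For illustration: a two-element slice such as s[1:n] is handled here
# (base=s, start=1, stop=n), whereas slices that carry a step are
# represented through a SliceNode index instead.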
subexprs = ['base', 'start', 'stop', 'slice']
slice = None
def infer_type(self, env):
base_type = self.base.infer_type(env)
if base_type.is_string or base_type.is_cpp_class:
return bytes_type
elif base_type.is_pyunicode_ptr:
return unicode_type
elif base_type in (bytes_type, bytearray_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return base_type
elif base_type.is_ptr or base_type.is_array:
return PyrexTypes.c_array_type(base_type.base_type, None)
return py_object_type
def inferable_item_node(self, index=0):
# slicing shouldn't change the result type of the base, but the index might
if index is not not_a_constant and self.start:
if self.start.has_constant_result():
index += self.start.constant_result
else:
index = not_a_constant
return self.base.inferable_item_node(index)
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def calculate_constant_result(self):
if self.start is None:
start = None
else:
start = self.start.constant_result
if self.stop is None:
stop = None
else:
stop = self.stop.constant_result
self.constant_result = self.base.constant_result[start:stop]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
if self.start is None:
start = 0
else:
start = self.start.compile_time_value(denv)
if self.stop is None:
stop = None
else:
stop = self.stop.compile_time_value(denv)
try:
return base[start:stop]
except Exception as e:
self.compile_time_value_error(e)
def analyse_target_declaration(self, env):
pass
def analyse_target_types(self, env):
node = self.analyse_types(env, getting=False)
# when assigning, we must accept any Python type
if node.type.is_pyobject:
node.type = py_object_type
return node
def analyse_types(self, env, getting=True):
self.base = self.base.analyse_types(env)
if self.base.type.is_buffer or self.base.type.is_pythran_expr or self.base.type.is_memoryviewslice:
none_node = NoneNode(self.pos)
index = SliceNode(self.pos,
start=self.start or none_node,
stop=self.stop or none_node,
step=none_node)
index_node = IndexNode(self.pos, index=index, base=self.base)
return index_node.analyse_base_and_index_types(
env, getting=getting, setting=not getting,
analyse_base=False)
if self.start:
self.start = self.start.analyse_types(env)
if self.stop:
self.stop = self.stop.analyse_types(env)
if not env.directives['wraparound']:
check_negative_indices(self.start, self.stop)
base_type = self.base.type
if base_type.is_array and not getting:
# cannot assign directly to C array => try to assign by making a copy
if not self.start and not self.stop:
self.type = base_type
else:
self.type = PyrexTypes.CPtrType(base_type.base_type)
elif base_type.is_string or base_type.is_cpp_string:
self.type = default_str_type(env)
elif base_type.is_pyunicode_ptr:
self.type = unicode_type
elif base_type.is_ptr:
self.type = base_type
elif base_type.is_array:
# we need a ptr type here instead of an array type, as
# array types can result in invalid type casts in the C
# code
self.type = PyrexTypes.CPtrType(base_type.base_type)
else:
self.base = self.base.coerce_to_pyobject(env)
self.type = py_object_type
if base_type.is_builtin_type:
# slicing builtin types returns something of the same type
self.type = base_type
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
if self.type is py_object_type:
if (not self.start or self.start.is_literal) and \
(not self.stop or self.stop.is_literal):
# cache the constant slice object, in case we need it
none_node = NoneNode(self.pos)
self.slice = SliceNode(
self.pos,
start=copy.deepcopy(self.start or none_node),
stop=copy.deepcopy(self.stop or none_node),
step=none_node
).analyse_types(env)
else:
c_int = PyrexTypes.c_py_ssize_t_type
if self.start:
self.start = self.start.coerce_to(c_int, env)
if self.stop:
self.stop = self.stop.coerce_to(c_int, env)
self.is_temp = 1
return self
nogil_check = Node.gil_error
gil_message = "Slicing Python object"
get_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Get'})
set_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Set'})
def coerce_to(self, dst_type, env):
if ((self.base.type.is_string or self.base.type.is_cpp_string)
and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)):
if (dst_type not in (bytes_type, bytearray_type)
and not env.directives['c_string_encoding']):
error(self.pos,
"default encoding required for conversion from '%s' to '%s'" %
(self.base.type, dst_type))
self.type = dst_type
if dst_type.is_array and self.base.type.is_array:
if not self.start and not self.stop:
# redundant slice building, copy C arrays directly
return self.base.coerce_to(dst_type, env)
# else: check array size if possible
return super(SliceIndexNode, self).coerce_to(dst_type, env)
def generate_result_code(self, code):
if not self.type.is_pyobject:
error(self.pos,
"Slicing is not currently supported for '%s'." % self.type)
return
base_result = self.base.result()
result = self.result()
start_code = self.start_code()
stop_code = self.stop_code()
if self.base.type.is_string:
base_result = self.base.result()
if self.base.type not in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
base_result = '((const char*)%s)' % base_result
if self.type is bytearray_type:
type_name = 'ByteArray'
else:
type_name = self.type.name.title()
if self.stop is None:
code.putln(
"%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
result,
type_name,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
result,
type_name,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type.is_pyunicode_ptr:
base_result = self.base.result()
if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
base_result = '((const Py_UNICODE*)%s)' % base_result
if self.stop is None:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
result,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
result,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type is unicode_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
code.putln(
"%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
result,
base_result,
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
elif self.type is py_object_type:
code.globalstate.use_utility_code(self.get_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.putln(
"%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
result,
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound']),
code.error_goto_if_null(result, self.pos)))
else:
if self.base.type is list_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyList_GetSlice'
elif self.base.type is tuple_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyTuple_GetSlice'
else:
cfunc = 'PySequence_GetSlice'
code.putln(
"%s = %s(%s, %s, %s); %s" % (
result,
cfunc,
self.base.py_result(),
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
code.put_gotref(self.py_result())
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
self.generate_subexpr_evaluation_code(code)
if self.type.is_pyobject:
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
rhs.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
else:
start_offset = self.start_code() if self.start else '0'
if rhs.type.is_array:
array_length = rhs.type.size
self.generate_slice_guard_code(code, array_length)
else:
array_length = '%s - %s' % (self.stop_code(), start_offset)
code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
code.putln("memcpy(&(%s[%s]), %s, sizeof(%s[0]) * (%s));" % (
self.base.result(), start_offset,
rhs.result(),
self.base.result(), array_length
))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if not self.base.type.is_pyobject:
error(self.pos,
"Deleting slices is only supported for Python types, not '%s'." % self.type)
return
self.generate_subexpr_evaluation_code(code)
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
def get_slice_config(self):
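        # Informal example: for `obj[i:]` with a C-integer `i`, this returns
        # roughly (True, False, <i result>, '0', 'NULL', 'NULL', 'NULL');
        # Python-typed bounds are instead passed by address via py_start/py_stop.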
has_c_start, c_start, py_start = False, '0', 'NULL'
if self.start:
has_c_start = not self.start.type.is_pyobject
if has_c_start:
c_start = self.start.result()
else:
py_start = '&%s' % self.start.py_result()
has_c_stop, c_stop, py_stop = False, '0', 'NULL'
if self.stop:
has_c_stop = not self.stop.type.is_pyobject
if has_c_stop:
c_stop = self.stop.result()
else:
py_stop = '&%s' % self.stop.py_result()
py_slice = self.slice and '&%s' % self.slice.py_result() or 'NULL'
return (has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice)
def generate_slice_guard_code(self, code, target_size):
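        # Informal example: for `cdef int a[5]` and an assignment to `a[1:-1]`,
        # slice_size works out to 5 + (-1) - 1 == 3, which is compared against
        # the right-hand side's length at compile time if both are known,
        # otherwise via the runtime check emitted below.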
if not self.base.type.is_array:
return
slice_size = self.base.type.size
try:
total_length = slice_size = int(slice_size)
except ValueError:
total_length = None
start = stop = None
if self.stop:
stop = self.stop.result()
try:
stop = int(stop)
if stop < 0:
if total_length is None:
slice_size = '%s + %d' % (slice_size, stop)
else:
slice_size += stop
else:
slice_size = stop
stop = None
except ValueError:
pass
if self.start:
start = self.start.result()
try:
start = int(start)
if start < 0:
if total_length is None:
start = '%s + %d' % (self.base.type.size, start)
else:
start += total_length
if isinstance(slice_size, _py_int_types):
slice_size -= start
else:
slice_size = '%s - (%s)' % (slice_size, start)
start = None
except ValueError:
pass
runtime_check = None
compile_time_check = False
try:
int_target_size = int(target_size)
except ValueError:
int_target_size = None
else:
compile_time_check = isinstance(slice_size, _py_int_types)
if compile_time_check and slice_size < 0:
if int_target_size > 0:
error(self.pos, "Assignment to empty slice.")
elif compile_time_check and start is None and stop is None:
# we know the exact slice length
if int_target_size != slice_size:
error(self.pos, "Assignment to slice of wrong length, expected %s, got %s" % (
slice_size, target_size))
elif start is not None:
if stop is None:
stop = slice_size
runtime_check = "(%s)-(%s)" % (stop, start)
elif stop is not None:
runtime_check = stop
else:
runtime_check = slice_size
if runtime_check:
code.putln("if (unlikely((%s) != (%s))) {" % (runtime_check, target_size))
code.putln(
'PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length,'
' expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d",'
' (Py_ssize_t)(%s), (Py_ssize_t)(%s));' % (
target_size, runtime_check))
code.putln(code.error_goto(self.pos))
code.putln("}")
def start_code(self):
if self.start:
return self.start.result()
else:
return "0"
def stop_code(self):
if self.stop:
return self.stop.result()
elif self.base.type.is_array:
return self.base.type.size
else:
return "PY_SSIZE_T_MAX"
def calculate_result_code(self):
# self.result() is not used, but this method must exist
return "<unused>"
class SliceNode(ExprNode):
# start:stop:step in subscript list
#
# start ExprNode
# stop ExprNode
# step ExprNode
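    # Informal example: the subscript in `obj[lo:hi:2]` becomes a
    # SliceNode(start=lo, stop=hi, step=2); generate_result_code below emits a
    # PySlice_New() call, cached as a constant if all three parts are literals.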
subexprs = ['start', 'stop', 'step']
is_slice = True
type = slice_type
is_temp = 1
def calculate_constant_result(self):
self.constant_result = slice(
self.start.constant_result,
self.stop.constant_result,
self.step.constant_result)
def compile_time_value(self, denv):
start = self.start.compile_time_value(denv)
stop = self.stop.compile_time_value(denv)
step = self.step.compile_time_value(denv)
try:
return slice(start, stop, step)
except Exception as e:
self.compile_time_value_error(e)
def may_be_none(self):
return False
def analyse_types(self, env):
start = self.start.analyse_types(env)
stop = self.stop.analyse_types(env)
step = self.step.analyse_types(env)
self.start = start.coerce_to_pyobject(env)
self.stop = stop.coerce_to_pyobject(env)
self.step = step.coerce_to_pyobject(env)
if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
self.is_literal = True
self.is_temp = False
return self
gil_message = "Constructing Python slice object"
def calculate_result_code(self):
return self.result_code
def generate_result_code(self, code):
if self.is_literal:
self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
code.putln(
"%s = PySlice_New(%s, %s, %s); %s" % (
self.result(),
self.start.py_result(),
self.stop.py_result(),
self.step.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
if self.is_literal:
code.put_giveref(self.py_result())
class SliceIntNode(SliceNode):
# start:stop:step in subscript list
    # This is just a node to hold start, stop and step nodes that can be
    # converted to integers. It does not generate a Python slice object.
#
# start ExprNode
# stop ExprNode
# step ExprNode
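    # Informal note: unlike SliceNode above, no PySlice_New() call is emitted;
    # the start/stop/step components are kept as (coerced) C integers for
    # consumers that index directly.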
is_temp = 0
def calculate_constant_result(self):
self.constant_result = slice(
self.start.constant_result,
self.stop.constant_result,
self.step.constant_result)
def compile_time_value(self, denv):
start = self.start.compile_time_value(denv)
stop = self.stop.compile_time_value(denv)
step = self.step.compile_time_value(denv)
try:
return slice(start, stop, step)
except Exception as e:
self.compile_time_value_error(e)
def may_be_none(self):
return False
def analyse_types(self, env):
self.start = self.start.analyse_types(env)
self.stop = self.stop.analyse_types(env)
self.step = self.step.analyse_types(env)
if not self.start.is_none:
self.start = self.start.coerce_to_integer(env)
if not self.stop.is_none:
self.stop = self.stop.coerce_to_integer(env)
if not self.step.is_none:
self.step = self.step.coerce_to_integer(env)
if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
self.is_literal = True
self.is_temp = False
return self
def calculate_result_code(self):
pass
def generate_result_code(self, code):
        for a in (self.start, self.stop, self.step):
if isinstance(a, CloneNode):
a.arg.result()
class CallNode(ExprNode):
# allow overriding the default 'may_be_none' behaviour
may_return_none = None
def infer_type(self, env):
# TODO(robertwb): Reduce redundancy with analyse_types.
function = self.function
func_type = function.infer_type(env)
if isinstance(function, NewExprNode):
# note: needs call to infer_type() above
return PyrexTypes.CPtrType(function.class_type)
if func_type is py_object_type:
# function might have lied for safety => try to find better type
entry = getattr(function, 'entry', None)
if entry is not None:
func_type = entry.type or func_type
if func_type.is_ptr:
func_type = func_type.base_type
if func_type.is_cfunction:
if getattr(self.function, 'entry', None) and hasattr(self, 'args'):
alternatives = self.function.entry.all_alternatives()
arg_types = [arg.infer_type(env) for arg in self.args]
func_entry = PyrexTypes.best_match(arg_types, alternatives)
if func_entry:
func_type = func_entry.type
if func_type.is_ptr:
func_type = func_type.base_type
return func_type.return_type
return func_type.return_type
elif func_type is type_type:
if function.is_name and function.entry and function.entry.type:
result_type = function.entry.type
if result_type.is_extension_type:
return result_type
elif result_type.is_builtin_type:
if function.entry.name == 'float':
return PyrexTypes.c_double_type
elif function.entry.name in Builtin.types_that_construct_their_instance:
return result_type
return py_object_type
def type_dependencies(self, env):
        # TODO: Update when Danilo's C++ code is merged in, to handle
        # the case of function overloading.
return self.function.type_dependencies(env)
def is_simple(self):
# C function calls could be considered simple, but they may
# have side-effects that may hit when multiple operations must
# be effected in order, e.g. when constructing the argument
# sequence for a function call or comparing values.
return False
def may_be_none(self):
if self.may_return_none is not None:
return self.may_return_none
func_type = self.function.type
if func_type is type_type and self.function.is_name:
entry = self.function.entry
if entry.type.is_extension_type:
return False
if (entry.type.is_builtin_type and
entry.name in Builtin.types_that_construct_their_instance):
return False
return ExprNode.may_be_none(self)
def analyse_as_type_constructor(self, env):
type = self.function.analyse_as_type(env)
if type and type.is_struct_or_union:
args, kwds = self.explicit_args_kwds()
items = []
for arg, member in zip(args, type.scope.var_entries):
items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
if kwds:
items += kwds.key_value_pairs
self.key_value_pairs = items
self.__class__ = DictNode
self.analyse_types(env) # FIXME
self.coerce_to(type, env)
return True
elif type and type.is_cpp_class:
self.args = [ arg.analyse_types(env) for arg in self.args ]
constructor = type.scope.lookup("<init>")
if not constructor:
error(self.function.pos, "no constructor found for C++ type '%s'" % self.function.name)
self.type = error_type
return self
self.function = RawCNameExprNode(self.function.pos, constructor.type)
self.function.entry = constructor
self.function.set_cname(type.empty_declaration_code())
self.analyse_c_function_call(env)
self.type = type
return True
def is_lvalue(self):
return self.type.is_reference
def nogil_check(self, env):
func_type = self.function_type()
if func_type.is_pyobject:
self.gil_error()
elif not getattr(func_type, 'nogil', False):
self.gil_error()
gil_message = "Calling gil-requiring function"
class SimpleCallNode(CallNode):
# Function call without keyword, * or ** args.
#
# function ExprNode
# args [ExprNode]
# arg_tuple ExprNode or None used internally
# self ExprNode or None used internally
# coerced_self ExprNode or None used internally
# wrapper_call bool used internally
# has_optional_args bool used internally
# nogil bool used internally
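    # Informal examples: `f(a, b)` is a SimpleCallNode, while `f(a, *rest)`
    # and `f(a, x=1)` are handled by GeneralCallNode further below.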
subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
self = None
coerced_self = None
arg_tuple = None
wrapper_call = False
has_optional_args = False
nogil = False
analysed = False
overflowcheck = False
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
args = [arg.compile_time_value(denv) for arg in self.args]
try:
return function(*args)
except Exception as e:
self.compile_time_value_error(e)
def analyse_as_type(self, env):
attr = self.function.as_cython_attribute()
if attr == 'pointer':
if len(self.args) != 1:
                error(self.pos, "only one type allowed.")
else:
type = self.args[0].analyse_as_type(env)
if not type:
error(self.args[0].pos, "Unknown type")
else:
return PyrexTypes.CPtrType(type)
elif attr == 'typeof':
if len(self.args) != 1:
                error(self.pos, "only one type allowed.")
operand = self.args[0].analyse_types(env)
return operand.type
def explicit_args_kwds(self):
return self.args, None
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
if self.analysed:
return self
self.analysed = True
self.function.is_called = 1
self.function = self.function.analyse_types(env)
function = self.function
if function.is_attribute and function.entry and function.entry.is_cmethod:
# Take ownership of the object from which the attribute
# was obtained, because we need to pass it as 'self'.
self.self = function.obj
function.obj = CloneNode(self.self)
func_type = self.function_type()
self.is_numpy_call_with_exprs = False
if has_np_pythran(env) and self.function.is_numpy_attribute:
has_pythran_args = True
self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env)
for arg in self.arg_tuple.args:
has_pythran_args &= is_pythran_supported_node_or_none(arg)
self.is_numpy_call_with_exprs = bool(has_pythran_args)
if self.is_numpy_call_with_exprs:
self.args = None
env.add_include_file("pythonic/numpy/%s.hpp" % self.function.attribute)
self.type = PythranExpr(pythran_func_type(self.function.attribute, self.arg_tuple.args))
self.may_return_none = True
self.is_temp = 1
elif func_type.is_pyobject:
self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env)
self.args = None
if func_type is Builtin.type_type and function.is_name and \
function.entry and \
function.entry.is_builtin and \
function.entry.name in Builtin.types_that_construct_their_instance:
# calling a builtin type that returns a specific object type
if function.entry.name == 'float':
                    # the conversion to a C double is actually carried out later, in a transform
self.type = PyrexTypes.c_double_type
self.result_ctype = PyrexTypes.c_double_type
else:
self.type = Builtin.builtin_types[function.entry.name]
self.result_ctype = py_object_type
self.may_return_none = False
elif function.is_name and function.type_entry:
# We are calling an extension type constructor. As
# long as we do not support __new__(), the result type
# is clear
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
else:
self.type = py_object_type
self.is_temp = 1
else:
self.args = [ arg.analyse_types(env) for arg in self.args ]
self.analyse_c_function_call(env)
if func_type.exception_check == '+':
self.is_temp = True
return self
def function_type(self):
# Return the type of the function being called, coercing a function
# pointer to a function if necessary. If the function has fused
# arguments, return the specific type.
func_type = self.function.type
if func_type.is_ptr:
func_type = func_type.base_type
return func_type
def analyse_c_function_call(self, env):
func_type = self.function.type
if func_type is error_type:
self.type = error_type
return
if func_type.is_cfunction and func_type.is_static_method:
if self.self and self.self.type.is_extension_type:
# To support this we'd need to pass self to determine whether
# it was overloaded in Python space (possibly via a Cython
# superclass turning a cdef method into a cpdef one).
error(self.pos, "Cannot call a static method on an instance variable.")
args = self.args
elif self.self:
args = [self.self] + self.args
else:
args = self.args
if func_type.is_cpp_class:
overloaded_entry = self.function.type.scope.lookup("operator()")
if overloaded_entry is None:
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
elif hasattr(self.function, 'entry'):
overloaded_entry = self.function.entry
elif self.function.is_subscript and self.function.is_fused_index:
overloaded_entry = self.function.type.entry
else:
overloaded_entry = None
if overloaded_entry:
if self.function.type.is_fused:
functypes = self.function.type.get_all_specialized_function_types()
alternatives = [f.entry for f in functypes]
else:
alternatives = overloaded_entry.all_alternatives()
entry = PyrexTypes.best_match(
[arg.type for arg in args], alternatives, self.pos, env, args)
if not entry:
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
entry.used = True
if not func_type.is_cpp_class:
self.function.entry = entry
self.function.type = entry.type
func_type = self.function_type()
else:
entry = None
func_type = self.function_type()
if not func_type.is_cfunction:
error(self.pos, "Calling non-function type '%s'" % func_type)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
# Check no. of args
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(args)
if func_type.optional_arg_count and expected_nargs != actual_nargs:
self.has_optional_args = 1
self.is_temp = 1
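        # Informal example: for `cdef void f(int a, int b, int c=0)`,
        # max_nargs is 3 and expected_nargs is 2; a call `f(1, 2, 3)` supplies
        # one optional value, so has_optional_args is set and the generated
        # code fills an optional-args struct whose count field is 1.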
# check 'self' argument
if entry and entry.is_cmethod and func_type.args and not func_type.is_static_method:
formal_arg = func_type.args[0]
arg = args[0]
if formal_arg.not_none:
if self.self:
self.self = self.self.as_none_safe_node(
"'NoneType' object has no attribute '%{0}s'".format('.30' if len(entry.name) <= 30 else ''),
error='PyExc_AttributeError',
format_args=[entry.name])
else:
# unbound method
arg = arg.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'",
format_args=[entry.name, formal_arg.type.name])
if self.self:
if formal_arg.accept_builtin_subtypes:
arg = CMethodSelfCloneNode(self.self)
else:
arg = CloneNode(self.self)
arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
elif formal_arg.type.is_builtin_type:
# special case: unbound methods of builtins accept subtypes
arg = arg.coerce_to(formal_arg.type, env)
if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
arg.exact_builtin_type = False
args[0] = arg
# Coerce arguments
some_args_in_temps = False
for i in range(min(max_nargs, actual_nargs)):
formal_arg = func_type.args[i]
formal_type = formal_arg.type
if formal_type.is_const:
formal_type = formal_type.const_base_type
arg = args[i].coerce_to(formal_type, env)
if formal_arg.not_none:
# C methods must do the None checks at *call* time
arg = arg.as_none_safe_node(
"cannot pass None into a C function argument that is declared 'not None'")
if arg.is_temp:
if i > 0:
# first argument in temp doesn't impact subsequent arguments
some_args_in_temps = True
elif arg.type.is_pyobject and not env.nogil:
if i == 0 and self.self is not None:
# a method's cloned "self" argument is ok
pass
elif arg.nonlocally_immutable():
# plain local variables are ok
pass
else:
# we do not safely own the argument's reference,
# but we must make sure it cannot be collected
# before we return from the function, so we create
# an owned temp reference to it
if i > 0: # first argument doesn't matter
some_args_in_temps = True
arg = arg.coerce_to_temp(env)
args[i] = arg
# handle additional varargs parameters
for i in range(max_nargs, actual_nargs):
arg = args[i]
if arg.type.is_pyobject:
if arg.type is str_type:
arg_ctype = PyrexTypes.c_char_ptr_type
else:
arg_ctype = arg.type.default_coerced_ctype()
if arg_ctype is None:
error(self.args[i].pos,
"Python object cannot be passed as a varargs parameter")
else:
args[i] = arg = arg.coerce_to(arg_ctype, env)
if arg.is_temp and i > 0:
some_args_in_temps = True
if some_args_in_temps:
# if some args are temps and others are not, they may get
# constructed in the wrong order (temps first) => make
# sure they are either all temps or all not temps (except
# for the last argument, which is evaluated last in any
# case)
for i in range(actual_nargs-1):
if i == 0 and self.self is not None:
continue # self is ok
arg = args[i]
if arg.nonlocally_immutable():
# locals, C functions, unassignable types are safe.
pass
elif arg.type.is_cpp_class:
# Assignment has side effects, avoid.
pass
elif env.nogil and arg.type.is_pyobject:
# can't copy a Python reference into a temp in nogil
# env (this is safe: a construction would fail in
# nogil anyway)
pass
else:
#self.args[i] = arg.coerce_to_temp(env)
# instead: issue a warning
if i > 0 or i == 1 and self.self is not None: # skip first arg
warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
break
self.args[:] = args
# Calc result type and code fragment
if isinstance(self.function, NewExprNode):
self.type = PyrexTypes.CPtrType(self.function.class_type)
else:
self.type = func_type.return_type
if self.function.is_name or self.function.is_attribute:
func_entry = self.function.entry
if func_entry and (func_entry.utility_code or func_entry.utility_code_definition):
self.is_temp = 1 # currently doesn't work for self.calculate_result_code()
if self.type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
elif func_type.exception_value is not None or func_type.exception_check:
self.is_temp = 1
elif self.type.is_memoryviewslice:
self.is_temp = 1
# func_type.exception_check = True
if self.is_temp and self.type.is_reference:
self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
# Called in 'nogil' context?
self.nogil = env.nogil
if (self.nogil and
func_type.exception_check and
func_type.exception_check != '+'):
env.use_utility_code(pyerr_occurred_withgil_utility_code)
# C++ exception handler
if func_type.exception_check == '+':
if func_type.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
self.overflowcheck = env.directives['overflowcheck']
def calculate_result_code(self):
return self.c_call_code()
def c_call_code(self):
func_type = self.function_type()
if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
return "<error>"
formal_args = func_type.args
arg_list_code = []
args = list(zip(formal_args, self.args))
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(self.args)
for formal_arg, actual_arg in args[:expected_nargs]:
arg_code = actual_arg.result_as(formal_arg.type)
arg_list_code.append(arg_code)
if func_type.is_overridable:
arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
if func_type.optional_arg_count:
if expected_nargs == actual_nargs:
optional_args = 'NULL'
else:
optional_args = "&%s" % self.opt_arg_struct
arg_list_code.append(optional_args)
for actual_arg in self.args[len(formal_args):]:
arg_list_code.append(actual_arg.result())
result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
return result
def is_c_result_required(self):
func_type = self.function_type()
if not func_type.exception_value or func_type.exception_check == '+':
return False # skip allocation of unused result temp
return True
def generate_result_code(self, code):
func_type = self.function_type()
if self.function.is_name or self.function.is_attribute:
code.globalstate.use_entry_utility_code(self.function.entry)
if func_type.is_pyobject:
if func_type is not type_type and not self.arg_tuple.args and self.arg_tuple.is_literal:
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCallNoArg", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
self.result(),
self.function.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
else:
arg_code = self.arg_tuple.py_result()
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
self.result(),
self.function.py_result(),
arg_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif func_type.is_cfunction:
if self.has_optional_args:
actual_nargs = len(self.args)
expected_nargs = len(func_type.args) - func_type.optional_arg_count
self.opt_arg_struct = code.funcstate.allocate_temp(
func_type.op_arg_struct.base_type, manage_ref=True)
code.putln("%s.%s = %s;" % (
self.opt_arg_struct,
Naming.pyrex_prefix + "n",
len(self.args) - expected_nargs))
args = list(zip(func_type.args, self.args))
for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
code.putln("%s.%s = %s;" % (
self.opt_arg_struct,
func_type.opt_arg_cname(formal_arg.name),
actual_arg.result_as(formal_arg.type)))
exc_checks = []
if self.type.is_pyobject and self.is_temp:
exc_checks.append("!%s" % self.result())
elif self.type.is_memoryviewslice:
assert self.is_temp
exc_checks.append(self.type.error_condition(self.result()))
else:
exc_val = func_type.exception_value
exc_check = func_type.exception_check
if exc_val is not None:
exc_checks.append("%s == %s" % (self.result(), func_type.return_type.cast_code(exc_val)))
if exc_check:
if self.nogil:
exc_checks.append("__Pyx_ErrOccurredWithGIL()")
else:
exc_checks.append("PyErr_Occurred()")
if self.is_temp or exc_checks:
rhs = self.c_call_code()
if self.result():
lhs = "%s = " % self.result()
if self.is_temp and self.type.is_pyobject:
#return_type = self.type # func_type.return_type
#print "SimpleCallNode.generate_result_code: casting", rhs, \
# "from", return_type, "to pyobject" ###
rhs = typecast(py_object_type, self.type, rhs)
else:
lhs = ""
if func_type.exception_check == '+':
translate_cpp_exception(code, self.pos, '%s%s;' % (lhs, rhs),
func_type.exception_value, self.nogil)
else:
if (self.overflowcheck
and self.type.is_int
and self.type.signed
and self.function.result() in ('abs', 'labs', '__Pyx_abs_longlong')):
goto_error = 'if (unlikely(%s < 0)) { PyErr_SetString(PyExc_OverflowError, "value too large"); %s; }' % (self.result(), code.error_goto(self.pos))
elif exc_checks:
goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
else:
goto_error = ""
code.putln("%s%s; %s" % (lhs, rhs, goto_error))
if self.type.is_pyobject and self.result():
code.put_gotref(self.py_result())
if self.has_optional_args:
code.funcstate.release_temp(self.opt_arg_struct)
@classmethod
def from_node(cls, node, **kwargs):
ret = super(SimpleCallNode, cls).from_node(node, **kwargs)
ret.is_numpy_call_with_exprs = node.is_numpy_call_with_exprs
return ret
class PyMethodCallNode(SimpleCallNode):
# Specialised call to a (potential) PyMethodObject with non-constant argument tuple.
# Allows the self argument to be injected directly instead of repacking a tuple for it.
#
# function ExprNode the function/method object to call
# arg_tuple TupleNode the arguments for the args tuple
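    # Informal sketch of the runtime behaviour generated below: for
    # `obj.method(x)`, if the callable turns out to be a PyMethodObject, its
    # __self__ is unpacked and passed as the first positional argument to the
    # underlying function (preferring the fast-call paths), instead of packing
    # a fresh argument tuple for a bound method call.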
subexprs = ['function', 'arg_tuple']
is_temp = True
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.function.generate_evaluation_code(code)
assert self.arg_tuple.mult_factor is None
args = self.arg_tuple.args
for arg in args:
arg.generate_evaluation_code(code)
if self.is_numpy_call_with_exprs:
code.putln("// function evaluation code for numpy function")
code.putln("__Pyx_call_destructor(%s);" % self.result())
code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::%s{}(%s)};" % (
self.result(),
self.result(),
self.function.attribute,
", ".join(a.pythran_result() for a in self.arg_tuple.args)))
return
# make sure function is in temp so that we can replace the reference below if it's a method
reuse_function_temp = self.function.is_temp
if reuse_function_temp:
function = self.function.result()
else:
function = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.function.make_owned_reference(code)
code.put("%s = %s; " % (function, self.function.py_result()))
self.function.generate_disposal_code(code)
self.function.free_temps(code)
self_arg = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln("%s = NULL;" % self_arg)
arg_offset_cname = None
if len(args) > 1:
arg_offset_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = 0;" % arg_offset_cname)
def attribute_is_likely_method(attr):
obj = attr.obj
if obj.is_name and obj.entry.is_pyglobal:
return False # more likely to be a function
return True
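        # Informal example: `self.foo(...)` counts as a likely method call,
        # while `np.array(...)` (an attribute of a module-level global name)
        # is treated as more likely a plain function.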
if self.function.is_attribute:
likely_method = 'likely' if attribute_is_likely_method(self.function) else 'unlikely'
elif self.function.is_name and self.function.cf_state:
# not an attribute itself, but might have been assigned from one (e.g. bound method)
for assignment in self.function.cf_state:
value = assignment.rhs
if value and value.is_attribute and value.obj.type.is_pyobject:
if attribute_is_likely_method(value):
likely_method = 'likely'
break
else:
likely_method = 'unlikely'
else:
likely_method = 'unlikely'
code.putln("if (CYTHON_UNPACK_METHODS && %s(PyMethod_Check(%s))) {" % (likely_method, function))
code.putln("%s = PyMethod_GET_SELF(%s);" % (self_arg, function))
# the following is always true in Py3 (kept only for safety),
# but is false for unbound methods in Py2
code.putln("if (likely(%s)) {" % self_arg)
code.putln("PyObject* function = PyMethod_GET_FUNCTION(%s);" % function)
code.put_incref(self_arg, py_object_type)
code.put_incref("function", py_object_type)
        # free the method object as early as possible to enable reuse from CPython's freelist
code.put_decref_set(function, "function")
if len(args) > 1:
code.putln("%s = 1;" % arg_offset_cname)
code.putln("}")
code.putln("}")
if not args:
# fastest special case: try to avoid tuple creation
code.putln("if (%s) {" % self_arg)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
self.result(),
function, self_arg,
code.error_goto_if_null(self.result(), self.pos)))
code.put_decref_clear(self_arg, py_object_type)
code.funcstate.release_temp(self_arg)
code.putln("} else {")
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCallNoArg", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
self.result(),
function,
code.error_goto_if_null(self.result(), self.pos)))
code.putln("}")
code.put_gotref(self.py_result())
else:
if len(args) == 1:
code.putln("if (!%s) {" % self_arg)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c"))
arg = args[0]
code.putln(
"%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
self.result(),
function, arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
arg.generate_disposal_code(code)
code.put_gotref(self.py_result())
code.putln("} else {")
arg_offset = 1
else:
arg_offset = arg_offset_cname
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyFunctionFastCall", "ObjectHandling.c"))
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyCFunctionFastCall", "ObjectHandling.c"))
for test_func, call_prefix in [('PyFunction_Check', 'Py'), ('__Pyx_PyFastCFunction_Check', 'PyC')]:
code.putln("#if CYTHON_FAST_%sCALL" % call_prefix.upper())
code.putln("if (%s(%s)) {" % (test_func, function))
code.putln("PyObject *%s[%d] = {%s, %s};" % (
Naming.quick_temp_cname,
len(args)+1,
self_arg,
', '.join(arg.py_result() for arg in args)))
code.putln("%s = __Pyx_%sFunction_FastCall(%s, %s+1-%s, %d+%s); %s" % (
self.result(),
call_prefix,
function,
Naming.quick_temp_cname,
arg_offset,
len(args),
arg_offset,
code.error_goto_if_null(self.result(), self.pos)))
code.put_xdecref_clear(self_arg, py_object_type)
code.put_gotref(self.py_result())
for arg in args:
arg.generate_disposal_code(code)
code.putln("} else")
code.putln("#endif")
code.putln("{")
args_tuple = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln("%s = PyTuple_New(%d+%s); %s" % (
args_tuple, len(args), arg_offset,
code.error_goto_if_null(args_tuple, self.pos)))
code.put_gotref(args_tuple)
if len(args) > 1:
code.putln("if (%s) {" % self_arg)
code.putln("__Pyx_GIVEREF(%s); PyTuple_SET_ITEM(%s, 0, %s); %s = NULL;" % (
self_arg, args_tuple, self_arg, self_arg)) # stealing owned ref in this case
code.funcstate.release_temp(self_arg)
if len(args) > 1:
code.putln("}")
for i, arg in enumerate(args):
arg.make_owned_reference(code)
code.put_giveref(arg.py_result())
code.putln("PyTuple_SET_ITEM(%s, %d+%s, %s);" % (
args_tuple, i, arg_offset, arg.py_result()))
if len(args) > 1:
code.funcstate.release_temp(arg_offset_cname)
for arg in args:
arg.generate_post_assignment_code(code)
arg.free_temps(code)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
self.result(),
function, args_tuple,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
code.put_decref_clear(args_tuple, py_object_type)
code.funcstate.release_temp(args_tuple)
if len(args) == 1:
code.putln("}")
code.putln("}") # !CYTHON_FAST_PYCALL
if reuse_function_temp:
self.function.generate_disposal_code(code)
self.function.free_temps(code)
else:
code.put_decref_clear(function, py_object_type)
code.funcstate.release_temp(function)
class InlinedDefNodeCallNode(CallNode):
# Inline call to defnode
#
# function PyCFunctionNode
# function_name NameNode
# args [ExprNode]
subexprs = ['args', 'function_name']
is_temp = 1
type = py_object_type
function = None
function_name = None
def can_be_inlined(self):
        func_type = self.function.def_node
if func_type.star_arg or func_type.starstar_arg:
return False
if len(func_type.args) != len(self.args):
return False
if func_type.num_kwonly_args:
return False # actually wrong number of arguments
return True
def analyse_types(self, env):
self.function_name = self.function_name.analyse_types(env)
self.args = [ arg.analyse_types(env) for arg in self.args ]
func_type = self.function.def_node
actual_nargs = len(self.args)
# Coerce arguments
some_args_in_temps = False
for i in range(actual_nargs):
formal_type = func_type.args[i].type
arg = self.args[i].coerce_to(formal_type, env)
if arg.is_temp:
if i > 0:
# first argument in temp doesn't impact subsequent arguments
some_args_in_temps = True
elif arg.type.is_pyobject and not env.nogil:
if arg.nonlocally_immutable():
# plain local variables are ok
pass
else:
# we do not safely own the argument's reference,
# but we must make sure it cannot be collected
# before we return from the function, so we create
# an owned temp reference to it
if i > 0: # first argument doesn't matter
some_args_in_temps = True
arg = arg.coerce_to_temp(env)
self.args[i] = arg
if some_args_in_temps:
# if some args are temps and others are not, they may get
# constructed in the wrong order (temps first) => make
# sure they are either all temps or all not temps (except
# for the last argument, which is evaluated last in any
# case)
for i in range(actual_nargs-1):
arg = self.args[i]
if arg.nonlocally_immutable():
# locals, C functions, unassignable types are safe.
pass
elif arg.type.is_cpp_class:
# Assignment has side effects, avoid.
pass
elif env.nogil and arg.type.is_pyobject:
# can't copy a Python reference into a temp in nogil
# env (this is safe: a construction would fail in
# nogil anyway)
pass
else:
#self.args[i] = arg.coerce_to_temp(env)
# instead: issue a warning
if i > 0:
warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
break
return self
def generate_result_code(self, code):
arg_code = [self.function_name.py_result()]
func_type = self.function.def_node
for arg, proto_arg in zip(self.args, func_type.args):
if arg.type.is_pyobject:
arg_code.append(arg.result_as(proto_arg.type))
else:
arg_code.append(arg.result())
arg_code = ', '.join(arg_code)
code.putln(
"%s = %s(%s); %s" % (
self.result(),
self.function.def_node.entry.pyfunc_cname,
arg_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PythonCapiFunctionNode(ExprNode):
subexprs = []
def __init__(self, pos, py_name, cname, func_type, utility_code = None):
ExprNode.__init__(self, pos, name=py_name, cname=cname,
type=func_type, utility_code=utility_code)
def analyse_types(self, env):
return self
def generate_result_code(self, code):
if self.utility_code:
code.globalstate.use_utility_code(self.utility_code)
def calculate_result_code(self):
return self.cname
class PythonCapiCallNode(SimpleCallNode):
# Python C-API Function call (only created in transforms)
# By default, we assume that the call never returns None, as this
# is true for most C-API functions in CPython. If this does not
# apply to a call, set the following to True (or None to inherit
# the default behaviour).
may_return_none = False
def __init__(self, pos, function_name, func_type,
utility_code = None, py_name=None, **kwargs):
self.type = func_type.return_type
self.result_ctype = self.type
self.function = PythonCapiFunctionNode(
pos, py_name, function_name, func_type,
utility_code = utility_code)
# call this last so that we can override the constructed
# attributes above with explicit keyword arguments if required
SimpleCallNode.__init__(self, pos, **kwargs)
class GeneralCallNode(CallNode):
# General Python function call, including keyword,
# * and ** arguments.
#
# function ExprNode
# positional_args ExprNode Tuple of positional arguments
# keyword_args ExprNode or None Dict of keyword arguments
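    # Informal example: `f(a, *rest, x=1, **extra)` is a GeneralCallNode with
    # positional_args holding the positional part and keyword_args the merged
    # keyword dict; simple keyword calls to C functions may still be rewritten
    # into a SimpleCallNode via map_to_simple_call_node() below.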
type = py_object_type
subexprs = ['function', 'positional_args', 'keyword_args']
nogil_check = Node.gil_error
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
positional_args = self.positional_args.compile_time_value(denv)
keyword_args = self.keyword_args.compile_time_value(denv)
try:
return function(*positional_args, **keyword_args)
except Exception as e:
self.compile_time_value_error(e)
def explicit_args_kwds(self):
if (self.keyword_args and not self.keyword_args.is_dict_literal or
not self.positional_args.is_sequence_constructor):
raise CompileError(self.pos,
'Compile-time keyword arguments must be explicit.')
return self.positional_args.args, self.keyword_args
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
self.function = self.function.analyse_types(env)
if not self.function.type.is_pyobject:
if self.function.type.is_error:
self.type = error_type
return self
if hasattr(self.function, 'entry'):
node = self.map_to_simple_call_node()
if node is not None and node is not self:
return node.analyse_types(env)
elif self.function.entry.as_variable:
self.function = self.function.coerce_to_pyobject(env)
elif node is self:
error(self.pos,
"Non-trivial keyword arguments and starred "
"arguments not allowed in cdef functions.")
else:
# error was already reported
pass
else:
self.function = self.function.coerce_to_pyobject(env)
if self.keyword_args:
self.keyword_args = self.keyword_args.analyse_types(env)
self.positional_args = self.positional_args.analyse_types(env)
self.positional_args = \
self.positional_args.coerce_to_pyobject(env)
function = self.function
if function.is_name and function.type_entry:
# We are calling an extension type constructor. As long
# as we do not support __new__(), the result type is clear
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
else:
self.type = py_object_type
self.is_temp = 1
return self
def map_to_simple_call_node(self):
"""
Tries to map keyword arguments to declared positional arguments.
Returns self to try a Python call, None to report an error
or a SimpleCallNode if the mapping succeeds.
"""
if not isinstance(self.positional_args, TupleNode):
# has starred argument
return self
if not self.keyword_args.is_dict_literal:
# keywords come from arbitrary expression => nothing to do here
return self
function = self.function
entry = getattr(function, 'entry', None)
if not entry:
return self
function_type = entry.type
if function_type.is_ptr:
function_type = function_type.base_type
if not function_type.is_cfunction:
return self
pos_args = self.positional_args.args
kwargs = self.keyword_args
declared_args = function_type.args
if entry.is_cmethod:
declared_args = declared_args[1:] # skip 'self'
if len(pos_args) > len(declared_args):
error(self.pos, "function call got too many positional arguments, "
"expected %d, got %s" % (len(declared_args),
len(pos_args)))
return None
matched_args = set([ arg.name for arg in declared_args[:len(pos_args)]
if arg.name ])
unmatched_args = declared_args[len(pos_args):]
matched_kwargs_count = 0
args = list(pos_args)
# check for duplicate keywords
seen = set(matched_args)
has_errors = False
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name in seen:
error(arg.pos, "argument '%s' passed twice" % name)
has_errors = True
# continue to report more errors if there are any
seen.add(name)
# match keywords that are passed in order
for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
name = arg.key.value
if decl_arg.name == name:
matched_args.add(name)
matched_kwargs_count += 1
args.append(arg.value)
else:
break
# match keyword arguments that are passed out-of-order, but keep
# the evaluation of non-simple arguments in order by moving them
# into temps
from .UtilNodes import EvalWithTempExprNode, LetRefNode
temps = []
if len(kwargs.key_value_pairs) > matched_kwargs_count:
unmatched_args = declared_args[len(args):]
keywords = dict([ (arg.key.value, (i+len(pos_args), arg))
for i, arg in enumerate(kwargs.key_value_pairs) ])
first_missing_keyword = None
for decl_arg in unmatched_args:
name = decl_arg.name
if name not in keywords:
# missing keyword argument => either done or error
if not first_missing_keyword:
first_missing_keyword = name
continue
elif first_missing_keyword:
if entry.as_variable:
# we might be able to convert the function to a Python
# object, which then allows full calling semantics
# with default values in gaps - currently, we only
# support optional arguments at the end
return self
# wasn't the last keyword => gaps are not supported
error(self.pos, "C function call is missing "
"argument '%s'" % first_missing_keyword)
return None
pos, arg = keywords[name]
matched_args.add(name)
matched_kwargs_count += 1
if arg.value.is_simple():
args.append(arg.value)
else:
temp = LetRefNode(arg.value)
assert temp.is_simple()
args.append(temp)
temps.append((pos, temp))
if temps:
# may have to move preceding non-simple args into temps
final_args = []
new_temps = []
first_temp_arg = temps[0][-1]
for arg_value in args:
if arg_value is first_temp_arg:
break # done
if arg_value.is_simple():
final_args.append(arg_value)
else:
temp = LetRefNode(arg_value)
new_temps.append(temp)
final_args.append(temp)
if new_temps:
args = final_args
temps = new_temps + [ arg for i,arg in sorted(temps) ]
# check for unexpected keywords
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name not in matched_args:
has_errors = True
error(arg.pos,
"C function got unexpected keyword argument '%s'" %
name)
if has_errors:
# error was reported already
return None
# all keywords mapped to positional arguments
# if we are missing arguments, SimpleCallNode will figure it out
node = SimpleCallNode(self.pos, function=function, args=args)
for temp in temps[::-1]:
node = EvalWithTempExprNode(temp, node)
return node
def generate_result_code(self, code):
if self.type.is_error: return
if self.keyword_args:
kwargs = self.keyword_args.py_result()
else:
kwargs = 'NULL'
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
self.result(),
self.function.py_result(),
self.positional_args.py_result(),
kwargs,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class AsTupleNode(ExprNode):
# Convert argument to tuple. Used for normalising
# the * argument of a function call.
#
# arg ExprNode
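    # Informal example: the starred argument in `f(*args)` is wrapped in an
    # AsTupleNode so that it is materialised as a tuple (via
    # __Pyx_PySequence_Tuple / PySequence_Tuple in generate_result_code below).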
subexprs = ['arg']
is_temp = 1
def calculate_constant_result(self):
self.constant_result = tuple(self.arg.constant_result)
def compile_time_value(self, denv):
arg = self.arg.compile_time_value(denv)
try:
return tuple(arg)
except Exception as e:
self.compile_time_value_error(e)
def analyse_types(self, env):
self.arg = self.arg.analyse_types(env).coerce_to_pyobject(env)
if self.arg.type is tuple_type:
return self.arg.as_none_safe_node("'NoneType' object is not iterable")
self.type = tuple_type
return self
def may_be_none(self):
return False
nogil_check = Node.gil_error
gil_message = "Constructing Python tuple"
def generate_result_code(self, code):
cfunc = "__Pyx_PySequence_Tuple" if self.arg.type in (py_object_type, tuple_type) else "PySequence_Tuple"
code.putln(
"%s = %s(%s); %s" % (
self.result(),
cfunc, self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class MergedDictNode(ExprNode):
# Helper class for keyword arguments and other merged dicts.
#
# keyword_args [DictNode or other ExprNode]
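    # Informal example: a call such as `f(**a, **b)` merges `a` and `b` into a
    # single dict; with reject_duplicates set, a key occurring in both triggers
    # the "duplicate keyword argument" handling below.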
subexprs = ['keyword_args']
is_temp = 1
type = dict_type
reject_duplicates = True
def calculate_constant_result(self):
result = {}
reject_duplicates = self.reject_duplicates
for item in self.keyword_args:
if item.is_dict_literal:
# process items in order
items = ((key.constant_result, value.constant_result)
for key, value in item.key_value_pairs)
else:
items = item.constant_result.iteritems()
for key, value in items:
if reject_duplicates and key in result:
raise ValueError("duplicate keyword argument found: %s" % key)
result[key] = value
self.constant_result = result
def compile_time_value(self, denv):
result = {}
reject_duplicates = self.reject_duplicates
for item in self.keyword_args:
if item.is_dict_literal:
# process items in order
items = [(key.compile_time_value(denv), value.compile_time_value(denv))
for key, value in item.key_value_pairs]
else:
items = item.compile_time_value(denv).iteritems()
try:
for key, value in items:
if reject_duplicates and key in result:
raise ValueError("duplicate keyword argument found: %s" % key)
result[key] = value
except Exception as e:
self.compile_time_value_error(e)
return result
def type_dependencies(self, env):
return ()
def infer_type(self, env):
return dict_type
def analyse_types(self, env):
args = [
arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
# FIXME: CPython's error message starts with the runtime function name
'argument after ** must be a mapping, not NoneType')
for arg in self.keyword_args
]
if len(args) == 1 and args[0].type is dict_type:
# strip this intermediate node and use the bare dict
arg = args[0]
if arg.is_name and arg.entry.is_arg and len(arg.entry.cf_assignments) == 1:
# passing **kwargs through to function call => allow NULL
arg.allow_null = True
return arg
self.keyword_args = args
return self
def may_be_none(self):
return False
gil_message = "Constructing Python dict"
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
args = iter(self.keyword_args)
item = next(args)
item.generate_evaluation_code(code)
if item.type is not dict_type:
# CPython supports calling functions with non-dicts, so do we
code.putln('if (likely(PyDict_CheckExact(%s))) {' %
item.py_result())
if item.is_dict_literal:
item.make_owned_reference(code)
code.putln("%s = %s;" % (self.result(), item.py_result()))
item.generate_post_assignment_code(code)
else:
code.putln("%s = PyDict_Copy(%s); %s" % (
self.result(),
item.py_result(),
code.error_goto_if_null(self.result(), item.pos)))
code.put_gotref(self.result())
item.generate_disposal_code(code)
if item.type is not dict_type:
code.putln('} else {')
code.putln("%s = PyObject_CallFunctionObjArgs((PyObject*)&PyDict_Type, %s, NULL); %s" % (
self.result(),
item.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
item.generate_disposal_code(code)
code.putln('}')
item.free_temps(code)
helpers = set()
for item in args:
if item.is_dict_literal:
# inline update instead of creating an intermediate dict
for arg in item.key_value_pairs:
arg.generate_evaluation_code(code)
if self.reject_duplicates:
code.putln("if (unlikely(PyDict_Contains(%s, %s))) {" % (
self.result(),
arg.key.py_result()))
helpers.add("RaiseDoubleKeywords")
# FIXME: find out function name at runtime!
code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
arg.key.py_result(),
code.error_goto(self.pos)))
code.putln("}")
code.put_error_if_neg(arg.key.pos, "PyDict_SetItem(%s, %s, %s)" % (
self.result(),
arg.key.py_result(),
arg.value.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
else:
item.generate_evaluation_code(code)
if self.reject_duplicates:
# merge mapping into kwdict one by one as we need to check for duplicates
helpers.add("MergeKeywords")
code.put_error_if_neg(item.pos, "__Pyx_MergeKeywords(%s, %s)" % (
self.result(), item.py_result()))
else:
# simple case, just add all entries
helpers.add("RaiseMappingExpected")
code.putln("if (unlikely(PyDict_Update(%s, %s) < 0)) {" % (
self.result(), item.py_result()))
code.putln("if (PyErr_ExceptionMatches(PyExc_AttributeError)) "
"__Pyx_RaiseMappingExpectedError(%s);" % item.py_result())
code.putln(code.error_goto(item.pos))
code.putln("}")
item.generate_disposal_code(code)
item.free_temps(code)
for helper in sorted(helpers):
code.globalstate.use_utility_code(UtilityCode.load_cached(helper, "FunctionArguments.c"))
def annotate(self, code):
for item in self.keyword_args:
item.annotate(code)
class AttributeNode(ExprNode):
# obj.attribute
#
# obj ExprNode
# attribute string
# needs_none_check boolean Used if obj is an extension type.
# If set to True, it is known that the type is not None.
#
# Used internally:
#
# is_py_attr boolean Is a Python getattr operation
# member string C name of struct member
# is_called boolean Function call is being done on result
# entry Entry Symbol table entry of attribute
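    # Informal examples: `obj.attr` on an extension type instance may become a
    # direct C struct member access, `mod.name` on a cimported module resolves
    # to a NameNode for the imported entry, and anything else falls back to a
    # Python-level attribute lookup (is_py_attr).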
is_attribute = 1
subexprs = ['obj']
type = PyrexTypes.error_type
entry = None
is_called = 0
needs_none_check = True
is_memslice_transpose = False
is_special_lookup = False
is_py_attr = 0
def as_cython_attribute(self):
        if (isinstance(self.obj, NameNode) and
                self.obj.is_cython_module and
                self.attribute != u"parallel"):
return self.attribute
cy = self.obj.as_cython_attribute()
if cy:
return "%s.%s" % (cy, self.attribute)
return None
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a cpdef function
# we can create the corresponding attribute
if dst_type is py_object_type:
entry = self.entry
if entry and entry.is_cfunction and entry.as_variable:
# must be a cpdef function
self.is_temp = 1
self.entry = entry.as_variable
self.analyse_as_python_attribute(env)
return self
return ExprNode.coerce_to(self, dst_type, env)
def calculate_constant_result(self):
attr = self.attribute
if attr.startswith("__") and attr.endswith("__"):
return
self.constant_result = getattr(self.obj.constant_result, attr)
def compile_time_value(self, denv):
attr = self.attribute
if attr.startswith("__") and attr.endswith("__"):
error(self.pos,
"Invalid attribute name '%s' in compile-time expression" % attr)
return None
obj = self.obj.compile_time_value(denv)
try:
return getattr(obj, attr)
except Exception as e:
self.compile_time_value_error(e)
def type_dependencies(self, env):
return self.obj.type_dependencies(env)
def infer_type(self, env):
# FIXME: this is way too redundant with analyse_types()
node = self.analyse_as_cimported_attribute_node(env, target=False)
if node is not None:
return node.entry.type
node = self.analyse_as_type_attribute(env)
if node is not None:
return node.entry.type
obj_type = self.obj.infer_type(env)
self.analyse_attribute(env, obj_type=obj_type)
if obj_type.is_builtin_type and self.type.is_cfunction:
# special case: C-API replacements for C methods of
# builtin types cannot be inferred as C functions as
# that would prevent their use as bound methods
return py_object_type
elif self.entry and self.entry.is_cmethod:
# special case: bound methods should not be inferred
# as their unbound method types
return py_object_type
return self.type
def analyse_target_declaration(self, env):
pass
def analyse_target_types(self, env):
node = self.analyse_types(env, target = 1)
if node.type.is_const:
error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
if not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
return node
def analyse_types(self, env, target = 0):
self.initialized_check = env.directives['initializedcheck']
node = self.analyse_as_cimported_attribute_node(env, target)
if node is None and not target:
node = self.analyse_as_type_attribute(env)
if node is None:
node = self.analyse_as_ordinary_attribute_node(env, target)
assert node is not None
if node.entry:
node.entry.used = True
if node.is_attribute:
node.wrap_obj_in_nonecheck(env)
return node
def analyse_as_cimported_attribute_node(self, env, target):
        # Try to interpret this as a reference to an imported
        # C const, type, var or function. If successful, returns
        # the corresponding NameNode (or self after reporting an
        # error); otherwise returns None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and (
entry.is_cglobal or entry.is_cfunction
or entry.is_type or entry.is_const):
return self.as_name_node(env, entry, target)
if self.is_cimported_module_without_shadow(env):
error(self.pos, "cimported module has no attribute '%s'" % self.attribute)
return self
return None
def analyse_as_type_attribute(self, env):
# Try to interpret this as a reference to an unbound
# C method of an extension type or builtin type. If successful,
# creates a corresponding NameNode and returns it, otherwise
# returns None.
if self.obj.is_string_literal:
return
type = self.obj.analyse_as_type(env)
if type:
if type.is_extension_type or type.is_builtin_type or type.is_cpp_class:
entry = type.scope.lookup_here(self.attribute)
if entry and (entry.is_cmethod or type.is_cpp_class and entry.type.is_cfunction):
if type.is_builtin_type:
if not self.is_called:
# must handle this as Python object
return None
ubcm_entry = entry
else:
# Create a temporary entry describing the C method
# as an ordinary function.
if entry.func_cname and not hasattr(entry.type, 'op_arg_struct'):
cname = entry.func_cname
if entry.type.is_static_method or (
env.parent_scope and env.parent_scope.is_cpp_class_scope):
ctype = entry.type
elif type.is_cpp_class:
error(self.pos, "%s not a static member of %s" % (entry.name, type))
ctype = PyrexTypes.error_type
else:
# Fix self type.
ctype = copy.copy(entry.type)
ctype.args = ctype.args[:]
ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None)
else:
cname = "%s->%s" % (type.vtabptr_cname, entry.cname)
ctype = entry.type
ubcm_entry = Symtab.Entry(entry.name, cname, ctype)
ubcm_entry.is_cfunction = 1
ubcm_entry.func_cname = entry.func_cname
ubcm_entry.is_unbound_cmethod = 1
ubcm_entry.scope = entry.scope
return self.as_name_node(env, ubcm_entry, target=False)
elif type.is_enum:
if self.attribute in type.values:
for entry in type.entry.enum_values:
if entry.name == self.attribute:
return self.as_name_node(env, entry, target=False)
else:
error(self.pos, "%s not a known value of %s" % (self.attribute, type))
else:
error(self.pos, "%s not a known value of %s" % (self.attribute, type))
return None
def analyse_as_type(self, env):
module_scope = self.obj.analyse_as_module(env)
if module_scope:
return module_scope.lookup_type(self.attribute)
if not self.obj.is_string_literal:
base_type = self.obj.analyse_as_type(env)
if base_type and hasattr(base_type, 'scope') and base_type.scope is not None:
return base_type.scope.lookup_type(self.attribute)
return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type
# in a cimported module. Returns the extension type, or None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and entry.is_type:
if entry.type.is_extension_type or entry.type.is_builtin_type:
return entry.type
return None
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module
# in another cimported module. Returns the module scope, or None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and entry.as_module:
return entry.as_module
return None
def as_name_node(self, env, entry, target):
# Create a corresponding NameNode from this node and complete the
# analyse_types phase.
node = NameNode.from_node(self, name=self.attribute, entry=entry)
if target:
node = node.analyse_target_types(env)
else:
node = node.analyse_rvalue_entry(env)
node.entry.used = 1
return node
def analyse_as_ordinary_attribute_node(self, env, target):
self.obj = self.obj.analyse_types(env)
self.analyse_attribute(env)
if self.entry and self.entry.is_cmethod and not self.is_called:
# error(self.pos, "C method can only be called")
pass
## Reference to C array turns into pointer to first element.
#while self.type.is_array:
# self.type = self.type.element_ptr_type()
if self.is_py_attr:
if not target:
self.is_temp = 1
self.result_ctype = py_object_type
elif target and self.obj.type.is_builtin_type:
error(self.pos, "Assignment to an immutable object field")
#elif self.type.is_memoryviewslice and not target:
# self.is_temp = True
return self
def analyse_attribute(self, env, obj_type = None):
# Look up attribute and set self.type and self.member.
immutable_obj = obj_type is not None # used during type inference
self.is_py_attr = 0
self.member = self.attribute
if obj_type is None:
if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr:
self.obj = self.obj.coerce_to_pyobject(env)
obj_type = self.obj.type
else:
if obj_type.is_string or obj_type.is_pyunicode_ptr:
obj_type = py_object_type
if obj_type.is_ptr or obj_type.is_array:
obj_type = obj_type.base_type
self.op = "->"
elif obj_type.is_extension_type or obj_type.is_builtin_type:
self.op = "->"
elif obj_type.is_reference and obj_type.is_fake_reference:
self.op = "->"
else:
self.op = "."
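        # Added illustrative note: a pointer or extension-type base uses the C
        # arrow operator (e.g. `p->x` for `cdef Point* p`), whereas a plain
        # struct/union value uses `.` (e.g. `p.x` for `cdef Point p`).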
if obj_type.has_attributes:
if obj_type.attributes_known():
entry = obj_type.scope.lookup_here(self.attribute)
if obj_type.is_memoryviewslice and not entry:
if self.attribute == 'T':
self.is_memslice_transpose = True
self.is_temp = True
self.use_managed_ref = True
self.type = self.obj.type.transpose(self.pos)
return
else:
obj_type.declare_attribute(self.attribute, env, self.pos)
entry = obj_type.scope.lookup_here(self.attribute)
if entry and entry.is_member:
entry = None
else:
error(self.pos,
"Cannot select attribute of incomplete type '%s'"
% obj_type)
self.type = PyrexTypes.error_type
return
self.entry = entry
if entry:
if obj_type.is_extension_type and entry.name == "__weakref__":
error(self.pos, "Illegal use of special attribute __weakref__")
# def methods need the normal attribute lookup
# because they do not have struct entries
# fused function go through assignment synthesis
# (foo = pycfunction(foo_func_obj)) and need to go through
# regular Python lookup as well
if (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod:
self.type = entry.type
self.member = entry.cname
return
else:
# If it's not a variable or C method, it must be a Python
# method of an extension type, so we treat it like a Python
# attribute.
pass
# If we get here, the base object is not a struct/union/extension
# type, or it is an extension type and the attribute is either not
# declared or is declared as a Python method. Treat it as a Python
# attribute reference.
self.analyse_as_python_attribute(env, obj_type, immutable_obj)
def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False):
if obj_type is None:
obj_type = self.obj.type
# mangle private '__*' Python attributes used inside of a class
self.attribute = env.mangle_class_private_name(self.attribute)
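        # e.g. `self.__data` used inside `class Tree` is looked up as
        # `_Tree__data`, matching Python's standard private-name mangling
        # (added illustrative example).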
self.member = self.attribute
self.type = py_object_type
self.is_py_attr = 1
if not obj_type.is_pyobject and not obj_type.is_error:
# Expose python methods for immutable objects.
if (obj_type.is_string or obj_type.is_cpp_string
or obj_type.is_buffer or obj_type.is_memoryviewslice
or obj_type.is_numeric
or (obj_type.is_ctuple and obj_type.can_coerce_to_pyobject(env))
or (obj_type.is_struct and obj_type.can_coerce_to_pyobject(env))):
if not immutable_obj:
self.obj = self.obj.coerce_to_pyobject(env)
elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute)
and self.obj.entry.as_variable
and self.obj.entry.as_variable.type.is_pyobject):
# might be an optimised builtin function => unpack it
if not immutable_obj:
self.obj = self.obj.coerce_to_pyobject(env)
else:
error(self.pos,
"Object of type '%s' has no attribute '%s'" %
(obj_type, self.attribute))
def wrap_obj_in_nonecheck(self, env):
if not env.directives['nonecheck']:
return
msg = None
format_args = ()
if (self.obj.type.is_extension_type and self.needs_none_check and not
self.is_py_attr):
msg = "'NoneType' object has no attribute '%{0}s'".format('.30' if len(self.attribute) <= 30 else '')
format_args = (self.attribute,)
elif self.obj.type.is_memoryviewslice:
if self.is_memslice_transpose:
msg = "Cannot transpose None memoryview slice"
else:
entry = self.obj.type.scope.lookup_here(self.attribute)
if entry:
# copy/is_c_contig/shape/strides etc
msg = "Cannot access '%s' attribute of None memoryview slice"
format_args = (entry.name,)
if msg:
self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError',
format_args=format_args)
def nogil_check(self, env):
if self.is_py_attr:
self.gil_error()
gil_message = "Accessing Python attribute"
def is_cimported_module_without_shadow(self, env):
return self.obj.is_cimported_module_without_shadow(env)
def is_simple(self):
if self.obj:
return self.result_in_temp() or self.obj.is_simple()
else:
return NameNode.is_simple(self)
def is_lvalue(self):
if self.obj:
return True
else:
return NameNode.is_lvalue(self)
def is_ephemeral(self):
if self.obj:
return self.obj.is_ephemeral()
else:
return NameNode.is_ephemeral(self)
def calculate_result_code(self):
#print "AttributeNode.calculate_result_code:", self.member ###
#print "...obj node =", self.obj, "code", self.obj.result() ###
#print "...obj type", self.obj.type, "ctype", self.obj.ctype() ###
obj = self.obj
obj_code = obj.result_as(obj.type)
#print "...obj_code =", obj_code ###
if self.entry and self.entry.is_cmethod:
if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
if self.entry.final_func_cname:
return self.entry.final_func_cname
if self.type.from_fused:
# If the attribute was specialized through indexing, make
# sure to get the right fused name, as our entry was
# replaced by our parent index node
# (AnalyseExpressionsTransform)
self.member = self.entry.cname
return "((struct %s *)%s%s%s)->%s" % (
obj.type.vtabstruct_cname, obj_code, self.op,
obj.type.vtabslot_cname, self.member)
elif self.result_is_used:
return self.member
# Generating no code at all for unused access to optimised builtin
# methods fixes the problem that some optimisations only exist as
# macros, i.e. there is no function pointer to them, so we would
# generate invalid C code here.
return
elif obj.type.is_complex:
return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
else:
if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
# accessing a field of a builtin type, need to cast better than result_as() does
obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
return "%s%s%s" % (obj_code, self.op, self.member)
def generate_result_code(self, code):
if self.is_py_attr:
if self.is_special_lookup:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
lookup_func_name = '__Pyx_PyObject_LookupSpecial'
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
lookup_func_name = '__Pyx_PyObject_GetAttrStr'
code.putln(
'%s = %s(%s, %s); %s' % (
self.result(),
lookup_func_name,
self.obj.py_result(),
code.intern_identifier(self.attribute),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif self.type.is_memoryviewslice:
if self.is_memslice_transpose:
# transpose the slice
for access, packing in self.type.axes:
if access == 'ptr':
error(self.pos, "Transposing not supported for slices "
"with indirect dimensions")
return
code.putln("%s = %s;" % (self.result(), self.obj.result()))
code.put_incref_memoryviewslice(self.result(), have_gil=True)
T = "__pyx_memslice_transpose(&%s) == 0"
code.putln(code.error_goto_if(T % self.result(), self.pos))
elif self.initialized_check:
code.putln(
'if (unlikely(!%s.memview)) {'
'PyErr_SetString(PyExc_AttributeError,'
'"Memoryview is not initialized");'
'%s'
'}' % (self.result(), code.error_goto(self.pos)))
else:
# result_code contains what is needed, but we may need to insert
# a check and raise an exception
if self.obj.type and self.obj.type.is_extension_type:
pass
elif self.entry and self.entry.is_cmethod:
# C method implemented as function call with utility code
code.globalstate.use_entry_utility_code(self.entry)
def generate_disposal_code(self, code):
if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose:
# mirror condition for putting the memview incref here:
code.put_xdecref_memoryviewslice(
self.result(), have_gil=True)
code.putln("%s.memview = NULL;" % self.result())
code.putln("%s.data = NULL;" % self.result())
else:
ExprNode.generate_disposal_code(self, code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
self.obj.generate_evaluation_code(code)
if self.is_py_attr:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
code.put_error_if_neg(self.pos,
'__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % (
self.obj.py_result(),
code.intern_identifier(self.attribute),
rhs.py_result()))
rhs.generate_disposal_code(code)
rhs.free_temps(code)
elif self.obj.type.is_complex:
code.putln("__Pyx_SET_C%s(%s, %s);" % (
self.member.upper(),
self.obj.result_as(self.obj.type),
rhs.result_as(self.ctype())))
else:
select_code = self.result()
if self.type.is_pyobject and self.use_managed_ref:
rhs.make_owned_reference(code)
code.put_giveref(rhs.py_result())
code.put_gotref(select_code)
code.put_decref(select_code, self.ctype())
elif self.type.is_memoryviewslice:
from . import MemoryView
MemoryView.put_assign_to_memviewslice(
select_code, rhs, rhs.result(), self.type, code)
if not self.type.is_memoryviewslice:
code.putln(
"%s = %s;" % (
select_code,
rhs.result_as(self.ctype())))
#rhs.result()))
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
self.obj.generate_evaluation_code(code)
if self.is_py_attr or (self.entry.scope.is_property_scope
and u'__del__' in self.entry.scope.entries):
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
code.put_error_if_neg(self.pos,
'__Pyx_PyObject_DelAttrStr(%s, %s)' % (
self.obj.py_result(),
code.intern_identifier(self.attribute)))
else:
error(self.pos, "Cannot delete C attribute of extension type")
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
def annotate(self, code):
if self.is_py_attr:
style, text = 'py_attr', 'python attribute (%s)'
else:
style, text = 'c_attr', 'c attribute (%s)'
code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute)))
#-------------------------------------------------------------------
#
# Constructor nodes
#
#-------------------------------------------------------------------
class StarredUnpackingNode(ExprNode):
# A starred expression like "*a"
#
# This is only allowed in sequence assignment or construction such as
#
# a, *b = (1,2,3,4) => a = 1 ; b = [2,3,4]
#
# and will be special cased during type analysis (or generate an error
    # if it's found in unexpected places).
#
# target ExprNode
subexprs = ['target']
is_starred = 1
type = py_object_type
is_temp = 1
starred_expr_allowed_here = False
def __init__(self, pos, target):
ExprNode.__init__(self, pos, target=target)
def analyse_declarations(self, env):
if not self.starred_expr_allowed_here:
error(self.pos, "starred expression is not allowed here")
self.target.analyse_declarations(env)
def infer_type(self, env):
return self.target.infer_type(env)
def analyse_types(self, env):
if not self.starred_expr_allowed_here:
error(self.pos, "starred expression is not allowed here")
self.target = self.target.analyse_types(env)
self.type = self.target.type
return self
def analyse_target_declaration(self, env):
self.target.analyse_target_declaration(env)
def analyse_target_types(self, env):
self.target = self.target.analyse_target_types(env)
self.type = self.target.type
return self
def calculate_result_code(self):
return ""
def generate_result_code(self, code):
pass
class SequenceNode(ExprNode):
# Base class for list and tuple constructor nodes.
# Contains common code for performing sequence unpacking.
#
# args [ExprNode]
# unpacked_items [ExprNode] or None
# coerced_unpacked_items [ExprNode] or None
# mult_factor ExprNode the integer number of content repetitions ([1,2]*3)
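    # Added illustrative note: in `t = (x, y) * 3`, mult_factor is the node for
    # `3`; in an assignment target such as `a, b, *rest = seq`, unpacked_items /
    # coerced_unpacked_items drive the parallel-unpacking code paths below.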
subexprs = ['args', 'mult_factor']
is_sequence_constructor = 1
unpacked_items = None
mult_factor = None
slow = False # trade speed for code size (e.g. use PyTuple_Pack())
def compile_time_value_list(self, denv):
return [arg.compile_time_value(denv) for arg in self.args]
def replace_starred_target_node(self):
# replace a starred node in the targets by the contained expression
self.starred_assignment = False
args = []
for arg in self.args:
if arg.is_starred:
if self.starred_assignment:
error(arg.pos, "more than 1 starred expression in assignment")
self.starred_assignment = True
arg = arg.target
arg.is_starred = True
args.append(arg)
self.args = args
def analyse_target_declaration(self, env):
self.replace_starred_target_node()
for arg in self.args:
arg.analyse_target_declaration(env)
def analyse_types(self, env, skip_children=False):
for i, arg in enumerate(self.args):
if not skip_children:
arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
if self.mult_factor:
self.mult_factor = self.mult_factor.analyse_types(env)
if not self.mult_factor.type.is_int:
self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
self.is_temp = 1
# not setting self.type here, subtypes do this
return self
def coerce_to_ctuple(self, dst_type, env):
if self.type == dst_type:
return self
assert not self.mult_factor
if len(self.args) != dst_type.size:
error(self.pos, "trying to coerce sequence to ctuple of wrong length, expected %d, got %d" % (
dst_type.size, len(self.args)))
coerced_args = [arg.coerce_to(type, env) for arg, type in zip(self.args, dst_type.components)]
return TupleNode(self.pos, args=coerced_args, type=dst_type, is_temp=True)
def _create_merge_node_if_necessary(self, env):
self._flatten_starred_args()
if not any(arg.is_starred for arg in self.args):
return self
# convert into MergedSequenceNode by building partial sequences
args = []
values = []
for arg in self.args:
if arg.is_starred:
if values:
args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
values = []
args.append(arg.target)
else:
values.append(arg)
if values:
args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
node = MergedSequenceNode(self.pos, args, self.type)
if self.mult_factor:
node = binop_node(
self.pos, '*', node, self.mult_factor.coerce_to_pyobject(env),
inplace=True, type=self.type, is_temp=True)
return node
def _flatten_starred_args(self):
args = []
for arg in self.args:
if arg.is_starred and arg.target.is_sequence_constructor and not arg.target.mult_factor:
args.extend(arg.target.args)
else:
args.append(arg)
self.args[:] = args
def may_be_none(self):
return False
def analyse_target_types(self, env):
if self.mult_factor:
error(self.pos, "can't assign to multiplied sequence")
self.unpacked_items = []
self.coerced_unpacked_items = []
self.any_coerced_items = False
for i, arg in enumerate(self.args):
arg = self.args[i] = arg.analyse_target_types(env)
if arg.is_starred:
if not arg.type.assignable_from(list_type):
error(arg.pos,
"starred target must have Python object (list) type")
if arg.type is py_object_type:
arg.type = list_type
unpacked_item = PyTempNode(self.pos, env)
coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
if unpacked_item is not coerced_unpacked_item:
self.any_coerced_items = True
self.unpacked_items.append(unpacked_item)
self.coerced_unpacked_items.append(coerced_unpacked_item)
self.type = py_object_type
return self
def generate_result_code(self, code):
self.generate_operation_code(code)
def generate_sequence_packing_code(self, code, target=None, plain=False):
if target is None:
target = self.result()
size_factor = c_mult = ''
mult_factor = None
if self.mult_factor and not plain:
mult_factor = self.mult_factor
if mult_factor.type.is_int:
c_mult = mult_factor.result()
if (isinstance(mult_factor.constant_result, _py_int_types) and
mult_factor.constant_result > 0):
size_factor = ' * %s' % mult_factor.constant_result
elif mult_factor.type.signed:
size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult)
else:
size_factor = ' * (%s)' % (c_mult,)
if self.type is tuple_type and (self.is_literal or self.slow) and not c_mult:
# use PyTuple_Pack() to avoid generating huge amounts of one-time code
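            # (Added sketch: a literal like ("a", "b") then compiles to roughly
            #  `target = PyTuple_Pack(2, <a>, <b>);` with a single error check.)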
code.putln('%s = PyTuple_Pack(%d, %s); %s' % (
target,
len(self.args),
', '.join(arg.py_result() for arg in self.args),
code.error_goto_if_null(target, self.pos)))
code.put_gotref(target)
elif self.type.is_ctuple:
for i, arg in enumerate(self.args):
code.putln("%s.f%s = %s;" % (
target, i, arg.result()))
else:
# build the tuple/list step by step, potentially multiplying it as we go
if self.type is list_type:
create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM'
elif self.type is tuple_type:
create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM'
else:
raise InternalError("sequence packing for unexpected type %s" % self.type)
arg_count = len(self.args)
code.putln("%s = %s(%s%s); %s" % (
target, create_func, arg_count, size_factor,
code.error_goto_if_null(target, self.pos)))
code.put_gotref(target)
if c_mult:
# FIXME: can't use a temp variable here as the code may
# end up in the constant building function. Temps
# currently don't work there.
#counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False)
counter = Naming.quick_temp_cname
code.putln('{ Py_ssize_t %s;' % counter)
if arg_count == 1:
offset = counter
else:
offset = '%s * %s' % (counter, arg_count)
code.putln('for (%s=0; %s < %s; %s++) {' % (
counter, counter, c_mult, counter
))
else:
offset = ''
for i in range(arg_count):
arg = self.args[i]
if c_mult or not arg.result_in_temp():
code.put_incref(arg.result(), arg.ctype())
code.put_giveref(arg.py_result())
code.putln("%s(%s, %s, %s);" % (
set_item_func,
target,
(offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
arg.py_result()))
if c_mult:
code.putln('}')
#code.funcstate.release_temp(counter)
code.putln('}')
if mult_factor is not None and mult_factor.type.is_pyobject:
code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
Naming.quick_temp_cname, target, mult_factor.py_result(),
code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
))
code.put_gotref(Naming.quick_temp_cname)
code.put_decref(target, py_object_type)
code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
code.putln('}')
def generate_subexpr_disposal_code(self, code):
if self.mult_factor and self.mult_factor.type.is_int:
super(SequenceNode, self).generate_subexpr_disposal_code(code)
elif self.type is tuple_type and (self.is_literal or self.slow):
super(SequenceNode, self).generate_subexpr_disposal_code(code)
else:
# We call generate_post_assignment_code here instead
# of generate_disposal_code, because values were stored
# in the tuple using a reference-stealing operation.
for arg in self.args:
arg.generate_post_assignment_code(code)
# Should NOT call free_temps -- this is invoked by the default
# generate_evaluation_code which will do that.
if self.mult_factor:
self.mult_factor.generate_disposal_code(code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
if self.starred_assignment:
self.generate_starred_assignment_code(rhs, code)
else:
self.generate_parallel_assignment_code(rhs, code)
for item in self.unpacked_items:
item.release(code)
rhs.free_temps(code)
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
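    # Added note: this mirrors the C-level iternext slot signature, roughly
    # `PyObject *(*)(PyObject *)`, so that `Py_TYPE(it)->tp_iternext` can be
    # cached in a temp and called directly in the unpacking loop below.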
def generate_parallel_assignment_code(self, rhs, code):
# Need to work around the fact that generate_evaluation_code
# allocates the temps in a rather hacky way -- the assignment
# is evaluated twice, within each if-block.
for item in self.unpacked_items:
item.allocate(code)
special_unpack = (rhs.type is py_object_type
or rhs.type in (tuple_type, list_type)
or not rhs.type.is_builtin_type)
long_enough_for_a_loop = len(self.unpacked_items) > 3
if special_unpack:
self.generate_special_parallel_unpacking_code(
code, rhs, use_loop=long_enough_for_a_loop)
else:
code.putln("{")
self.generate_generic_parallel_unpacking_code(
code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
code.putln("}")
for value_node in self.coerced_unpacked_items:
value_node.generate_evaluation_code(code)
for i in range(len(self.args)):
self.args[i].generate_assignment_code(
self.coerced_unpacked_items[i], code)
def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
sequence_type_test = '1'
none_check = "likely(%s != Py_None)" % rhs.py_result()
if rhs.type is list_type:
sequence_types = ['List']
if rhs.may_be_none():
sequence_type_test = none_check
elif rhs.type is tuple_type:
sequence_types = ['Tuple']
if rhs.may_be_none():
sequence_type_test = none_check
else:
sequence_types = ['Tuple', 'List']
tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)
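        # Added illustrative note: for an untyped RHS the generated guard is
        # roughly `if ((likely(PyTuple_CheckExact(rhs))) || (PyList_CheckExact(rhs)))`,
        # with the generic iterator fallback emitted in the else branch below.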
code.putln("if (%s) {" % sequence_type_test)
code.putln("PyObject* sequence = %s;" % rhs.py_result())
# list/tuple => check size
code.putln("#if !CYTHON_COMPILING_IN_PYPY")
code.putln("Py_ssize_t size = Py_SIZE(sequence);")
code.putln("#else")
code.putln("Py_ssize_t size = PySequence_Size(sequence);") # < 0 => exception
code.putln("#endif")
code.putln("if (unlikely(size != %d)) {" % len(self.args))
code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
len(self.args), len(self.args)))
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
code.putln(code.error_goto(self.pos))
code.putln("}")
code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS")
# unpack items from list/tuple in unrolled loop (can't fail)
if len(sequence_types) == 2:
code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
for i, item in enumerate(self.unpacked_items):
code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
item.result(), sequence_types[0], i))
if len(sequence_types) == 2:
code.putln("} else {")
for i, item in enumerate(self.unpacked_items):
code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
item.result(), sequence_types[1], i))
code.putln("}")
for item in self.unpacked_items:
code.put_incref(item.result(), item.ctype())
code.putln("#else")
# in non-CPython, use the PySequence protocol (which can fail)
if not use_loop:
for i, item in enumerate(self.unpacked_items):
code.putln("%s = PySequence_ITEM(sequence, %d); %s" % (
item.result(), i,
code.error_goto_if_null(item.result(), self.pos)))
code.put_gotref(item.result())
else:
code.putln("{")
code.putln("Py_ssize_t i;")
code.putln("PyObject** temps[%s] = {%s};" % (
len(self.unpacked_items),
','.join(['&%s' % item.result() for item in self.unpacked_items])))
code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items))
code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % (
code.error_goto_if_null('item', self.pos)))
code.put_gotref('item')
code.putln("*(temps[i]) = item;")
code.putln("}")
code.putln("}")
code.putln("#endif")
rhs.generate_disposal_code(code)
if sequence_type_test == '1':
code.putln("}") # all done
elif sequence_type_test == none_check:
# either tuple/list or None => save some code by generating the error directly
code.putln("} else {")
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c"))
code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos))
code.putln("}") # all done
else:
code.putln("} else {") # needs iteration fallback code
self.generate_generic_parallel_unpacking_code(
code, rhs, self.unpacked_items, use_loop=use_loop)
code.putln("}")
def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
code.putln("Py_ssize_t index = -1;") # must be at the start of a C block!
if use_loop:
code.putln("PyObject** temps[%s] = {%s};" % (
len(self.unpacked_items),
','.join(['&%s' % item.result() for item in unpacked_items])))
iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln(
"%s = PyObject_GetIter(%s); %s" % (
iterator_temp,
rhs.py_result(),
code.error_goto_if_null(iterator_temp, self.pos)))
code.put_gotref(iterator_temp)
rhs.generate_disposal_code(code)
iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (
iternext_func, iterator_temp))
unpacking_error_label = code.new_label('unpacking_failed')
unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
if use_loop:
code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
code.put_goto(unpacking_error_label)
code.put_gotref("item")
code.putln("*(temps[index]) = item;")
code.putln("}")
else:
for i, item in enumerate(unpacked_items):
code.put(
"index = %d; %s = %s; if (unlikely(!%s)) " % (
i,
item.result(),
unpack_code,
item.result()))
code.put_goto(unpacking_error_label)
code.put_gotref(item.py_result())
if terminate:
code.globalstate.use_utility_code(
UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
unpack_code,
len(unpacked_items)))
code.putln("%s = NULL;" % iternext_func)
code.put_decref_clear(iterator_temp, py_object_type)
unpacking_done_label = code.new_label('unpacking_done')
code.put_goto(unpacking_done_label)
code.put_label(unpacking_error_label)
code.put_decref_clear(iterator_temp, py_object_type)
code.putln("%s = NULL;" % iternext_func)
code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
code.putln(code.error_goto(self.pos))
code.put_label(unpacking_done_label)
code.funcstate.release_temp(iternext_func)
if terminate:
code.funcstate.release_temp(iterator_temp)
iterator_temp = None
return iterator_temp
def generate_starred_assignment_code(self, rhs, code):
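        # Added sketch of the phases for e.g. `a, *middle, z = rhs`: the fixed
        # targets before the star are unpacked from an iterator, the remainder is
        # materialised via PySequence_List(), and the fixed targets after the star
        # are then taken off the end of that list.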
for i, arg in enumerate(self.args):
if arg.is_starred:
starred_target = self.unpacked_items[i]
unpacked_fixed_items_left = self.unpacked_items[:i]
unpacked_fixed_items_right = self.unpacked_items[i+1:]
break
else:
assert False
iterator_temp = None
if unpacked_fixed_items_left:
for item in unpacked_fixed_items_left:
item.allocate(code)
code.putln('{')
iterator_temp = self.generate_generic_parallel_unpacking_code(
code, rhs, unpacked_fixed_items_left,
use_loop=True, terminate=False)
for i, item in enumerate(unpacked_fixed_items_left):
value_node = self.coerced_unpacked_items[i]
value_node.generate_evaluation_code(code)
code.putln('}')
starred_target.allocate(code)
target_list = starred_target.result()
code.putln("%s = PySequence_List(%s); %s" % (
target_list,
iterator_temp or rhs.py_result(),
code.error_goto_if_null(target_list, self.pos)))
code.put_gotref(target_list)
if iterator_temp:
code.put_decref_clear(iterator_temp, py_object_type)
code.funcstate.release_temp(iterator_temp)
else:
rhs.generate_disposal_code(code)
if unpacked_fixed_items_right:
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
len(unpacked_fixed_items_left), length_temp,
code.error_goto(self.pos)))
code.putln('}')
for item in unpacked_fixed_items_right[::-1]:
item.allocate(code)
for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
self.coerced_unpacked_items[::-1])):
code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
item.py_result(), target_list, length_temp, i+1))
# resize the list the hard way
code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
code.putln('#else')
code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
item.py_result(), target_list, length_temp, i+1))
code.putln('#endif')
code.put_gotref(item.py_result())
coerced_arg.generate_evaluation_code(code)
code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
code.error_goto_if_null(sublist_temp, self.pos)))
code.put_gotref(sublist_temp)
code.funcstate.release_temp(length_temp)
code.put_decref(target_list, py_object_type)
code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
code.putln('#else')
code.putln('%s = %s;' % (sublist_temp, sublist_temp)) # avoid warning about unused variable
code.funcstate.release_temp(sublist_temp)
code.putln('#endif')
for i, arg in enumerate(self.args):
arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
if self.unpacked_items:
for arg in self.unpacked_items:
arg.annotate(code)
for arg in self.coerced_unpacked_items:
arg.annotate(code)
class TupleNode(SequenceNode):
# Tuple constructor.
type = tuple_type
is_partly_literal = False
gil_message = "Constructing Python tuple"
def infer_type(self, env):
if self.mult_factor or not self.args:
return tuple_type
arg_types = [arg.infer_type(env) for arg in self.args]
if any(type.is_pyobject or type.is_unspecified or type.is_fused for type in arg_types):
return tuple_type
else:
return env.declare_tuple_type(self.pos, arg_types).type
def analyse_types(self, env, skip_children=False):
if len(self.args) == 0:
self.is_temp = False
self.is_literal = True
return self
if not skip_children:
for i, arg in enumerate(self.args):
if arg.is_starred:
arg.starred_expr_allowed_here = True
self.args[i] = arg.analyse_types(env)
if (not self.mult_factor and
not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_fused) for arg in self.args)):
self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type
self.is_temp = 1
return self
node = SequenceNode.analyse_types(self, env, skip_children=True)
node = node._create_merge_node_if_necessary(env)
if not node.is_sequence_constructor:
return node
if not all(child.is_literal for child in node.args):
return node
if not node.mult_factor or (
node.mult_factor.is_literal and
isinstance(node.mult_factor.constant_result, _py_int_types)):
node.is_temp = False
node.is_literal = True
else:
if not node.mult_factor.type.is_pyobject:
node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
node.is_temp = True
node.is_partly_literal = True
return node
def analyse_as_type(self, env):
# ctuple type
if not self.args:
return None
item_types = [arg.analyse_as_type(env) for arg in self.args]
if any(t is None for t in item_types):
return None
entry = env.declare_tuple_type(self.pos, item_types)
return entry.type
def coerce_to(self, dst_type, env):
if self.type.is_ctuple:
if dst_type.is_ctuple and self.type.size == dst_type.size:
return self.coerce_to_ctuple(dst_type, env)
elif dst_type is tuple_type or dst_type is py_object_type:
coerced_args = [arg.coerce_to_pyobject(env) for arg in self.args]
return TupleNode(self.pos, args=coerced_args, type=tuple_type, is_temp=1).analyse_types(env, skip_children=True)
else:
return self.coerce_to_pyobject(env).coerce_to(dst_type, env)
elif dst_type.is_ctuple and not self.mult_factor:
return self.coerce_to_ctuple(dst_type, env)
else:
return SequenceNode.coerce_to(self, dst_type, env)
def as_list(self):
t = ListNode(self.pos, args=self.args, mult_factor=self.mult_factor)
if isinstance(self.constant_result, tuple):
t.constant_result = list(self.constant_result)
return t
def is_simple(self):
# either temp or constant => always simple
return True
def nonlocally_immutable(self):
# either temp or constant => always safe
return True
def calculate_result_code(self):
if len(self.args) > 0:
return self.result_code
else:
return Naming.empty_tuple
def calculate_constant_result(self):
self.constant_result = tuple([
arg.constant_result for arg in self.args])
def compile_time_value(self, denv):
values = self.compile_time_value_list(denv)
try:
return tuple(values)
except Exception as e:
self.compile_time_value_error(e)
def generate_operation_code(self, code):
if len(self.args) == 0:
# result_code is Naming.empty_tuple
return
if self.is_partly_literal:
# underlying tuple is const, but factor is not
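            # (Added illustrative example: for `("-",) * width` the one-element
            #  tuple is emitted once as a cached constant and multiplied by the
            #  runtime factor right below.)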
tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
const_code = code.get_cached_constants_writer()
const_code.mark_pos(self.pos)
self.generate_sequence_packing_code(const_code, tuple_target, plain=True)
const_code.put_giveref(tuple_target)
code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
self.result(), tuple_target, self.mult_factor.py_result(),
code.error_goto_if_null(self.result(), self.pos)
))
code.put_gotref(self.py_result())
elif self.is_literal:
# non-empty cached tuple => result is global constant,
# creation code goes into separate code writer
self.result_code = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
self.generate_sequence_packing_code(code)
code.put_giveref(self.py_result())
else:
self.type.entry.used = True
self.generate_sequence_packing_code(code)
class ListNode(SequenceNode):
# List constructor.
# obj_conversion_errors [PyrexError] used internally
    # original_args [ExprNode] used internally
obj_conversion_errors = []
type = list_type
in_module_scope = False
gil_message = "Constructing Python list"
def type_dependencies(self, env):
return ()
def infer_type(self, env):
        # TODO: Infer non-object list arrays.
return list_type
def analyse_expressions(self, env):
for arg in self.args:
if arg.is_starred:
arg.starred_expr_allowed_here = True
node = SequenceNode.analyse_expressions(self, env)
return node.coerce_to_pyobject(env)
def analyse_types(self, env):
with local_errors(ignore=True) as errors:
self.original_args = list(self.args)
node = SequenceNode.analyse_types(self, env)
node.obj_conversion_errors = errors
if env.is_module_scope:
self.in_module_scope = True
node = node._create_merge_node_if_necessary(env)
return node
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
for err in self.obj_conversion_errors:
report_error(err)
self.obj_conversion_errors = []
if not self.type.subtype_of(dst_type):
error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
elif (dst_type.is_array or dst_type.is_ptr) and dst_type.base_type is not PyrexTypes.c_void_type:
array_length = len(self.args)
if self.mult_factor:
if isinstance(self.mult_factor.constant_result, _py_int_types):
if self.mult_factor.constant_result <= 0:
error(self.pos, "Cannot coerce non-positively multiplied list to '%s'" % dst_type)
else:
array_length *= self.mult_factor.constant_result
else:
error(self.pos, "Cannot coerce dynamically multiplied list to '%s'" % dst_type)
base_type = dst_type.base_type
self.type = PyrexTypes.CArrayType(base_type, array_length)
for i in range(len(self.original_args)):
arg = self.args[i]
if isinstance(arg, CoerceToPyTypeNode):
arg = arg.arg
self.args[i] = arg.coerce_to(base_type, env)
elif dst_type.is_cpp_class:
# TODO(robertwb): Avoid object conversion for vector/list/set.
return TypecastNode(self.pos, operand=self, type=PyrexTypes.py_object_type).coerce_to(dst_type, env)
elif self.mult_factor:
error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
elif dst_type.is_struct:
if len(self.args) > len(dst_type.scope.var_entries):
error(self.pos, "Too many members for '%s'" % dst_type)
else:
if len(self.args) < len(dst_type.scope.var_entries):
warning(self.pos, "Too few members for '%s'" % dst_type, 1)
for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
if isinstance(arg, CoerceToPyTypeNode):
arg = arg.arg
self.args[i] = arg.coerce_to(member.type, env)
self.type = dst_type
elif dst_type.is_ctuple:
return self.coerce_to_ctuple(dst_type, env)
else:
self.type = error_type
error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
return self
def as_list(self): # dummy for compatibility with TupleNode
return self
def as_tuple(self):
t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor)
if isinstance(self.constant_result, list):
t.constant_result = tuple(self.constant_result)
return t
def allocate_temp_result(self, code):
if self.type.is_array and self.in_module_scope:
self.temp_code = code.funcstate.allocate_temp(
self.type, manage_ref=False, static=True)
else:
SequenceNode.allocate_temp_result(self, code)
def release_temp_result(self, env):
if self.type.is_array:
# To be valid C++, we must allocate the memory on the stack
# manually and be sure not to reuse it for something else.
# Yes, this means that we leak a temp array variable.
pass
else:
SequenceNode.release_temp_result(self, env)
def calculate_constant_result(self):
if self.mult_factor:
raise ValueError() # may exceed the compile time memory
self.constant_result = [
arg.constant_result for arg in self.args]
def compile_time_value(self, denv):
l = self.compile_time_value_list(denv)
if self.mult_factor:
l *= self.mult_factor.compile_time_value(denv)
return l
def generate_operation_code(self, code):
if self.type.is_pyobject:
for err in self.obj_conversion_errors:
report_error(err)
self.generate_sequence_packing_code(code)
elif self.type.is_array:
if self.mult_factor:
code.putln("{")
code.putln("Py_ssize_t %s;" % Naming.quick_temp_cname)
code.putln("for ({i} = 0; {i} < {count}; {i}++) {{".format(
i=Naming.quick_temp_cname, count=self.mult_factor.result()))
offset = '+ (%d * %s)' % (len(self.args), Naming.quick_temp_cname)
else:
offset = ''
for i, arg in enumerate(self.args):
if arg.type.is_array:
code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
code.putln("memcpy(&(%s[%s%s]), %s, sizeof(%s[0]));" % (
self.result(), i, offset,
arg.result(), self.result()
))
else:
code.putln("%s[%s%s] = %s;" % (
self.result(),
i,
offset,
arg.result()))
if self.mult_factor:
code.putln("}")
code.putln("}")
elif self.type.is_struct:
for arg, member in zip(self.args, self.type.scope.var_entries):
code.putln("%s.%s = %s;" % (
self.result(),
member.cname,
arg.result()))
else:
raise InternalError("List type never specified")
class ScopedExprNode(ExprNode):
# Abstract base class for ExprNodes that have their own local
# scope, such as generator expressions.
#
# expr_scope Scope the inner scope of the expression
subexprs = []
expr_scope = None
# does this node really have a local scope, e.g. does it leak loop
# variables or not? non-leaking Py3 behaviour is default, except
# for list comprehensions where the behaviour differs in Py2 and
# Py3 (set in Parsing.py based on parser context)
has_local_scope = True
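    # Added illustrative note: in Py2, `[x for x in seq]` leaves `x` bound in the
    # enclosing scope, while generator expressions and all Py3 comprehensions
    # keep it local, hence the parser-dependent flag above.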
def init_scope(self, outer_scope, expr_scope=None):
if expr_scope is not None:
self.expr_scope = expr_scope
elif self.has_local_scope:
self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
else:
self.expr_scope = None
def analyse_declarations(self, env):
self.init_scope(env)
def analyse_scoped_declarations(self, env):
# this is called with the expr_scope as env
pass
def analyse_types(self, env):
# no recursion here, the children will be analysed separately below
return self
def analyse_scoped_expressions(self, env):
# this is called with the expr_scope as env
return self
def generate_evaluation_code(self, code):
# set up local variables and free their references on exit
generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
if not self.has_local_scope or not self.expr_scope.var_entries:
# no local variables => delegate, done
generate_inner_evaluation_code(code)
return
code.putln('{ /* enter inner scope */')
py_entries = []
for _, entry in sorted(item for item in self.expr_scope.entries.items() if item[0]):
if not entry.in_closure:
if entry.type.is_pyobject and entry.used:
py_entries.append(entry)
if not py_entries:
# no local Python references => no cleanup required
generate_inner_evaluation_code(code)
code.putln('} /* exit inner scope */')
return
# must free all local Python references at each exit point
old_loop_labels = code.new_loop_labels()
old_error_label = code.new_error_label()
generate_inner_evaluation_code(code)
# normal (non-error) exit
self._generate_vars_cleanup(code, py_entries)
# error/loop body exit points
exit_scope = code.new_label('exit_scope')
code.put_goto(exit_scope)
for label, old_label in ([(code.error_label, old_error_label)] +
list(zip(code.get_loop_labels(), old_loop_labels))):
if code.label_used(label):
code.put_label(label)
self._generate_vars_cleanup(code, py_entries)
code.put_goto(old_label)
code.put_label(exit_scope)
code.putln('} /* exit inner scope */')
code.set_loop_labels(old_loop_labels)
code.error_label = old_error_label
def _generate_vars_cleanup(self, code, py_entries):
for entry in py_entries:
if entry.is_cglobal:
code.put_var_gotref(entry)
code.put_decref_set(entry.cname, "Py_None")
else:
code.put_var_xdecref_clear(entry)
class ComprehensionNode(ScopedExprNode):
# A list/set/dict comprehension
child_attrs = ["loop"]
is_temp = True
constant_result = not_a_constant
def infer_type(self, env):
return self.type
def analyse_declarations(self, env):
self.append.target = self # this is used in the PyList_Append of the inner loop
self.init_scope(env)
def analyse_scoped_declarations(self, env):
self.loop.analyse_declarations(env)
def analyse_types(self, env):
if not self.has_local_scope:
self.loop = self.loop.analyse_expressions(env)
return self
def analyse_scoped_expressions(self, env):
if self.has_local_scope:
self.loop = self.loop.analyse_expressions(env)
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
self.generate_operation_code(code)
def generate_operation_code(self, code):
if self.type is Builtin.list_type:
create_code = 'PyList_New(0)'
elif self.type is Builtin.set_type:
create_code = 'PySet_New(NULL)'
elif self.type is Builtin.dict_type:
create_code = 'PyDict_New()'
else:
raise InternalError("illegal type for comprehension: %s" % self.type)
code.putln('%s = %s; %s' % (
self.result(), create_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
self.loop.generate_execution_code(code)
def annotate(self, code):
self.loop.annotate(code)
class ComprehensionAppendNode(Node):
# Need to be careful to avoid infinite recursion:
# target must not be in child_attrs/subexprs
child_attrs = ['expr']
target = None
type = PyrexTypes.c_int_type
def analyse_expressions(self, env):
self.expr = self.expr.analyse_expressions(env)
if not self.expr.type.is_pyobject:
self.expr = self.expr.coerce_to_pyobject(env)
return self
def generate_execution_code(self, code):
if self.target.type is list_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
function = "__Pyx_ListComp_Append"
elif self.target.type is set_type:
function = "PySet_Add"
else:
raise InternalError(
"Invalid type for comprehension node: %s" % self.target.type)
self.expr.generate_evaluation_code(code)
code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
function,
self.target.result(),
self.expr.result()
), self.pos))
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.expr.generate_function_definitions(env, code)
def annotate(self, code):
self.expr.annotate(code)
class DictComprehensionAppendNode(ComprehensionAppendNode):
child_attrs = ['key_expr', 'value_expr']
def analyse_expressions(self, env):
self.key_expr = self.key_expr.analyse_expressions(env)
if not self.key_expr.type.is_pyobject:
self.key_expr = self.key_expr.coerce_to_pyobject(env)
self.value_expr = self.value_expr.analyse_expressions(env)
if not self.value_expr.type.is_pyobject:
self.value_expr = self.value_expr.coerce_to_pyobject(env)
return self
def generate_execution_code(self, code):
self.key_expr.generate_evaluation_code(code)
self.value_expr.generate_evaluation_code(code)
code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
self.target.result(),
self.key_expr.result(),
self.value_expr.result()
), self.pos))
self.key_expr.generate_disposal_code(code)
self.key_expr.free_temps(code)
self.value_expr.generate_disposal_code(code)
self.value_expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.key_expr.generate_function_definitions(env, code)
self.value_expr.generate_function_definitions(env, code)
def annotate(self, code):
self.key_expr.annotate(code)
self.value_expr.annotate(code)
class InlinedGeneratorExpressionNode(ExprNode):
# An inlined generator expression for which the result is calculated
    # inside of the loop and returned as the single (first and only)
    # return value of the generator.
# This will only be created by transforms when replacing safe builtin
# calls on generator expressions.
#
# gen GeneratorExpressionNode the generator, not containing any YieldExprNodes
# orig_func String the name of the builtin function this node replaces
# target ExprNode or None a 'target' for a ComprehensionAppend node
subexprs = ["gen"]
orig_func = None
target = None
is_temp = True
type = py_object_type
def __init__(self, pos, gen, comprehension_type=None, **kwargs):
gbody = gen.def_node.gbody
gbody.is_inlined = True
if comprehension_type is not None:
assert comprehension_type in (list_type, set_type, dict_type), comprehension_type
gbody.inlined_comprehension_type = comprehension_type
kwargs.update(
target=RawCNameExprNode(pos, comprehension_type, Naming.retval_cname),
type=comprehension_type,
)
super(InlinedGeneratorExpressionNode, self).__init__(pos, gen=gen, **kwargs)
def may_be_none(self):
return self.orig_func not in ('any', 'all', 'sorted')
def infer_type(self, env):
return self.type
def analyse_types(self, env):
self.gen = self.gen.analyse_expressions(env)
return self
def generate_result_code(self, code):
code.putln("%s = __Pyx_Generator_Next(%s); %s" % (
self.result(), self.gen.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class MergedSequenceNode(ExprNode):
"""
Merge a sequence of iterables into a set/list/tuple.
The target collection is determined by self.type, which must be set externally.
args [ExprNode]
"""
subexprs = ['args']
is_temp = True
gil_message = "Constructing Python collection"
def __init__(self, pos, args, type):
if type in (list_type, tuple_type) and args and args[0].is_sequence_constructor:
# construct a list directly from the first argument that we can then extend
if args[0].type is not list_type:
args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True)
ExprNode.__init__(self, pos, args=args, type=type)
def calculate_constant_result(self):
result = []
for item in self.args:
if item.is_sequence_constructor and item.mult_factor:
if item.mult_factor.constant_result <= 0:
continue
# otherwise, adding each item once should be enough
if item.is_set_literal or item.is_sequence_constructor:
# process items in order
items = (arg.constant_result for arg in item.args)
else:
items = item.constant_result
result.extend(items)
if self.type is set_type:
result = set(result)
elif self.type is tuple_type:
result = tuple(result)
else:
assert self.type is list_type
self.constant_result = result
def compile_time_value(self, denv):
result = []
for item in self.args:
if item.is_sequence_constructor and item.mult_factor:
if item.mult_factor.compile_time_value(denv) <= 0:
continue
if item.is_set_literal or item.is_sequence_constructor:
# process items in order
items = (arg.compile_time_value(denv) for arg in item.args)
else:
items = item.compile_time_value(denv)
result.extend(items)
if self.type is set_type:
try:
result = set(result)
except Exception as e:
self.compile_time_value_error(e)
elif self.type is tuple_type:
result = tuple(result)
else:
assert self.type is list_type
return result
def type_dependencies(self, env):
return ()
def infer_type(self, env):
return self.type
def analyse_types(self, env):
args = [
arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
# FIXME: CPython's error message starts with the runtime function name
'argument after * must be an iterable, not NoneType')
for arg in self.args
]
if len(args) == 1 and args[0].type is self.type:
# strip this intermediate node and use the bare collection
return args[0]
assert self.type in (set_type, list_type, tuple_type)
self.args = args
return self
def may_be_none(self):
return False
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
is_set = self.type is set_type
args = iter(self.args)
item = next(args)
item.generate_evaluation_code(code)
if (is_set and item.is_set_literal or
not is_set and item.is_sequence_constructor and item.type is list_type):
code.putln("%s = %s;" % (self.result(), item.py_result()))
item.generate_post_assignment_code(code)
else:
code.putln("%s = %s(%s); %s" % (
self.result(),
'PySet_New' if is_set else 'PySequence_List',
item.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
item.generate_disposal_code(code)
item.free_temps(code)
helpers = set()
if is_set:
add_func = "PySet_Add"
extend_func = "__Pyx_PySet_Update"
else:
add_func = "__Pyx_ListComp_Append"
extend_func = "__Pyx_PyList_Extend"
for item in args:
if (is_set and (item.is_set_literal or item.is_sequence_constructor) or
(item.is_sequence_constructor and not item.mult_factor)):
if not is_set and item.args:
helpers.add(("ListCompAppend", "Optimize.c"))
for arg in item.args:
arg.generate_evaluation_code(code)
code.put_error_if_neg(arg.pos, "%s(%s, %s)" % (
add_func,
self.result(),
arg.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
continue
if is_set:
helpers.add(("PySet_Update", "Builtins.c"))
else:
helpers.add(("ListExtend", "Optimize.c"))
item.generate_evaluation_code(code)
code.put_error_if_neg(item.pos, "%s(%s, %s)" % (
extend_func,
self.result(),
item.py_result()))
item.generate_disposal_code(code)
item.free_temps(code)
if self.type is tuple_type:
code.putln("{")
code.putln("PyObject *%s = PyList_AsTuple(%s);" % (
Naming.quick_temp_cname,
self.result()))
code.put_decref(self.result(), py_object_type)
code.putln("%s = %s; %s" % (
self.result(),
Naming.quick_temp_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
code.putln("}")
for helper in sorted(helpers):
code.globalstate.use_utility_code(UtilityCode.load_cached(*helper))
def annotate(self, code):
for item in self.args:
item.annotate(code)
class SetNode(ExprNode):
"""
Set constructor.
"""
subexprs = ['args']
type = set_type
is_set_literal = True
gil_message = "Constructing Python set"
def analyse_types(self, env):
for i in range(len(self.args)):
arg = self.args[i]
arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
self.type = set_type
self.is_temp = 1
return self
def may_be_none(self):
return False
def calculate_constant_result(self):
self.constant_result = set([arg.constant_result for arg in self.args])
def compile_time_value(self, denv):
values = [arg.compile_time_value(denv) for arg in self.args]
try:
return set(values)
except Exception as e:
self.compile_time_value_error(e)
def generate_evaluation_code(self, code):
for arg in self.args:
arg.generate_evaluation_code(code)
self.allocate_temp_result(code)
code.putln(
"%s = PySet_New(0); %s" % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
for arg in self.args:
code.put_error_if_neg(
self.pos,
"PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
class DictNode(ExprNode):
# Dictionary constructor.
#
# key_value_pairs [DictItemNode]
# exclude_null_values [boolean] Do not add NULL values to dict
#
# obj_conversion_errors [PyrexError] used internally
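    #
    # Illustrative note (assumption, not part of the original comments): a dict
    # literal such as {'a': 1, 'b': 2} is built through this node; when the node
    # is coerced to a C struct/union type, the key/value pairs are instead mapped
    # onto struct fields in coerce_to() below.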
subexprs = ['key_value_pairs']
is_temp = 1
exclude_null_values = False
type = dict_type
is_dict_literal = True
reject_duplicates = False
obj_conversion_errors = []
@classmethod
def from_pairs(cls, pos, pairs):
return cls(pos, key_value_pairs=[
DictItemNode(pos, key=k, value=v) for k, v in pairs])
def calculate_constant_result(self):
self.constant_result = dict([
item.constant_result for item in self.key_value_pairs])
def compile_time_value(self, denv):
pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
for item in self.key_value_pairs]
try:
return dict(pairs)
except Exception as e:
self.compile_time_value_error(e)
def type_dependencies(self, env):
return ()
def infer_type(self, env):
        # TODO: Infer struct constructors.
return dict_type
def analyse_types(self, env):
with local_errors(ignore=True) as errors:
self.key_value_pairs = [
item.analyse_types(env)
for item in self.key_value_pairs
]
self.obj_conversion_errors = errors
return self
def may_be_none(self):
return False
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
self.release_errors()
if self.type.is_struct_or_union:
if not dict_type.subtype_of(dst_type):
error(self.pos, "Cannot interpret struct as non-dict type '%s'" % dst_type)
return DictNode(self.pos, key_value_pairs=[
DictItemNode(item.pos, key=item.key.coerce_to_pyobject(env),
value=item.value.coerce_to_pyobject(env))
for item in self.key_value_pairs])
if not self.type.subtype_of(dst_type):
error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
elif dst_type.is_struct_or_union:
self.type = dst_type
if not dst_type.is_struct and len(self.key_value_pairs) != 1:
error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
for item in self.key_value_pairs:
if isinstance(item.key, CoerceToPyTypeNode):
item.key = item.key.arg
if not item.key.is_string_literal:
error(item.key.pos, "Invalid struct field identifier")
item.key = StringNode(item.key.pos, value="<error>")
else:
key = str(item.key.value) # converts string literals to unicode in Py3
member = dst_type.scope.lookup_here(key)
if not member:
error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
else:
value = item.value
if isinstance(value, CoerceToPyTypeNode):
value = value.arg
item.value = value.coerce_to(member.type, env)
else:
self.type = error_type
error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
return self
def release_errors(self):
for err in self.obj_conversion_errors:
report_error(err)
self.obj_conversion_errors = []
gil_message = "Constructing Python dict"
def generate_evaluation_code(self, code):
# Custom method used here because key-value
# pairs are evaluated and used one at a time.
code.mark_pos(self.pos)
self.allocate_temp_result(code)
is_dict = self.type.is_pyobject
if is_dict:
self.release_errors()
code.putln(
"%s = __Pyx_PyDict_NewPresized(%d); %s" % (
self.result(),
len(self.key_value_pairs),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
keys_seen = set()
key_type = None
needs_error_helper = False
for item in self.key_value_pairs:
item.generate_evaluation_code(code)
if is_dict:
if self.exclude_null_values:
code.putln('if (%s) {' % item.value.py_result())
key = item.key
if self.reject_duplicates:
if keys_seen is not None:
# avoid runtime 'in' checks for literals that we can do at compile time
if not key.is_string_literal:
keys_seen = None
elif key.value in keys_seen:
# FIXME: this could be a compile time error, at least in Cython code
keys_seen = None
elif key_type is not type(key.value):
if key_type is None:
key_type = type(key.value)
keys_seen.add(key.value)
else:
# different types => may not be able to compare at compile time
keys_seen = None
else:
keys_seen.add(key.value)
if keys_seen is None:
code.putln('if (unlikely(PyDict_Contains(%s, %s))) {' % (
self.result(), key.py_result()))
# currently only used in function calls
needs_error_helper = True
code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
key.py_result(),
code.error_goto(item.pos)))
code.putln("} else {")
code.put_error_if_neg(self.pos, "PyDict_SetItem(%s, %s, %s)" % (
self.result(),
item.key.py_result(),
item.value.py_result()))
if self.reject_duplicates and keys_seen is None:
code.putln('}')
if self.exclude_null_values:
code.putln('}')
else:
code.putln("%s.%s = %s;" % (
self.result(),
item.key.value,
item.value.result()))
item.generate_disposal_code(code)
item.free_temps(code)
if needs_error_helper:
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))
def annotate(self, code):
for item in self.key_value_pairs:
item.annotate(code)
class DictItemNode(ExprNode):
# Represents a single item in a DictNode
#
# key ExprNode
# value ExprNode
subexprs = ['key', 'value']
nogil_check = None # Parent DictNode takes care of it
def calculate_constant_result(self):
self.constant_result = (
self.key.constant_result, self.value.constant_result)
def analyse_types(self, env):
self.key = self.key.analyse_types(env)
self.value = self.value.analyse_types(env)
self.key = self.key.coerce_to_pyobject(env)
self.value = self.value.coerce_to_pyobject(env)
return self
def generate_evaluation_code(self, code):
self.key.generate_evaluation_code(code)
self.value.generate_evaluation_code(code)
def generate_disposal_code(self, code):
self.key.generate_disposal_code(code)
self.value.generate_disposal_code(code)
def free_temps(self, code):
self.key.free_temps(code)
self.value.free_temps(code)
def __iter__(self):
return iter([self.key, self.value])
class SortedDictKeysNode(ExprNode):
# build sorted list of dict keys, e.g. for dir()
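    # Roughly equivalent Python (illustrative sketch only): sorted(obj.keys()),
    # implemented via PyDict_Keys() for real dicts and via a generic .keys()
    # call plus PyList_Sort() otherwise.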
subexprs = ['arg']
is_temp = True
def __init__(self, arg):
ExprNode.__init__(self, arg.pos, arg=arg)
self.type = Builtin.list_type
def analyse_types(self, env):
arg = self.arg.analyse_types(env)
if arg.type is Builtin.dict_type:
arg = arg.as_none_safe_node(
"'NoneType' object is not iterable")
self.arg = arg
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
dict_result = self.arg.py_result()
if self.arg.type is Builtin.dict_type:
code.putln('%s = PyDict_Keys(%s); %s' % (
self.result(), dict_result,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
else:
# originally used PyMapping_Keys() here, but that may return a tuple
code.globalstate.use_utility_code(UtilityCode.load_cached(
'PyObjectCallMethod0', 'ObjectHandling.c'))
keys_cname = code.intern_identifier(StringEncoding.EncodedString("keys"))
code.putln('%s = __Pyx_PyObject_CallMethod0(%s, %s); %s' % (
self.result(), dict_result, keys_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
code.putln("if (unlikely(!PyList_Check(%s))) {" % self.result())
code.put_decref_set(self.result(), "PySequence_List(%s)" % self.result())
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.py_result())
code.putln("}")
code.put_error_if_neg(
self.pos, 'PyList_Sort(%s)' % self.py_result())
class ModuleNameMixin(object):
def get_py_mod_name(self, code):
return code.get_py_string_const(
self.module_name, identifier=True)
def get_py_qualified_name(self, code):
return code.get_py_string_const(
self.qualname, identifier=True)
class ClassNode(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
# name EncodedString Name of the class
# bases ExprNode Base class tuple
# dict ExprNode Class dict (not owned by this node)
# doc ExprNode or None Doc string
# module_name EncodedString Name of defining module
subexprs = ['bases', 'doc']
type = py_object_type
is_temp = True
def infer_type(self, env):
# TODO: could return 'type' in some cases
return py_object_type
def analyse_types(self, env):
self.bases = self.bases.analyse_types(env)
if self.doc:
self.doc = self.doc.analyse_types(env)
self.doc = self.doc.coerce_to_pyobject(env)
env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
return self
def may_be_none(self):
return True
gil_message = "Constructing Python class"
def generate_result_code(self, code):
cname = code.intern_identifier(self.name)
if self.doc:
code.put_error_if_neg(self.pos,
'PyDict_SetItem(%s, %s, %s)' % (
self.dict.py_result(),
code.intern_identifier(
StringEncoding.EncodedString("__doc__")),
self.doc.py_result()))
py_mod_name = self.get_py_mod_name(code)
qualname = self.get_py_qualified_name(code)
code.putln(
'%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
self.result(),
self.bases.py_result(),
self.dict.py_result(),
cname,
qualname,
py_mod_name,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class Py3ClassNode(ExprNode):
# Helper class used in the implementation of Python3+
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
# name EncodedString Name of the class
# dict ExprNode Class dict (not owned by this node)
# module_name EncodedString Name of defining module
# calculate_metaclass bool should call CalculateMetaclass()
# allow_py2_metaclass bool should look for Py2 metaclass
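    #
    # Illustrative note (assumption): the generated code roughly follows the
    # Python 3 class creation protocol "cls = metaclass(name, bases, namespace)",
    # with the metaclass either given explicitly or calculated from the bases.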
subexprs = []
type = py_object_type
is_temp = True
def infer_type(self, env):
# TODO: could return 'type' in some cases
return py_object_type
def analyse_types(self, env):
return self
def may_be_none(self):
return True
gil_message = "Constructing Python class"
def generate_result_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
cname = code.intern_identifier(self.name)
if self.mkw:
mkw = self.mkw.py_result()
else:
mkw = 'NULL'
if self.metaclass:
metaclass = self.metaclass.py_result()
else:
metaclass = "((PyObject*)&__Pyx_DefaultClassType)"
code.putln(
'%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % (
self.result(),
metaclass,
cname,
self.bases.py_result(),
self.dict.py_result(),
mkw,
self.calculate_metaclass,
self.allow_py2_metaclass,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PyClassMetaclassNode(ExprNode):
# Helper class holds Python3 metaclass object
#
# bases ExprNode Base class tuple (not owned by this node)
# mkw ExprNode Class keyword arguments (not owned by this node)
subexprs = []
def analyse_types(self, env):
self.type = py_object_type
self.is_temp = True
return self
def may_be_none(self):
return True
def generate_result_code(self, code):
if self.mkw:
code.globalstate.use_utility_code(
UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c"))
call = "__Pyx_Py3MetaclassGet(%s, %s)" % (
self.bases.result(),
self.mkw.result())
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("CalculateMetaclass", "ObjectHandling.c"))
call = "__Pyx_CalculateMetaclass(NULL, %s)" % (
self.bases.result())
code.putln(
"%s = %s; %s" % (
self.result(), call,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
# Helper class holds Python3 namespace object
#
    # All of these are not owned by this node
# metaclass ExprNode Metaclass object
# bases ExprNode Base class tuple
# mkw ExprNode Class keyword arguments
# doc ExprNode or None Doc string (owned)
subexprs = ['doc']
def analyse_types(self, env):
if self.doc:
self.doc = self.doc.analyse_types(env)
self.doc = self.doc.coerce_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
return self
def may_be_none(self):
return True
def generate_result_code(self, code):
cname = code.intern_identifier(self.name)
py_mod_name = self.get_py_mod_name(code)
qualname = self.get_py_qualified_name(code)
if self.doc:
doc_code = self.doc.result()
else:
doc_code = '(PyObject *) NULL'
if self.mkw:
mkw = self.mkw.py_result()
else:
mkw = '(PyObject *) NULL'
if self.metaclass:
metaclass = self.metaclass.py_result()
else:
metaclass = "(PyObject *) NULL"
code.putln(
"%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
self.result(),
metaclass,
self.bases.result(),
cname,
qualname,
mkw,
py_mod_name,
doc_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class ClassCellInjectorNode(ExprNode):
# Initialize CyFunction.func_classobj
is_temp = True
type = py_object_type
subexprs = []
is_active = False
def analyse_expressions(self, env):
return self
def generate_evaluation_code(self, code):
if self.is_active:
self.allocate_temp_result(code)
code.putln(
'%s = PyList_New(0); %s' % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
def generate_injection_code(self, code, classobj_cname):
if self.is_active:
code.globalstate.use_utility_code(
UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
code.put_error_if_neg(self.pos, '__Pyx_CyFunction_InitClassCell(%s, %s)' % (
self.result(), classobj_cname))
class ClassCellNode(ExprNode):
# Class Cell for noargs super()
subexprs = []
is_temp = True
is_generator = False
type = py_object_type
def analyse_types(self, env):
return self
def generate_result_code(self, code):
if not self.is_generator:
code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
self.result(),
Naming.self_cname))
else:
code.putln('%s = %s->classobj;' % (
self.result(), Naming.generator_cname))
code.putln(
'if (!%s) { PyErr_SetString(PyExc_SystemError, '
'"super(): empty __class__ cell"); %s }' % (
self.result(),
code.error_goto(self.pos)))
code.put_incref(self.result(), py_object_type)
class BoundMethodNode(ExprNode):
# Helper class used in the implementation of Python
    # class definitions. Constructs a bound method
# object from a class and a function.
#
# function ExprNode Function object
# self_object ExprNode self object
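    #
    # Illustrative note (assumption): conceptually equivalent to Python's
    # types.MethodType(function, self_object).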
subexprs = ['function']
def analyse_types(self, env):
self.function = self.function.analyse_types(env)
self.type = py_object_type
self.is_temp = 1
return self
gil_message = "Constructing a bound method"
def generate_result_code(self, code):
code.putln(
"%s = __Pyx_PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
self.result(),
self.function.py_result(),
self.self_object.py_result(),
self.self_object.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class UnboundMethodNode(ExprNode):
# Helper class used in the implementation of Python
# class definitions. Constructs an unbound method
# object from a class and a function.
#
# function ExprNode Function object
type = py_object_type
is_temp = 1
subexprs = ['function']
def analyse_types(self, env):
self.function = self.function.analyse_types(env)
return self
def may_be_none(self):
return False
gil_message = "Constructing an unbound method"
def generate_result_code(self, code):
class_cname = code.pyclass_stack[-1].classobj.result()
code.putln(
"%s = __Pyx_PyMethod_New(%s, 0, %s); %s" % (
self.result(),
self.function.py_result(),
class_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PyCFunctionNode(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# functions. Constructs a PyCFunction object
# from a PyMethodDef struct.
#
# pymethdef_cname string PyMethodDef structure
# self_object ExprNode or None
# binding bool
# def_node DefNode the Python function node
# module_name EncodedString Name of defining module
# code_object CodeObjectNode the PyCodeObject creator node
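    #
    # Illustrative note (assumption): in non-binding mode this creates a plain
    # PyCFunction from the PyMethodDef; in binding mode it creates a CyFunction
    # that additionally carries defaults, keyword-only defaults and annotations,
    # which are prepared in analyse_default_args() below.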
subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict',
'annotations_dict']
self_object = None
code_object = None
binding = False
def_node = None
defaults = None
defaults_struct = None
defaults_pyobjects = 0
defaults_tuple = None
defaults_kwdict = None
annotations_dict = None
type = py_object_type
is_temp = 1
specialized_cpdefs = None
is_specialization = False
@classmethod
def from_defnode(cls, node, binding):
return cls(node.pos,
def_node=node,
pymethdef_cname=node.entry.pymethdef_cname,
binding=binding or node.specialized_cpdefs,
specialized_cpdefs=node.specialized_cpdefs,
code_object=CodeObjectNode(node))
def analyse_types(self, env):
if self.binding:
self.analyse_default_args(env)
return self
def analyse_default_args(self, env):
"""
        Handle a function's non-literal default arguments.
"""
nonliteral_objects = []
nonliteral_other = []
default_args = []
default_kwargs = []
annotations = []
# For global cpdef functions and def/cpdef methods in cdef classes, we must use global constants
# for default arguments to avoid the dependency on the CyFunction object as 'self' argument
# in the underlying C function. Basically, cpdef functions/methods are static C functions,
# so their optional arguments must be static, too.
# TODO: change CyFunction implementation to pass both function object and owning object for method calls
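        # Illustrative example (assumption, not from the original source): for a
        # cpdef method of a cdef class, a default value cannot be attached to the
        # CyFunction wrapper because the underlying static C function may be
        # called without going through that wrapper, so the default is stored as
        # a module-level constant instead.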
must_use_constants = env.is_c_class_scope or (self.def_node.is_wrapper and env.is_module_scope)
for arg in self.def_node.args:
if arg.default and not must_use_constants:
if not arg.default.is_literal:
arg.is_dynamic = True
if arg.type.is_pyobject:
nonliteral_objects.append(arg)
else:
nonliteral_other.append(arg)
else:
arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
if arg.kw_only:
default_kwargs.append(arg)
else:
default_args.append(arg)
if arg.annotation:
arg.annotation = self.analyse_annotation(env, arg.annotation)
annotations.append((arg.pos, arg.name, arg.annotation))
for arg in (self.def_node.star_arg, self.def_node.starstar_arg):
if arg and arg.annotation:
arg.annotation = self.analyse_annotation(env, arg.annotation)
annotations.append((arg.pos, arg.name, arg.annotation))
annotation = self.def_node.return_type_annotation
if annotation:
annotation = self.analyse_annotation(env, annotation)
self.def_node.return_type_annotation = annotation
annotations.append((annotation.pos, StringEncoding.EncodedString("return"), annotation))
if nonliteral_objects or nonliteral_other:
module_scope = env.global_scope()
cname = module_scope.next_id(Naming.defaults_struct_prefix)
scope = Symtab.StructOrUnionScope(cname)
self.defaults = []
for arg in nonliteral_objects:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
allow_pyobject=True)
self.defaults.append((arg, entry))
for arg in nonliteral_other:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
allow_pyobject=False)
self.defaults.append((arg, entry))
entry = module_scope.declare_struct_or_union(
None, 'struct', scope, 1, None, cname=cname)
self.defaults_struct = scope
self.defaults_pyobjects = len(nonliteral_objects)
for arg, entry in self.defaults:
arg.default_value = '%s->%s' % (
Naming.dynamic_args_cname, entry.cname)
self.def_node.defaults_struct = self.defaults_struct.name
if default_args or default_kwargs:
if self.defaults_struct is None:
if default_args:
defaults_tuple = TupleNode(self.pos, args=[
arg.default for arg in default_args])
self.defaults_tuple = defaults_tuple.analyse_types(env).coerce_to_pyobject(env)
if default_kwargs:
defaults_kwdict = DictNode(self.pos, key_value_pairs=[
DictItemNode(
arg.pos,
key=IdentifierStringNode(arg.pos, value=arg.name),
value=arg.default)
for arg in default_kwargs])
self.defaults_kwdict = defaults_kwdict.analyse_types(env)
else:
if default_args:
defaults_tuple = DefaultsTupleNode(
self.pos, default_args, self.defaults_struct)
else:
defaults_tuple = NoneNode(self.pos)
if default_kwargs:
defaults_kwdict = DefaultsKwDictNode(
self.pos, default_kwargs, self.defaults_struct)
else:
defaults_kwdict = NoneNode(self.pos)
defaults_getter = Nodes.DefNode(
self.pos, args=[], star_arg=None, starstar_arg=None,
body=Nodes.ReturnStatNode(
self.pos, return_type=py_object_type,
value=TupleNode(
self.pos, args=[defaults_tuple, defaults_kwdict])),
decorators=None,
name=StringEncoding.EncodedString("__defaults__"))
# defaults getter must never live in class scopes, it's always a module function
module_scope = env.global_scope()
defaults_getter.analyse_declarations(module_scope)
defaults_getter = defaults_getter.analyse_expressions(module_scope)
defaults_getter.body = defaults_getter.body.analyse_expressions(
defaults_getter.local_scope)
defaults_getter.py_wrapper_required = False
defaults_getter.pymethdef_required = False
self.def_node.defaults_getter = defaults_getter
if annotations:
annotations_dict = DictNode(self.pos, key_value_pairs=[
DictItemNode(
pos, key=IdentifierStringNode(pos, value=name),
value=value)
for pos, name, value in annotations])
self.annotations_dict = annotations_dict.analyse_types(env)
def analyse_annotation(self, env, annotation):
if annotation is None:
return None
atype = annotation.analyse_as_type(env)
if atype is not None:
# Keep parsed types as strings as they might not be Python representable.
annotation = UnicodeNode(
annotation.pos,
value=StringEncoding.EncodedString(atype.declaration_code('', for_display=True)))
annotation = annotation.analyse_types(env)
if not annotation.type.is_pyobject:
annotation = annotation.coerce_to_pyobject(env)
return annotation
def may_be_none(self):
return False
gil_message = "Constructing Python function"
def self_result_code(self):
if self.self_object is None:
self_result = "NULL"
else:
self_result = self.self_object.py_result()
return self_result
def generate_result_code(self, code):
if self.binding:
self.generate_cyfunction_code(code)
else:
self.generate_pycfunction_code(code)
def generate_pycfunction_code(self, code):
py_mod_name = self.get_py_mod_name(code)
code.putln(
'%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
self.result(),
self.pymethdef_cname,
self.self_result_code(),
py_mod_name,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
def generate_cyfunction_code(self, code):
if self.specialized_cpdefs:
def_node = self.specialized_cpdefs[0]
else:
def_node = self.def_node
if self.specialized_cpdefs or self.is_specialization:
code.globalstate.use_utility_code(
UtilityCode.load_cached("FusedFunction", "CythonFunction.c"))
constructor = "__pyx_FusedFunction_NewEx"
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("CythonFunction", "CythonFunction.c"))
constructor = "__Pyx_CyFunction_NewEx"
if self.code_object:
code_object_result = self.code_object.py_result()
else:
code_object_result = 'NULL'
flags = []
if def_node.is_staticmethod:
flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
elif def_node.is_classmethod:
flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')
if def_node.local_scope.parent_scope.is_c_class_scope and not def_node.entry.is_anonymous:
flags.append('__Pyx_CYFUNCTION_CCLASS')
if flags:
flags = ' | '.join(flags)
else:
flags = '0'
code.putln(
'%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % (
self.result(),
constructor,
self.pymethdef_cname,
flags,
self.get_py_qualified_name(code),
self.self_result_code(),
self.get_py_mod_name(code),
Naming.moddict_cname,
code_object_result,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
if def_node.requires_classobj:
assert code.pyclass_stack, "pyclass_stack is empty"
class_node = code.pyclass_stack[-1]
code.put_incref(self.py_result(), py_object_type)
code.putln(
'PyList_Append(%s, %s);' % (
class_node.class_cell.result(),
self.result()))
code.put_giveref(self.py_result())
if self.defaults:
code.putln(
'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
self.result(), self.defaults_struct.name,
self.defaults_pyobjects, code.error_goto(self.pos)))
defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
self.defaults_struct.name, self.result())
for arg, entry in self.defaults:
arg.generate_assignment_code(code, target='%s->%s' % (
defaults, entry.cname))
if self.defaults_tuple:
code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
self.result(), self.defaults_tuple.py_result()))
if self.defaults_kwdict:
code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % (
self.result(), self.defaults_kwdict.py_result()))
if def_node.defaults_getter:
code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
self.result(), def_node.defaults_getter.entry.pyfunc_cname))
if self.annotations_dict:
code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % (
self.result(), self.annotations_dict.py_result()))
class InnerFunctionNode(PyCFunctionNode):
# Special PyCFunctionNode that depends on a closure class
#
binding = True
needs_self_code = True
def self_result_code(self):
if self.needs_self_code:
return "((PyObject*)%s)" % Naming.cur_scope_cname
return "NULL"
class CodeObjectNode(ExprNode):
# Create a PyCodeObject for a CyFunction instance.
#
# def_node DefNode the Python function node
# varnames TupleNode a tuple with all local variable names
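    #
    # Illustrative note (assumption): the resulting code object has empty
    # bytecode and constants; it mainly supplies argument/variable names, the
    # filename and the first line number so that frames and tracebacks of
    # CyFunctions resemble those of normal Python functions.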
subexprs = ['varnames']
is_temp = False
result_code = None
def __init__(self, def_node):
ExprNode.__init__(self, def_node.pos, def_node=def_node)
args = list(def_node.args)
# if we have args/kwargs, then the first two in var_entries are those
local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
self.varnames = TupleNode(
def_node.pos,
args=[IdentifierStringNode(arg.pos, value=arg.name)
for arg in args + local_vars],
is_temp=0,
is_literal=1)
def may_be_none(self):
return False
def calculate_result_code(self, code=None):
if self.result_code is None:
self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
return self.result_code
def generate_result_code(self, code):
if self.result_code is None:
self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
func = self.def_node
func_name = code.get_py_string_const(
func.name, identifier=True, is_str=False, unicode_value=func.name)
# FIXME: better way to get the module file path at module init time? Encoding to use?
file_path = StringEncoding.bytes_literal(func.pos[0].get_filenametable_entry().encode('utf8'), 'utf8')
file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
# This combination makes CPython create a new dict for "frame.f_locals" (see GH #1836).
flags = ['CO_OPTIMIZED', 'CO_NEWLOCALS']
if self.def_node.star_arg:
flags.append('CO_VARARGS')
if self.def_node.starstar_arg:
flags.append('CO_VARKEYWORDS')
code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
self.result_code,
len(func.args) - func.num_kwonly_args, # argcount
func.num_kwonly_args, # kwonlyargcount (Py3 only)
len(self.varnames.args), # nlocals
'|'.join(flags) or '0', # flags
Naming.empty_bytes, # code
Naming.empty_tuple, # consts
Naming.empty_tuple, # names (FIXME)
self.varnames.result(), # varnames
Naming.empty_tuple, # freevars (FIXME)
Naming.empty_tuple, # cellvars (FIXME)
file_path_const, # filename
func_name, # name
self.pos[1], # firstlineno
Naming.empty_bytes, # lnotab
code.error_goto_if_null(self.result_code, self.pos),
))
class DefaultLiteralArgNode(ExprNode):
# CyFunction's literal argument default value
#
# Evaluate literal only once.
subexprs = []
is_literal = True
is_temp = False
def __init__(self, pos, arg):
super(DefaultLiteralArgNode, self).__init__(pos)
self.arg = arg
self.type = self.arg.type
self.evaluated = False
def analyse_types(self, env):
return self
def generate_result_code(self, code):
pass
def generate_evaluation_code(self, code):
if not self.evaluated:
self.arg.generate_evaluation_code(code)
self.evaluated = True
def result(self):
return self.type.cast_code(self.arg.result())
class DefaultNonLiteralArgNode(ExprNode):
# CyFunction's non-literal argument default value
subexprs = []
def __init__(self, pos, arg, defaults_struct):
super(DefaultNonLiteralArgNode, self).__init__(pos)
self.arg = arg
self.defaults_struct = defaults_struct
def analyse_types(self, env):
self.type = self.arg.type
self.is_temp = False
return self
def generate_result_code(self, code):
pass
def result(self):
return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
self.defaults_struct.name, Naming.self_cname,
self.defaults_struct.lookup(self.arg.name).cname)
class DefaultsTupleNode(TupleNode):
# CyFunction's __defaults__ tuple
def __init__(self, pos, defaults, defaults_struct):
args = []
for arg in defaults:
if not arg.default.is_literal:
arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
else:
arg = arg.default
args.append(arg)
super(DefaultsTupleNode, self).__init__(pos, args=args)
def analyse_types(self, env, skip_children=False):
return super(DefaultsTupleNode, self).analyse_types(env, skip_children).coerce_to_pyobject(env)
class DefaultsKwDictNode(DictNode):
# CyFunction's __kwdefaults__ dict
def __init__(self, pos, defaults, defaults_struct):
items = []
for arg in defaults:
name = IdentifierStringNode(arg.pos, value=arg.name)
if not arg.default.is_literal:
arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
else:
arg = arg.default
items.append(DictItemNode(arg.pos, key=name, value=arg))
super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
class LambdaNode(InnerFunctionNode):
# Lambda expression node (only used as a function reference)
#
# args [CArgDeclNode] formal arguments
# star_arg PyArgDeclNode or None * argument
# starstar_arg PyArgDeclNode or None ** argument
# lambda_name string a module-globally unique lambda name
# result_expr ExprNode
# def_node DefNode the underlying function 'def' node
child_attrs = ['def_node']
name = StringEncoding.EncodedString('<lambda>')
def analyse_declarations(self, env):
self.lambda_name = self.def_node.lambda_name = env.next_id('lambda')
self.def_node.no_assignment_synthesis = True
self.def_node.pymethdef_required = True
self.def_node.analyse_declarations(env)
self.def_node.is_cyfunction = True
self.pymethdef_cname = self.def_node.entry.pymethdef_cname
env.add_lambda_def(self.def_node)
def analyse_types(self, env):
self.def_node = self.def_node.analyse_expressions(env)
return super(LambdaNode, self).analyse_types(env)
def generate_result_code(self, code):
self.def_node.generate_execution_code(code)
super(LambdaNode, self).generate_result_code(code)
class GeneratorExpressionNode(LambdaNode):
# A generator expression, e.g. (i for i in range(10))
#
# Result is a generator.
#
# loop ForStatNode the for-loop, containing a YieldExprNode
# def_node DefNode the underlying generator 'def' node
name = StringEncoding.EncodedString('genexpr')
binding = False
def analyse_declarations(self, env):
self.genexpr_name = env.next_id('genexpr')
super(GeneratorExpressionNode, self).analyse_declarations(env)
# No pymethdef required
self.def_node.pymethdef_required = False
self.def_node.py_wrapper_required = False
self.def_node.is_cyfunction = False
# Force genexpr signature
self.def_node.entry.signature = TypeSlots.pyfunction_noargs
def generate_result_code(self, code):
code.putln(
'%s = %s(%s); %s' % (
self.result(),
self.def_node.entry.pyfunc_cname,
self.self_result_code(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class YieldExprNode(ExprNode):
# Yield expression node
#
# arg ExprNode the value to return from the generator
# label_num integer yield label number
# is_yield_from boolean is a YieldFromExprNode to delegate to another generator
subexprs = ['arg']
type = py_object_type
label_num = 0
is_yield_from = False
is_await = False
in_async_gen = False
expr_keyword = 'yield'
def analyse_types(self, env):
if not self.label_num or (self.is_yield_from and self.in_async_gen):
error(self.pos, "'%s' not supported here" % self.expr_keyword)
self.is_temp = 1
if self.arg is not None:
self.arg = self.arg.analyse_types(env)
if not self.arg.type.is_pyobject:
self.coerce_yield_argument(env)
return self
def coerce_yield_argument(self, env):
self.arg = self.arg.coerce_to_pyobject(env)
def generate_evaluation_code(self, code):
if self.arg:
self.arg.generate_evaluation_code(code)
self.arg.make_owned_reference(code)
code.putln(
"%s = %s;" % (
Naming.retval_cname,
self.arg.result_as(py_object_type)))
self.arg.generate_post_assignment_code(code)
self.arg.free_temps(code)
else:
code.put_init_to_py_none(Naming.retval_cname, py_object_type)
self.generate_yield_code(code)
def generate_yield_code(self, code):
"""
Generate the code to return the argument in 'Naming.retval_cname'
and to continue at the yield label.
"""
label_num, label_name = code.new_yield_label(
self.expr_keyword.replace(' ', '_'))
code.use_label(label_name)
saved = []
code.funcstate.closure_temps.reset()
for cname, type, manage_ref in code.funcstate.temps_in_use():
save_cname = code.funcstate.closure_temps.allocate_temp(type)
saved.append((cname, save_cname, type))
if type.is_pyobject:
code.put_xgiveref(cname)
code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))
code.put_xgiveref(Naming.retval_cname)
profile = code.globalstate.directives['profile']
linetrace = code.globalstate.directives['linetrace']
if profile or linetrace:
code.put_trace_return(Naming.retval_cname,
nogil=not code.funcstate.gil_owned)
code.put_finish_refcount_context()
if code.funcstate.current_except is not None:
# inside of an except block => save away currently handled exception
code.putln("__Pyx_Coroutine_SwapException(%s);" % Naming.generator_cname)
else:
# no exceptions being handled => restore exception state of caller
code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname)
code.putln("/* return from %sgenerator, %sing value */" % (
'async ' if self.in_async_gen else '',
'await' if self.is_await else 'yield'))
code.putln("%s->resume_label = %d;" % (
Naming.generator_cname, label_num))
if self.in_async_gen and not self.is_await:
# __Pyx__PyAsyncGenValueWrapperNew() steals a reference to the return value
code.putln("return __Pyx__PyAsyncGenValueWrapperNew(%s);" % Naming.retval_cname)
else:
code.putln("return %s;" % Naming.retval_cname)
code.put_label(label_name)
for cname, save_cname, type in saved:
code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname))
if type.is_pyobject:
code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
code.put_xgotref(cname)
self.generate_sent_value_handling_code(code, Naming.sent_value_cname)
if self.result_is_used:
self.allocate_temp_result(code)
code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname))
code.put_incref(self.result(), py_object_type)
def generate_sent_value_handling_code(self, code, value_cname):
code.putln(code.error_goto_if_null(value_cname, self.pos))
class _YieldDelegationExprNode(YieldExprNode):
def yield_from_func(self, code):
raise NotImplementedError()
def generate_evaluation_code(self, code, source_cname=None, decref_source=False):
if source_cname is None:
self.arg.generate_evaluation_code(code)
code.putln("%s = %s(%s, %s);" % (
Naming.retval_cname,
self.yield_from_func(code),
Naming.generator_cname,
self.arg.py_result() if source_cname is None else source_cname))
if source_cname is None:
self.arg.generate_disposal_code(code)
self.arg.free_temps(code)
elif decref_source:
code.put_decref_clear(source_cname, py_object_type)
code.put_xgotref(Naming.retval_cname)
code.putln("if (likely(%s)) {" % Naming.retval_cname)
self.generate_yield_code(code)
code.putln("} else {")
# either error or sub-generator has normally terminated: return value => node result
if self.result_is_used:
self.fetch_iteration_result(code)
else:
self.handle_iteration_exception(code)
code.putln("}")
def fetch_iteration_result(self, code):
# YieldExprNode has allocated the result temp for us
code.putln("%s = NULL;" % self.result())
code.put_error_if_neg(self.pos, "__Pyx_PyGen_FetchStopIterationValue(&%s)" % self.result())
code.put_gotref(self.result())
def handle_iteration_exception(self, code):
code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();")
code.putln("if (exc_type) {")
code.putln("if (likely(exc_type == PyExc_StopIteration || (exc_type != PyExc_GeneratorExit &&"
" __Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))) PyErr_Clear();")
code.putln("else %s" % code.error_goto(self.pos))
code.putln("}")
class YieldFromExprNode(_YieldDelegationExprNode):
# "yield from GEN" expression
is_yield_from = True
expr_keyword = 'yield from'
def coerce_yield_argument(self, env):
if not self.arg.type.is_string:
# FIXME: support C arrays and C++ iterators?
error(self.pos, "yielding from non-Python object not supported")
self.arg = self.arg.coerce_to_pyobject(env)
def yield_from_func(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("GeneratorYieldFrom", "Coroutine.c"))
return "__Pyx_Generator_Yield_From"
class AwaitExprNode(_YieldDelegationExprNode):
# 'await' expression node
#
# arg ExprNode the Awaitable value to await
# label_num integer yield label number
is_await = True
expr_keyword = 'await'
def coerce_yield_argument(self, env):
if self.arg is not None:
# FIXME: use same check as in YieldFromExprNode.coerce_yield_argument() ?
self.arg = self.arg.coerce_to_pyobject(env)
def yield_from_func(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("CoroutineYieldFrom", "Coroutine.c"))
return "__Pyx_Coroutine_Yield_From"
class AwaitIterNextExprNode(AwaitExprNode):
# 'await' expression node as part of 'async for' iteration
#
# Breaks out of loop on StopAsyncIteration exception.
def _generate_break(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();")
code.putln("if (unlikely(exc_type && (exc_type == __Pyx_PyExc_StopAsyncIteration || ("
" exc_type != PyExc_StopIteration && exc_type != PyExc_GeneratorExit &&"
" __Pyx_PyErr_GivenExceptionMatches(exc_type, __Pyx_PyExc_StopAsyncIteration))))) {")
code.putln("PyErr_Clear();")
code.putln("break;")
code.putln("}")
def fetch_iteration_result(self, code):
assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
self._generate_break(code)
super(AwaitIterNextExprNode, self).fetch_iteration_result(code)
def generate_sent_value_handling_code(self, code, value_cname):
assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
code.putln("if (unlikely(!%s)) {" % value_cname)
self._generate_break(code)
# all non-break exceptions are errors, as in parent class
code.putln(code.error_goto(self.pos))
code.putln("}")
class GlobalsExprNode(AtomicExprNode):
type = dict_type
is_temp = 1
def analyse_types(self, env):
env.use_utility_code(Builtin.globals_utility_code)
return self
gil_message = "Constructing globals dict"
def may_be_none(self):
return False
def generate_result_code(self, code):
code.putln('%s = __Pyx_Globals(); %s' % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class LocalsDictItemNode(DictItemNode):
def analyse_types(self, env):
self.key = self.key.analyse_types(env)
self.value = self.value.analyse_types(env)
self.key = self.key.coerce_to_pyobject(env)
if self.value.type.can_coerce_to_pyobject(env):
self.value = self.value.coerce_to_pyobject(env)
else:
self.value = None
return self
class FuncLocalsExprNode(DictNode):
def __init__(self, pos, env):
local_vars = sorted([
entry.name for entry in env.entries.values() if entry.name])
items = [LocalsDictItemNode(
pos, key=IdentifierStringNode(pos, value=var),
value=NameNode(pos, name=var, allow_null=True))
for var in local_vars]
DictNode.__init__(self, pos, key_value_pairs=items,
exclude_null_values=True)
def analyse_types(self, env):
node = super(FuncLocalsExprNode, self).analyse_types(env)
node.key_value_pairs = [ i for i in node.key_value_pairs
if i.value is not None ]
return node
class PyClassLocalsExprNode(AtomicExprNode):
def __init__(self, pos, pyclass_dict):
AtomicExprNode.__init__(self, pos)
self.pyclass_dict = pyclass_dict
def analyse_types(self, env):
self.type = self.pyclass_dict.type
self.is_temp = False
return self
def may_be_none(self):
return False
def result(self):
return self.pyclass_dict.result()
def generate_result_code(self, code):
pass
def LocalsExprNode(pos, scope_node, env):
if env.is_module_scope:
return GlobalsExprNode(pos)
if env.is_py_class_scope:
return PyClassLocalsExprNode(pos, scope_node.dict)
return FuncLocalsExprNode(pos, env)
#-------------------------------------------------------------------
#
# Unary operator nodes
#
#-------------------------------------------------------------------
compile_time_unary_operators = {
'not': operator.not_,
'~': operator.inv,
'-': operator.neg,
'+': operator.pos,
}
class UnopNode(ExprNode):
# operator string
# operand ExprNode
#
# Processing during analyse_expressions phase:
#
# analyse_c_operation
# Called when the operand is not a pyobject.
# - Check operand type and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
subexprs = ['operand']
infix = True
def calculate_constant_result(self):
func = compile_time_unary_operators[self.operator]
self.constant_result = func(self.operand.constant_result)
def compile_time_value(self, denv):
func = compile_time_unary_operators.get(self.operator)
if not func:
error(self.pos,
"Unary '%s' not supported in compile-time expression"
% self.operator)
operand = self.operand.compile_time_value(denv)
try:
return func(operand)
except Exception as e:
self.compile_time_value_error(e)
def infer_type(self, env):
operand_type = self.operand.infer_type(env)
if operand_type.is_cpp_class or operand_type.is_ptr:
cpp_type = operand_type.find_cpp_operation_type(self.operator)
if cpp_type is not None:
return cpp_type
return self.infer_unop_type(env, operand_type)
def infer_unop_type(self, env, operand_type):
if operand_type.is_pyobject:
return py_object_type
else:
return operand_type
def may_be_none(self):
if self.operand.type and self.operand.type.is_builtin_type:
if self.operand.type is not type_type:
return False
return ExprNode.may_be_none(self)
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
if self.is_pythran_operation(env):
self.type = PythranExpr(pythran_unaryop_type(self.operator, self.operand.type))
self.is_temp = 1
elif self.is_py_operation():
self.coerce_operand_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
elif self.is_cpp_operation():
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
return self
def check_const(self):
return self.operand.check_const()
def is_py_operation(self):
return self.operand.type.is_pyobject or self.operand.type.is_ctuple
def is_pythran_operation(self, env):
np_pythran = has_np_pythran(env)
op_type = self.operand.type
return np_pythran and (op_type.is_buffer or op_type.is_pythran_expr)
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
def is_cpp_operation(self):
type = self.operand.type
return type.is_cpp_class
def coerce_operand_to_pyobject(self, env):
self.operand = self.operand.coerce_to_pyobject(env)
def generate_result_code(self, code):
if self.type.is_pythran_expr:
code.putln("// Pythran unaryop")
code.putln("__Pyx_call_destructor(%s);" % self.result())
code.putln("new (&%s) decltype(%s){%s%s};" % (
self.result(),
self.result(),
self.operator,
self.operand.pythran_result()))
elif self.operand.type.is_pyobject:
self.generate_py_operation_code(code)
elif self.is_temp:
if self.is_cpp_operation() and self.exception_check == '+':
translate_cpp_exception(code, self.pos,
"%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
self.exception_value, self.in_nogil_context)
else:
code.putln("%s = %s %s;" % (self.result(), self.operator, self.operand.result()))
def generate_py_operation_code(self, code):
function = self.py_operation_function(code)
code.putln(
"%s = %s(%s); %s" % (
self.result(),
function,
self.operand.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
def type_error(self):
if not self.operand.type.is_error:
error(self.pos, "Invalid operand type for '%s' (%s)" %
(self.operator, self.operand.type))
self.type = PyrexTypes.error_type
def analyse_cpp_operation(self, env, overload_check=True):
entry = env.lookup_operator(self.operator, [self.operand])
if overload_check and not entry:
self.type_error()
return
if entry:
self.exception_check = entry.type.exception_check
self.exception_value = entry.type.exception_value
if self.exception_check == '+':
self.is_temp = True
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
else:
self.exception_check = ''
self.exception_value = ''
cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
if overload_check and cpp_type is None:
error(self.pos, "'%s' operator not defined for %s" % (
                self.operator, self.operand.type))
self.type_error()
return
self.type = cpp_type
class NotNode(UnopNode):
# 'not' operator
#
# operand ExprNode
operator = '!'
type = PyrexTypes.c_bint_type
def calculate_constant_result(self):
self.constant_result = not self.operand.constant_result
def compile_time_value(self, denv):
operand = self.operand.compile_time_value(denv)
try:
return not operand
except Exception as e:
self.compile_time_value_error(e)
def infer_unop_type(self, env, operand_type):
return PyrexTypes.c_bint_type
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
operand_type = self.operand.type
if operand_type.is_cpp_class:
self.analyse_cpp_operation(env)
else:
self.operand = self.operand.coerce_to_boolean(env)
return self
def calculate_result_code(self):
return "(!%s)" % self.operand.result()
class UnaryPlusNode(UnopNode):
# unary '+' operator
operator = '+'
def analyse_c_operation(self, env):
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
def py_operation_function(self, code):
return "PyNumber_Positive"
def calculate_result_code(self):
if self.is_cpp_operation():
return "(+%s)" % self.operand.result()
else:
return self.operand.result()
class UnaryMinusNode(UnopNode):
# unary '-' operator
operator = '-'
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_enum:
self.type = PyrexTypes.c_int_type
else:
self.type_error()
if self.type.is_complex:
self.infix = False
def py_operation_function(self, code):
return "PyNumber_Negative"
def calculate_result_code(self):
if self.infix:
return "(-%s)" % self.operand.result()
else:
return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())
def get_constant_c_result_code(self):
value = self.operand.get_constant_c_result_code()
if value:
return "(-%s)" % value
class TildeNode(UnopNode):
# unary '~' operator
def analyse_c_operation(self, env):
if self.operand.type.is_int:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_enum:
self.type = PyrexTypes.c_int_type
else:
self.type_error()
def py_operation_function(self, code):
return "PyNumber_Invert"
def calculate_result_code(self):
return "(~%s)" % self.operand.result()
class CUnopNode(UnopNode):
def is_py_operation(self):
return False
class DereferenceNode(CUnopNode):
# unary * operator
operator = '*'
def infer_unop_type(self, env, operand_type):
if operand_type.is_ptr:
return operand_type.base_type
else:
return PyrexTypes.error_type
def analyse_c_operation(self, env):
if self.operand.type.is_ptr:
self.type = self.operand.type.base_type
else:
self.type_error()
def calculate_result_code(self):
return "(*%s)" % self.operand.result()
class DecrementIncrementNode(CUnopNode):
# unary ++/-- operator
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_ptr:
self.type = self.operand.type
else:
self.type_error()
def calculate_result_code(self):
if self.is_prefix:
return "(%s%s)" % (self.operator, self.operand.result())
else:
return "(%s%s)" % (self.operand.result(), self.operator)
def inc_dec_constructor(is_prefix, operator):
return lambda pos, **kwds: DecrementIncrementNode(pos, is_prefix=is_prefix, operator=operator, **kwds)
class AmpersandNode(CUnopNode):
# The C address-of operator.
#
# operand ExprNode
operator = '&'
def infer_unop_type(self, env, operand_type):
return PyrexTypes.c_ptr_type(operand_type)
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
argtype = self.operand.type
if argtype.is_cpp_class:
self.analyse_cpp_operation(env, overload_check=False)
if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
if argtype.is_memoryviewslice:
self.error("Cannot take address of memoryview slice")
else:
self.error("Taking address of non-lvalue (type %s)" % argtype)
return self
if argtype.is_pyobject:
self.error("Cannot take address of Python %s" % (
"variable '%s'" % self.operand.name if self.operand.is_name else
"object attribute '%s'" % self.operand.attribute if self.operand.is_attribute else
"object"))
return self
if not argtype.is_cpp_class or not self.type:
self.type = PyrexTypes.c_ptr_type(argtype)
return self
def check_const(self):
return self.operand.check_const_addr()
def error(self, mess):
error(self.pos, mess)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
def calculate_result_code(self):
return "(&%s)" % self.operand.result()
def generate_result_code(self, code):
if (self.operand.type.is_cpp_class and self.exception_check == '+'):
translate_cpp_exception(code, self.pos,
"%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
self.exception_value, self.in_nogil_context)
unop_node_classes = {
"+": UnaryPlusNode,
"-": UnaryMinusNode,
"~": TildeNode,
}
def unop_node(pos, operator, operand):
    # Construct a unop node of the appropriate class for
    # the given operator.
if isinstance(operand, IntNode) and operator == '-':
return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)),
longness=operand.longness, unsigned=operand.unsigned)
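    # Note (added clarification, not in the original): the following test uses
    # Python's comparison chaining and reads as
    # "operand.operator == operator and operator in '+-'".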
elif isinstance(operand, UnopNode) and operand.operator == operator in '+-':
warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
return unop_node_classes[operator](pos,
operator = operator,
operand = operand)
class TypecastNode(ExprNode):
# C type cast
#
# operand ExprNode
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# typecheck boolean
#
# If used from a transform, one can if wanted specify the attribute
# "type" directly and leave base_type and declarator to None
subexprs = ['operand']
base_type = declarator = type = None
def type_dependencies(self, env):
return ()
def infer_type(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
_, self.type = self.declarator.analyse(base_type, env)
return self.type
def analyse_types(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
_, self.type = self.declarator.analyse(base_type, env)
if self.operand.has_constant_result():
# Must be done after self.type is resolved.
self.calculate_constant_result()
if self.type.is_cfunction:
error(self.pos,
"Cannot cast to a function type")
self.type = PyrexTypes.error_type
self.operand = self.operand.analyse_types(env)
if self.type is PyrexTypes.c_bint_type:
# short circuit this to a coercion
return self.operand.coerce_to_boolean(env)
to_py = self.type.is_pyobject
from_py = self.operand.type.is_pyobject
if from_py and not to_py and self.operand.is_ephemeral():
if not self.type.is_numeric and not self.type.is_cpp_class:
error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
if to_py and not from_py:
if self.type is bytes_type and self.operand.type.is_int:
return CoerceIntToBytesNode(self.operand, env)
elif self.operand.type.can_coerce_to_pyobject(env):
self.result_ctype = py_object_type
self.operand = self.operand.coerce_to(self.type, env)
else:
if self.operand.type.is_ptr:
if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
error(self.pos, "Python objects cannot be cast from pointers of primitive types")
else:
# Should this be an error?
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
self.operand = self.operand.coerce_to_simple(env)
elif from_py and not to_py:
if self.type.create_from_py_utility_code(env):
self.operand = self.operand.coerce_to(self.type, env)
elif self.type.is_ptr:
if not (self.type.base_type.is_void or self.type.base_type.is_struct):
error(self.pos, "Python objects cannot be cast to pointers of primitive types")
else:
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.type, self.operand.type))
elif from_py and to_py:
if self.typecheck:
self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
elif isinstance(self.operand, SliceIndexNode):
# This cast can influence the created type of string slices.
self.operand = self.operand.coerce_to(self.type, env)
elif self.type.is_complex and self.operand.type.is_complex:
self.operand = self.operand.coerce_to_simple(env)
elif self.operand.type.is_fused:
self.operand = self.operand.coerce_to(self.type, env)
#self.type = self.operand.type
return self
def is_simple(self):
# either temp or a C cast => no side effects other than the operand's
return self.operand.is_simple()
def is_ephemeral(self):
# either temp or a C cast => no side effects other than the operand's
return self.operand.is_ephemeral()
def nonlocally_immutable(self):
return self.is_temp or self.operand.nonlocally_immutable()
def nogil_check(self, env):
if self.type and self.type.is_pyobject and self.is_temp:
self.gil_error()
def check_const(self):
return self.operand.check_const()
def calculate_constant_result(self):
self.constant_result = self.calculate_result_code(self.operand.constant_result)
def calculate_result_code(self, operand_result = None):
if operand_result is None:
operand_result = self.operand.result()
if self.type.is_complex:
operand_result = self.operand.result()
if self.operand.type.is_complex:
real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
else:
real_part = self.type.real_type.cast_code(operand_result)
imag_part = "0"
return "%s(%s, %s)" % (
self.type.from_parts,
real_part,
imag_part)
else:
return self.type.cast_code(operand_result)
def get_constant_c_result_code(self):
operand_result = self.operand.get_constant_c_result_code()
if operand_result:
return self.type.cast_code(operand_result)
def result_as(self, type):
if self.type.is_pyobject and not self.is_temp:
# Optimise away some unnecessary casting
return self.operand.result_as(type)
else:
return ExprNode.result_as(self, type)
def generate_result_code(self, code):
if self.is_temp:
code.putln(
"%s = (PyObject *)%s;" % (
self.result(),
self.operand.result()))
code.put_incref(self.result(), self.ctype())
ERR_START = "Start may not be given"
ERR_NOT_STOP = "Stop must be provided to indicate shape"
ERR_STEPS = ("Strides may only be given to indicate contiguity. "
"Consider slicing it after conversion")
ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
class CythonArrayNode(ExprNode):
"""
Used when a pointer of base_type is cast to a memoryviewslice with that
base type. i.e.
<int[:M:1, :N]> p
creates a fortran-contiguous cython.array.
We leave the type set to object so coercions to object are more efficient
and less work. Acquiring a memoryviewslice from this will be just as
efficient. ExprNode.coerce_to() will do the additional typecheck on
self.compile_time_type
This also handles <int[:, :]> my_c_array
operand ExprNode the thing we're casting
base_type_node MemoryViewSliceTypeNode the cast expression node
"""
subexprs = ['operand', 'shapes']
shapes = None
is_temp = True
mode = "c"
array_dtype = None
shape_type = PyrexTypes.c_py_ssize_t_type
def analyse_types(self, env):
from . import MemoryView
self.operand = self.operand.analyse_types(env)
if self.array_dtype:
array_dtype = self.array_dtype
else:
array_dtype = self.base_type_node.base_type_node.analyse(env)
axes = self.base_type_node.axes
self.type = error_type
self.shapes = []
ndim = len(axes)
# Base type of the pointer or C array we are converting
base_type = self.operand.type
if not self.operand.type.is_ptr and not self.operand.type.is_array:
error(self.operand.pos, ERR_NOT_POINTER)
return self
# Dimension sizes of C array
array_dimension_sizes = []
if base_type.is_array:
while base_type.is_array:
array_dimension_sizes.append(base_type.size)
base_type = base_type.base_type
elif base_type.is_ptr:
base_type = base_type.base_type
else:
error(self.pos, "unexpected base type %s found" % base_type)
return self
if not (base_type.same_as(array_dtype) or base_type.is_void):
error(self.operand.pos, ERR_BASE_TYPE)
return self
elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
error(self.operand.pos,
"Expected %d dimensions, array has %d dimensions" %
(ndim, len(array_dimension_sizes)))
return self
# Verify the start, stop and step values
# In case of a C array, use the size of C array in each dimension to
# get an automatic cast
for axis_no, axis in enumerate(axes):
if not axis.start.is_none:
error(axis.start.pos, ERR_START)
return self
if axis.stop.is_none:
if array_dimension_sizes:
dimsize = array_dimension_sizes[axis_no]
axis.stop = IntNode(self.pos, value=str(dimsize),
constant_result=dimsize,
type=PyrexTypes.c_int_type)
else:
error(axis.pos, ERR_NOT_STOP)
return self
axis.stop = axis.stop.analyse_types(env)
shape = axis.stop.coerce_to(self.shape_type, env)
if not shape.is_literal:
shape.coerce_to_temp(env)
self.shapes.append(shape)
first_or_last = axis_no in (0, ndim - 1)
if not axis.step.is_none and first_or_last:
# '1' in the first or last dimension denotes F or C contiguity
axis.step = axis.step.analyse_types(env)
if (not axis.step.type.is_int and axis.step.is_literal and not
axis.step.type.is_error):
error(axis.step.pos, "Expected an integer literal")
return self
if axis.step.compile_time_value(env) != 1:
error(axis.step.pos, ERR_STEPS)
return self
if axis_no == 0:
self.mode = "fortran"
elif not axis.step.is_none and not first_or_last:
# step provided in some other dimension
error(axis.step.pos, ERR_STEPS)
return self
if not self.operand.is_name:
self.operand = self.operand.coerce_to_temp(env)
axes = [('direct', 'follow')] * len(axes)
if self.mode == "fortran":
axes[0] = ('direct', 'contig')
else:
axes[-1] = ('direct', 'contig')
self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
self.coercion_type.validate_memslice_dtype(self.pos)
self.type = self.get_cython_array_type(env)
MemoryView.use_cython_array_utility_code(env)
env.use_utility_code(MemoryView.typeinfo_to_format_code)
return self
def allocate_temp_result(self, code):
if self.temp_code:
raise RuntimeError("temp allocated multiple times")
self.temp_code = code.funcstate.allocate_temp(self.type, True)
def infer_type(self, env):
return self.get_cython_array_type(env)
def get_cython_array_type(self, env):
cython_scope = env.global_scope().context.cython_scope
cython_scope.load_cythonscope()
return cython_scope.viewscope.lookup("array").type
def generate_result_code(self, code):
from . import Buffer
shapes = [self.shape_type.cast_code(shape.result())
for shape in self.shapes]
dtype = self.coercion_type.dtype
shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
format_temp = code.funcstate.allocate_temp(py_object_type, True)
itemsize = "sizeof(%s)" % dtype.empty_declaration_code()
type_info = Buffer.get_type_information_cname(code, dtype)
if self.operand.type.is_ptr:
code.putln("if (!%s) {" % self.operand.result())
code.putln('PyErr_SetString(PyExc_ValueError,'
'"Cannot create cython.array from NULL pointer");')
code.putln(code.error_goto(self.operand.pos))
code.putln("}")
code.putln("%s = __pyx_format_from_typeinfo(&%s);" %
(format_temp, type_info))
buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s);' % (
shapes_temp, buildvalue_fmt, ", ".join(shapes)))
err = "!%s || !%s || !PyBytes_AsString(%s)" % (format_temp,
shapes_temp,
format_temp)
code.putln(code.error_goto_if(err, self.pos))
code.put_gotref(format_temp)
code.put_gotref(shapes_temp)
tup = (self.result(), shapes_temp, itemsize, format_temp,
self.mode, self.operand.result())
code.putln('%s = __pyx_array_new('
'%s, %s, PyBytes_AS_STRING(%s), '
'(char *) "%s", (char *) %s);' % tup)
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.result())
def dispose(temp):
code.put_decref_clear(temp, py_object_type)
code.funcstate.release_temp(temp)
dispose(shapes_temp)
dispose(format_temp)
@classmethod
def from_carray(cls, src_node, env):
"""
Given a C array type, return a CythonArrayNode
"""
pos = src_node.pos
base_type = src_node.type
none_node = NoneNode(pos)
axes = []
while base_type.is_array:
axes.append(SliceNode(pos, start=none_node, stop=none_node,
step=none_node))
base_type = base_type.base_type
axes[-1].step = IntNode(pos, value="1", is_c_literal=True)
memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
base_type_node=base_type)
result = CythonArrayNode(pos, base_type_node=memslicenode,
operand=src_node, array_dtype=base_type)
result = result.analyse_types(env)
return result
class SizeofNode(ExprNode):
# Abstract base class for sizeof(x) expression nodes.
type = PyrexTypes.c_size_t_type
def check_const(self):
return True
def generate_result_code(self, code):
pass
class SizeofTypeNode(SizeofNode):
# C sizeof function applied to a type
#
# base_type CBaseTypeNode
# declarator CDeclaratorNode
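# Handles anything whose argument parses as a type, e.g.
#     sizeof(int), sizeof(double*), sizeof(MyStruct)
# sizeof(some_variable) goes to SizeofVarNode instead, which switches to this
# class if the name turns out to denote a type.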
subexprs = []
arg_type = None
def analyse_types(self, env):
# we may have incorrectly interpreted a dotted name as a type rather than an attribute
# this could be better handled by more uniformly treating types as runtime-available objects
if 0 and self.base_type.module_path:
path = self.base_type.module_path
obj = env.lookup(path[0])
if obj.as_module is None:
operand = NameNode(pos=self.pos, name=path[0])
for attr in path[1:]:
operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
node = SizeofVarNode(self.pos, operand=operand).analyse_types(env)
return node
if self.arg_type is None:
base_type = self.base_type.analyse(env)
_, arg_type = self.declarator.analyse(base_type, env)
self.arg_type = arg_type
self.check_type()
return self
def check_type(self):
arg_type = self.arg_type
if not arg_type:
return
if arg_type.is_pyobject and not arg_type.is_extension_type:
error(self.pos, "Cannot take sizeof Python object")
elif arg_type.is_void:
error(self.pos, "Cannot take sizeof void")
elif not arg_type.is_complete():
error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
def calculate_result_code(self):
if self.arg_type.is_extension_type:
# the size of the pointer is boring
# we want the size of the actual struct
arg_code = self.arg_type.declaration_code("", deref=1)
else:
arg_code = self.arg_type.empty_declaration_code()
return "(sizeof(%s))" % arg_code
class SizeofVarNode(SizeofNode):
# C sizeof function applied to a variable
#
# operand ExprNode
subexprs = ['operand']
def analyse_types(self, env):
# We may actually be looking at a type rather than a variable...
# If we are, traditional analysis would fail...
operand_as_type = self.operand.analyse_as_type(env)
if operand_as_type:
self.arg_type = operand_as_type
if self.arg_type.is_fused:
self.arg_type = self.arg_type.specialize(env.fused_to_specific)
self.__class__ = SizeofTypeNode
self.check_type()
else:
self.operand = self.operand.analyse_types(env)
return self
def calculate_result_code(self):
return "(sizeof(%s))" % self.operand.result()
def generate_result_code(self, code):
pass
class TypeidNode(ExprNode):
# C++ typeid operator applied to a type or variable
#
# operand ExprNode
# arg_type ExprNode
# is_variable boolean
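# Illustrative usage (C++ mode; names are made up): requires
#     from libcpp.typeinfo cimport type_info
# after which both typeid(SomeCppClass) and typeid(some_instance) are accepted;
# analyse_types() below rejects Python objects, void and incomplete types.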
type = PyrexTypes.error_type
subexprs = ['operand']
arg_type = None
is_variable = None
is_temp = 1
def get_type_info_type(self, env):
env_module = env
while not env_module.is_module_scope:
env_module = env_module.outer_scope
typeinfo_module = env_module.find_module('libcpp.typeinfo', self.pos)
typeinfo_entry = typeinfo_module.lookup('type_info')
return PyrexTypes.CFakeReferenceType(PyrexTypes.c_const_type(typeinfo_entry.type))
def analyse_types(self, env):
type_info = self.get_type_info_type(env)
if not type_info:
self.error("The 'libcpp.typeinfo' module must be cimported to use the typeid() operator")
return self
self.type = type_info
as_type = self.operand.analyse_as_type(env)
if as_type:
self.arg_type = as_type
self.is_type = True
else:
self.arg_type = self.operand.analyse_types(env)
self.is_type = False
if self.arg_type.type.is_pyobject:
self.error("Cannot use typeid on a Python object")
return self
elif self.arg_type.type.is_void:
self.error("Cannot use typeid on void")
return self
elif not self.arg_type.type.is_complete():
self.error("Cannot use typeid on incomplete type '%s'" % self.arg_type.type)
return self
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
return self
def error(self, mess):
error(self.pos, mess)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
def check_const(self):
return True
def calculate_result_code(self):
return self.temp_code
def generate_result_code(self, code):
if self.is_type:
arg_code = self.arg_type.empty_declaration_code()
else:
arg_code = self.arg_type.result()
translate_cpp_exception(code, self.pos,
"%s = typeid(%s);" % (self.temp_code, arg_code),
None, self.in_nogil_context)
class TypeofNode(ExprNode):
# Compile-time type of an expression, as a string.
#
# operand ExprNode
# literal StringNode # internal
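# Illustrative example (made-up name): cython.typeof(x) folds to the string form
# of x's static type, e.g. 'int'; only the cached string literal survives into
# the generated code, the operand itself is dropped after type analysis.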
literal = None
type = py_object_type
subexprs = ['literal'] # 'operand' will be ignored after type analysis!
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
value = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name())
literal = StringNode(self.pos, value=value)
literal = literal.analyse_types(env)
self.literal = literal.coerce_to_pyobject(env)
return self
def analyse_as_type(self, env):
self.operand = self.operand.analyse_types(env)
return self.operand.type
def may_be_none(self):
return False
def generate_evaluation_code(self, code):
self.literal.generate_evaluation_code(code)
def calculate_result_code(self):
return self.literal.calculate_result_code()
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
try:
matmul_operator = operator.matmul
except AttributeError:
def matmul_operator(a, b):
try:
func = a.__matmul__
except AttributeError:
func = b.__rmatmul__
return func(a, b)
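# The fallback above only matters for compile-time constant folding of '@' when
# Cython itself runs on a Python without operator.matmul (< 3.5); generated code
# uses __Pyx_PyNumber_MatrixMultiply instead (see py_functions further down).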
compile_time_binary_operators = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
'is': operator.is_,
'is_not': operator.is_not,
'+': operator.add,
'&': operator.and_,
'/': operator.truediv,
'//': operator.floordiv,
'<<': operator.lshift,
'%': operator.mod,
'*': operator.mul,
'|': operator.or_,
'**': operator.pow,
'>>': operator.rshift,
'-': operator.sub,
'^': operator.xor,
'@': matmul_operator,
'in': lambda x, seq: x in seq,
'not_in': lambda x, seq: x not in seq,
}
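# Illustrative use (made-up names): this table backs compile-time (DEF) constant
# folding, e.g.
#     DEF BUF_SIZE = 4 * 1024         # folded via operator.mul
#     DEF IS_BIG = BUF_SIZE > 1000    # folded via operator.gt
# get_compile_time_binop() reports a compile error for unsupported operators.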
def get_compile_time_binop(node):
func = compile_time_binary_operators.get(node.operator)
if not func:
error(node.pos,
"Binary '%s' not supported in compile-time expression"
% node.operator)
return func
class BinopNode(ExprNode):
# operator string
# operand1 ExprNode
# operand2 ExprNode
#
# Processing during analyse_expressions phase:
#
# analyse_c_operation
# Called when neither operand is a pyobject.
# - Check operand types and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
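# Dispatch order in analyse_operation() below: Pythran array expressions first,
# then Python-object operations (via the C-API helpers in py_functions), then
# C++ operator overloads, and finally plain C arithmetic.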
subexprs = ['operand1', 'operand2']
inplace = False
def calculate_constant_result(self):
func = compile_time_binary_operators[self.operator]
self.constant_result = func(
self.operand1.constant_result,
self.operand2.constant_result)
def compile_time_value(self, denv):
func = get_compile_time_binop(self)
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
try:
return func(operand1, operand2)
except Exception as e:
self.compile_time_value_error(e)
def infer_type(self, env):
return self.result_type(self.operand1.infer_type(env),
self.operand2.infer_type(env), env)
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
self.operand2 = self.operand2.analyse_types(env)
self.analyse_operation(env)
return self
def analyse_operation(self, env):
if self.is_pythran_operation(env):
self.type = self.result_type(self.operand1.type,
self.operand2.type, env)
assert self.type.is_pythran_expr
self.is_temp = 1
elif self.is_py_operation():
self.coerce_operands_to_pyobjects(env)
self.type = self.result_type(self.operand1.type,
self.operand2.type, env)
assert self.type.is_pyobject
self.is_temp = 1
elif self.is_cpp_operation():
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
def is_py_operation(self):
return self.is_py_operation_types(self.operand1.type, self.operand2.type)
def is_py_operation_types(self, type1, type2):
return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple
def is_pythran_operation(self, env):
return self.is_pythran_operation_types(self.operand1.type, self.operand2.type, env)
def is_pythran_operation_types(self, type1, type2, env):
# Support only expr op supported_type, or supported_type op expr
return has_np_pythran(env) and \
(is_pythran_supported_operation_type(type1) and is_pythran_supported_operation_type(type2)) and \
(is_pythran_expr(type1) or is_pythran_expr(type2))
def is_cpp_operation(self):
return (self.operand1.type.is_cpp_class
or self.operand2.type.is_cpp_class)
def analyse_cpp_operation(self, env):
entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
if not entry:
self.type_error()
return
func_type = entry.type
self.exception_check = func_type.exception_check
self.exception_value = func_type.exception_value
if self.exception_check == '+':
# Used by NumBinopNodes to break up expressions involving multiple
# operators so that exceptions can be handled properly.
self.is_temp = 1
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
if func_type.is_ptr:
func_type = func_type.base_type
if len(func_type.args) == 1:
self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
else:
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
def result_type(self, type1, type2, env):
if self.is_pythran_operation_types(type1, type2, env):
return PythranExpr(pythran_binop_type(self.operator, type1, type2))
if self.is_py_operation_types(type1, type2):
if type2.is_string:
type2 = Builtin.bytes_type
elif type2.is_pyunicode_ptr:
type2 = Builtin.unicode_type
if type1.is_string:
type1 = Builtin.bytes_type
elif type1.is_pyunicode_ptr:
type1 = Builtin.unicode_type
if type1.is_builtin_type or type2.is_builtin_type:
if type1 is type2 and self.operator in '**%+|&^':
# FIXME: at least these operators should be safe - others?
return type1
result_type = self.infer_builtin_types_operation(type1, type2)
if result_type is not None:
return result_type
return py_object_type
elif type1.is_error or type2.is_error:
return PyrexTypes.error_type
else:
return self.compute_c_result_type(type1, type2)
def infer_builtin_types_operation(self, type1, type2):
return None
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
def coerce_operands_to_pyobjects(self, env):
self.operand1 = self.operand1.coerce_to_pyobject(env)
self.operand2 = self.operand2.coerce_to_pyobject(env)
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
def is_ephemeral(self):
return (super(BinopNode, self).is_ephemeral() or
self.operand1.is_ephemeral() or self.operand2.is_ephemeral())
def generate_result_code(self, code):
if self.type.is_pythran_expr:
code.putln("// Pythran binop")
code.putln("__Pyx_call_destructor(%s);" % self.result())
code.putln("new (&%s) decltype(%s){%s %s %s};" % (
self.result(),
self.result(),
self.operand1.pythran_result(),
self.operator,
self.operand2.pythran_result()))
elif self.operand1.type.is_pyobject:
function = self.py_operation_function(code)
if self.operator == '**':
extra_args = ", Py_None"
else:
extra_args = ""
code.putln(
"%s = %s(%s, %s%s); %s" % (
self.result(),
function,
self.operand1.py_result(),
self.operand2.py_result(),
extra_args,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif self.is_temp:
# C++ overloaded operators with exception values are currently all
# handled through temporaries.
if self.is_cpp_operation() and self.exception_check == '+':
translate_cpp_exception(code, self.pos,
"%s = %s;" % (self.result(), self.calculate_result_code()),
self.exception_value, self.in_nogil_context)
else:
code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))
def type_error(self):
if not (self.operand1.type.is_error
or self.operand2.type.is_error):
error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
(self.operator, self.operand1.type,
self.operand2.type))
self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
def analyse_types(self, env):
node = BinopNode.analyse_types(self, env)
if node.is_py_operation():
node.type = PyrexTypes.error_type
return node
def py_operation_function(self, code):
return ""
def calculate_result_code(self):
return "(%s %s %s)" % (
self.operand1.result(),
self.operator,
self.operand2.result())
def compute_c_result_type(self, type1, type2):
cpp_type = None
if type1.is_cpp_class or type1.is_ptr:
cpp_type = type1.find_cpp_operation_type(self.operator, type2)
# FIXME: handle the reversed case?
#if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
# cpp_type = type2.find_cpp_operation_type(self.operator, type1)
# FIXME: do we need to handle other cases here?
return cpp_type
def c_binop_constructor(operator):
def make_binop_node(pos, **operands):
return CBinopNode(pos, operator=operator, **operands)
return make_binop_node
class NumBinopNode(BinopNode):
# Binary operation taking numeric arguments.
infix = True
overflow_check = False
overflow_bit_node = None
def analyse_c_operation(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
self.type = self.compute_c_result_type(type1, type2)
if not self.type:
self.type_error()
return
if self.type.is_complex:
self.infix = False
if (self.type.is_int
and env.directives['overflowcheck']
and self.operator in self.overflow_op_names):
if (self.operator in ('+', '*')
and self.operand1.has_constant_result()
and not self.operand2.has_constant_result()):
self.operand1, self.operand2 = self.operand2, self.operand1
self.overflow_check = True
self.overflow_fold = env.directives['overflowcheck.fold']
self.func = self.type.overflow_check_binop(
self.overflow_op_names[self.operator],
env,
const_rhs = self.operand2.has_constant_result())
self.is_temp = True
if not self.infix or (type1.is_numeric and type2.is_numeric):
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
def compute_c_result_type(self, type1, type2):
if self.c_types_okay(type1, type2):
widest_type = PyrexTypes.widest_numeric_type(type1, type2)
if widest_type is PyrexTypes.c_bint_type:
if self.operator not in '|^&':
# False + False == 0 # not False!
widest_type = PyrexTypes.c_int_type
else:
widest_type = PyrexTypes.widest_numeric_type(
widest_type, PyrexTypes.c_int_type)
return widest_type
else:
return None
def may_be_none(self):
if self.type and self.type.is_builtin_type:
# if we know the result type, we know the operation, so it can't be None
return False
type1 = self.operand1.type
type2 = self.operand2.type
if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
# XXX: I can't think of any case where a binary operation
# on builtin types evaluates to None - add a special case
# here if there is one.
return False
return super(NumBinopNode, self).may_be_none()
def get_constant_c_result_code(self):
value1 = self.operand1.get_constant_c_result_code()
value2 = self.operand2.get_constant_c_result_code()
if value1 and value2:
return "(%s %s %s)" % (value1, self.operator, value2)
else:
return None
def c_types_okay(self, type1, type2):
#print "NumBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_numeric or type1.is_enum) \
and (type2.is_numeric or type2.is_enum)
def generate_evaluation_code(self, code):
if self.overflow_check:
self.overflow_bit_node = self
self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = 0;" % self.overflow_bit)
super(NumBinopNode, self).generate_evaluation_code(code)
if self.overflow_check:
code.putln("if (unlikely(%s)) {" % self.overflow_bit)
code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
code.putln(code.error_goto(self.pos))
code.putln("}")
code.funcstate.release_temp(self.overflow_bit)
def calculate_result_code(self):
if self.overflow_bit_node is not None:
return "%s(%s, %s, &%s)" % (
self.func,
self.operand1.result(),
self.operand2.result(),
self.overflow_bit_node.overflow_bit)
elif self.type.is_cpp_class or self.infix:
if is_pythran_expr(self.type):
result1, result2 = self.operand1.pythran_result(), self.operand2.pythran_result()
else:
result1, result2 = self.operand1.result(), self.operand2.result()
return "(%s %s %s)" % (result1, self.operator, result2)
else:
func = self.type.binary_op(self.operator)
if func is None:
error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
return "%s(%s, %s)" % (
func,
self.operand1.result(),
self.operand2.result())
def is_py_operation_types(self, type1, type2):
return (type1.is_unicode_char or
type2.is_unicode_char or
BinopNode.is_py_operation_types(self, type1, type2))
def py_operation_function(self, code):
function_name = self.py_functions[self.operator]
if self.inplace:
function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
return function_name
py_functions = {
"|": "PyNumber_Or",
"^": "PyNumber_Xor",
"&": "PyNumber_And",
"<<": "PyNumber_Lshift",
">>": "PyNumber_Rshift",
"+": "PyNumber_Add",
"-": "PyNumber_Subtract",
"*": "PyNumber_Multiply",
"@": "__Pyx_PyNumber_MatrixMultiply",
"/": "__Pyx_PyNumber_Divide",
"//": "PyNumber_FloorDivide",
"%": "PyNumber_Remainder",
"**": "PyNumber_Power",
}
overflow_op_names = {
"+": "add",
"-": "sub",
"*": "mul",
"<<": "lshift",
}
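# Only '+', '-', '*' and '<<' get checked variants, and only when requested,
# e.g. via the directive comment
#     # cython: overflowcheck=True
# (optionally overflowcheck.fold); the helper name itself comes from
# type.overflow_check_binop() above, not from this table.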
class IntBinopNode(NumBinopNode):
# Binary operation taking integer arguments.
def c_types_okay(self, type1, type2):
#print "IntBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_int or type1.is_enum) \
and (type2.is_int or type2.is_enum)
class AddNode(NumBinopNode):
# '+' operator.
def is_py_operation_types(self, type1, type2):
if type1.is_string and type2.is_string or type1.is_pyunicode_ptr and type2.is_pyunicode_ptr:
return 1
else:
return NumBinopNode.is_py_operation_types(self, type1, type2)
def infer_builtin_types_operation(self, type1, type2):
# b'abc' + 'abc' raises an exception in Py3,
# so we can safely infer the Py2 type for bytes here
string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2 in string_types:
return string_types[max(string_types.index(type1),
string_types.index(type2))]
return None
def compute_c_result_type(self, type1, type2):
#print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
return type1
elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum):
return type2
else:
return NumBinopNode.compute_c_result_type(
self, type1, type2)
def py_operation_function(self, code):
is_unicode_concat = False
if isinstance(self.operand1, FormattedValueNode) or isinstance(self.operand2, FormattedValueNode):
is_unicode_concat = True
else:
type1, type2 = self.operand1.type, self.operand2.type
if type1 is unicode_type or type2 is unicode_type:
is_unicode_concat = type1.is_builtin_type and type2.is_builtin_type
if is_unicode_concat:
if self.operand1.may_be_none() or self.operand2.may_be_none():
return '__Pyx_PyUnicode_ConcatSafe'
else:
return '__Pyx_PyUnicode_Concat'
return super(AddNode, self).py_operation_function(code)
class SubNode(NumBinopNode):
# '-' operator.
def compute_c_result_type(self, type1, type2):
if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
return type1
elif (type1.is_ptr or type1.is_array) and (type2.is_ptr or type2.is_array):
return PyrexTypes.c_ptrdiff_t_type
else:
return NumBinopNode.compute_c_result_type(
self, type1, type2)
class MulNode(NumBinopNode):
# '*' operator.
def is_py_operation_types(self, type1, type2):
if ((type1.is_string and type2.is_int) or
(type2.is_string and type1.is_int)):
return 1
else:
return NumBinopNode.is_py_operation_types(self, type1, type2)
def infer_builtin_types_operation(self, type1, type2):
# let's assume that whatever builtin type you multiply a string with
# will either return a string of the same type or fail with an exception
string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2.is_builtin_type:
return type1
if type2 in string_types and type1.is_builtin_type:
return type2
# multiplication of containers/numbers with an integer value
# always (?) returns the same type
if type1.is_int:
return type2
if type2.is_int:
return type1
return None
class MatMultNode(NumBinopNode):
# '@' operator.
def is_py_operation_types(self, type1, type2):
return True
def generate_evaluation_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("MatrixMultiply", "ObjectHandling.c"))
super(MatMultNode, self).generate_evaluation_code(code)
class DivNode(NumBinopNode):
# '/' or '//' operator.
cdivision = None
truedivision = None # == "unknown" if operator == '/'
ctruedivision = False
cdivision_warnings = False
zerodivision_check = None
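# Python-semantics division (negative rounding, ZeroDivisionError) is the default
# for C operands; users opt out per module or function, e.g.
#     # cython: cdivision=True
# or @cython.cdivision(True), which is what the env.directives['cdivision']
# checks below react to.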
def find_compile_time_binary_operator(self, op1, op2):
func = compile_time_binary_operators[self.operator]
if self.operator == '/' and self.truedivision is None:
# => true div for floats, floor div for integers
if isinstance(op1, _py_int_types) and isinstance(op2, _py_int_types):
func = compile_time_binary_operators['//']
return func
def calculate_constant_result(self):
op1 = self.operand1.constant_result
op2 = self.operand2.constant_result
func = self.find_compile_time_binary_operator(op1, op2)
self.constant_result = func(
self.operand1.constant_result,
self.operand2.constant_result)
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
try:
func = self.find_compile_time_binary_operator(
operand1, operand2)
return func(operand1, operand2)
except Exception as e:
self.compile_time_value_error(e)
def _check_truedivision(self, env):
if self.cdivision or env.directives['cdivision']:
self.ctruedivision = False
else:
self.ctruedivision = self.truedivision
def infer_type(self, env):
self._check_truedivision(env)
return self.result_type(
self.operand1.infer_type(env),
self.operand2.infer_type(env), env)
def analyse_operation(self, env):
self._check_truedivision(env)
NumBinopNode.analyse_operation(self, env)
if self.is_cpp_operation():
self.cdivision = True
if not self.type.is_pyobject:
self.zerodivision_check = (
self.cdivision is None and not env.directives['cdivision']
and (not self.operand2.has_constant_result() or
self.operand2.constant_result == 0))
if self.zerodivision_check or env.directives['cdivision_warnings']:
# Need to check ahead of time to warn or raise zero division error
self.operand1 = self.operand1.coerce_to_simple(env)
self.operand2 = self.operand2.coerce_to_simple(env)
def compute_c_result_type(self, type1, type2):
if self.operator == '/' and self.ctruedivision:
if not type1.is_float and not type2.is_float:
widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
return widest_type
return NumBinopNode.compute_c_result_type(self, type1, type2)
def zero_division_message(self):
if self.type.is_int:
return "integer division or modulo by zero"
else:
return "float division"
def generate_evaluation_code(self, code):
if not self.type.is_pyobject and not self.type.is_complex:
if self.cdivision is None:
self.cdivision = (code.globalstate.directives['cdivision']
or not self.type.signed
or self.type.is_float)
if not self.cdivision:
code.globalstate.use_utility_code(
UtilityCode.load_cached("DivInt", "CMath.c").specialize(self.type))
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
def generate_div_warning_code(self, code):
in_nogil = self.in_nogil_context
if not self.type.is_pyobject:
if self.zerodivision_check:
if not self.infix:
zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
else:
zero_test = "%s == 0" % self.operand2.result()
code.putln("if (unlikely(%s)) {" % zero_test)
if in_nogil:
code.put_ensure_gil()
code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
if in_nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
if self.type.is_int and self.type.signed and self.operator != '%':
code.globalstate.use_utility_code(UtilityCode.load_cached("UnaryNegOverflows", "Overflow.c"))
if self.operand2.type.signed == 2:
# explicitly signed, no runtime check needed
minus1_check = 'unlikely(%s == -1)' % self.operand2.result()
else:
type_of_op2 = self.operand2.type.empty_declaration_code()
minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (
type_of_op2, self.operand2.result(), type_of_op2)
code.putln("else if (sizeof(%s) == sizeof(long) && %s "
" && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
self.type.empty_declaration_code(),
minus1_check,
self.operand1.result()))
if in_nogil:
code.put_ensure_gil()
code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");')
if in_nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
code.globalstate.use_utility_code(
UtilityCode.load_cached("CDivisionWarning", "CMath.c"))
code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
self.operand1.result(),
self.operand2.result()))
warning_code = "__Pyx_cdivision_warning(%(FILENAME)s, %(LINENO)s)" % {
'FILENAME': Naming.filename_cname,
'LINENO': Naming.lineno_cname,
}
if in_nogil:
result_code = 'result'
code.putln("int %s;" % result_code)
code.put_ensure_gil()
code.putln(code.set_error_info(self.pos, used=True))
code.putln("%s = %s;" % (result_code, warning_code))
code.put_release_ensured_gil()
else:
result_code = warning_code
code.putln(code.set_error_info(self.pos, used=True))
code.put("if (unlikely(%s)) " % result_code)
code.put_goto(code.error_label)
code.putln("}")
def calculate_result_code(self):
if self.type.is_complex:
return NumBinopNode.calculate_result_code(self)
elif self.type.is_float and self.operator == '//':
return "floor(%s / %s)" % (
self.operand1.result(),
self.operand2.result())
elif self.truedivision or self.cdivision:
op1 = self.operand1.result()
op2 = self.operand2.result()
if self.truedivision:
if self.type != self.operand1.type:
op1 = self.type.cast_code(op1)
if self.type != self.operand2.type:
op2 = self.type.cast_code(op2)
return "(%s / %s)" % (op1, op2)
else:
return "__Pyx_div_%s(%s, %s)" % (
self.type.specialization_name(),
self.operand1.result(),
self.operand2.result())
class ModNode(DivNode):
# '%' operator.
def is_py_operation_types(self, type1, type2):
return (type1.is_string
or type2.is_string
or NumBinopNode.is_py_operation_types(self, type1, type2))
def infer_builtin_types_operation(self, type1, type2):
# b'%s' % xyz raises an exception in Py3, so it's safe to infer the type for Py2
if type1 is unicode_type:
# None % xyz may be implemented by RHS
if type2.is_builtin_type or not self.operand1.may_be_none():
return type1
elif type1 in (bytes_type, str_type, basestring_type):
if type2 is unicode_type:
return type2
elif type2.is_numeric:
return type1
elif type1 is bytes_type and not type2.is_builtin_type:
return None # RHS might implement '%' operator differently in Py3
else:
return basestring_type # either str or unicode, can't tell
return None
def zero_division_message(self):
if self.type.is_int:
return "integer division or modulo by zero"
else:
return "float divmod()"
def analyse_operation(self, env):
DivNode.analyse_operation(self, env)
if not self.type.is_pyobject:
if self.cdivision is None:
self.cdivision = env.directives['cdivision'] or not self.type.signed
if not self.cdivision and not self.type.is_int and not self.type.is_float:
error(self.pos, "mod operator not supported for type '%s'" % self.type)
def generate_evaluation_code(self, code):
if not self.type.is_pyobject and not self.cdivision:
if self.type.is_int:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ModInt", "CMath.c").specialize(self.type))
else: # float
code.globalstate.use_utility_code(
UtilityCode.load_cached("ModFloat", "CMath.c").specialize(
self.type, math_h_modifier=self.type.math_h_modifier))
# NOTE: skipping over DivNode here
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
def calculate_result_code(self):
if self.cdivision:
if self.type.is_float:
return "fmod%s(%s, %s)" % (
self.type.math_h_modifier,
self.operand1.result(),
self.operand2.result())
else:
return "(%s %% %s)" % (
self.operand1.result(),
self.operand2.result())
else:
return "__Pyx_mod_%s(%s, %s)" % (
self.type.specialization_name(),
self.operand1.result(),
self.operand2.result())
def py_operation_function(self, code):
if self.operand1.type is unicode_type:
if self.operand1.may_be_none():
return '__Pyx_PyUnicode_FormatSafe'
else:
return 'PyUnicode_Format'
elif self.operand1.type is str_type:
if self.operand1.may_be_none():
return '__Pyx_PyString_FormatSafe'
else:
return '__Pyx_PyString_Format'
return super(ModNode, self).py_operation_function(code)
class PowNode(NumBinopNode):
# '**' operator.
def analyse_c_operation(self, env):
NumBinopNode.analyse_c_operation(self, env)
if self.type.is_complex:
if self.type.real_type.is_float:
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
self.pow_func = self.type.binary_op('**')
else:
error(self.pos, "complex int powers not supported")
self.pow_func = "<error>"
elif self.type.is_float:
self.pow_func = "pow" + self.type.math_h_modifier
elif self.type.is_int:
self.pow_func = "__Pyx_pow_%s" % self.type.empty_declaration_code().replace(' ', '_')
env.use_utility_code(
UtilityCode.load_cached("IntPow", "CMath.c").specialize(
func_name=self.pow_func,
type=self.type.empty_declaration_code(),
signed=self.type.signed and 1 or 0))
elif not self.type.is_error:
error(self.pos, "got unexpected types for C power operator: %s, %s" %
(self.operand1.type, self.operand2.type))
def calculate_result_code(self):
# Work around MSVC overloading ambiguity.
def typecast(operand):
if self.type == operand.type:
return operand.result()
else:
return self.type.cast_code(operand.result())
return "%s(%s, %s)" % (
self.pow_func,
typecast(self.operand1),
typecast(self.operand2))
def py_operation_function(self, code):
if (self.type.is_pyobject and
self.operand1.constant_result == 2 and
isinstance(self.operand1.constant_result, _py_int_types) and
self.operand2.type is py_object_type):
code.globalstate.use_utility_code(UtilityCode.load_cached('PyNumberPow2', 'Optimize.c'))
if self.inplace:
return '__Pyx_PyNumber_InPlacePowerOf2'
else:
return '__Pyx_PyNumber_PowerOf2'
return super(PowNode, self).py_operation_function(code)
class BoolBinopNode(ExprNode):
"""
Short-circuiting boolean operation.
Note that this node provides the same code generation method as
BoolBinopResultNode to simplify expression nesting.
operator string "and"/"or"
operand1 BoolBinopNode/BoolBinopResultNode left operand
operand2 BoolBinopNode/BoolBinopResultNode right operand
"""
subexprs = ['operand1', 'operand2']
is_temp = True
operator = None
operand1 = None
operand2 = None
def infer_type(self, env):
type1 = self.operand1.infer_type(env)
type2 = self.operand2.infer_type(env)
return PyrexTypes.independent_spanning_type(type1, type2)
def may_be_none(self):
if self.operator == 'or':
return self.operand2.may_be_none()
else:
return self.operand1.may_be_none() or self.operand2.may_be_none()
def calculate_constant_result(self):
operand1 = self.operand1.constant_result
operand2 = self.operand2.constant_result
if self.operator == 'and':
self.constant_result = operand1 and operand2
else:
self.constant_result = operand1 or operand2
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
if self.operator == 'and':
return operand1 and operand2
else:
return operand1 or operand2
def is_ephemeral(self):
return self.operand1.is_ephemeral() or self.operand2.is_ephemeral()
def analyse_types(self, env):
# Note: we do not do any coercion here as we most likely do not know the final type anyway.
# We even accept setting self.type to ErrorType when the two operands have no spanning type.
# The coercion to the final type and to a "simple" value is left to coerce_to().
operand1 = self.operand1.analyse_types(env)
operand2 = self.operand2.analyse_types(env)
self.type = PyrexTypes.independent_spanning_type(
operand1.type, operand2.type)
self.operand1 = self._wrap_operand(operand1, env)
self.operand2 = self._wrap_operand(operand2, env)
return self
def _wrap_operand(self, operand, env):
if not isinstance(operand, (BoolBinopNode, BoolBinopResultNode)):
operand = BoolBinopResultNode(operand, self.type, env)
return operand
def wrap_operands(self, env):
"""
Must get called by transforms that want to create a correct BoolBinopNode
after the type analysis phase.
"""
self.operand1 = self._wrap_operand(self.operand1, env)
self.operand2 = self._wrap_operand(self.operand2, env)
def coerce_to_boolean(self, env):
return self.coerce_to(PyrexTypes.c_bint_type, env)
def coerce_to(self, dst_type, env):
operand1 = self.operand1.coerce_to(dst_type, env)
operand2 = self.operand2.coerce_to(dst_type, env)
return BoolBinopNode.from_node(
self, type=dst_type,
operator=self.operator,
operand1=operand1, operand2=operand2)
def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label, fall_through):
code.mark_pos(self.pos)
outer_labels = (and_label, or_label)
if self.operator == 'and':
my_label = and_label = code.new_label('next_and')
else:
my_label = or_label = code.new_label('next_or')
self.operand1.generate_bool_evaluation_code(
code, final_result_temp, and_label, or_label, end_label, my_label)
and_label, or_label = outer_labels
code.put_label(my_label)
self.operand2.generate_bool_evaluation_code(
code, final_result_temp, and_label, or_label, end_label, fall_through)
def generate_evaluation_code(self, code):
self.allocate_temp_result(code)
or_label = and_label = None
end_label = code.new_label('bool_binop_done')
self.generate_bool_evaluation_code(code, self.result(), and_label, or_label, end_label, end_label)
code.put_label(end_label)
gil_message = "Truth-testing Python object"
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
def generate_subexpr_disposal_code(self, code):
pass # nothing to do here, all done in generate_evaluation_code()
def free_subexpr_temps(self, code):
pass # nothing to do here, all done in generate_evaluation_code()
def generate_operand1_test(self, code):
# Generate code to test the truth of the first operand.
if self.type.is_pyobject:
test_result = code.funcstate.allocate_temp(
PyrexTypes.c_bint_type, manage_ref=False)
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
test_result,
self.operand1.py_result(),
code.error_goto_if_neg(test_result, self.pos)))
else:
test_result = self.operand1.result()
return (test_result, self.type.is_pyobject)
class BoolBinopResultNode(ExprNode):
"""
Intermediate result of a short-circuiting and/or expression.
Tests the result for 'truthiness' and takes care of coercing the final result
of the overall expression to the target type.
Note that this node provides the same code generation method as
BoolBinopNode to simplify expression nesting.
arg ExprNode the argument to test
value ExprNode the coerced result value node
"""
subexprs = ['arg', 'value']
is_temp = True
arg = None
value = None
def __init__(self, arg, result_type, env):
# using 'arg' multiple times, so it must be a simple/temp value
arg = arg.coerce_to_simple(env)
# wrap in ProxyNode, in case a transform wants to replace self.arg later
arg = ProxyNode(arg)
super(BoolBinopResultNode, self).__init__(
arg.pos, arg=arg, type=result_type,
value=CloneNode(arg).coerce_to(result_type, env))
def coerce_to_boolean(self, env):
return self.coerce_to(PyrexTypes.c_bint_type, env)
def coerce_to(self, dst_type, env):
# unwrap, coerce, rewrap
arg = self.arg.arg
if dst_type is PyrexTypes.c_bint_type:
arg = arg.coerce_to_boolean(env)
# TODO: unwrap more coercion nodes?
return BoolBinopResultNode(arg, dst_type, env)
def nogil_check(self, env):
# let's leave all errors to BoolBinopNode
pass
def generate_operand_test(self, code):
# Generate code to test the truth of the first operand.
if self.arg.type.is_pyobject:
test_result = code.funcstate.allocate_temp(
PyrexTypes.c_bint_type, manage_ref=False)
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
test_result,
self.arg.py_result(),
code.error_goto_if_neg(test_result, self.pos)))
else:
test_result = self.arg.result()
return (test_result, self.arg.type.is_pyobject)
def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label, fall_through):
code.mark_pos(self.pos)
# x => x
# x and ... or ... => next 'and' / 'or'
# False ... or x => next 'or'
# True and x => next 'and'
# True or x => True (operand)
self.arg.generate_evaluation_code(code)
if and_label or or_label:
test_result, uses_temp = self.generate_operand_test(code)
if uses_temp and (and_label and or_label):
# cannot become final result => free early
# disposal: uses_temp and (and_label and or_label)
self.arg.generate_disposal_code(code)
sense = '!' if or_label else ''
code.putln("if (%s%s) {" % (sense, test_result))
if uses_temp:
code.funcstate.release_temp(test_result)
if not uses_temp or not (and_label and or_label):
# disposal: (not uses_temp) or {not (and_label and or_label) [if]}
self.arg.generate_disposal_code(code)
if or_label and or_label != fall_through:
# value is false => short-circuit to next 'or'
code.put_goto(or_label)
if and_label:
# value is true => go to next 'and'
if or_label:
code.putln("} else {")
if not uses_temp:
# disposal: (not uses_temp) and {(and_label and or_label) [else]}
self.arg.generate_disposal_code(code)
if and_label != fall_through:
code.put_goto(and_label)
if not and_label or not or_label:
# if no next 'and' or 'or', we provide the result
if and_label or or_label:
code.putln("} else {")
self.value.generate_evaluation_code(code)
self.value.make_owned_reference(code)
code.putln("%s = %s;" % (final_result_temp, self.value.result()))
self.value.generate_post_assignment_code(code)
# disposal: {not (and_label and or_label) [else]}
self.arg.generate_disposal_code(code)
self.value.free_temps(code)
if end_label != fall_through:
code.put_goto(end_label)
if and_label or or_label:
code.putln("}")
self.arg.free_temps(code)
class CondExprNode(ExprNode):
# Short-circuiting conditional expression.
#
# test ExprNode
# true_val ExprNode
# false_val ExprNode
true_val = None
false_val = None
subexprs = ['test', 'true_val', 'false_val']
def type_dependencies(self, env):
return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
def infer_type(self, env):
return PyrexTypes.independent_spanning_type(
self.true_val.infer_type(env),
self.false_val.infer_type(env))
def calculate_constant_result(self):
if self.test.constant_result:
self.constant_result = self.true_val.constant_result
else:
self.constant_result = self.false_val.constant_result
def is_ephemeral(self):
return self.true_val.is_ephemeral() or self.false_val.is_ephemeral()
def analyse_types(self, env):
self.test = self.test.analyse_types(env).coerce_to_boolean(env)
self.true_val = self.true_val.analyse_types(env)
self.false_val = self.false_val.analyse_types(env)
self.is_temp = 1
return self.analyse_result_type(env)
def analyse_result_type(self, env):
self.type = PyrexTypes.independent_spanning_type(
self.true_val.type, self.false_val.type)
if self.type.is_reference:
self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
if self.type.is_pyobject:
self.result_ctype = py_object_type
elif self.true_val.is_ephemeral() or self.false_val.is_ephemeral():
error(self.pos, "Unsafe C derivative of temporary Python reference used in conditional expression")
if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
self.true_val = self.true_val.coerce_to(self.type, env)
self.false_val = self.false_val.coerce_to(self.type, env)
if self.type.is_error:
self.type_error()
return self
def coerce_to_integer(self, env):
self.true_val = self.true_val.coerce_to_integer(env)
self.false_val = self.false_val.coerce_to_integer(env)
self.result_ctype = None
return self.analyse_result_type(env)
def coerce_to(self, dst_type, env):
self.true_val = self.true_val.coerce_to(dst_type, env)
self.false_val = self.false_val.coerce_to(dst_type, env)
self.result_ctype = None
return self.analyse_result_type(env)
def type_error(self):
if not (self.true_val.type.is_error or self.false_val.type.is_error):
error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
(self.true_val.type, self.false_val.type))
self.type = PyrexTypes.error_type
def check_const(self):
return (self.test.check_const()
and self.true_val.check_const()
and self.false_val.check_const())
def generate_evaluation_code(self, code):
# Because subexprs may not be evaluated we can use a more optimal
# subexpr allocation strategy than the default, so override evaluation_code.
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.test.generate_evaluation_code(code)
code.putln("if (%s) {" % self.test.result())
self.eval_and_get(code, self.true_val)
code.putln("} else {")
self.eval_and_get(code, self.false_val)
code.putln("}")
self.test.generate_disposal_code(code)
self.test.free_temps(code)
def eval_and_get(self, code, expr):
expr.generate_evaluation_code(code)
if self.type.is_memoryviewslice:
expr.make_owned_memoryviewslice(code)
else:
expr.make_owned_reference(code)
code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
expr.generate_post_assignment_code(code)
expr.free_temps(code)
def generate_subexpr_disposal_code(self, code):
pass # done explicitly above (cleanup must separately happen within the if/else blocks)
def free_subexpr_temps(self, code):
pass # done explicitly above (cleanup must separately happen within the if/else blocks)
richcmp_constants = {
"<" : "Py_LT",
"<=": "Py_LE",
"==": "Py_EQ",
"!=": "Py_NE",
"<>": "Py_NE",
">" : "Py_GT",
">=": "Py_GE",
# the following are faked by special compare functions
"in" : "Py_EQ",
"not_in": "Py_NE",
}
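# These map comparison operators onto the opid argument of PyObject_RichCompare
# (see generate_operation_code below); 'in'/'not_in' only borrow Py_EQ/Py_NE so
# the special __Pyx_*ContainsTF helpers can reuse the same code path.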
class CmpNode(object):
# Mixin class containing code common to PrimaryCmpNodes
# and CascadedCmpNodes.
special_bool_cmp_function = None
special_bool_cmp_utility_code = None
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def calculate_cascaded_constant_result(self, operand1_result):
func = compile_time_binary_operators[self.operator]
operand2_result = self.operand2.constant_result
if (isinstance(operand1_result, any_string_type) and
isinstance(operand2_result, any_string_type) and
type(operand1_result) != type(operand2_result)):
# string comparison of different types isn't portable
return
if self.operator in ('in', 'not_in'):
if isinstance(self.operand2, (ListNode, TupleNode, SetNode)):
if not self.operand2.args:
self.constant_result = self.operator == 'not_in'
return
elif isinstance(self.operand2, ListNode) and not self.cascade:
# tuples are more efficient to store than lists
self.operand2 = self.operand2.as_tuple()
elif isinstance(self.operand2, DictNode):
if not self.operand2.key_value_pairs:
self.constant_result = self.operator == 'not_in'
return
self.constant_result = func(operand1_result, operand2_result)
def cascaded_compile_time_value(self, operand1, denv):
func = get_compile_time_binop(self)
operand2 = self.operand2.compile_time_value(denv)
try:
result = func(operand1, operand2)
except Exception as e:
self.compile_time_value_error(e)
result = None
if result:
cascade = self.cascade
if cascade:
result = result and cascade.cascaded_compile_time_value(operand2, denv)
return result
def is_cpp_comparison(self):
return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class
def find_common_int_type(self, env, op, operand1, operand2):
# type1 != type2 and at least one of the types is not a C int
type1 = operand1.type
type2 = operand2.type
type1_can_be_int = False
type2_can_be_int = False
if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
type1_can_be_int = True
if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
type2_can_be_int = True
if type1.is_int:
if type2_can_be_int:
return type1
elif type2.is_int:
if type1_can_be_int:
return type2
elif type1_can_be_int:
if type2_can_be_int:
if Builtin.unicode_type in (type1, type2):
return PyrexTypes.c_py_ucs4_type
else:
return PyrexTypes.c_uchar_type
return None
def find_common_type(self, env, op, operand1, common_type=None):
operand2 = self.operand2
type1 = operand1.type
type2 = operand2.type
new_common_type = None
# catch general errors
if (type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or
type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type))):
error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
new_common_type = error_type
# try to use numeric comparisons where possible
elif type1.is_complex or type2.is_complex:
if (op not in ('==', '!=')
and (type1.is_complex or type1.is_numeric)
and (type2.is_complex or type2.is_numeric)):
error(self.pos, "complex types are unordered")
new_common_type = error_type
elif type1.is_pyobject:
new_common_type = Builtin.complex_type if type1.subtype_of(Builtin.complex_type) else py_object_type
elif type2.is_pyobject:
new_common_type = Builtin.complex_type if type2.subtype_of(Builtin.complex_type) else py_object_type
else:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif type1.is_numeric and type2.is_numeric:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif common_type is None or not common_type.is_pyobject:
new_common_type = self.find_common_int_type(env, op, operand1, operand2)
if new_common_type is None:
# fall back to generic type compatibility tests
if type1.is_ctuple or type2.is_ctuple:
new_common_type = py_object_type
elif type1 == type2:
new_common_type = type1
elif type1.is_pyobject or type2.is_pyobject:
if type2.is_numeric or type2.is_string:
if operand2.check_for_coercion_error(type1, env):
new_common_type = error_type
else:
new_common_type = py_object_type
elif type1.is_numeric or type1.is_string:
if operand1.check_for_coercion_error(type2, env):
new_common_type = error_type
else:
new_common_type = py_object_type
elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
new_common_type = py_object_type
else:
# one Python type and one non-Python type, not assignable
self.invalid_types_error(operand1, op, operand2)
new_common_type = error_type
elif type1.assignable_from(type2):
new_common_type = type1
elif type2.assignable_from(type1):
new_common_type = type2
else:
# C types that we couldn't handle up to here are an error
self.invalid_types_error(operand1, op, operand2)
new_common_type = error_type
if new_common_type.is_string and (isinstance(operand1, BytesNode) or
isinstance(operand2, BytesNode)):
# special case when comparing char* to bytes literal: must
# compare string values!
new_common_type = bytes_type
# recursively merge types
if common_type is None or new_common_type.is_error:
common_type = new_common_type
else:
# we could do a lot better by splitting the comparison
# into a non-Python part and a Python part, but this is
# safer for now
common_type = PyrexTypes.spanning_type(common_type, new_common_type)
if self.cascade:
common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)
return common_type
def invalid_types_error(self, operand1, op, operand2):
error(self.pos, "Invalid types for '%s' (%s, %s)" %
(op, operand1.type, operand2.type))
def is_python_comparison(self):
return (not self.is_ptr_contains()
and not self.is_c_string_contains()
and (self.has_python_operands()
or (self.cascade and self.cascade.is_python_comparison())
or self.operator in ('in', 'not_in')))
def coerce_operands_to(self, dst_type, env):
operand2 = self.operand2
if operand2.type != dst_type:
self.operand2 = operand2.coerce_to(dst_type, env)
if self.cascade:
self.cascade.coerce_operands_to(dst_type, env)
def is_python_result(self):
return ((self.has_python_operands() and
self.special_bool_cmp_function is None and
self.operator not in ('is', 'is_not', 'in', 'not_in') and
not self.is_c_string_contains() and
not self.is_ptr_contains())
or (self.cascade and self.cascade.is_python_result()))
def is_c_string_contains(self):
return self.operator in ('in', 'not_in') and \
((self.operand1.type.is_int
and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
(self.operand1.type.is_unicode_char
and self.operand2.type is unicode_type))
def is_ptr_contains(self):
if self.operator in ('in', 'not_in'):
container_type = self.operand2.type
return (container_type.is_ptr or container_type.is_array) \
and not container_type.is_string
def find_special_bool_compare_function(self, env, operand1, result_is_bool=False):
# note: currently operand1 must get coerced to a Python object if we succeed here!
if self.operator in ('==', '!='):
type1, type2 = operand1.type, self.operand2.type
if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type):
if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
return True
elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
return True
elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
return True
elif type1 is Builtin.str_type or type2 is Builtin.str_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyString_Equals"
return True
elif self.operator in ('in', 'not_in'):
if self.operand2.type is Builtin.dict_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF"
return True
elif self.operand2.type is Builtin.unicode_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_ContainsTF"
return True
else:
if not self.operand2.type.is_pyobject:
self.operand2 = self.operand2.coerce_to_pyobject(env)
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PySequence_ContainsTF"
return True
return False
def generate_operation_code(self, code, result_code,
            operand1, op, operand2):
if self.type.is_pyobject:
error_clause = code.error_goto_if_null
got_ref = "__Pyx_XGOTREF(%s); " % result_code
if self.special_bool_cmp_function:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
coerce_result = "__Pyx_PyBoolOrNull_FromLong"
else:
coerce_result = "__Pyx_PyBool_FromLong"
else:
error_clause = code.error_goto_if_neg
got_ref = ""
coerce_result = ""
if self.special_bool_cmp_function:
if operand1.type.is_pyobject:
result1 = operand1.py_result()
else:
result1 = operand1.result()
if operand2.type.is_pyobject:
result2 = operand2.py_result()
else:
result2 = operand2.result()
if self.special_bool_cmp_utility_code:
code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
code.putln(
"%s = %s(%s(%s, %s, %s)); %s%s" % (
result_code,
coerce_result,
self.special_bool_cmp_function,
result1, result2, richcmp_constants[op],
got_ref,
error_clause(result_code, self.pos)))
elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
assert op not in ('in', 'not_in'), op
code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s%s" % (
result_code,
operand1.py_result(),
operand2.py_result(),
richcmp_constants[op],
got_ref,
error_clause(result_code, self.pos)))
elif operand1.type.is_complex:
code.putln("%s = %s(%s%s(%s, %s));" % (
result_code,
coerce_result,
op == "!=" and "!" or "",
operand1.type.unary_op('eq'),
operand1.result(),
operand2.result()))
else:
type1 = operand1.type
type2 = operand2.type
if (type1.is_extension_type or type2.is_extension_type) \
and not type1.same_as(type2):
common_type = py_object_type
elif type1.is_numeric:
common_type = PyrexTypes.widest_numeric_type(type1, type2)
else:
common_type = type1
code1 = operand1.result_as(common_type)
code2 = operand2.result_as(common_type)
statement = "%s = %s(%s %s %s);" % (
result_code,
coerce_result,
code1,
self.c_operator(op),
code2)
if self.is_cpp_comparison() and self.exception_check == '+':
translate_cpp_exception(code, self.pos, statement, self.exception_value, self.in_nogil_context)
code.putln(statement)
def c_operator(self, op):
if op == 'is':
return "=="
elif op == 'is_not':
return "!="
else:
return op
class PrimaryCmpNode(ExprNode, CmpNode):
# Non-cascaded comparison or first comparison of
# a cascaded sequence.
#
# operator string
# operand1 ExprNode
# operand2 ExprNode
# cascade CascadedCmpNode
# We don't use the subexprs mechanism, because
# things here are too complicated for it to handle.
# Instead, we override all the framework methods
# which use it.
child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade']
cascade = None
coerced_operand2 = None
is_memslice_nonecheck = False
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def type_dependencies(self, env):
return ()
def calculate_constant_result(self):
assert not self.cascade
self.calculate_cascaded_constant_result(self.operand1.constant_result)
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
return self.cascaded_compile_time_value(operand1, denv)
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
self.operand2 = self.operand2.analyse_types(env)
if self.is_cpp_comparison():
self.analyse_cpp_comparison(env)
if self.cascade:
error(self.pos, "Cascading comparison not yet supported for cpp types.")
return self
type1 = self.operand1.type
type2 = self.operand2.type
if is_pythran_expr(type1) or is_pythran_expr(type2):
if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
self.type = PythranExpr(pythran_binop_type(self.operator, type1, type2))
self.is_pycmp = False
return self
if self.analyse_memoryviewslice_comparison(env):
return self
if self.cascade:
self.cascade = self.cascade.analyse_types(env)
if self.operator in ('in', 'not_in'):
if self.is_c_string_contains():
self.is_pycmp = False
common_type = None
if self.cascade:
error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
return self
if self.operand2.type is unicode_type:
env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
else:
if self.operand1.type is PyrexTypes.c_uchar_type:
self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
if self.operand2.type is not bytes_type:
self.operand2 = self.operand2.coerce_to(bytes_type, env)
env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
self.operand2 = self.operand2.as_none_safe_node(
"argument of type 'NoneType' is not iterable")
elif self.is_ptr_contains():
if self.cascade:
error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
self.type = PyrexTypes.c_bint_type
# Will be transformed by IterationTransform
return self
elif self.find_special_bool_compare_function(env, self.operand1):
if not self.operand1.type.is_pyobject:
self.operand1 = self.operand1.coerce_to_pyobject(env)
common_type = None # if coercion needed, the method call above has already done it
self.is_pycmp = False # result is bint
else:
common_type = py_object_type
self.is_pycmp = True
elif self.find_special_bool_compare_function(env, self.operand1):
if not self.operand1.type.is_pyobject:
self.operand1 = self.operand1.coerce_to_pyobject(env)
common_type = None # if coercion needed, the method call above has already done it
self.is_pycmp = False # result is bint
else:
common_type = self.find_common_type(env, self.operator, self.operand1)
self.is_pycmp = common_type.is_pyobject
if common_type is not None and not common_type.is_error:
if self.operand1.type != common_type:
self.operand1 = self.operand1.coerce_to(common_type, env)
self.coerce_operands_to(common_type, env)
if self.cascade:
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
operand2 = self.cascade.optimise_comparison(self.operand2, env)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
if self.is_python_result():
self.type = PyrexTypes.py_object_type
else:
self.type = PyrexTypes.c_bint_type
cdr = self.cascade
while cdr:
cdr.type = self.type
cdr = cdr.cascade
if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
# 1) owned reference, 2) reused value, 3) potential function error return value
self.is_temp = 1
return self
def analyse_cpp_comparison(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
self.is_pycmp = False
entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
if entry is None:
error(self.pos, "Invalid types for '%s' (%s, %s)" %
(self.operator, type1, type2))
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
func_type = entry.type
if func_type.is_ptr:
func_type = func_type.base_type
self.exception_check = func_type.exception_check
self.exception_value = func_type.exception_value
if self.exception_check == '+':
self.is_temp = True
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
if len(func_type.args) == 1:
self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
else:
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
def analyse_memoryviewslice_comparison(self, env):
have_none = self.operand1.is_none or self.operand2.is_none
have_slice = (self.operand1.type.is_memoryviewslice or
self.operand2.type.is_memoryviewslice)
ops = ('==', '!=', 'is', 'is_not')
if have_slice and have_none and self.operator in ops:
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
self.is_memslice_nonecheck = True
return True
return False
def coerce_to_boolean(self, env):
if self.is_pycmp:
# coercing to bool => may allow for more efficient comparison code
if self.find_special_bool_compare_function(
env, self.operand1, result_is_bool=True):
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
self.is_temp = 1
if self.cascade:
operand2 = self.cascade.optimise_comparison(
self.operand2, env, result_is_bool=True)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
return self
# TODO: check if we can optimise parts of the cascade here
return ExprNode.coerce_to_boolean(self, env)
def has_python_operands(self):
return (self.operand1.type.is_pyobject
or self.operand2.type.is_pyobject)
def check_const(self):
if self.cascade:
self.not_const()
return False
else:
return self.operand1.check_const() and self.operand2.check_const()
def calculate_result_code(self):
operand1, operand2 = self.operand1, self.operand2
if operand1.type.is_complex:
if self.operator == "!=":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
operand1.type.binary_op('=='),
operand1.result(),
operand2.result())
elif self.is_c_string_contains():
if operand2.type is unicode_type:
method = "__Pyx_UnicodeContainsUCS4"
else:
method = "__Pyx_BytesContains"
if self.operator == "not_in":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
method,
operand2.result(),
operand1.result())
else:
if is_pythran_expr(self.type):
result1, result2 = operand1.pythran_result(), operand2.pythran_result()
else:
result1, result2 = operand1.result(), operand2.result()
if self.is_memslice_nonecheck:
if operand1.type.is_memoryviewslice:
result1 = "((PyObject *) %s.memview)" % result1
else:
result2 = "((PyObject *) %s.memview)" % result2
return "(%s %s %s)" % (
result1,
self.c_operator(self.operator),
result2)
def generate_evaluation_code(self, code):
self.operand1.generate_evaluation_code(code)
self.operand2.generate_evaluation_code(code)
if self.is_temp:
self.allocate_temp_result(code)
self.generate_operation_code(code, self.result(),
self.operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
code, self.result(), self.coerced_operand2 or self.operand2,
needs_evaluation=self.coerced_operand2 is not None)
self.operand1.generate_disposal_code(code)
self.operand1.free_temps(code)
self.operand2.generate_disposal_code(code)
self.operand2.free_temps(code)
def generate_subexpr_disposal_code(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.generate_disposal_code(code)
self.operand2.generate_disposal_code(code)
def free_subexpr_temps(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.free_temps(code)
self.operand2.free_temps(code)
def annotate(self, code):
self.operand1.annotate(code)
self.operand2.annotate(code)
if self.cascade:
self.cascade.annotate(code)
class CascadedCmpNode(Node, CmpNode):
# A CascadedCmpNode is not a complete expression node. It
# hangs off the side of another comparison node, shares
# its left operand with that node, and shares its result
# with the PrimaryCmpNode at the head of the chain.
#
# operator string
# operand2 ExprNode
# cascade CascadedCmpNode
child_attrs = ['operand2', 'coerced_operand2', 'cascade']
cascade = None
coerced_operand2 = None
constant_result = constant_value_not_set # FIXME: where to calculate this?
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def type_dependencies(self, env):
return ()
def has_constant_result(self):
return self.constant_result is not constant_value_not_set and \
self.constant_result is not not_a_constant
def analyse_types(self, env):
self.operand2 = self.operand2.analyse_types(env)
if self.cascade:
self.cascade = self.cascade.analyse_types(env)
return self
def has_python_operands(self):
return self.operand2.type.is_pyobject
def is_cpp_comparison(self):
# cascaded comparisons aren't currently implemented for c++ classes.
return False
def optimise_comparison(self, operand1, env, result_is_bool=False):
if self.find_special_bool_compare_function(env, operand1, result_is_bool):
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
if not operand1.type.is_pyobject:
operand1 = operand1.coerce_to_pyobject(env)
if self.cascade:
operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
return operand1
def coerce_operands_to_pyobjects(self, env):
self.operand2 = self.operand2.coerce_to_pyobject(env)
if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
if self.cascade:
self.cascade.coerce_operands_to_pyobjects(env)
def coerce_cascaded_operands_to_temp(self, env):
if self.cascade:
#self.operand2 = self.operand2.coerce_to_temp(env) #CTT
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
if self.type.is_pyobject:
code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
code.put_decref(result, self.type)
else:
code.putln("if (%s) {" % result)
if needs_evaluation:
operand1.generate_evaluation_code(code)
self.operand2.generate_evaluation_code(code)
self.generate_operation_code(code, result,
operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
code, result, self.coerced_operand2 or self.operand2,
needs_evaluation=self.coerced_operand2 is not None)
if needs_evaluation:
operand1.generate_disposal_code(code)
operand1.free_temps(code)
# Cascaded cmp result is always temp
self.operand2.generate_disposal_code(code)
self.operand2.free_temps(code)
code.putln("}")
def annotate(self, code):
self.operand2.annotate(code)
if self.cascade:
self.cascade.annotate(code)
binop_node_classes = {
"or": BoolBinopNode,
"and": BoolBinopNode,
"|": IntBinopNode,
"^": IntBinopNode,
"&": IntBinopNode,
"<<": IntBinopNode,
">>": IntBinopNode,
"+": AddNode,
"-": SubNode,
"*": MulNode,
"@": MatMultNode,
"/": DivNode,
"//": DivNode,
"%": ModNode,
"**": PowNode,
}
def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs):
# Construct binop node of appropriate class for
# given operator.
return binop_node_classes[operator](
pos,
operator=operator,
operand1=operand1,
operand2=operand2,
inplace=inplace,
**kwargs)
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
class CoercionNode(ExprNode):
# Abstract base class for coercion nodes.
#
# arg ExprNode node being coerced
subexprs = ['arg']
constant_result = not_a_constant
def __init__(self, arg):
super(CoercionNode, self).__init__(arg.pos)
self.arg = arg
if debug_coercion:
print("%s Coercing %s" % (self, self.arg))
def calculate_constant_result(self):
# constant folding can break type coercion, so this is disabled
pass
def annotate(self, code):
self.arg.annotate(code)
if self.arg.type != self.type:
file, line, col = self.pos
code.annotate((file, line, col-1), AnnotationItem(
style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
class CoerceToMemViewSliceNode(CoercionNode):
"""
Coerce an object to a memoryview slice. This holds a new reference in
a managed temp.
"""
def __init__(self, arg, dst_type, env):
assert dst_type.is_memoryviewslice
assert not arg.type.is_memoryviewslice
CoercionNode.__init__(self, arg)
self.type = dst_type
self.is_temp = 1
self.env = env
self.use_managed_ref = True
self.arg = arg
def generate_result_code(self, code):
self.type.create_from_py_utility_code(self.env)
code.putln("%s = %s(%s);" % (self.result(),
self.type.from_py_function,
self.arg.py_result()))
error_cond = self.type.error_condition(self.result())
code.putln(code.error_goto_if(error_cond, self.pos))
class CastNode(CoercionNode):
# Wrap a node in a C type cast.
def __init__(self, arg, new_type):
CoercionNode.__init__(self, arg)
self.type = new_type
def may_be_none(self):
return self.arg.may_be_none()
def calculate_result_code(self):
return self.arg.result_as(self.type)
def generate_result_code(self, code):
self.arg.generate_result_code(code)
class PyTypeTestNode(CoercionNode):
# This node is used to check that a generic Python
# object is an instance of a particular extension type.
# This node borrows the result of its argument node.
exact_builtin_type = True
def __init__(self, arg, dst_type, env, notnone=False):
        # The arg is known to be a Python object, and
# the dst_type is known to be an extension type.
assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type"
CoercionNode.__init__(self, arg)
self.type = dst_type
self.result_ctype = arg.ctype()
self.notnone = notnone
nogil_check = Node.gil_error
gil_message = "Python type test"
def analyse_types(self, env):
return self
def may_be_none(self):
if self.notnone:
return False
return self.arg.may_be_none()
def is_simple(self):
return self.arg.is_simple()
def result_in_temp(self):
return self.arg.result_in_temp()
def is_ephemeral(self):
return self.arg.is_ephemeral()
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
def calculate_constant_result(self):
# FIXME
pass
def calculate_result_code(self):
return self.arg.result()
def generate_result_code(self, code):
if self.type.typeobj_is_available():
if self.type.is_builtin_type:
type_test = self.type.type_test_code(
self.arg.py_result(),
self.notnone, exact=self.exact_builtin_type)
else:
type_test = self.type.type_test_code(
self.arg.py_result(), self.notnone)
code.globalstate.use_utility_code(
UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
code.putln("if (!(%s)) %s" % (
type_test, code.error_goto(self.pos)))
else:
error(self.pos, "Cannot test type of extern C class "
"without type object name specification")
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class NoneCheckNode(CoercionNode):
# This node is used to check that a Python object is not None and
# raises an appropriate exception (as specified by the creating
# transform).
is_nonecheck = True
def __init__(self, arg, exception_type_cname, exception_message,
exception_format_args=()):
CoercionNode.__init__(self, arg)
self.type = arg.type
self.result_ctype = arg.ctype()
self.exception_type_cname = exception_type_cname
self.exception_message = exception_message
self.exception_format_args = tuple(exception_format_args or ())
nogil_check = None # this node only guards an operation that would fail already
def analyse_types(self, env):
return self
def may_be_none(self):
return False
def is_simple(self):
return self.arg.is_simple()
def result_in_temp(self):
return self.arg.result_in_temp()
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
def calculate_result_code(self):
return self.arg.result()
def condition(self):
if self.type.is_pyobject:
return self.arg.py_result()
elif self.type.is_memoryviewslice:
return "((PyObject *) %s.memview)" % self.arg.result()
else:
raise Exception("unsupported type")
@classmethod
def generate(cls, arg, code, exception_message,
exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
node = cls(arg, exception_type_cname, exception_message, exception_format_args)
node.in_nogil_context = in_nogil_context
node.put_nonecheck(code)
@classmethod
def generate_if_needed(cls, arg, code, exception_message,
exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
if arg.may_be_none():
cls.generate(arg, code, exception_message, exception_type_cname, exception_format_args, in_nogil_context)
def put_nonecheck(self, code):
code.putln(
"if (unlikely(%s == Py_None)) {" % self.condition())
if self.in_nogil_context:
code.put_ensure_gil()
escape = StringEncoding.escape_byte_string
if self.exception_format_args:
code.putln('PyErr_Format(%s, "%s", %s);' % (
self.exception_type_cname,
StringEncoding.escape_byte_string(
self.exception_message.encode('UTF-8')),
', '.join([ '"%s"' % escape(str(arg).encode('UTF-8'))
for arg in self.exception_format_args ])))
else:
code.putln('PyErr_SetString(%s, "%s");' % (
self.exception_type_cname,
escape(self.exception_message.encode('UTF-8'))))
if self.in_nogil_context:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
def generate_result_code(self, code):
self.put_nonecheck(code)
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class CoerceToPyTypeNode(CoercionNode):
# This node is used to convert a C data type
# to a Python object.
type = py_object_type
target_type = py_object_type
is_temp = 1
def __init__(self, arg, env, type=py_object_type):
if not arg.type.create_to_py_utility_code(env):
error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
elif arg.type.is_complex:
# special case: complex coercion is so complex that it
# uses a macro ("__pyx_PyComplex_FromComplex()"), for
# which the argument must be simple
arg = arg.coerce_to_simple(env)
CoercionNode.__init__(self, arg)
if type is py_object_type:
# be specific about some known types
if arg.type.is_string or arg.type.is_cpp_string:
self.type = default_str_type(env)
elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char:
self.type = unicode_type
elif arg.type.is_complex:
self.type = Builtin.complex_type
self.target_type = self.type
elif arg.type.is_string or arg.type.is_cpp_string:
if (type not in (bytes_type, bytearray_type)
and not env.directives['c_string_encoding']):
error(arg.pos,
"default encoding required for conversion from '%s' to '%s'" %
(arg.type, type))
self.type = self.target_type = type
else:
# FIXME: check that the target type and the resulting type are compatible
self.target_type = type
gil_message = "Converting to Python object"
def may_be_none(self):
# FIXME: is this always safe?
return False
def coerce_to_boolean(self, env):
arg_type = self.arg.type
if (arg_type == PyrexTypes.c_bint_type or
(arg_type.is_pyobject and arg_type.name == 'bool')):
return self.arg.coerce_to_temp(env)
else:
return CoerceToBooleanNode(self, env)
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.arg.type.is_int:
return self.arg
else:
return self.arg.coerce_to(PyrexTypes.c_long_type, env)
def analyse_types(self, env):
# The arg is always already analysed
return self
def generate_result_code(self, code):
code.putln('%s; %s' % (
self.arg.type.to_py_call_code(
self.arg.result(),
self.result(),
self.target_type),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class CoerceIntToBytesNode(CoerceToPyTypeNode):
# This node is used to convert a C int type to a Python bytes
# object.
is_temp = 1
def __init__(self, arg, env):
arg = arg.coerce_to_simple(env)
CoercionNode.__init__(self, arg)
self.type = Builtin.bytes_type
def generate_result_code(self, code):
arg = self.arg
arg_result = arg.result()
if arg.type not in (PyrexTypes.c_char_type,
PyrexTypes.c_uchar_type,
PyrexTypes.c_schar_type):
if arg.type.signed:
code.putln("if ((%s < 0) || (%s > 255)) {" % (
arg_result, arg_result))
else:
code.putln("if (%s > 255) {" % arg_result)
code.putln('PyErr_SetString(PyExc_OverflowError, '
'"value too large to pack into a byte"); %s' % (
code.error_goto(self.pos)))
code.putln('}')
temp = None
if arg.type is not PyrexTypes.c_char_type:
temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
code.putln("%s = (char)%s;" % (temp, arg_result))
arg_result = temp
code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
self.result(),
arg_result,
code.error_goto_if_null(self.result(), self.pos)))
if temp is not None:
code.funcstate.release_temp(temp)
code.put_gotref(self.py_result())
class CoerceFromPyTypeNode(CoercionNode):
# This node is used to convert a Python object
# to a C data type.
def __init__(self, result_type, arg, env):
CoercionNode.__init__(self, arg)
self.type = result_type
self.is_temp = 1
if not result_type.create_from_py_utility_code(env):
error(arg.pos,
"Cannot convert Python object to '%s'" % result_type)
if self.type.is_string or self.type.is_pyunicode_ptr:
if self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
warning(arg.pos,
"Obtaining '%s' from externally modifiable global Python value" % result_type,
level=1)
def analyse_types(self, env):
# The arg is always already analysed
return self
def is_ephemeral(self):
return (self.type.is_ptr and not self.type.is_array) and self.arg.is_ephemeral()
def generate_result_code(self, code):
from_py_function = None
# for certain source types, we can do better than the generic coercion
if self.type.is_string and self.arg.type is bytes_type:
if self.type.from_py_function.startswith('__Pyx_PyObject_As'):
from_py_function = '__Pyx_PyBytes' + self.type.from_py_function[len('__Pyx_PyObject'):]
NoneCheckNode.generate_if_needed(self.arg, code, "expected bytes, NoneType found")
code.putln(self.type.from_py_call_code(
self.arg.py_result(), self.result(), self.pos, code, from_py_function=from_py_function))
if self.type.is_pyobject:
code.put_gotref(self.py_result())
def nogil_check(self, env):
error(self.pos, "Coercion from Python not allowed without the GIL")
class CoerceToBooleanNode(CoercionNode):
# This node is used when a result needs to be used
# in a boolean context.
type = PyrexTypes.c_bint_type
_special_builtins = {
Builtin.list_type: 'PyList_GET_SIZE',
Builtin.tuple_type: 'PyTuple_GET_SIZE',
Builtin.set_type: 'PySet_GET_SIZE',
Builtin.frozenset_type: 'PySet_GET_SIZE',
Builtin.bytes_type: 'PyBytes_GET_SIZE',
Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
Builtin.unicode_type: '__Pyx_PyUnicode_IS_TRUE',
}
def __init__(self, arg, env):
CoercionNode.__init__(self, arg)
if arg.type.is_pyobject:
self.is_temp = 1
def nogil_check(self, env):
if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
self.gil_error()
gil_message = "Truth-testing Python object"
def check_const(self):
if self.is_temp:
self.not_const()
return False
return self.arg.check_const()
def calculate_result_code(self):
return "(%s != 0)" % self.arg.result()
def generate_result_code(self, code):
if not self.is_temp:
return
test_func = self._special_builtins.get(self.arg.type)
if test_func is not None:
checks = ["(%s != Py_None)" % self.arg.py_result()] if self.arg.may_be_none() else []
checks.append("(%s(%s) != 0)" % (test_func, self.arg.py_result()))
code.putln("%s = %s;" % (self.result(), '&&'.join(checks)))
else:
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_neg(self.result(), self.pos)))
class CoerceToComplexNode(CoercionNode):
def __init__(self, arg, dst_type, env):
if arg.type.is_complex:
arg = arg.coerce_to_simple(env)
self.type = dst_type
CoercionNode.__init__(self, arg)
dst_type.create_declaration_utility_code(env)
def calculate_result_code(self):
if self.arg.type.is_complex:
real_part = "__Pyx_CREAL(%s)" % self.arg.result()
imag_part = "__Pyx_CIMAG(%s)" % self.arg.result()
else:
real_part = self.arg.result()
imag_part = "0"
return "%s(%s, %s)" % (
self.type.from_parts,
real_part,
imag_part)
def generate_result_code(self, code):
pass
class CoerceToTempNode(CoercionNode):
# This node is used to force the result of another node
# to be stored in a temporary. It is only used if the
# argument node's result is not already in a temporary.
def __init__(self, arg, env):
CoercionNode.__init__(self, arg)
self.type = self.arg.type.as_argument_type()
self.constant_result = self.arg.constant_result
self.is_temp = 1
if self.type.is_pyobject:
self.result_ctype = py_object_type
gil_message = "Creating temporary Python reference"
def analyse_types(self, env):
# The arg is always already analysed
return self
def coerce_to_boolean(self, env):
self.arg = self.arg.coerce_to_boolean(env)
if self.arg.is_simple():
return self.arg
self.type = self.arg.type
self.result_ctype = self.type
return self
def generate_result_code(self, code):
#self.arg.generate_evaluation_code(code) # Already done
# by generic generate_subexpr_evaluation_code!
code.putln("%s = %s;" % (
self.result(), self.arg.result_as(self.ctype())))
if self.use_managed_ref:
if self.type.is_pyobject:
code.put_incref(self.result(), self.ctype())
elif self.type.is_memoryviewslice:
code.put_incref_memoryviewslice(self.result(),
not self.in_nogil_context)
class ProxyNode(CoercionNode):
"""
A node that should not be replaced by transforms or other means,
and hence can be useful to wrap the argument to a clone node
MyNode -> ProxyNode -> ArgNode
CloneNode -^
"""
nogil_check = None
def __init__(self, arg):
super(ProxyNode, self).__init__(arg)
self.constant_result = arg.constant_result
self._proxy_type()
def analyse_types(self, env):
self.arg = self.arg.analyse_expressions(env)
self._proxy_type()
return self
def infer_type(self, env):
return self.arg.infer_type(env)
def _proxy_type(self):
if hasattr(self.arg, 'type'):
self.type = self.arg.type
self.result_ctype = self.arg.result_ctype
if hasattr(self.arg, 'entry'):
self.entry = self.arg.entry
def generate_result_code(self, code):
self.arg.generate_result_code(code)
def result(self):
return self.arg.result()
def is_simple(self):
return self.arg.is_simple()
def may_be_none(self):
return self.arg.may_be_none()
def generate_evaluation_code(self, code):
self.arg.generate_evaluation_code(code)
def generate_disposal_code(self, code):
self.arg.generate_disposal_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class CloneNode(CoercionNode):
# This node is employed when the result of another node needs
# to be used multiple times. The argument node's result must
# be in a temporary. This node "borrows" the result from the
# argument node, and does not generate any evaluation or
# disposal code for it. The original owner of the argument
# node is responsible for doing those things.
subexprs = [] # Arg is not considered a subexpr
nogil_check = None
def __init__(self, arg):
CoercionNode.__init__(self, arg)
self.constant_result = arg.constant_result
if hasattr(arg, 'type'):
self.type = arg.type
self.result_ctype = arg.result_ctype
if hasattr(arg, 'entry'):
self.entry = arg.entry
def result(self):
return self.arg.result()
def may_be_none(self):
return self.arg.may_be_none()
def type_dependencies(self, env):
return self.arg.type_dependencies(env)
def infer_type(self, env):
return self.arg.infer_type(env)
def analyse_types(self, env):
self.type = self.arg.type
self.result_ctype = self.arg.result_ctype
self.is_temp = 1
if hasattr(self.arg, 'entry'):
self.entry = self.arg.entry
return self
def coerce_to(self, dest_type, env):
if self.arg.is_literal:
return self.arg.coerce_to(dest_type, env)
return super(CloneNode, self).coerce_to(dest_type, env)
def is_simple(self):
return True # result is always in a temp (or a name)
def generate_evaluation_code(self, code):
pass
def generate_result_code(self, code):
pass
def generate_disposal_code(self, code):
pass
def free_temps(self, code):
pass
class CMethodSelfCloneNode(CloneNode):
# Special CloneNode for the self argument of builtin C methods
# that accepts subtypes of the builtin type. This is safe only
# for 'final' subtypes, as subtypes of the declared type may
# override the C method.
def coerce_to(self, dst_type, env):
if dst_type.is_builtin_type and self.type.subtype_of(dst_type):
return self
return CloneNode.coerce_to(self, dst_type, env)
class ModuleRefNode(ExprNode):
    # Simply returns the module object
type = py_object_type
is_temp = False
subexprs = []
def analyse_types(self, env):
return self
def may_be_none(self):
return False
def calculate_result_code(self):
return Naming.module_cname
def generate_result_code(self, code):
pass
class DocstringRefNode(ExprNode):
# Extracts the docstring of the body element
subexprs = ['body']
type = py_object_type
is_temp = True
def __init__(self, pos, body):
ExprNode.__init__(self, pos)
assert body.type.is_pyobject
self.body = body
def analyse_types(self, env):
return self
def generate_result_code(self, code):
code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
self.result(), self.body.result(),
code.intern_identifier(StringEncoding.EncodedString("__doc__")),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
pyerr_occurred_withgil_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */
""",
impl = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
int err;
#ifdef WITH_THREAD
PyGILState_STATE _save = PyGILState_Ensure();
#endif
err = !!PyErr_Occurred();
#ifdef WITH_THREAD
PyGILState_Release(_save);
#endif
return err;
}
"""
)
#------------------------------------------------------------------------------------
raise_unbound_local_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
""")
raise_closure_name_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
}
""")
# Don't inline the function, it should really never be called in production
raise_unbound_memoryview_utility_code_nogil = UtilityCode(
proto = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);
""",
impl = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) {
#ifdef WITH_THREAD
PyGILState_STATE gilstate = PyGILState_Ensure();
#endif
__Pyx_RaiseUnboundLocalError(varname);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
""",
requires = [raise_unbound_local_error_utility_code])
#------------------------------------------------------------------------------------
raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")
raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")
tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c")
|
py | b415e49e2b3c1250b191517c4a31aa4d83417997 | from sklearn.ensemble import IsolationForest
from sklearn.tree import DecisionTreeClassifier
from sklearn.inspection import permutation_importance
import pandas as pd
import os
import logging
def get_outlier_scores(df, file_name, machine_settings, n_estimators, max_samples, contamination=0.01,
model='IsolationForest'):
scores_dir = f"../../data/outlier_scores/{machine_settings['ma_nr']}"
subdir = f"{machine_settings['wsg_id']}_{machine_settings['wzd_id']}_{machine_settings['st_id']}_{machine_settings['at_id']}_{machine_settings['start_date']}_{machine_settings['end_date']}"
path = f"{scores_dir}/{subdir}"
file = f"{path}/{file_name}"
if os.path.exists(file):
scores = pd.read_pickle(file)
logging.info(f"loaded scores from {file}")
else:
        os.makedirs(path, exist_ok=True)
if model == 'IsolationForest':
outlier_detector = IsolationForest(n_estimators=n_estimators, max_samples=max_samples,
contamination=contamination)
outlier_detector.fit(df)
labels = outlier_detector.predict(df)
scores = pd.DataFrame({'scores': outlier_detector.score_samples(df), 'labels': labels},
index=df.index)
scores.to_pickle(file)
logging.info(f"saved scores in {file}")
else:
raise Exception('Only IsolationForest implemented at the moment')
return scores
def get_outlier_labels(scores, threshold=-0.5):
labels = scores['scores'].apply(lambda x: -1 if x < threshold else 1).to_frame(name='labels')
return labels
def get_percent_threshold(scores, percentile=0.01):
threshold = scores['scores'].quantile(percentile)
return threshold
def get_feature_importances(df, labels, file_name, machine_settings):
f_dir = f"../../data/feature_importances/{machine_settings['ma_nr']}"
subdir = f"{machine_settings['wsg_id']}_{machine_settings['wzd_id']}_{machine_settings['st_id']}_{machine_settings['at_id']}_{machine_settings['start_date']}_{machine_settings['end_date']}"
path = f"{f_dir}/{subdir}"
file = f"{path}/{file_name}"
if os.path.exists(file):
feature_importances = pd.read_pickle(file)
logging.info(f"loaded feature_importances from {file}")
else:
        os.makedirs(path, exist_ok=True)
        # fit a decision tree classifier to separate outliers from inliers
class_weights = labels.value_counts(normalize=True).to_dict()
classifier = DecisionTreeClassifier(class_weight=class_weights)
classifier.fit(df, labels)
        # get impurity-based feature importances from the fitted tree
feature_importances = pd.DataFrame(
{'feature': df.columns, 'mean_importance': classifier.feature_importances_}).sort_values(
'mean_importance').reset_index()
        # save results
feature_importances.to_pickle(file)
logging.info(f'feature importances saved: {file}')
return feature_importances
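# Illustrative usage sketch: how the functions above chain together
# (score -> threshold -> label -> explain). The machine_settings keys and the
# random example DataFrame below are assumptions made purely for demonstration.
if __name__ == '__main__':
    import numpy as np
    example_settings = {'ma_nr': 1, 'wsg_id': 1, 'wzd_id': 1, 'st_id': 1, 'at_id': 1,
                        'start_date': '2021-01-01', 'end_date': '2021-02-01'}
    example_df = pd.DataFrame(np.random.rand(1000, 5),
                              columns=[f'sensor_{i}' for i in range(5)])
    # score every row, derive a data-driven threshold, label outliers, then
    # inspect which features separate outliers from inliers
    example_scores = get_outlier_scores(example_df, 'scores.pkl', example_settings,
                                        n_estimators=100, max_samples=256)
    example_threshold = get_percent_threshold(example_scores, percentile=0.01)
    example_labels = get_outlier_labels(example_scores, threshold=example_threshold)
    example_importances = get_feature_importances(example_df, example_labels['labels'],
                                                  'importances.pkl', example_settings)
    print(example_importances.head())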
|
py | b415e4e0e6a1847b69e6fbb492007f55b34b4497 | def reader():
while True:
try:
print(sum(map(int, input().split())))
except:
break
if __name__ == '__main__':
reader()
|
py | b415e5e0ec490b9d0ac1a1da6a902dc0f2733a5b | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.0-beta.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_replica_set_spec import V1beta1ReplicaSetSpec
class TestV1beta1ReplicaSetSpec(unittest.TestCase):
""" V1beta1ReplicaSetSpec unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1ReplicaSetSpec(self):
"""
Test V1beta1ReplicaSetSpec
"""
model = kubernetes.client.models.v1beta1_replica_set_spec.V1beta1ReplicaSetSpec()
if __name__ == '__main__':
unittest.main()
|
py | b415e70c909aa19298866cc0bcc6512b3ba91427 | #! /usr/bin/env python
# by caozj
# Jun 4, 2019
# 8:09:11 PM
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import time
import argparse
import dca.api
import Cell_BLAST as cb
import utils
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", dest="input", type=str, required=True)
parser.add_argument("-g", "--genes", dest="genes", type=str, required=True)
parser.add_argument("-o", "--output", dest="output", type=str, required=True)
parser.add_argument("--n-latent", dest="n_latent", type=int, default=32)
parser.add_argument("--n-hidden", dest="n_hidden", type=int, default=64)
parser.add_argument("--n-layers", dest="n_layers", type=int, default=1)
parser.add_argument("--n-epochs", dest="n_epochs", type=int, default=1000)
parser.add_argument("--patience", dest="patience", type=int, default=30)
parser.add_argument("-s", "--seed", dest="seed", type=int, default=None) # Not exactly be reproducible though
parser.add_argument("-t", "--threads", dest="threads", type=int, default=None)
parser.add_argument("-d", "--device", dest="device", type=str, default=None)
parser.add_argument("--clean", dest="clean", type=str, default=None)
cmd_args = parser.parse_args()
cmd_args.output_path = os.path.dirname(cmd_args.output)
if not os.path.exists(cmd_args.output_path):
os.makedirs(cmd_args.output_path)
os.environ["CUDA_VISIBLE_DEVICES"] = utils.pick_gpu_lowest_memory() \
if cmd_args.device is None else cmd_args.device
return cmd_args
def main(cmd_args):
dataset = cb.data.ExprDataSet.read_dataset(cmd_args.input, sparsify=True)
if cmd_args.clean is not None:
dataset = utils.clean_dataset(dataset, cmd_args.clean)
if cmd_args.genes is not None:
dataset = dataset[:, dataset.uns[cmd_args.genes]]
dataset = dataset.to_anndata()
start_time = time.time()
dataset, model = dca.api.dca(
dataset, mode="latent",
hidden_size=
(cmd_args.n_hidden, ) * cmd_args.n_layers +
(cmd_args.n_latent, ) +
(cmd_args.n_hidden, ) * cmd_args.n_layers,
epochs=cmd_args.n_epochs, early_stop=cmd_args.patience,
random_state=cmd_args.seed, threads=cmd_args.threads,
return_model=True, copy=True
)
cb.data.write_hybrid_path(
time.time() - start_time,
"//".join([cmd_args.output, "time"])
)
cb.data.write_hybrid_path(
dataset.obsm["X_dca"],
"//".join([cmd_args.output, "latent"])
)
model.encoder.save(os.path.join(cmd_args.output_path, "model.h5"))
if __name__ == "__main__":
main(parse_args())
print("Done!")
|
py | b415e73231e0ae8ca730425eb91ed2b2a21a3095 | import imp
import sys
import collections
from decoder_config import LM_SOURCE, CHARMAP_PATH, SPACE,\
SPECIALS_LIST, LM_ARPA_FILE
from decoder_utils import load_chars, load_words
def scrub():
words = load_words()
chars = load_chars()
fid = open(CHARMAP_PATH+'wordlist','w')
specials = set(SPECIALS_LIST)
for word in words:
if word in specials:
continue
skip = False
for t in word:
# ignore words with bad symbols
if t not in chars.keys():
print word
skip = True
break
if not skip:
fid.write(word+'\n')
fid.close()
class Node:
def __init__(self):
self.isPrefix = False
self.isWord = False
self.children = None
class PrefixTree:
def __init__(self,chars,words,lm):
specials = set(SPECIALS_LIST)
self.path_count = 0
self.lm = lm
self.chars = chars
self.root = Node()
self.root.isPrefix = True
self.space = self.chars[SPACE]
self.nodeFn = lambda : Node()
self.root.children = collections.defaultdict(self.nodeFn)
for word in list(specials):
node = self.root.children[self.chars[word]]
node.isWord = True
node.id = lm.get_word_id(word)
count = 0
for word in words:
if (count % 10000) == 0:
print ".",
sys.stdout.flush()
self.addPath(word,self.root,lm.get_word_id(word))
count += 1
def addPath(self,prefix,node,wordId):
self.path_count += 1
p,rest = prefix[0],prefix[1:]
if node.children is None:
node.children = collections.defaultdict(self.nodeFn)
next = node.children[self.chars[p]]
if len(rest)==0:
next.isWord = True
next.id = wordId
return
else:
next.isPrefix = True
self.addPath(rest,next,wordId)
def load_and_pickle_lm():
import cPickle as pickle
lmMod = imp.load_source('lm', LM_SOURCE)
lm = lmMod.LM(arpafile=LM_ARPA_FILE)
    lm.to_file(os.path.splitext(LM_ARPA_FILE)[0] + '.bin')
def load_lm():
lmMod = imp.load_source('lm', LM_SOURCE)
lm = lmMod.LM(arpafile=LM_ARPA_FILE)
return lm
def loadPrefixTree():
lm = load_lm()
chars = load_chars()
words = load_words()
return PrefixTree(chars,words,lm)
if __name__=='__main__':
pt = loadPrefixTree()
print 'Added %d prefix paths' % pt.path_count
|
py | b415e7701273f8dc4af0aab2d796c7ba68dfa20b | from marshmallow import ValidationError
from marshmallow.fields import Field
class PolyField(Field):
"""
A field that (de)serializes to one of many types. Passed in functions
are called to disambiguate what schema to use for the (de)serialization
Intended to assist in working with fields that can contain any subclass
of a base type
"""
def __init__(
self,
serialization_schema_selector=None,
deserialization_schema_selector=None,
many=False,
**metadata
):
"""
        :param serialization_schema_selector: Function that takes in the object
            being serialized and its parent object, and returns the appropriate
            schema.
        :param deserialization_schema_selector: Function that takes in the dict
            being deserialized and its parent dict, and returns the appropriate
            schema.
"""
super(PolyField, self).__init__(**metadata)
self.many = many
self.serialization_schema_selector = serialization_schema_selector
self.deserialization_schema_selector = deserialization_schema_selector
def _deserialize(self, value, attr, data):
if not self.many:
value = [value]
results = []
for v in value:
schema = None
try:
schema = self.deserialization_schema_selector(v, data)
assert hasattr(schema, 'load')
except Exception:
schema_message = None
if schema:
schema_message = str(type(schema))
raise ValidationError(
"Unable to use schema. Ensure there is a deserialization_schema_selector"
" and that it returns a schema when the function is passed in {value_passed}."
" This is the class I got. Make sure it is a schema: {class_type}".format(
value_passed=v,
class_type=schema_message
)
)
schema.context.update(getattr(self, 'context', {}))
# Will raise ValidationError if any problems
data = schema.load(v)
results.append(data)
if self.many:
return results
else:
# Will be at least one otherwise value would have been None
return results[0]
def _serialize(self, value, key, obj):
if value is None:
return None
try:
if self.many:
res = []
for v in value:
schema = self.serialization_schema_selector(v, obj)
schema.context.update(getattr(self, 'context', {}))
data = schema.dump(v)
res.append(data)
return res
else:
schema = self.serialization_schema_selector(value, obj)
schema.context.update(getattr(self, 'context', {}))
return schema.dump(value)
except Exception as err:
raise TypeError(
'Failed to serialize object. Error: {0}\n'
' Ensure the serialization_schema_selector exists and '
' returns a Schema and that schema'
' can serialize this value {1}'.format(err, value))
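# Illustrative usage sketch: a minimal pair of selector functions that choose
# between two schemas based on the shape of the data. The Example* schemas and
# the 'kind'/'radius' keys below are hypothetical, shown only to demonstrate how
# the selectors are wired into a PolyField.
from marshmallow import Schema, fields
class ExampleCircleSchema(Schema):
    kind = fields.Str()
    radius = fields.Float()
class ExampleRectangleSchema(Schema):
    kind = fields.Str()
    width = fields.Float()
    height = fields.Float()
def example_serialization_selector(obj, parent_obj):
    # pick a schema from the object being serialized
    return ExampleCircleSchema() if hasattr(obj, 'radius') else ExampleRectangleSchema()
def example_deserialization_selector(obj_dict, parent_dict):
    # pick a schema from the raw dict being deserialized
    return ExampleCircleSchema() if obj_dict.get('kind') == 'circle' else ExampleRectangleSchema()
class ExampleDrawingSchema(Schema):
    shapes = PolyField(
        serialization_schema_selector=example_serialization_selector,
        deserialization_schema_selector=example_deserialization_selector,
        many=True,
    )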
|
py | b415e875130b7fad8aff31e6c5fafc597e5d0d4e | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenlet
from eventlet import greenpool
from eventlet import greenthread
from sysinv.openstack.common import log as logging
from sysinv.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs):
""" Callback function to be passed to GreenThread.link() when we spawn()
    Calls the :class:`ThreadGroup` to notify it.
"""
kwargs['group'].thread_done(kwargs['thread'])
class Thread(object):
""" Wrapper around a greenthread, that holds a reference to the
:class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
it has done so it can be removed from the threads list.
"""
def __init__(self, thread, group):
self.thread = thread
self.thread.link(_thread_done, group=group, thread=self)
def stop(self):
self.thread.kill()
def wait(self):
return self.thread.wait()
class ThreadGroup(object):
    The point of the ThreadGroup class is to:
* keep track of timers and greenthreads (making it easier to stop them
when need be).
* provide an easy API to add timers.
"""
def __init__(self, thread_pool_size=10):
self.pool = greenpool.GreenPool(thread_pool_size)
self.threads = []
self.timers = []
def add_dynamic_timer(self, callback, initial_delay=None,
periodic_interval_max=None, *args, **kwargs):
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
timer.start(initial_delay=initial_delay,
periodic_interval_max=periodic_interval_max)
self.timers.append(timer)
def add_timer(self, interval, callback, initial_delay=None,
*args, **kwargs):
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
pulse.start(interval=interval,
initial_delay=initial_delay)
self.timers.append(pulse)
def add_thread(self, callback, *args, **kwargs):
gt = self.pool.spawn(callback, *args, **kwargs)
th = Thread(gt, self)
self.threads.append(th)
def thread_done(self, thread):
self.threads.remove(thread)
def stop(self):
current = greenthread.getcurrent()
for x in self.threads:
if x is current:
# don't kill the current thread.
continue
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
for x in self.timers:
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
self.timers = []
def wait(self):
for x in self.timers:
try:
x.wait()
except greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
current = greenthread.getcurrent()
for x in self.threads:
if x is current:
continue
try:
x.wait()
except greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
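# Illustrative usage sketch of the ThreadGroup API; the poll_status/process_item
# callbacks below are hypothetical and shown only to outline the intended lifecycle:
#
#     tg = ThreadGroup(thread_pool_size=4)
#     tg.add_timer(60, poll_status)           # call poll_status() every 60 seconds
#     tg.add_thread(process_item, "item-1")   # run process_item("item-1") in the pool
#     ...
#     tg.stop()                               # stop timers and kill remaining threads
#     tg.wait()                               # block until timers and threads finish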
|
py | b415e8c8fa425ab204aff4f667a227a8af973d6d | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import logging
import sys
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.callbacks import Callback
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.rmsprop import RMSPropOptimizer
try:
import scipy.sparse as scipy_sparse # pylint: disable=g-import-not-at-top
except ImportError:
scipy_sparse = None
class CompileTest(keras_parameterized.TestCase):
def _get_multi_output_model(self):
input_a = keras.layers.Input(shape=(3,), name='input_a')
output_a = keras.layers.Dense(1, name='dense_1')(input_a)
output_b = keras.layers.Dense(1, name='dense_2')(input_a)
return keras.models.Model(input_a, [output_a, output_b])
def _do_test_compile_with_model_and_single_loss(self, model, loss):
model.compile(
optimizer='adam',
loss=loss,
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(model.loss, loss)
loss = losses.get(loss)
if not isinstance(loss, list):
loss_list = [loss] * len(model.outputs)
self.assertEqual(len(model.loss_functions), len(loss_list))
for i in range(len(loss_list)):
self.assertIsInstance(model.loss_functions[i], losses.LossFunctionWrapper)
if not isinstance(loss_list[i], losses.LossFunctionWrapper):
self.assertEqual(model.loss_functions[i].fn, loss_list[i])
self.assertAllEqual(model._loss_weights_list, [1.] * len(loss_list))
def test_respect_run_functions_eagerly(self):
with context.eager_mode():
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
model.compile('sgd', 'mse')
def_function.run_functions_eagerly(True)
self.assertTrue(model.run_eagerly)
def_function.run_functions_eagerly(False)
self.assertFalse(model.run_eagerly)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(('loss_string', 'mse'),
('loss_function', losses.mean_squared_error),
('loss_instance', losses.MeanSquaredError()))
def test_compile_with_single_output(self, loss):
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
self._do_test_compile_with_model_and_single_loss(model, loss)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(('loss_string', 'mse'),
('loss_function', losses.mean_squared_error),
('loss_instance', losses.MeanSquaredError()))
def test_compile_with_multi_output(self, loss):
model = self._get_multi_output_model()
self._do_test_compile_with_model_and_single_loss(model, loss)
@keras_parameterized.run_all_keras_modes
def test_compile_with_multi_output_and_multi_loss(self):
model = self._get_multi_output_model()
# Test loss is a list.
loss = ['mse', 'mae']
model.compile(
optimizer='adam',
loss=loss,
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(model.loss_functions[0].fn, losses.mean_squared_error)
self.assertEqual(model.loss_functions[1].fn, losses.mean_absolute_error)
self.assertAllEqual(model._loss_weights_list, [1., 1.])
# Test loss is a dict.
loss = {'dense_1': 'mae', 'dense_2': 'mse'}
model.compile(
optimizer='adam',
loss=loss,
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(model.loss_functions[0].fn, losses.mean_absolute_error)
self.assertEqual(model.loss_functions[1].fn, losses.mean_squared_error)
self.assertAllEqual(model._loss_weights_list, [1., 1.])
@keras_parameterized.run_all_keras_modes
def test_compile_with_multi_output_and_loss_weights_list(self):
model = self._get_multi_output_model()
loss_weights = [1., 2.]
model.compile(
optimizer='adam',
loss='mse',
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly())
self.assertAllEqual(model._loss_weights_list, [1., 2.])
def test_compile_with_multi_output_and_loss_weights_dict(self):
with context.graph_mode():
model = self._get_multi_output_model()
loss_weights = {'dense_1': 1., 'dense_2': 2.}
model.compile(optimizer='adam', loss='mse', loss_weights=loss_weights)
self.assertAllEqual(model._loss_weights_list, [1., 2.])
input_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 1))
output_b_np = np.random.random((10, 1))
with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
total_loss, y_preds = sess.run(
[model.total_loss, model.outputs],
feed_dict={
'input_a:0': input_np,
'dense_1_target:0': output_a_np,
'dense_2_target:0': output_b_np
})
self.assertAllClose(
total_loss,
np.mean(
np.add((output_a_np - y_preds[0])**2,
2 * (output_b_np - y_preds[1])**2)))
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_loss_size(self):
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
with self.assertRaisesRegexp(ValueError, 'The model has 1 outputs'):
model.compile(
optimizer='adam',
loss=['mse', 'mae'],
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_loss_key(self):
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
with self.assertRaisesRegexp(
ValueError,
r'Unknown entries in loss dictionary: \[\'unknown_output\'\]. '
r'Only expected following keys: \[\'dense_1\'\]'):
model.compile(
optimizer='adam',
loss={'unknown_output': 'mse'},
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_loss_weights_size(self):
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
with self.assertRaisesRegexp(ValueError,
'it should have one entry per model output'):
model.compile(
optimizer='adam',
loss='mse',
loss_weights=[1., 2.],
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_loss_weights_key(self):
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
with self.assertRaisesRegexp(
ValueError,
r'Unknown entries in loss_weights dictionary: \[\'unknown_output\'\]. '
r'Only expected following keys: \[\'dense_1\'\]'):
model.compile(
optimizer='adam',
loss='mse',
loss_weights={'unknown_output': 1.},
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_sample_weight_mode(self):
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
with self.assertRaisesRegexp(
ValueError,
r'Unknown entries in sample_weight_mode dictionary: \[\'unknown\'\]. '
r'Only expected following keys: \[\'dense_1\'\]'):
model.compile(
optimizer='adam',
loss='mse',
sample_weight_mode={'unknown': 'temporal'},
run_eagerly=testing_utils.should_run_eagerly())
class TrainingTest(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_fit_training_arg(self):
class ReturnTraining(keras.layers.Layer):
def call(self, inputs, training):
if training:
return inputs + array_ops.constant([100], 'float32')
else:
return inputs + array_ops.constant([0], 'float32')
model = keras.Sequential([ReturnTraining()])
model.compile('sgd', 'mse')
hist = model.fit(x=np.array([0.]), y=np.array([0.]))
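    # fit() calls the layer with training=True, so the output is 0 + 100 and the
    # MSE against the zero target is 100**2 = 10000.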
self.assertAllClose(hist.history['loss'][0], (10000,))
@keras_parameterized.run_with_all_model_types(exclude_models='sequential')
@keras_parameterized.run_all_keras_modes
def test_fit_on_arrays(self):
input_a = keras.layers.Input(shape=(3,), name='input_a')
input_b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
dropout = keras.layers.Dropout(0.5, name='dropout')
branch_a = [input_a, dense]
branch_b = [input_b, dense, dropout]
model = testing_utils.get_multi_io_model(branch_a, branch_b)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(
optimizer,
loss,
metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly())
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test fit at different verbosity
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=2)
model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
# Test model with input data as a list of lists
model.fit(
[np.ndarray.tolist(input_a_np), np.ndarray.tolist(input_b_np)],
[output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=2)
# Test with validation data
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=2)
# Test with validation split
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=0,
validation_split=0.2)
if testing_utils.get_model_type() == 'functional':
# Test with dictionary inputs
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
epochs=1,
batch_size=5,
verbose=0)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
epochs=1,
batch_size=5,
verbose=1)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
validation_data=({
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
}),
epochs=1,
batch_size=5,
verbose=0)
model.train_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
})
# Test with lists for loss, metrics
loss = ['mae', 'mse']
model.compile(
optimizer,
loss,
metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
# Test with dictionaries for loss, metrics, loss weights
if testing_utils.get_model_type() == 'functional':
loss = {'dense': 'mse', 'dropout': 'mae'}
loss_weights = {'dense': 1., 'dropout': 0.5}
metrics = {
'dense': 'mse',
'dropout': metrics_module.CategoricalAccuracy()
}
model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly())
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
# Invalid use cases
with self.assertRaises(ValueError):
model.train_on_batch({'input_a': input_a_np},
[output_d_np, output_e_np])
with self.assertRaises(AttributeError):
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
validation_data=([input_a_np, input_b_np], 0, 0),
verbose=0)
with self.assertRaises(ValueError):
model.train_on_batch([input_a_np], [output_d_np, output_e_np])
with self.assertRaises(AttributeError):
model.train_on_batch(1, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
model.train_on_batch(input_a_np, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_input = np.random.random((11, 3))
model.train_on_batch([bad_input, input_b_np],
[output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_target = np.random.random((11, 4))
model.train_on_batch([input_a_np, input_b_np],
[bad_target, output_e_np])
# Build single-input model
x = keras.layers.Input(shape=(3,), name='input_a')
y = keras.layers.Dense(4)(x)
model = keras.models.Model(x, y)
model.compile(optimizer, loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
# This will work
model.fit([input_a_np], output_d_np, epochs=1)
# TODO(gsundeep) Test only works in eager, file ticket
if testing_utils.should_run_eagerly() and context.executing_eagerly():
with self.assertRaises(ValueError):
model.fit([input_a_np, input_a_np], output_d_np, epochs=1)
# Test model on a list of floats
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 4))
model.fit([np.ndarray.tolist(input_a_np)],
[np.ndarray.tolist(input_b_np)],
epochs=2,
batch_size=5,
verbose=2)
@keras_parameterized.run_all_keras_modes
def test_evaluate_predict_on_arrays(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(
optimizer,
loss,
metrics=['mae', metrics_module.CategoricalAccuracy()],
loss_weights=loss_weights,
sample_weight_mode=None,
run_eagerly=testing_utils.should_run_eagerly())
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test evaluate at different verbosity
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=0)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=1)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=2)
self.assertEqual(len(out), 7)
out = model.test_on_batch([input_a_np, input_b_np],
[output_d_np, output_e_np])
self.assertEqual(len(out), 7)
# Test evaluate with dictionary inputs
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
batch_size=5,
verbose=0)
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
batch_size=5,
verbose=1)
# Test predict
out = model.predict([input_a_np, input_b_np], batch_size=5)
self.assertEqual(len(out), 2)
out = model.predict({'input_a': input_a_np, 'input_b': input_b_np})
self.assertEqual(len(out), 2)
out = model.predict_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
})
self.assertEqual(len(out), 2)
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_activity_regularizer_fit(self):
loss = {}
for reg in [None, 'l2']:
layers = [
keras.layers.Dense(
10, activation='relu', activity_regularizer=reg,
kernel_initializer='ones', use_bias=False),
keras.layers.Dense(
1, activation='sigmoid', kernel_initializer='ones',
use_bias=False),
]
model = testing_utils.get_model_from_layers(
layers, input_shape=(10,))
x = np.ones((10, 10), 'float32')
y = np.ones((10, 1), 'float32')
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(optimizer, 'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=5)
loss[reg] = model.evaluate(x, y)
self.assertLess(loss[None], loss['l2'])
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_activity_regularizer_loss_value(self):
layer = keras.layers.Dense(
1, kernel_initializer=keras.initializers.zeros(),
bias_initializer=keras.initializers.ones(), activity_regularizer='l2')
model = testing_utils.get_model_from_layers([layer], input_shape=(10,))
x = np.ones((10, 10), 'float32')
y = np.ones((10, 1), 'float32')
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(optimizer, 'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
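    # With a zero kernel and ones bias every output is 1, so the crossentropy
    # term is ~0 and the measured loss comes from the default l2(0.01)
    # activity penalty.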
loss = model.test_on_batch(x, y)
self.assertAlmostEqual(0.01, loss, places=4)
@keras_parameterized.run_all_keras_modes
def test_activity_regularizer_batch_independent(self):
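    # The activity-regularization penalty should be normalized per sample, so the
    # reported loss is the same for a batch of 10 identical rows and a batch of 20.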
inputs = keras.layers.Input(shape=(10,))
x = keras.layers.Dense(
10, activation='relu', activity_regularizer='l2')(
inputs)
outputs = keras.layers.Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs, outputs)
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(optimizer, 'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((10, 10), 'float32')
y = np.ones((10, 1), 'float32')
loss_small_batch = model.test_on_batch(x, y)
x2 = np.ones((20, 10), 'float32')
y2 = np.ones((20, 1), 'float32')
loss_big_batch = model.test_on_batch(x2, y2)
self.assertAlmostEqual(loss_small_batch, loss_big_batch, places=4)
@keras_parameterized.run_all_keras_modes
def test_activity_regularizer_in_model_call(self):
class MyModel(keras.Model):
def call(self, inputs):
self.add_loss(inputs)
return inputs
x = ops.convert_to_tensor(1.)
model = MyModel()
_ = model(x)
self.assertEqual(1, len(model.losses))
@keras_parameterized.run_all_keras_modes
def test_custom_mapping_in_config(self):
class MyModel(keras.Model):
def call(self, inputs):
return inputs
def get_config(self):
self.a = {}
return {'a': self.a}
model = MyModel()
self.assertIn('{"a": {}}', model.to_json())
@keras_parameterized.run_all_keras_modes
def test_training_on_sparse_data_with_dense_placeholders(self):
# TODO(kaftan) Test seems to not work, file ticket
if testing_utils.should_run_eagerly() and context.executing_eagerly():
self.skipTest('Skipping running model eagerly.')
if scipy_sparse is None:
return
test_inputs = [
scipy_sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)
]
test_outputs = [
scipy_sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)
]
in1 = keras.layers.Input(shape=(3,))
in2 = keras.layers.Input(shape=(3,))
out1 = keras.layers.Dropout(0.5, name='dropout')(in1)
out2 = keras.layers.Dense(4, name='dense_1')(in2)
model = keras.Model([in1, in2], [out1, out2])
model.predict(test_inputs, batch_size=2)
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(
optimizer,
'mse',
metrics=['mae', metrics_module.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(test_inputs, test_outputs,
epochs=1, batch_size=2, validation_split=0.5)
model.evaluate(test_inputs, test_outputs, batch_size=2)
@keras_parameterized.run_all_keras_modes
def test_compile_with_sparse_placeholders(self):
# TODO(kaftan) Test seems to not work, file ticket
if testing_utils.should_run_eagerly() and context.executing_eagerly():
self.skipTest('Skipping running model eagerly.')
input_layer = keras.layers.Input(shape=(10,), sparse=True)
weights = variables_lib.Variable(
np.ones((10, 1)).astype(np.float32), name='weights')
weights_mult = lambda x: sparse_ops.sparse_tensor_dense_matmul(x, weights)
output_layer = keras.layers.Lambda(weights_mult)(input_layer)
model = keras.Model([input_layer], output_layer)
model.compile(
loss='binary_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.0001),
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_that_trainable_disables_updates(self):
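    # Setting trainable=False before compile should drop both weight updates and
    # BatchNormalization's moving-statistics updates, so train_on_batch leaves
    # predictions unchanged; re-enabling trainability restores them.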
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
a = keras.layers.Input(shape=(4,))
layer = keras.layers.BatchNormalization(input_shape=(4,))
b = layer(a)
model = keras.Model(a, b)
model.trainable = False
assert not model.updates
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
model.trainable = True
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
assert model.updates
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
assert np.abs(np.sum(x1 - x2)) > 1e-5
layer.trainable = False
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
def test_logs_passed_to_callbacks(self):
with self.cached_session():
input_dim = 5
num_classes = 1
class TestCallback(Callback):
def __init__(self):
super(TestCallback, self).__init__()
self.epoch_end_logs = None
self.batch_end_logs = None
self.epoch_end_call_count = 0
self.batch_end_call_count = 0
def on_epoch_end(self, epoch, logs=None):
self.epoch_end_logs = logs
self.epoch_end_call_count += 1
def on_batch_end(self, batch, logs=None):
self.batch_end_logs = logs
self.batch_end_call_count += 1
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
model.compile(
loss='binary_crossentropy',
metrics=['acc'],
weighted_metrics=['mae'],
optimizer=RMSPropOptimizer(learning_rate=0.01))
np.random.seed(1337)
(x_train, y_train), (_, _) = testing_utils.get_test_data(
train_samples=10,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
test_callback = TestCallback()
model.fit(
x_train,
y_train,
batch_size=2,
epochs=2,
verbose=0,
callbacks=[test_callback],
validation_data=(x_train, y_train))
self.assertEqual(test_callback.batch_end_call_count, 10)
self.assertEqual(test_callback.epoch_end_call_count, 2)
weighted_metric = ('mae'
if tf2.enabled() else 'weighted_mean_absolute_error')
self.assertSetEqual(
set(test_callback.batch_end_logs.keys()),
set(['batch', 'size', 'acc', 'loss', weighted_metric]))
self.assertSetEqual(
set(test_callback.epoch_end_logs.keys()),
set([
'acc', 'loss', weighted_metric, 'val_acc', 'val_loss',
'val_' + weighted_metric
]))
@keras_parameterized.run_all_keras_modes
def test_mismatched_output_shape_and_target_shape(self):
model = keras.Sequential([
keras.layers.Dense(2, input_shape=(3, 4)),
keras.layers.Dense(5),
])
model.compile(RMSPropOptimizer(learning_rate=0.001),
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
# Test with Numpy data
x_train = np.random.random((10, 3, 4))
y_train = np.random.randint(0, 5, size=(10, 3))
model.fit(x_train, y_train, batch_size=5, epochs=1)
# Test with iterator
dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.repeat(10)
dataset = dataset.batch(10)
iterator = dataset_ops.make_one_shot_iterator(dataset)
model.fit(iterator, epochs=1, steps_per_epoch=2)
if context.executing_eagerly():
# Test with eager execution
model.compile(RMSPropOptimizer(learning_rate=0.001),
loss='sparse_categorical_crossentropy',
run_eagerly=True)
model.fit(x_train, y_train, batch_size=5, epochs=1)
# Test with eager execution and iterator
model.fit(iterator, epochs=1, steps_per_epoch=2)
def test_losses_in_defun(self):
with context.eager_mode():
layer = keras.layers.Dense(1, kernel_regularizer='l1')
layer(array_ops.ones([1, 10]))
@function.defun
def get_losses():
return layer.losses
self.assertAllEqual(
self.evaluate(layer.losses), self.evaluate(get_losses()))
@keras_parameterized.run_all_keras_modes
def test_logging(self):
mock_stdout = io.BytesIO() if six.PY2 else io.StringIO()
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(
RMSPropOptimizer(learning_rate=0.001), loss='binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
with test.mock.patch.object(sys, 'stdout', mock_stdout):
model.fit(
np.ones((10, 10), 'float32'), np.ones((10, 1), 'float32'), epochs=10)
self.assertTrue('Epoch 5/10' in mock_stdout.getvalue())
@tf_test_util.run_in_graph_and_eager_modes
def test_training_with_loss_instance(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
loss_weights = [1., 0.5]
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss=keras.losses.MeanSquaredError(),
metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
loss_weights=loss_weights)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.fit([input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5)
@tf_test_util.run_in_graph_and_eager_modes
def test_static_batch_in_input_layer(self):
class Counter(keras.callbacks.Callback):
def __init__(self):
self.batches = 0
def on_batch_end(self, batch, logs=None):
self.batches += 1
x, y = np.ones((64, 10), 'float32'), np.ones((64, 1), 'float32')
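    # With no static batch size, fit() falls back to the default batch_size of 32
    # (2 batches of the 64 samples); a static batch size of 4 on the Input forces
    # 16 batches per epoch.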
for batch_size, expected_batches in [(None, 2), (4, 16)]:
inputs = keras.Input(batch_size=batch_size, shape=(10,))
outputs = keras.layers.Dense(1, activation='sigmoid')(inputs)
model = keras.Model(inputs, outputs)
model.compile(keras.optimizer_v2.adam.Adam(0.001), 'binary_crossentropy')
counter = Counter()
model.fit(x, y, callbacks=[counter])
self.assertEqual(counter.batches, expected_batches)
model = keras.Sequential(
[keras.layers.Dense(1, batch_input_shape=(batch_size, 10))])
model.compile(keras.optimizer_v2.adam.Adam(0.001), 'binary_crossentropy')
counter = Counter()
model.fit(x, y, callbacks=[counter])
self.assertEqual(counter.batches, expected_batches)
@tf_test_util.run_in_graph_and_eager_modes
def test_static_batch_in_input_layer_consistency_checks(self):
x, y = np.ones((64, 10), 'float32'), np.ones((64, 1), 'float32')
inputs = keras.Input(batch_size=2, shape=(10,))
outputs = keras.layers.Dense(1, activation='sigmoid')(inputs)
model = keras.Model(inputs, outputs)
model.compile(keras.optimizer_v2.adam.Adam(0.001), 'binary_crossentropy')
with self.assertRaisesRegexp(ValueError,
'incompatible with the specified batch size'):
model.fit(x, y, batch_size=4)
data = dataset_ops.DatasetV2.from_tensor_slices((x, y))
data = data.batch(4, drop_remainder=True)
with self.assertRaisesRegexp(ValueError,
'incompatible with the specified batch size'):
model.fit(data, steps_per_epoch=16)
@tf_test_util.run_in_graph_and_eager_modes
def test_compatible_batch_size_functional_model(self):
class MyLayer(keras.layers.Layer):
def call(self, inputs):
return array_ops.concat(inputs, axis=0)
input1 = keras.Input(batch_size=2, shape=(10,))
input2 = keras.Input(batch_size=3, shape=(10,))
outputs = MyLayer()([input1, input2])
with self.assertRaisesRegexp(ValueError,
'specified batch sizes of the Input Layers'):
keras.Model([input1, input2], outputs)
@tf_test_util.run_in_graph_and_eager_modes
def test_calling_subclass_model_on_different_datasets(self):
class SubclassedModel(keras.models.Model):
def call(self, inputs):
return inputs * 2
model = SubclassedModel()
dataset_one = dataset_ops.Dataset.range(2).batch(2)
dataset_two = dataset_ops.Dataset.range(3, 10).batch(2)
self.assertAllEqual([[0], [2]], model.predict(dataset_one, steps=1))
self.assertAllEqual([[6], [8], [10], [12]],
model.predict(dataset_two, steps=2))
def test_training_on_sparse_categorical_crossentropy_loss_with_softmax(self):
with context.eager_mode():
np.random.seed(1337)
train_x = np.ones((100, 4))
train_y = np.random.randint(0, 1, size=(100, 1))
reference_model = testing_utils.get_small_sequential_mlp(16, 2,
input_dim=4)
reference_model.compile(loss='sparse_categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=True)
fixed_weights = reference_model.get_weights()
reference_model_loss = reference_model.train_on_batch(train_x, train_y)
test_model = testing_utils.get_small_sequential_mlp(16, 2, input_dim=4)
test_model.compile(loss='sparse_categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=False)
test_model.set_weights(fixed_weights)
test_model_loss = test_model.train_on_batch(train_x, train_y)
self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4)
def test_training_on_categorical_crossentropy_loss_with_softmax(self):
with context.eager_mode():
np.random.seed(1337)
train_x = np.ones((100, 4))
train_y = keras.utils.to_categorical(np.random.randint(0, 1,
size=(100, 1)), 2)
reference_model = testing_utils.get_small_sequential_mlp(16, 2,
input_dim=4)
reference_model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=True)
fixed_weights = reference_model.get_weights()
reference_model_loss = reference_model.train_on_batch(train_x, train_y)
test_model = testing_utils.get_small_sequential_mlp(16, 2, input_dim=4)
test_model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=False)
test_model.set_weights(fixed_weights)
test_model_loss = test_model.train_on_batch(train_x, train_y)
self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4)
def test_training_on_binary_crossentropy_loss(self):
with context.eager_mode():
train_x = np.ones((100, 4), dtype=np.float32)
train_y = np.ones((100, 1), dtype=np.float32)
reference_model = testing_utils.get_small_sequential_mlp(16, 1,
input_dim=4)
reference_model.compile(loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=True)
fixed_weights = reference_model.get_weights()
reference_model_loss = reference_model.train_on_batch(train_x, train_y)
test_model = testing_utils.get_small_sequential_mlp(16, 1, input_dim=4)
test_model.compile(loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=False)
test_model.set_weights(fixed_weights)
test_model_loss = test_model.train_on_batch(train_x, train_y)
self.assertAlmostEqual(test_model_loss, reference_model_loss, places=4)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
('default', 1, 4), ('integer_two', 2, 2), ('integer_four', 4, 1),
('simple_list', [1, 3, 4], 3), ('duplicated_list', [4, 2, 2], 2))
def test_validation_freq(self, validation_freq, expected_runs):
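    # validation_freq may be an int (validate every N epochs) or a list of epochs
    # at which to validate; ValCounter counts how many evaluation runs actually
    # happen across the 4 training epochs.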
x, y = np.ones((10, 10)), np.ones((10, 1))
model = testing_utils.get_small_mlp(2, 1, 10)
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
class ValCounter(keras.callbacks.Callback):
def __init__(self):
self.val_runs = 0
def on_test_begin(self, logs=None):
self.val_runs += 1
val_counter = ValCounter()
model.fit(
x,
y,
epochs=4,
validation_data=(x, y),
validation_freq=validation_freq,
callbacks=[val_counter])
self.assertEqual(val_counter.val_runs, expected_runs)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_validation_steps_without_data(self):
x, y = np.ones((10, 10)), np.ones((10, 1))
model = testing_utils.get_small_mlp(2, 1, 10)
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
with self.assertRaisesRegexp(
ValueError, '`validation_steps` should not be specified if '
'`validation_data` is None.'):
model.fit(x, y, epochs=4, validation_data=None, validation_steps=3)
@keras_parameterized.run_all_keras_modes
def test_add_loss_correctness(self):
class Bias(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_variable('bias', (1,), initializer='zeros')
def call(self, inputs):
return inputs + self.bias
inputs = keras.Input(shape=(1,))
targets = keras.Input(shape=(1,))
outputs = Bias()(inputs)
model = keras.Model([inputs, targets], outputs)
model.add_loss(2 * math_ops.reduce_mean(
keras.losses.mean_absolute_error(targets, outputs)))
model.add_loss(keras.losses.MeanAbsoluteError()(targets, outputs))
model.compile(
keras.optimizer_v2.gradient_descent.SGD(0.025),
loss=keras.losses.MeanAbsoluteError(),
run_eagerly=testing_utils.should_run_eagerly())
x = np.array([[0.], [1.], [2.]])
y = np.array([[0.5], [2.], [3.5]])
history = model.fit([x, y], y, batch_size=3, epochs=5)
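    # The bias starts at zero, so the initial MAE is 1 and the three loss terms
    # (compile loss plus the 2x and 1x added MAE terms) sum to 4; each full-batch
    # SGD step at lr=0.025 lowers the total by 0.4.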
self.assertAllClose(history.history['loss'], [4., 3.6, 3.2, 2.8, 2.4], 1e-3)
@keras_parameterized.run_all_keras_modes
def test_unconditional_add_loss_correctness(self):
class MyLayer(keras.layers.Layer):
def call(self, inputs, training=None):
# Reachable from the inputs but marked as unconditional.
self.add_loss(math_ops.reduce_sum(inputs))
return inputs
inputs = keras.Input((3,))
layer = MyLayer()
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
self.assertEqual(len(model.losses), 1)
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
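    # MSE between identical all-ones tensors is 0, so the returned loss is just
    # the added reduce_sum over the (2, 3) batch of ones, i.e. 6.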
loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(loss, 2 * 3)
@keras_parameterized.run_all_keras_modes
def test_clear_losses(self):
class LayerWithSharedNestedLossLayer(keras.layers.Layer):
def __init__(self):
super(LayerWithSharedNestedLossLayer, self).__init__()
self.loss_layer = keras.layers.ActivityRegularization(l2=0.001)
self.add_weight(shape=(1,), regularizer='l2')
def call(self, x):
x = self.loss_layer(x)
return self.loss_layer(x)
inputs = keras.Input(shape=(1,))
outputs = LayerWithSharedNestedLossLayer()(inputs)
model = keras.Model(inputs, outputs)
# Weight loss + 2 activity losses.
self.assertEqual(len(model.losses), 3)
x = array_ops.ones((1, 1))
model(x)
y = array_ops.ones((1, 1))
model(y)
if context.executing_eagerly():
# Eager losses are cleared every `__call__`.
self.assertEqual(len(model.losses), 3)
else:
self.assertEqual(len(model.get_losses_for(x)), 2)
self.assertEqual(len(model.get_losses_for(y)), 2)
self.assertEqual(len(model.get_losses_for(None)), 1)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_layer_with_variable_output(self):
class VariableOutputLayer(keras.layers.Layer):
def build(self, input_shape):
self.v = self.add_weight('output_var', shape=(2, 5), initializer='ones')
def call(self, inputs):
return self.v
model = testing_utils.get_model_from_layers(
[VariableOutputLayer(), keras.layers.Dense(1)], input_shape=(10,))
# TODO(omalleyt): Make this work with `run_eagerly=True`.
model.compile('sgd', 'mse', run_eagerly=False)
model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2, epochs=5)
self.assertLen(model.trainable_variables, 3)
# TODO(b/131372221): Make this work with subclassed models.
@keras_parameterized.run_with_all_model_types(exclude_models=['subclass'])
@keras_parameterized.run_all_keras_modes
def test_model_dtype(self):
class AssertTypeLayer(keras.layers.Layer):
def __init__(self, assert_type=None, **kwargs):
super(AssertTypeLayer, self).__init__(**kwargs)
self.assert_type = assert_type
def call(self, inputs):
assert inputs.dtype.name == self.assert_type, (
'Input tensor has type %s which does not match assert type %s' %
(inputs.dtype.name, self.assert_type))
return inputs + 1.
for dtype in ('float16', 'float32', 'float64'):
model = testing_utils.get_model_from_layers([AssertTypeLayer(dtype)],
input_shape=(10,),
input_dtype=dtype)
model.compile('sgd', 'mse',
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((10, 10), dtype=dtype)
y = np.ones((10, 10), dtype=dtype)
model.fit(x, y)
model.test_on_batch(x, y)
model(x)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_subclassed_model_with_training_arg(self):
class LayerWithTrainingArg(keras.layers.Layer):
def call(self, inputs, training=None):
self.training = training
return inputs
class ModelWithTrainingArg(keras.Model):
def __init__(self):
super(ModelWithTrainingArg, self).__init__()
self.l1 = LayerWithTrainingArg()
def call(self, inputs, training=None):
self.training = training
inputs = self.l1(inputs, training=training)
return inputs
x = np.zeros((1, 2))
model = ModelWithTrainingArg()
model.compile(
loss='mse',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, x, epochs=1)
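    # When running eagerly the training argument resolves to a concrete Python
    # bool (True during fit); in graph mode it remains the symbolic
    # learning-phase tensor.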
if testing_utils.should_run_eagerly():
expected_training_arg = True
else:
expected_training_arg = keras.backend.symbolic_learning_phase()
self.assertEqual(model.training, expected_training_arg)
self.assertEqual(model.l1.training, expected_training_arg)
@keras_parameterized.run_all_keras_modes
def test_error_when_model_is_not_compiled(self):
inputs = keras.Input(shape=(1,))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
with self.assertRaisesRegex(RuntimeError, 'must compile your model'):
model.fit(np.ones((1, 1)), np.ones((1, 1)))
class MyModel(keras.Model):
def call(self, x):
self.add_loss(math_ops.reduce_sum(x))
return x
model = MyModel()
with self.assertRaisesRegex(RuntimeError, 'must compile your model'):
model.fit(np.random.random((32, 1)), epochs=2)
class TestExceptionsAndWarnings(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_invalid_loss(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(optimizer, loss='categorical_crossentropy')
np.random.seed(1337)
(x_train, y_train), (_, _) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
with self.assertRaises(ValueError):
model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))
if not context.executing_eagerly():
# TODO(psv): Investigate these use cases in eager mode.
with self.assertRaises(ValueError):
model.fit(x_train, y_train)
with self.assertRaises(ValueError):
model.compile(optimizer, loss=None,
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_compile_warning_for_loss_missing_output(self):
with self.cached_session():
inp = keras.layers.Input(shape=(16,), name='input_a')
out_1 = keras.layers.Dense(8, name='dense_1')(inp)
out_2 = keras.layers.Dense(3, activation='softmax', name='dense_2')(out_1)
model = keras.models.Model(inputs=[inp], outputs=[out_1, out_2])
optimizer = RMSPropOptimizer(learning_rate=0.001)
with test.mock.patch.object(logging, 'warning') as mock_log:
model.compile(
optimizer,
loss={
'dense_2': 'categorical_crossentropy',
},
metrics={
'dense_2': 'categorical_accuracy',
'dense_1': metrics_module.CategoricalAccuracy(),
},
run_eagerly=testing_utils.should_run_eagerly())
msg = ('Output dense_1 missing from loss dictionary. We assume this '
'was done on purpose. The fit and evaluate APIs will not be '
'expecting any data to be passed to dense_1.')
self.assertRegexpMatches(str(mock_log.call_args), msg)
@keras_parameterized.run_all_keras_modes
def test_invalid_steps_per_epoch_usage(self):
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(1)(x)
model = keras.Model(x, y)
model.compile(
'sgd', loss='mse', run_eagerly=testing_utils.should_run_eagerly())
err_msg = 'When passing input data as arrays, do not specify'
if testing_utils.should_run_eagerly():
with self.assertRaisesRegex(ValueError, err_msg):
model.fit(x=np.zeros((100, 1)), y=np.ones((100, 1)), steps_per_epoch=4)
with self.assertRaisesRegex(ValueError, err_msg):
model.evaluate(x=np.zeros((100, 1)), y=np.ones((100, 1)), steps=4)
with self.assertRaisesRegex(ValueError, err_msg):
model.predict(np.zeros((100, 1)), steps=4)
else:
with test.mock.patch.object(logging, 'warning') as mock_log:
model.fit(x=np.zeros((100, 1)), y=np.ones((100, 1)), steps_per_epoch=4)
self.assertRegexpMatches(str(mock_log.call_args), err_msg)
with test.mock.patch.object(logging, 'warning') as mock_log:
model.evaluate(x=np.zeros((100, 1)), y=np.ones((100, 1)), steps=4)
self.assertRegexpMatches(str(mock_log.call_args), err_msg)
with test.mock.patch.object(logging, 'warning') as mock_log:
model.predict(np.zeros((100, 1)), steps=4)
self.assertRegexpMatches(str(mock_log.call_args), err_msg)
class LossWeightingTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_class_weights(self):
num_classes = 5
batch_size = 5
epochs = 10
weighted_class = 3
weight = 10.
train_samples = 1000
test_samples = 1000
input_dim = 5
learning_rate = 0.001
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
model.compile(
loss='categorical_crossentropy',
metrics=['acc', metrics_module.CategoricalAccuracy()],
weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
optimizer=RMSPropOptimizer(learning_rate=learning_rate),
run_eagerly=testing_utils.should_run_eagerly())
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = weight
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
class_weight=class_weight,
validation_data=(x_train, y_train))
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 2,
verbose=0,
class_weight=class_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 2,
verbose=0,
class_weight=class_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size], y_train[:batch_size], class_weight=class_weight)
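    # With class 3 heavily up-weighted during training, examples of that class
    # should end up with a lower loss than the test set as a whole.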
ref_score = model.evaluate(x_test, y_test, verbose=0)
score = model.evaluate(
x_test[test_ids, :], y_test[test_ids, :], verbose=0)
self.assertLess(score[0], ref_score[0])
@keras_parameterized.run_all_keras_modes
def test_sample_weights(self):
num_classes = 5
batch_size = 5
epochs = 10
weighted_class = 3
weight = 10.
train_samples = 1000
test_samples = 1000
input_dim = 5
learning_rate = 0.001
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
model.compile(
RMSPropOptimizer(learning_rate=learning_rate),
metrics=['acc', metrics_module.CategoricalAccuracy()],
weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
loss='categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
np.random.seed(43)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = weight
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=sample_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=sample_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
model.test_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
ref_score = model.evaluate(
x_test, y_test, verbose=0, sample_weight=sample_weight)
score = model.evaluate(
x_test[test_ids, :],
y_test[test_ids, :],
verbose=0,
sample_weight=sample_weight[test_ids])
self.assertLess(score[0], ref_score[0])
@keras_parameterized.run_all_keras_modes
def test_temporal_sample_weights(self):
num_classes = 5
batch_size = 5
epochs = 10
weighted_class = 3
weight = 10.
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
learning_rate = 0.001
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = weight
temporal_x_train = np.reshape(x_train, (len(x_train), 1,
x_train.shape[1]))
temporal_x_train = np.repeat(temporal_x_train, timesteps, axis=1)
temporal_x_test = np.reshape(x_test, (len(x_test), 1, x_test.shape[1]))
temporal_x_test = np.repeat(temporal_x_test, timesteps, axis=1)
temporal_y_train = np.reshape(y_train, (len(y_train), 1,
y_train.shape[1]))
temporal_y_train = np.repeat(temporal_y_train, timesteps, axis=1)
temporal_y_test = np.reshape(y_test, (len(y_test), 1, y_test.shape[1]))
temporal_y_test = np.repeat(temporal_y_test, timesteps, axis=1)
temporal_sample_weight = np.reshape(sample_weight, (len(sample_weight),
1))
temporal_sample_weight = np.repeat(
temporal_sample_weight, timesteps, axis=1)
model.compile(
RMSPropOptimizer(learning_rate=learning_rate),
loss='categorical_crossentropy',
metrics=['acc', metrics_module.CategoricalAccuracy()],
weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
sample_weight_mode='temporal',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(
temporal_x_train,
temporal_y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=temporal_sample_weight)
model.fit(
temporal_x_train,
temporal_y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=temporal_sample_weight,
validation_split=0.1)
model.train_on_batch(
temporal_x_train[:batch_size],
temporal_y_train[:batch_size],
sample_weight=temporal_sample_weight[:batch_size])
model.test_on_batch(
temporal_x_train[:batch_size],
temporal_y_train[:batch_size],
sample_weight=temporal_sample_weight[:batch_size])
ref_score = model.evaluate(temporal_x_test, temporal_y_test, verbose=0)
if not context.executing_eagerly():
score = model.evaluate(
temporal_x_test[test_ids], temporal_y_test[test_ids], verbose=0)
self.assertLess(score[0], ref_score[0])
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types(exclude_models='sequential')
def test_fit_with_incorrect_weights(self):
input_a = keras.layers.Input(shape=(3,), name='input_a')
input_b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(2, name='output_1')
dropout = keras.layers.Dropout(0.5, name='output_2')
branch_a = [input_a, dense]
branch_b = [input_b, dense, dropout]
model = testing_utils.get_multi_io_model(branch_a, branch_b)
model.compile(
optimizer='adam',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
x = np.random.random((10, 3))
y = np.random.random((10, 2))
with self.assertRaisesRegexp(
ValueError,
r'Unknown entries in sample_weight dictionary: \[\'unknown\'\]. '
r'Only expected following keys: \[\'output_1\', \'output_2\'\]'):
model.fit([x, x], [y, y],
epochs=1,
sample_weight={'unknown': 'something'})
with self.assertRaisesRegexp(
ValueError,
r'Unknown entries in class_weight dictionary: \[\'unknown\'\]. '
r'Only expected following keys: \[\'output_1\', \'output_2\'\]'):
model.fit([x, x], [y, y], epochs=1, class_weight={'unknown': 'something'})
@keras_parameterized.run_all_keras_modes
def test_class_weight_invalid_use_case(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
learning_rate = 0.001
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
optimizer = RMSPropOptimizer(learning_rate=learning_rate)
model.compile(optimizer, loss='binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
del class_weight[1]
with self.assertRaises(ValueError):
model.fit(x_train, y_train,
epochs=0, verbose=0, class_weight=class_weight)
with self.assertRaises(ValueError):
model.compile(
optimizer, loss='binary_crossentropy', sample_weight_mode=[],
run_eagerly=testing_utils.should_run_eagerly())
# Build multi-output model
x = keras.Input((3,))
y1 = keras.layers.Dense(4, name='1')(x)
y2 = keras.layers.Dense(4, name='2')(x)
model = keras.models.Model(x, [y1, y2])
model.compile(optimizer, loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
x_np = np.random.random((10, 3))
y_np = np.random.random((10, 4))
w_np = np.random.random((10,))
# This will work
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight={'1': w_np})
# These will not
with self.assertRaises(ValueError):
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight=[w_np])
with self.assertRaises(TypeError):
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight=w_np)
with self.assertRaises(ValueError):
bad_w_np = np.random.random((11,))
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2))
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2, 2))
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight={'1': bad_w_np})
@keras_parameterized.run_all_keras_modes
def test_default_sample_weight(self):
"""Verifies that fit works without having to set sample_weight."""
num_classes = 5
input_dim = 5
timesteps = 3
learning_rate = 0.001
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
x = np.random.random((10, timesteps, input_dim))
y = np.random.random((10, timesteps, num_classes))
optimizer = RMSPropOptimizer(learning_rate=learning_rate)
# sample_weight_mode is a list and mode value is None
model.compile(optimizer, loss='mse', sample_weight_mode=[None],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a list and mode value is `temporal`
model.compile(optimizer, loss='mse', sample_weight_mode=['temporal'],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a dict and mode value is None
model.compile(
optimizer, loss='mse', sample_weight_mode={'time_distributed': None},
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a dict and mode value is `temporal`
model.compile(
optimizer,
loss='mse',
sample_weight_mode={'time_distributed': 'temporal'},
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=10)
      # sample_weight_mode is not a list/dict and mode value is None
model.compile(optimizer, loss='mse', sample_weight_mode=None,
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=10)
      # sample_weight_mode is not a list/dict and mode value is `temporal`
model.compile(optimizer, loss='mse', sample_weight_mode='temporal',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=10)
def test_sample_weight_tensor(self):
"""Tests that sample weight may be defined as a tensor in the graph."""
with context.graph_mode():
# Create a simple pass-through model
input_layer = keras.layers.Input(shape=1, name='input_layer')
model = keras.Model(inputs=input_layer, outputs=input_layer)
model.compile(
loss='mean_absolute_error',
optimizer='adam')
# Prepare sample weights iterator tensor
sample_weights = array_ops.constant(
[[0, .4, 1, 1], [2, .4, .3, 1]])
dataset = dataset_ops.Dataset.from_tensor_slices(sample_weights)
sample_weights = dataset_ops.make_one_shot_iterator(dataset).get_next()
sample_weights = training_utils.standardize_sample_weights(
sample_weights, model.output_names)
# Update model loss with sample weight tensor.
model._compile_weights_loss_and_weighted_metrics(sample_weights)
feeds = {'input_layer:0': [[0], [0], [0], [0]],
'input_layer_target:0': [[1], [1], [1], [1]]}
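      # Each sess.run pulls the next sample-weight row from the iterator; the
      # pass-through predictions are 0 against targets of 1, so the per-sample
      # absolute error is 1 and total_loss equals the mean of that row's weights.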
with self.cached_session() as sess:
self.assertAllClose(
(.4 + 1 + 1) / 4, sess.run(model.total_loss, feed_dict=feeds))
self.assertAllClose(
            (2 + .4 + .3 + 1) / 4, sess.run(model.total_loss, feed_dict=feeds))
def test_prepare_sample_weights(self):
# pylint:disable=anomalous-backslash-in-string
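    # The expected weight shape depends on sample_weight_mode ('temporal' expects
    # rank-2 [None, None], 'samplewise' expects rank-1 [None]), and the number of
    # weights must match the number of outputs; mismatches should raise.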
input_layer = keras.layers.Input(shape=1, name='input_layer')
model = keras.Model(inputs=input_layer, outputs=[input_layer, input_layer])
sample_weights = array_ops.constant([0, .4, 1, 1])
temporal_weights = array_ops.constant([[1, 2], [3, 4], [5, 6]])
model.compile(
loss='mean_absolute_error',
optimizer='adam',
sample_weight_mode=None)
with self.assertRaises(AssertionError):
model._prepare_sample_weights([sample_weights, sample_weights])
model.compile(loss='mean_absolute_error', optimizer='adam',
sample_weight_mode='temporal')
model._prepare_sample_weights([temporal_weights, temporal_weights])
with self.assertRaisesRegexp(ValueError, 'Expected shape \[None, None\]'):
model._prepare_sample_weights([sample_weights, sample_weights])
with self.assertRaisesRegexp(ValueError,
'sample weights must have same length as the '
'number of outputs'):
model._prepare_sample_weights([temporal_weights])
model.compile(loss='mean_absolute_error', optimizer='adam',
sample_weight_mode='samplewise')
model._prepare_sample_weights([sample_weights, sample_weights])
with self.assertRaisesRegexp(ValueError, 'Expected shape \[None\]'):
model._prepare_sample_weights([temporal_weights, temporal_weights])
# pylint:enable=anomalous-backslash-in-string
@keras_parameterized.run_all_keras_modes
class MaskingTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Masking(mask_value=0),
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one'))
]
model = testing_utils.get_model_from_layers(layers, input_shape)
model.compile(
loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
def test_masking(self):
model = self._get_model(input_shape=(2, 1))
x = np.array([[[1], [1]], [[0], [0]]])
y = np.array([[[1], [1]], [[1], [1]]])
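    # The all-zero sample is masked out, and the remaining timesteps (1 through a
    # kernel of ones) match the target exactly, so the loss is 0.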
loss = model.train_on_batch(x, y)
self.assertEqual(loss, 0)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
def test_masking_deferred(self):
model = self._get_model()
x = np.array([[[1], [1]], [[0], [0]]])
y = np.array([[[1], [1]], [[1], [1]]])
loss = model.train_on_batch(x, y)
self.assertEqual(loss, 0)
def test_mask_argument_in_layer(self):
# Test that the mask argument gets correctly passed to a layer in the
# functional API.
class CustomMaskedLayer(keras.layers.Layer):
def __init__(self):
super(CustomMaskedLayer, self).__init__()
self.supports_masking = True
def call(self, inputs, mask=None):
assert mask is not None
return inputs
def compute_output_shape(self, input_shape):
return input_shape
x = np.random.random((5, 3))
inputs = keras.layers.Input((3,))
masked = keras.layers.Masking(mask_value=0)(inputs)
outputs = CustomMaskedLayer()(masked)
model = keras.Model(inputs, outputs)
model.compile(
loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
y = np.random.random((5, 3))
model.train_on_batch(x, y)
@keras_parameterized.run_all_keras_modes
class TestDynamicTrainability(keras_parameterized.TestCase):
def test_trainable_warning(self):
x = np.random.random((5, 3))
y = np.random.random((5, 2))
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_dim=3))
model.trainable = False
model.compile(
'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.trainable = True
model.train_on_batch(x, y)
self.assertRaises(Warning)
def test_trainable_argument(self):
with self.cached_session():
x = np.random.random((5, 3))
y = np.random.random((5, 2))
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_dim=3, trainable=False))
model.compile(
'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly())
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
self.assertAllClose(out, out_2)
# test with nesting
inputs = keras.layers.Input(shape=(3,))
output = model(inputs)
model = keras.models.Model(inputs, output)
model.compile(
'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly())
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
self.assertAllClose(out, out_2)
def test_layer_trainability_switch(self):
# with constructor argument, in Sequential
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, trainable=False, input_dim=1))
self.assertListEqual(model.trainable_weights, [])
# by setting the `trainable` argument, in Sequential
model = keras.models.Sequential()
layer = keras.layers.Dense(2, input_dim=1)
model.add(layer)
self.assertListEqual(model.trainable_weights, layer.trainable_weights)
layer.trainable = False
self.assertListEqual(model.trainable_weights, [])
# with constructor argument, in Model
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(2, trainable=False)(x)
model = keras.models.Model(x, y)
self.assertListEqual(model.trainable_weights, [])
# by setting the `trainable` argument, in Model
x = keras.layers.Input(shape=(1,))
layer = keras.layers.Dense(2)
y = layer(x)
model = keras.models.Model(x, y)
self.assertListEqual(model.trainable_weights, layer.trainable_weights)
layer.trainable = False
self.assertListEqual(model.trainable_weights, [])
def test_model_trainability_switch(self):
# a non-trainable model has no trainable weights
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(x, y)
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
# same for Sequential
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_dim=1))
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
def test_nested_model_trainability(self):
# a Sequential inside a Model
inner_model = keras.models.Sequential()
inner_model.add(keras.layers.Dense(2, input_dim=1))
x = keras.layers.Input(shape=(1,))
y = inner_model(x)
outer_model = keras.models.Model(x, y)
self.assertListEqual(outer_model.trainable_weights,
inner_model.trainable_weights)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
# a Sequential inside a Sequential
inner_model = keras.models.Sequential()
inner_model.add(keras.layers.Dense(2, input_dim=1))
outer_model = keras.models.Sequential()
outer_model.add(inner_model)
self.assertListEqual(outer_model.trainable_weights,
inner_model.trainable_weights)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
# a Model inside a Model
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(2)(x)
inner_model = keras.models.Model(x, y)
x = keras.layers.Input(shape=(1,))
y = inner_model(x)
outer_model = keras.models.Model(x, y)
self.assertListEqual(outer_model.trainable_weights,
inner_model.trainable_weights)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
# a Model inside a Sequential
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(2)(x)
inner_model = keras.models.Model(x, y)
outer_model = keras.models.Sequential()
outer_model.add(inner_model)
self.assertListEqual(outer_model.trainable_weights,
inner_model.trainable_weights)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
def test_gan_workflow(self):
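    # Trainability of the shared BatchNormalization is captured at compile time:
    # model1 is compiled while it is frozen and model2 while it is trainable, so
    # only model2's train step changes the shared weights.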
shared_layer = keras.layers.BatchNormalization()
inputs1 = keras.Input(10)
outputs1 = shared_layer(inputs1)
model1 = keras.Model(inputs1, outputs1)
shared_layer.trainable = False
model1.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
inputs2 = keras.Input(10)
outputs2 = shared_layer(inputs2)
model2 = keras.Model(inputs2, outputs2)
shared_layer.trainable = True
model2.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 10))
out1_0 = model1.predict_on_batch(x)
model1.train_on_batch(x, y)
out1_1 = model1.predict_on_batch(x)
self.assertAllClose(out1_0, out1_1)
out2_0 = model2.predict_on_batch(x)
model2.train_on_batch(x, y)
out2_1 = model2.predict_on_batch(x)
self.assertNotAllClose(out2_0, out2_1)
class TestTrainingWithDataTensors(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_training_and_eval_methods_on_symbolic_tensors_single_io(self):
# TODO(kaftan) Test seems to not work, file ticket
if context.executing_eagerly():
self.skipTest('Skipping eager execution.')
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer,
loss,
metrics=['mae', metrics_module.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
inputs = keras.backend.zeros(shape=(10, 3))
targets = keras.backend.zeros(shape=(10, 4))
model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0)
model.evaluate(inputs, targets, steps=2, verbose=0)
model.predict(inputs, steps=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
model.fit(inputs, targets,
epochs=1, steps_per_epoch=2, verbose=0,
validation_data=(inputs, targets), validation_steps=2)
# Test with dynamic shape
inputs = array_ops.placeholder_with_default(
np.zeros((2, 3)), shape=tensor_shape.TensorShape([None, 3]))
targets = array_ops.placeholder_with_default(
np.zeros((2, 4)), shape=tensor_shape.TensorShape([None, 4]))
self.assertEqual(inputs.shape.dims[0].value, None)
model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0)
model.evaluate(inputs, targets, steps=2, verbose=0)
model.predict(inputs, steps=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
model.fit(inputs, targets,
epochs=1, steps_per_epoch=2, verbose=0,
validation_data=(inputs, targets), validation_steps=2)
@keras_parameterized.run_all_keras_modes
def test_training_and_eval_methods_on_symbolic_tensors_multi_io(self):
# TODO(kaftan) Test seems to not work, file ticket
if context.executing_eagerly():
self.skipTest('Skipping eager execution.')
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(
optimizer,
loss,
metrics=['mae', metrics_module.CategoricalAccuracy()],
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly())
input_a_tf = keras.backend.zeros(shape=(10, 3))
input_b_tf = keras.backend.zeros(shape=(10, 3))
output_d_tf = keras.backend.zeros(shape=(10, 4))
output_e_tf = keras.backend.zeros(shape=(10, 4))
model.fit(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
epochs=1,
steps_per_epoch=2,
verbose=0)
with self.assertRaisesRegexp(ValueError,
'should specify the `steps_per_epoch`'):
model.fit(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
epochs=1,
batch_size=5,
verbose=0)
model.train_on_batch([input_a_tf, input_b_tf], [output_d_tf, output_e_tf])
# Test with dictionary inputs
model.fit(
{'input_a': input_a_tf,
'input_b': input_b_tf},
{'dense': output_d_tf,
'dropout': output_e_tf},
epochs=1,
steps_per_epoch=2,
verbose=0)
model.fit(
{'input_a': input_a_tf,
'input_b': input_b_tf},
{'dense': output_d_tf,
'dropout': output_e_tf},
validation_data=({'input_a': input_a_tf,
'input_b': input_b_tf},
{'dense': output_d_tf,
'dropout': output_e_tf}),
epochs=1,
steps_per_epoch=2,
validation_steps=2,
verbose=0)
model.train_on_batch(
{'input_a': input_a_tf,
'input_b': input_b_tf},
{'dense': output_d_tf,
'dropout': output_e_tf})
# Test with validation data
model.fit(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
validation_data=([input_a_tf, input_b_tf],
[output_d_tf, output_e_tf]),
epochs=1,
steps_per_epoch=2,
validation_steps=2,
verbose=0)
# Test with validation split
with self.assertRaisesRegexp(ValueError,
'you cannot use `validation_split`'):
model.fit(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
epochs=2,
steps_per_epoch=2,
verbose=0,
validation_split=0.2,
validation_steps=2)
# Test evaluation / prediction methods
model.evaluate([input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
steps=2, verbose=0)
model.predict([input_a_tf, input_b_tf], steps=2)
model.test_on_batch([input_a_tf, input_b_tf], [output_d_tf, output_e_tf])
@tf_test_util.run_deprecated_v1
def test_model_with_input_feed_tensor(self):
"""We test building a model with a TF variable as input.
We should be able to call fit, evaluate, predict,
by only passing them data for the placeholder inputs
in the model.
"""
with self.cached_session():
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
input_v = keras.backend.variables_module.Variable(
input_a_np, dtype='float32')
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
b = keras.Input(shape=(3,), name='input_b')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
b_2 = dp(b)
model = keras.models.Model([a, b], [a_2, b_2])
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(optimizer, loss, metrics=['mean_squared_error'],
loss_weights=loss_weights,
sample_weight_mode=None)
# test train_on_batch
out = model.train_on_batch(input_b_np,
[output_a_np, output_b_np])
out = model.train_on_batch({'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.test_on_batch({'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.predict_on_batch({'input_b': input_b_np})
# test fit
out = model.fit({'input_b': input_b_np},
[output_a_np, output_b_np], epochs=1, batch_size=10)
out = model.fit(input_b_np,
[output_a_np, output_b_np], epochs=1, batch_size=10)
# test evaluate
out = model.evaluate({'input_b': input_b_np},
[output_a_np, output_b_np], batch_size=10)
out = model.evaluate(input_b_np,
[output_a_np, output_b_np], batch_size=10)
# test predict
out = model.predict({'input_b': input_b_np}, batch_size=10)
out = model.predict(input_b_np, batch_size=10)
self.assertEqual(len(out), 2)
# Now test a model with a single input
# i.e. we don't pass any data to fit the model.
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
a_2 = keras.layers.Dense(4, name='dense_1')(a)
a_2 = keras.layers.Dropout(0.5, name='dropout')(a_2)
model = keras.models.Model(a, a_2)
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
model.compile(optimizer, loss, metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None,
output_a_np)
out = model.train_on_batch(None,
output_a_np)
out = model.test_on_batch(None,
output_a_np)
out = model.predict_on_batch(None)
out = model.train_on_batch([],
output_a_np)
out = model.train_on_batch({},
output_a_np)
# test fit
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=3)
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=3)
# test evaluate
_ = model.evaluate(None, output_a_np, steps=3)
_ = model.evaluate(None, output_a_np, steps=3)
# test predict
out = model.predict(None, steps=3)
out = model.predict(None, steps=3)
self.assertEqual(out.shape, (10 * 3, 4))
# Same, without learning phase
# i.e. we don't pass any data to fit the model.
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
a_2 = keras.layers.Dense(4, name='dense_1')(a)
model = keras.models.Model(a, a_2)
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
model.compile(optimizer, loss, metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None,
output_a_np)
out = model.train_on_batch(None,
output_a_np)
out = model.test_on_batch(None,
output_a_np)
out = model.predict_on_batch(None)
out = model.train_on_batch([],
output_a_np)
out = model.train_on_batch({},
output_a_np)
# test fit
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=10)
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=10)
# test evaluate
_ = model.evaluate(None, output_a_np, steps=10)
_ = model.evaluate(None, output_a_np, steps=10)
# test predict
out = model.predict(None, steps=3)
out = model.predict(None, steps=3)
self.assertEqual(out.shape, (10 * 3, 4))
def test_model_with_partial_loss(self):
with self.cached_session():
a = keras.Input(shape=(3,), name='input_a')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
a_3 = dp(a_2)
model = keras.models.Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = {'dropout': 'mse'}
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
# test train_on_batch
_ = model.train_on_batch(input_a_np, output_a_np)
_ = model.test_on_batch(input_a_np, output_a_np)
# fit
_ = model.fit(input_a_np, [output_a_np])
# evaluate
_ = model.evaluate(input_a_np, [output_a_np])
# Same without dropout.
a = keras.Input(shape=(3,), name='input_a')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
a_3 = keras.layers.Dense(4, name='dense_2')(a_2)
model = keras.models.Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = {'dense_2': 'mse'}
model.compile(optimizer, loss, metrics={'dense_1': 'mae'})
# test train_on_batch
_ = model.train_on_batch(input_a_np, output_a_np)
_ = model.test_on_batch(input_a_np, output_a_np)
# fit
_ = model.fit(input_a_np, [output_a_np])
# evaluate
_ = model.evaluate(input_a_np, [output_a_np])
@tf_test_util.run_deprecated_v1
def test_model_with_external_loss(self):
with self.cached_session():
# None loss, only regularization loss.
a = keras.Input(shape=(3,), name='input_a')
a_2 = keras.layers.Dense(4, name='dense_1',
kernel_regularizer='l1',
bias_regularizer='l2')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
a_3 = dp(a_2)
model = keras.models.Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = None
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# No dropout, external loss.
a = keras.Input(shape=(3,), name='input_a')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
a_3 = keras.layers.Dense(4, name='dense_2')(a)
model = keras.models.Model(a, [a_2, a_3])
model.add_loss(keras.backend.mean(a_3 + a_2))
optimizer = 'rmsprop'
loss = None
model.compile(optimizer, loss, metrics=['mae'])
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# Test model with no external data at all.
input_v = keras.backend.variables_module.Variable(
input_a_np, dtype='float32')
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
a_2 = keras.layers.Dense(4, name='dense_1')(a)
a_2 = keras.layers.Dropout(0.5, name='dropout')(a_2)
model = keras.models.Model(a, a_2)
model.add_loss(keras.backend.mean(a_2))
model.compile(optimizer='rmsprop',
loss=None,
metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
# test fit
with self.assertRaises(ValueError):
out = model.fit(None, None, epochs=1, batch_size=10)
out = model.fit(None, None, epochs=1, steps_per_epoch=1)
# test fit with validation data
with self.assertRaises(ValueError):
out = model.fit(None, None, epochs=1,
steps_per_epoch=None,
validation_steps=2)
out = model.fit(None, None, epochs=1,
steps_per_epoch=2,
validation_steps=2)
# test evaluate
with self.assertRaises(ValueError):
out = model.evaluate(None, None, batch_size=10)
out = model.evaluate(None, None, steps=3)
# test predict
with self.assertRaises(ValueError):
out = model.predict(None, batch_size=10)
out = model.predict(None, steps=3)
self.assertEqual(out.shape, (10 * 3, 4))
# Test multi-output model with no external data at all.
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
a_1 = keras.layers.Dense(4, name='dense_1')(a)
a_2 = keras.layers.Dropout(0.5, name='dropout')(a_1)
model = keras.models.Model(a, [a_1, a_2])
model.add_loss(keras.backend.mean(a_2))
model.compile(optimizer='rmsprop',
loss=None,
metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
# test fit
with self.assertRaises(ValueError):
out = model.fit(None, None, epochs=1, batch_size=10)
out = model.fit(None, None, epochs=1, steps_per_epoch=1)
# test evaluate
with self.assertRaises(ValueError):
out = model.evaluate(None, None, batch_size=10)
out = model.evaluate(None, None, steps=3)
# test predict
with self.assertRaises(ValueError):
out = model.predict(None, batch_size=10, verbose=1)
out = model.predict(None, steps=3)
self.assertEqual(len(out), 2)
self.assertEqual(out[0].shape, (10 * 3, 4))
self.assertEqual(out[1].shape, (10 * 3, 4))
def test_target_tensors(self):
with self.cached_session():
# single-output, as list
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,), name='dense'))
input_val = np.random.random((10, 4))
target_val = np.random.random((10, 4))
target = keras.backend.variable(target_val)
model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target])
model.train_on_batch(input_val, None)
# single-output, as single tensor
model.compile(optimizer='rmsprop', loss='mse', target_tensors=target)
model.train_on_batch(input_val, None)
# single-output, as dict
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense': target})
model.train_on_batch(input_val, None)
# test invalid arguments
with self.assertRaises(TypeError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=set())
with self.assertRaises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target, target])
with self.assertRaises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense2': None})
with self.assertRaises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target])
model.train_on_batch(input_val, target_val)
# multi-output, as list
input_val = np.random.random((10, 4))
target_val_a = np.random.random((10, 4))
target_val_b = np.random.random((10, 4))
target_a = keras.backend.variable(target_val_a)
target_b = keras.backend.variable(target_val_b)
inputs = keras.layers.Input(shape=(4,))
output_a = keras.layers.Dense(4, name='dense_a')(inputs)
output_b = keras.layers.Dense(4, name='dense_b')(inputs)
model = keras.models.Model(inputs, [output_a, output_b])
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target_a, target_b])
model.train_on_batch(input_val, None)
# multi-output, as dict
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense_a': target_a,
'dense_b': target_b})
model.train_on_batch(input_val, None)
# test with sample weights
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=['mae', metrics_module.CategoricalAccuracy()],
target_tensors=[target_a, target_b])
model.train_on_batch(input_val, None,
sample_weight={'dense_a': np.random.random((10,))})
@tf_test_util.run_deprecated_v1
def test_model_custom_target_tensors(self):
with self.cached_session():
a = keras.Input(shape=(3,), name='input_a')
b = keras.Input(shape=(3,), name='input_b')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
b_2 = dp(b)
y = keras.backend.placeholder([10, 4], name='y')
y1 = keras.backend.placeholder([10, 3], name='y1')
y2 = keras.backend.placeholder([7, 5], name='y2')
model = keras.models.Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
# test list of target tensors
with self.assertRaises(ValueError):
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None, target_tensors=[y, y1, y2])
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None, target_tensors=[y, y1])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
_ = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np], {
'dense_1': np.random.random((10,)),
'dropout': np.random.random((10,))
})
# test dictionary of target_tensors
with self.assertRaises(ValueError):
model.compile(optimizer, loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors={'does_not_exist': y2})
# test dictionary of target_tensors
model.compile(optimizer, loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors={'dense_1': y, 'dropout': y1})
_ = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np], {
'dense_1': np.random.random((10,)),
'dropout': np.random.random((10,))
})
# test with custom TF placeholder as target
pl_target_a = keras.backend.array_ops.placeholder('float32',
shape=(None, 4))
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense_1': pl_target_a})
model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
class TestTrainingWithMetrics(keras_parameterized.TestCase):
"""Training tests related to metrics."""
@keras_parameterized.run_all_keras_modes
def test_metrics_names(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
metrics = ['mse', metrics_module.BinaryAccuracy()]
model.compile(optimizer, loss='mae', metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
mse_metric = 'mse' if tf2.enabled() else 'mean_squared_error'
reference_metric_names = [
'loss', 'dense_loss', 'dropout_loss', 'dense_' + mse_metric,
'dense_binary_accuracy', 'dropout_' + mse_metric,
'dropout_binary_accuracy'
]
self.assertEqual(reference_metric_names, model.metrics_names)
# Verify that model metric names are not altered during training.
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.fit([input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5)
self.assertEqual(reference_metric_names, model.metrics_names)
@keras_parameterized.run_all_keras_modes
def test_metric_state_reset_between_fit_and_evaluate(self):
model = keras.Sequential()
model.add(keras.layers.Dense(3, activation='relu', input_dim=4))
model.add(keras.layers.Dense(1, activation='sigmoid'))
acc_obj = metrics_module.BinaryAccuracy()
model.compile(
loss='mae',
metrics=[acc_obj],
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
x_train = np.random.random((100, 4))
y_train = np.random.random((100, 1))
model.fit(x_train, y_train, batch_size=5, epochs=2)
self.assertEqual(self.evaluate(acc_obj.count), 100)
x_test = np.random.random((10, 4))
y_test = np.random.random((10, 1))
model.evaluate(x_test, y_test, batch_size=5)
self.assertEqual(self.evaluate(acc_obj.count), 10)
@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes
def test_metrics_valid_compile_input_formats(self):
inp_1 = keras.layers.Input(shape=(1,), name='input_1')
inp_2 = keras.layers.Input(shape=(1,), name='input_2')
x = keras.layers.Dense(3, kernel_initializer='ones', trainable=False)
out_1 = keras.layers.Dense(
1, kernel_initializer='ones', name='output_1', trainable=False)
out_2 = keras.layers.Dense(
1, kernel_initializer='ones', name='output_2', trainable=False)
branch_a = [inp_1, x, out_1]
branch_b = [inp_2, x, out_2]
model = testing_utils.get_multi_io_model(branch_a, branch_b)
# list of metrics.
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=[keras.metrics.MeanSquaredError()],
weighted_metrics=[keras.metrics.MeanSquaredError()],
run_eagerly=testing_utils.should_run_eagerly())
# list of list of metrics.
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=[
keras.metrics.MeanSquaredError(),
[keras.metrics.MeanSquaredError(),
keras.metrics.Accuracy()]
],
weighted_metrics=[
keras.metrics.MeanSquaredError(),
[keras.metrics.MeanSquaredError(),
keras.metrics.Accuracy()]
],
run_eagerly=testing_utils.should_run_eagerly())
# dict of metrics.
model.compile(
optimizer='rmsprop',
loss='mse',
metrics={
'output_1':
keras.metrics.MeanSquaredError(),
'output_2': [
keras.metrics.MeanSquaredError(),
keras.metrics.Accuracy()
],
},
weighted_metrics={
'output_1':
keras.metrics.MeanSquaredError(),
'output_2': [
keras.metrics.MeanSquaredError(),
keras.metrics.Accuracy()
],
},
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_invalid_metrics(self):
num_classes = 5
input_dim = 5
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
with self.assertRaisesRegexp(
TypeError, 'Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: '):
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=metrics_module.CategoricalAccuracy(),
run_eagerly=testing_utils.should_run_eagerly())
inp = keras.layers.Input(shape=(1,))
x = keras.layers.Dense(3, activation='relu')(inp)
out_1 = keras.layers.Dense(1, activation='sigmoid', name='output_1')(x)
out_2 = keras.layers.Dense(1, activation='sigmoid', name='output_2')(x)
model = keras.models.Model(inp, [out_1, out_2])
with self.assertRaisesRegex(
ValueError, 'When passing a list of lists as `metrics`, '
'it should have one entry per model output. '
'The model has 2 outputs, but you passed metrics='):
model.compile('rmsprop', loss='mse', metrics=[['mse']])
with self.assertRaisesRegex(
ValueError,
r'Unknown entries in metrics dictionary: \[\'output_3\'\]. Only '
r'expected following keys: \[\'output_1\', \'output_2\'\]'):
model.compile(
optimizer='rmsprop',
loss='mse',
metrics={
'output_1': 'mse',
'output_3': 'mse',
},
run_eagerly=testing_utils.should_run_eagerly())
with self.assertRaisesRegex(
ValueError,
r'Unknown entries in metrics dictionary: \[\'output_3\'\]. Only '
r'expected following keys: \[\'output_1\', \'output_2\'\]'):
model.compile(
optimizer='rmsprop',
loss='mse',
weighted_metrics={
'output_1': 'mse',
'output_3': 'mse',
},
run_eagerly=testing_utils.should_run_eagerly())
@keras_parameterized.run_all_keras_modes
def test_metrics_masking(self):
if testing_utils.should_run_eagerly():
self.skipTest('b/120495761')
with self.cached_session():
np.random.seed(1337)
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='ones')))
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss='mse',
weighted_metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly())
# verify that masking is applied.
x = np.array([[[1], [1]], [[1], [1]], [[0], [0]]])
y = np.array([[[1], [1]], [[0], [1]], [[1], [1]]])
scores = model.train_on_batch(x, y)
self.assertArrayNear(scores, [0.25, 0.75], 0.1)
# verify that masking is combined with sample weights.
w = np.array([3, 2, 4])
scores = model.train_on_batch(x, y, sample_weight=w)
self.assertArrayNear(scores, [0.3328, 0.8], 0.001)
@keras_parameterized.run_all_keras_modes
def test_add_metric_with_tensor_on_model(self):
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(1, kernel_initializer='ones')(x)
model = keras.models.Model(x, y)
model.add_metric(
math_ops.reduce_sum(y), name='metric_1', aggregation='mean')
if context.executing_eagerly():
# This is not a use case in v1 graph mode.
mean_result = metrics_module.Mean()(y)
with self.assertRaisesRegex(
ValueError, 'Expected a symbolic Tensor for the metric value'):
model.add_metric(mean_result, name='metric_2')
with self.assertRaisesRegex(
ValueError, 'Using the result of calling a `Metric` object '):
with keras.backend.get_graph().as_default():
model.add_metric(metrics_module.Mean(name='metric_2')(y))
model.compile(
'sgd', loss='mse', run_eagerly=testing_utils.should_run_eagerly())
inputs = np.ones(shape=(10, 1))
targets = np.ones(shape=(10, 1))
history = model.fit(
inputs,
targets,
epochs=2,
batch_size=5,
validation_data=(inputs, targets))
self.assertEqual(history.history['metric_1'][-1], 5)
self.assertEqual(history.history['val_metric_1'][-1], 5)
eval_results = model.evaluate(inputs, targets, batch_size=5)
self.assertEqual(eval_results[-1], 5)
model.predict(inputs, batch_size=5)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
@keras_parameterized.run_all_keras_modes
def test_add_metric_in_model_call(self):
class TestModel(keras.Model):
def __init__(self):
super(TestModel, self).__init__(name='test_model')
self.dense1 = keras.layers.Dense(2, kernel_initializer='ones')
self.mean = metrics_module.Mean(name='metric_1')
def call(self, x):
self.add_metric(
math_ops.reduce_sum(x), name='metric_2', aggregation='mean')
# Provide same name as in the instance created in __init__
# for eager mode
self.add_metric(self.mean(x), name='metric_1')
return self.dense1(x)
model = TestModel()
model.compile(loss='mse', optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
history = model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
self.assertAlmostEqual(history.history['metric_1'][-1], 1, 0)
self.assertAlmostEqual(history.history['val_metric_1'][-1], 1, 0)
self.assertAlmostEqual(history.history['metric_2'][-1], 5, 0)
self.assertAlmostEqual(history.history['val_metric_2'][-1], 5, 0)
eval_results = model.evaluate(x, y, batch_size=5)
self.assertAlmostEqual(eval_results[1], 1, 0)
self.assertAlmostEqual(eval_results[2], 5, 0)
model.predict(x, batch_size=5)
model.train_on_batch(x, y)
model.test_on_batch(x, y)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_add_metric_in_layer_call(self):
class TestLayer(keras.layers.Layer):
def build(self, input_shape):
self.a = self.add_variable(
'a', (1, 1), initializer='ones', trainable=False)
self.built = True
def call(self, inputs):
self.add_metric(
math_ops.reduce_sum(inputs), name='metric_1', aggregation='mean')
return inputs + 1
layers = [
TestLayer(input_shape=(1,)),
keras.layers.Dense(2, kernel_initializer='ones')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(1,))
model.compile(loss='mse', optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
history = model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
self.assertEqual(history.history['metric_1'][-1], 5)
self.assertAlmostEqual(history.history['val_metric_1'][-1], 5, 0)
@keras_parameterized.run_all_keras_modes
def test_model_metrics_list(self):
class LayerWithAddMetric(keras.layers.Layer):
def __init__(self):
super(LayerWithAddMetric, self).__init__()
self.dense = keras.layers.Dense(1, kernel_initializer='ones')
def __call__(self, inputs):
outputs = self.dense(inputs)
self.add_metric(
math_ops.reduce_sum(outputs), name='metric_1', aggregation='mean')
return outputs
class LayerWithNestedAddMetricLayer(keras.layers.Layer):
def __init__(self):
super(LayerWithNestedAddMetricLayer, self).__init__()
self.layer = LayerWithAddMetric()
def call(self, inputs):
outputs = self.layer(inputs)
self.add_metric(
math_ops.reduce_sum(outputs), name='metric_2', aggregation='mean')
return outputs
x = keras.layers.Input(shape=(1,))
y = LayerWithNestedAddMetricLayer()(x)
model = keras.models.Model(x, y)
model.add_metric(
math_ops.reduce_sum(y), name='metric_3', aggregation='mean')
if context.executing_eagerly():
# This is not a use case in v1 graph mode.
mean_result = metrics_module.Mean()(y)
with self.assertRaisesRegex(
ValueError, 'Expected a symbolic Tensor for the metric value'):
model.add_metric(mean_result, name='metric_4')
with self.assertRaisesRegex(
ValueError, 'Using the result of calling a `Metric` object '):
with keras.backend.get_graph().as_default():
model.add_metric(metrics_module.Mean(name='metric_4')(y))
model.compile(
'sgd',
loss='mse',
metrics=[metrics_module.Accuracy('metric_4')],
run_eagerly=testing_utils.should_run_eagerly())
# Verify that the metrics added using `compile` and `add_metric` API are
# included
self.assertEqual([m.name for m in model._compile_metrics], ['metric_4'])
self.assertEqual([m.name for m in model.metrics],
['metric_4', 'metric_2', 'metric_1', 'metric_3'])
@keras_parameterized.run_all_keras_modes
def test_model_metrics_list_in_call(self):
class TestModel(keras.Model):
def __init__(self):
super(TestModel, self).__init__(name='test_model')
self.dense1 = keras.layers.Dense(2, kernel_initializer='ones')
def call(self, x):
self.add_metric(
math_ops.reduce_sum(x), name='metric_1', aggregation='mean')
return self.dense1(x)
model = TestModel()
model.compile(
loss='mse',
optimizer=RMSPropOptimizer(0.01),
metrics=[metrics_module.Accuracy('acc')],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
self.assertEqual([m.name for m in model._compile_metrics], ['acc'])
self.assertEqual([m.name for m in model.metrics], ['acc', 'metric_1'])
@keras_parameterized.run_all_keras_modes
def test_multiple_add_metric_calls(self):
class TestModel(keras.Model):
def __init__(self):
super(TestModel, self).__init__(name='test_model')
self.dense1 = keras.layers.Dense(2, kernel_initializer='ones')
self.mean1 = metrics_module.Mean(name='metric_1')
self.mean2 = metrics_module.Mean(name='metric_2')
def call(self, x):
self.add_metric(self.mean2(x), name='metric_2')
self.add_metric(self.mean1(x), name='metric_1')
self.add_metric(
math_ops.reduce_sum(x), name='metric_3', aggregation='mean')
return self.dense1(x)
model = TestModel()
model.compile(loss='mse', optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
history = model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
self.assertAlmostEqual(history.history['metric_1'][-1], 1, 0)
self.assertAlmostEqual(history.history['metric_2'][-1], 1, 0)
self.assertAlmostEqual(history.history['metric_3'][-1], 5, 0)
eval_results = model.evaluate(x, y, batch_size=5)
self.assertArrayNear(eval_results[1:4], [1, 1, 5], 0.1)
model.predict(x, batch_size=5)
model.train_on_batch(x, y)
model.test_on_batch(x, y)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_invalid_metric_tensor(self):
class TestLayer(keras.layers.Layer):
def build(self, input_shape):
self.built = True
def call(self, inputs):
self.add_metric(math_ops.reduce_mean(inputs), name='metric_1')
return inputs + 1
layers = [TestLayer(input_shape=(1,))]
layers.append(keras.layers.Dense(2, kernel_initializer='ones'))
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
with self.assertRaisesRegexp(
ValueError,
'We do not support adding an aggregated metric result tensor that is '
'not the output of a `tf.keras.metrics.Metric` metric instance.'):
model = testing_utils.get_model_from_layers(layers, input_shape=(1,))
model.compile(
loss='mse',
optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
@keras_parameterized.run_all_keras_modes
def test_duplicate_metric_name_in_add_metric(self):
class TestModel(keras.Model):
def __init__(self):
super(TestModel, self).__init__(name='test_model')
self.dense1 = keras.layers.Dense(2, kernel_initializer='ones')
self.mean = metrics_module.Mean(name='metric_1')
self.mean2 = metrics_module.Mean(name='metric_1')
def call(self, x):
self.add_metric(self.mean(x), name='metric_1')
return self.dense1(x)
model = TestModel()
model.compile(loss='mse', optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
with self.assertRaisesRegexp(
ValueError,
'Please provide different names for the metrics you have added. '
'We found 2 metrics with the name: "metric_1"'):
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
@keras_parameterized.run_all_keras_modes
def test_add_metric_without_name(self):
class TestModel(keras.Model):
def __init__(self):
super(TestModel, self).__init__(name='test_model')
self.dense1 = keras.layers.Dense(2, kernel_initializer='ones')
def call(self, x):
self.add_metric(math_ops.reduce_sum(x), aggregation='mean')
return self.dense1(x)
model = TestModel()
model.compile(loss='mse', optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
with self.assertRaisesRegex(ValueError,
'Please provide a name for your metric like'):
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
@keras_parameterized.run_all_keras_modes
def test_add_metric_correctness(self):
inputs = keras.Input(shape=(1,))
targets = keras.Input(shape=(1,))
class Bias(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_variable('bias', (1,), initializer='zeros')
self.mae = metrics_module.MeanAbsoluteError(name='mae_1')
def call(self, inputs):
inputs, targets = inputs
outputs = inputs + self.bias
self.add_metric(self.mae(targets, outputs), name='mae_1')
return outputs
outputs = Bias()([inputs, targets])
model = keras.Model([inputs, targets], outputs)
model.add_metric(
metrics_module.mean_absolute_error(targets, outputs),
name='mae_2',
aggregation='mean')
model.compile(
loss='mae',
optimizer=keras.optimizer_v2.gradient_descent.SGD(0.1),
metrics=[metrics_module.MeanAbsoluteError(name='mae_3')],
run_eagerly=testing_utils.should_run_eagerly())
x = np.array([[0.], [1.], [2.]])
y = np.array([[0.5], [2.], [3.5]])
history = model.fit([x, y], y, batch_size=3, epochs=5)
expected_val = [1., 0.9, 0.8, 0.7, 0.6]
for key in ['loss', 'mae_1', 'mae_2', 'mae_3']:
self.assertAllClose(history.history[key], expected_val, 1e-3)
@keras_parameterized.run_all_keras_modes
def test_model_with_nested_compiled_model(self):
class LayerWithAddMetric(keras.layers.Layer):
def __init__(self):
super(LayerWithAddMetric, self).__init__()
self.dense = keras.layers.Dense(1, kernel_initializer='ones')
def call(self, inputs):
outputs = self.dense(inputs)
self.add_metric(
math_ops.reduce_sum(outputs), name='mean', aggregation='mean')
return outputs
x = keras.layers.Input(shape=(1,))
y = LayerWithAddMetric()(x)
inner_model = keras.models.Model(x, y)
inner_model.add_metric(
math_ops.reduce_sum(y), name='mean1', aggregation='mean')
inner_model.compile(
'sgd',
loss='mse',
metrics=[metrics_module.Accuracy('acc')],
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual([m.name for m in inner_model.metrics],
['acc', 'mean', 'mean1'])
x = keras.layers.Input(shape=[1])
y = inner_model(x)
outer_model = keras.Model(x, y)
outer_model.add_metric(
math_ops.reduce_sum(y), name='mean2', aggregation='mean')
outer_model.compile(
'sgd',
loss='mse',
metrics=[metrics_module.Accuracy('acc2')],
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual([m.name for m in outer_model.metrics],
['acc2', 'mean', 'mean1', 'mean2'])
class BareUpdateLayer(keras.layers.Layer):
def build(self, input_shape):
self.counter = self.add_weight(
'counter',
dtype='int32',
shape=(),
initializer='zeros',
trainable=False)
def call(self, inputs):
state_ops.assign_add(self.counter, 1)
return math_ops.cast(self.counter, inputs.dtype) * inputs
class LambdaUpdateLayer(keras.layers.Layer):
def build(self, input_shape):
self.counter = self.add_weight(
'counter',
dtype='int32',
shape=(),
initializer='zeros',
trainable=False)
def call(self, inputs):
# Make sure update isn't run twice.
self.add_update(lambda: state_ops.assign_add(self.counter, 1))
return math_ops.cast(self.counter, inputs.dtype) * inputs
class NestedUpdateLayer(keras.layers.Layer):
def build(self, input_shape):
self.layer = BareUpdateLayer()
self.layer.build(input_shape)
@property
def counter(self):
return self.layer.counter
def call(self, inputs):
return self.layer(inputs)
class SubgraphUpdateLayer(keras.layers.Layer):
def build(self, input_shape):
self.counter = self.add_weight(
'counter',
dtype='int32',
shape=(),
initializer='zeros',
trainable=False)
def call(self, inputs, training=None):
if training is None:
training = keras.backend.learning_phase()
if training:
self.counter.assign(self.counter + 1)
return inputs
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestAutoUpdates(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@parameterized.named_parameters(('bare_update', BareUpdateLayer()),
('lambda_update', LambdaUpdateLayer()),
('nested_update', NestedUpdateLayer()))
def test_updates_in_model(self, layer):
x, y = np.ones((10, 10)), np.ones((10, 1))
model = testing_utils.get_model_from_layers(
[layer, keras.layers.Dense(1)], input_shape=(10,))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
@keras_parameterized.run_with_all_model_types
def test_lambda_updates_trainable_false(self):
x, y = np.ones((10, 10)), np.ones((10, 1))
layer = LambdaUpdateLayer()
model = testing_utils.get_model_from_layers(
[layer, keras.layers.Dense(1)], input_shape=(10,))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
layer.trainable = False
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
@keras_parameterized.run_with_all_model_types
def test_subgraph_updates_in_model(self):
layer = SubgraphUpdateLayer()
x, y = np.ones((10, 10)), np.ones((10, 1))
model = testing_utils.get_model_from_layers(
[layer, keras.layers.Dense(1)], input_shape=(10,))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
@parameterized.named_parameters(('bare_update', BareUpdateLayer()),
('lambda_update', LambdaUpdateLayer()),
('nested_update', NestedUpdateLayer()))
def test_updates_standalone_layer(self, layer):
y = layer(np.ones((10, 10)))
self.evaluate(layer.counter.initializer)
self.evaluate(y)
self.assertEqual(self.evaluate(layer.counter), 1)
def test_trainable_false_standalone_layer(self):
layer = LambdaUpdateLayer()
y = layer(np.ones((10, 10)))
self.evaluate(layer.counter.initializer)
self.evaluate(y)
self.assertEqual(self.evaluate(layer.counter), 1)
layer.trainable = False
y = layer(np.ones((10, 10)))
self.evaluate(y)
self.assertEqual(self.evaluate(layer.counter), 1)
@keras_parameterized.run_with_all_model_types
def test_batchnorm_trainable_false(self):
bn = keras.layers.BatchNormalization()
model = testing_utils.get_model_from_layers([bn, keras.layers.Dense(1)],
input_shape=(10,))
bn.trainable = False
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 1))
model.fit(x, y, batch_size=2, epochs=1)
self.assertAllEqual(self.evaluate(bn.moving_mean), np.zeros((10,)))
self.assertAllEqual(self.evaluate(bn.moving_variance), np.ones((10,)))
if __name__ == '__main__':
test.main()
|
py | b415ea7238429a23d99da80759bb3ed1c38bf240 | #!/usr/bin/python3.6
# Copyright (c) 2020, Doug Eaton
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
USB Protocol Decoder and comparator for testing and verification of USB
support. Routines are provided to show details of what was received and
to clearly show differences from what was expected.
Environment:
-----------
Works with stand-alone or with cocotb_usb.
Design Philosophy:
-----------------
Packet: May be initialized by
1. a string of JK characters starting with SYNC and ending with EOP pattern
2. a pair of strings of 01 characters for D+/D-
3. a list of bytes, the content between but excluding SYNC and EOP
Mismatch handling can give a warning, an error or be fatal.
Simulation should be allowed to proceed and not aborted just because
the client gave a different response than expected.
------------------------------------------------------------------- '''
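# A minimal usage sketch of the decoder described above (hypothetical byte
# values, using the list-of-bytes initialization form; not part of the
# original module):
#
#   received = USBPacket([0xD2])   # 0xD2 is the ACK handshake PID byte
#   expected = USBPacket([0xD2])
#   received.compare(expected)     # returns True; mismatches would be logged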
from enum import Enum, IntEnum
from array import array
import inspect
import fileinput
import pprint
import logging
import sys
import re
from explainusb.descriptors import descriptor_data
# Use cocotb logging if available. Use the built-in logging module otherwise.
# The color routines check if stdout is a tty.
SAME_COLOR=''
DIFF_COLOR=''
try:
from cocotb.log import SimLog
from cocotb.utils import want_color_output
import cocotb.ANSI as ANSI
logger = SimLog("cocotb.usb.explain")
#logger.setLevel(logging.DEBUG)
if want_color_output():
SAME_COLOR=ANSI.COLOR_INFO
DIFF_COLOR=ANSI.COLOR_ERROR
except ImportError:
logger = logging
# Create a pretty printer object to display descriptors as structures.
# Leave list unsorted if the option is available.
if sys.version_info >= (3, 8):
pp=pprint.PrettyPrinter(sort_dicts=False)
else:
pp=pprint.PrettyPrinter()
# Use UTF empty set symbol for non-existent field in packet comparisons as long
# as environment is not limited to 8-bit characters.
import locale
NONEXISTENT='-'
if 'UTF-8' in locale.getdefaultlocale():
NONEXISTENT="โ
"
class DecodeError(Enum):
# Link Level Errors (incorrect bit patterns or signal integrity)
InvalidToken = "Unexpected character in bitstream"
BitStuffViolation = "Bit stuffing violation"
FalseEOP = "False EOP detected"
InvalidSync = "Packet does not start with Sync pattern"
InvalidEOP = "EOP expected but not present"
# Network Level Errors (incorrect packet size or contents)
IncorrectCRC = "CRC Mismatch"
InvalidPID = "Invalid PID" # likely bit error
# Application Level Errors (mismatch from expected result)
IncorrectLength = "Packet length not required size"
UnimplementedPacket="Format not supported"
UnexpectedResult = "Response differs from expected"
def bytes2bitstring(data):
# Convert array of 8-bit bytes into string of 0s and 1s.
bitstring = ""
for b in data:
bitstring += ("{0:08b}".format(b))[::-1]
return bitstring
def bitstring2bytes(bitstring):
# Convert string of 0s and 1s into array of 8-bit bytes.
data = []
while (len(bitstring)>8):
data.append(int(bitstring[:8][::-1], 2))
bitstring=bitstring[8:]
data.append(int(bitstring[::-1], 2))
return data
def xor(x, y):
# perform xor on two lists or strings of the same length
assert len(x)==len(y)
result = []
for i in range(0,len(x)):
if x[i]==y[i]:
result.append('0')
else:
result.append('1')
return result
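# Example (worked out by hand): xor("1010", "0110") -> ['1', '1', '0', '0'];
# both strings and lists of '0'/'1' characters are accepted.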
def crc5(bitstring):
# Input: bitstring, length=11
# Output: bitstring, length=5
#logger.debug(f"Calculating CRC5 of {bitstring}")
G5 = ('0','0','1','0','1') # 0x14
crc = ['1','1','1','1','1'] # 0x1F
data = list(bitstring)
while (len(data)>0):
nextb=data.pop(0);
if nextb!='0' and nextb!='1':
continue # ignore invalid characters
if nextb == crc.pop(0):
crc.append('0')
else:
crc.append('0')
crc=xor(crc, G5)
# invert shift reg contents to produce final crc value
bitstring=""
for nextb in crc:
bitstring+=chr(ord(nextb)^1)
#logger.debug(f"Calculated CRC5 is {bitstring}")
return bitstring
def crc16(data):
# Input: array of bytes
# Output: 16-bit integer
#logger.debug(f"Calculating CRC16 of {data}")
G16 = 0xA001 # 1000000000000101 (lsb first)
crc = 0xFFFF
for byte in data:
for i in range(0,8):
bit=byte&1
byte>>=1
if bit==crc&1:
crc=crc>>1
else:
crc=crc>>1
crc^=G16
# invert shift reg contents to produce final crc value
crc^=0xFFFF
#logger.debug(f"Calculated CRC16 is {crc}")
return crc
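# Sanity check (hand-traced against the loop above): crc16([]) returns 0x0000,
# which matches the CRC16 field carried by a zero-length DATA packet.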
def hexdump(a):
''' Turn list of bytes into `od` style hex plus ascii display
>>> a=[i for i in range(133)]
>>> print(hexdump(a), end='')
00010203 04050607 08090a0b 0c0d0e0f ................
10111213 14151617 18191a1b 1c1d1e1f ................
20212223 24252627 28292a2b 2c2d2e2f !"#$%&'()*+,-./
30313233 34353637 38393a3b 3c3d3e3f 0123456789:;<=>?
40414243 44454647 48494a4b 4c4d4e4f @ABCDEFGHIJKLMNO
50515253 54555657 58595a5b 5c5d5e5f PQRSTUVWXYZ[\]^_
60616263 64656667 68696a6b 6c6d6e6f `abcdefghijklmno
70717273 74757677 78797a7b 7c7d7e7f pqrstuvwxyz{|}~.
80818283 84 .....
'''
out=''
one=array("B", a).tobytes().hex()
lines=[one[i:i+32] for i in range(0, len(one), 32)]
for line in lines:
asc=bytes.fromhex(line).decode("ascii", 'replace')
asc=''.join([c if 31<ord(c)<127 else '.' for c in asc])
if len(line)<32:
line+=' '*(32-len(line))
out+=' '.join([line[0:8], line[8:16], line[16:24], line[24:32]])
out+=' '+asc+'\n'
return out
PIDPacketID = [
# Not to be confused with device PID/VID Product ID
"EXT", # 0 USB 2.01 Protocol Extension Transaction
"OUT", # 1 Initiate host-to-device transfer
"ACK", # 2 Data packet accepted
"DATA0", # 3 Even-numbered data packet
"PING", # 4 Check if endpoint can accept data
"SOF", # 5 Start of time frame marker
"NYET", # 6 Data not ready yet
"DATA2", # 7 Data packet for hish-speed isochronous transfer
"SPLIT", # 8 High-bandwidth hish-speed split transaction
"IN", # 9 Initiate device-to-host transfer
"NAK", # 10 Data packet not accepted; retransmit request
"DATA1", # 11 Odd-numbered data packet
"PRE", # 12 Low-speed preamble or high-speed split transaction error
"SETUP", # 13 Initiate host-to-device control transfer
"STALL", # 14 Transfer impossible; perform error recovery
"MDATA"] # 15 Data packet for high-speed isochronous transfer
class USBPacket():
def __init__(self, packet, transfer=None):
self.error=None
if isinstance(packet, str):
if re.search(r'^[01]+$', packet):
# String consisting solely of 0 and 1 : bitstring
self.bitstring=packet
self.byte=bitstring2bytes(self.bitstring)
elif re.search(r'^[jkJK01_]+$', packet):
# String with JK and SE0
self.nrzi=packet
self.byte=nrzi2bytes(self.nrzi)
elif packet.count('\n')>8:
# Prety printed by cocotb_usb with one J or K token per line
self.nrzi=unpp_packet(packet)
self.byte=nrzi2bytes(self.nrzi)
else:
self.error=DecodeError.InvalidToken
logger.critical(f"Unrecognized packet format: '{packet}'")
return self
elif len(packet)==2:
# Pair of iterables containing D+ and D- signals
self.differential=packet
self.nrzi=differential2nrzi(packet[0], packet[1])
self.byte=nrzi2bytes(self.nrzi)
else:
# Must be a list of bytes
self.byte=packet
logger.debug(f"Packet as bytes: {self.byte}")
self.pid=USBPacket.decode_pid(self.byte[0])
if transfer and self.pid[:4]=="DATA":
if isinstance(transfer, USBPacket):
logger.debug(f"Decoding packet as {transfer.fields['Command']}")
else:
logger.debug(f"Decoding packet as {transfer}")
self.currentTransfer=transfer
self.fields = {"PID": self.pid}
decode=PacketDecoder[self.pid]
decode(self)
logger.debug(str(self))
if self.error:
logger.error(self.error);
def __str__(self):
''' Provide the str() function with beautified output.
'''
if self.error:
# Return the error itself along with the PID if present
return str(self.error)+' '+self.pid
out=str(self.pid)
for f in self.fields:
if f=="PID" or f=="CRC5" or f=="CRC16":
continue
out+=f", {f}: {self.fields[f]}"
if hasattr(self, "descriptor") and self.descriptor!=None:
out=hexdump(self.data) + pp.pformat(self.descriptor)
# Old code that does what the pretty printer simplifies
#if isinstance(self.descriptor, list):
# for d in self.descriptor:
# out+="\n..."
# for f in d:
# out+=f", {f}: {d[f]}"
#else:
# for f in self.descriptor:
# out+=f", {f}: {self.descriptor[f]}"
return out
def summarize(self):
if hasattr(self, 'error') and self.error:
if hasattr(self, 'pid'):
return f"{self.error} {self.pid}"
else:
return str(self.error)
out=self.pid
if hasattr(self, "descriptor"):
if isinstance(self.descriptor, list):
try:
out=f"List of {len(self.descriptor)} descriptors"
except:
out="List of descriptors"
else:
try:
out=f"{self.descriptor['bDescriptorType']} descriptor"
except:
out="Partial descriptor"
else:
try:
out=f"{self.fields['Command']} command"
except:
pass
return out
def compare(self, expected, display=False):
''' Compare expected result (parameter) with the received result (self)
'''
# Try binary comparison before field-by-field
if self.byte == expected.byte:
logger.debug(f"{self.pid} Packets are identical")
return True
else:
logger.error(f"{self.pid} Packets differ")
message=""
# Show fields that are expected. Current routine does not show
# unexpected fields that were received.
for f in self.fields:
other=(expected.fields[f] if f in expected.fields else
NONEXISTENT)
message+=(SAME_COLOR if self.fields[f]==other else DIFF_COLOR)
message+=("{:20} {:20} {}\n".format(self.fields[f], other, f))
if hasattr(self, "descriptor"):
if isinstance(self.descriptor, list):
for d in self.descriptor:
for f in d:
other=(expected.descriptor[f] if hasattr(expected,
'descriptor') and f in expected.descriptor
else NONEXISTENT)
message+=(SAME_COLOR if d[f]==other else DIFF_COLOR)
message+=("{:20} {:20} {}\n".format(d[f], other, f))
elif isinstance(self.descriptor, DecodeError):
message+=DIFF_COLOR
message+=("{:20} {:20} {}\n".format(NONEXISTENT, '',
self.descriptor))
else:
d = self.descriptor
for f in d:
other=(expected.descriptor[f] if hasattr(expected,
'descriptor') and f in expected.descriptor
else NONEXISTENT)
message+=(SAME_COLOR if d[f]==other else DIFF_COLOR)
try:
message+=("{:20} {:20} {}\n".format(d[f], other, f))
except:
pass
logger.error(message)
return False
def decode_pid(pid):
code=pid&0xf
if pid==(code|(code<<4))^0xf0:
return PIDPacketID[code]
else:
logger.error(f"L3 Error: Invalid PID [0x{pid:02X}]")
return DecodeError.InvalidPID
def decode_handshake(self):
''' Handshake packets
Sync PID EOP
KJKJKJKK XXXXXXXX 00J
The only content of this packet is the protocol id itself
Nothing further to be done as the protocol has already been determined
'''
logger.debug(self.pid)
return self.error
def decode_token(self):
'''Token packets
Sync PID ADDR 7 ENDP CRC5 EOP
KJKJKJKK XXXXXXXX XXXXXXX XXXX XXXXX 00J
'''
self.fields.update(
{"Address" : self.byte[1]&0x7f,
"Endpoint" : ((self.byte[2]&7)<<1)|(self.byte[1]>>7),
"CRC5" : self.byte[2]>>3})
crc=bitstring2bytes(crc5(bytes2bitstring(self.byte[1:3])[0:11]))[0]
if self.fields["CRC5"]!=crc:
self.error=DecodeError.IncorrectCRC
logger.error("Calculated CRC {:02X}, Expected {:02X}".format(crc,
self.fields["CRC5"]))
logger.debug(f"{self.pid}, {self.fields['Address']}:"
f"{self.fields['Endpoint']}, crc={crc}, error={self.error}")
return self.error
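# Worked example for the field extraction above (hypothetical token bytes,
# CRC not shown): with byte[1] = 0x83 and byte[2] & 7 == 0b001,
#   Address  = 0x83 & 0x7F = 3
#   Endpoint = (0b001 << 1) | (0x83 >> 7) = 0b0011 = 3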
def decode_sof(self):
''' SOF packet
Sync PID Frame no. CRC5 EOP
KJKJKJKK XXXXXXXX XXXXXXXXXXX XXXXX 00J
'''
self.fields.update(
{"Frame Number" : ((self.byte[2]&7)<<8)|(self.byte[1]),
"CRC5" : self.byte[2]>>3})
crc=bitstring2bytes(crc5(bytes2bitstring(self.byte[1:3])[0:11]))[0]
if self.fields["CRC5"]!=crc:
self.error=DecodeError.IncorrectCRC
logger.error("Calculated CRC {:02X}, Expected {:02X}".format(crc,
self.fields["CRC5"]))
logger.debug(f"{self.pid}, {self.fields['Frame Number']}, "
f"crc={crc}, error={self.error}")
return self.error
def decode_data(self):
'''Data packets
Sync PID DATA CRC16 EOP
KJKJKJKK XXXXXXXX XXXXXXXX*Count XXXXXXXXXXXXXXXX 00J
'''
self.data=self.byte[1:-2]
logger.debug(f"Data: {self.data}")
self.fields["CRC16"]=(self.byte[-1]<<8)|self.byte[-2]
crc=crc16(self.byte[1:-2])
if self.fields["CRC16"]!=crc:
self.error=DecodeError.IncorrectCRC
logger.error("Calculated CRC {:04X}, Expected {:04X}".format(crc,
self.fields["CRC16"]))
if hasattr(self, "currentTransfer"):
transfer=self.currentTransfer
if len(self.data)==0:
# Zero-byte data transfer does not get decoded
pass
elif transfer=="SETUP":
#logger.debug(f"Current transfer: {transfer}")
DataPacket.decode_setup_data(self)
elif transfer in DataDecoder:
#logger.debug(f"Current transfer: {transfer}")
if DataDecoder[transfer]!=None:
DataDecoder[transfer](self)
elif isinstance(transfer, USBPacket):
transfer=transfer.fields["Command"]
#logger.debug(f"Current transfer: {transfer}")
if DataDecoder[transfer]!=None:
DataDecoder[transfer](self)
else:
logger.warning(f"No decoder for {transfer}")
return self.error
def decode_split(self):
''' SSPLIT CSPLIT packet
Sync PID Hub addr S/C Port 7 S E ET CRC5 EOP
KJKJKJKK XXXXXXXX XXXXXXX X XXXXXXX X X XX XXXXX 00J
S/C: Start split transaction / Complete split transaction
S/C: 0 = SSPLIT: Start split transaction, 1 = CSPLIT: Complete split
S: Speed, 1 = Low speed, 0 = High speed
E: End of full speed payload
U: U bit is reserved/unused and must be reset to zero (0 B)
EP: End point type: 00=control, 01=isochronous, 10=bulk, 11=interrupt
'''
# High speed not yet verified
self.error = DecodeError.UnimplementedPacket
logger.warning("Decoder error: Unimplimented packet")
return self.error
def decode_pre(self):
'''PRE packet
Full speed preamble encapsulating Low speed packet after possible delay
Sync PID(PRE) Sync PID ADDR ENDP CRC5 EOP
KJKJKJKK XXXXXXXX KJKJKJKK XXXXXXXX XXXXXXX XXXX XXXXX 00J
'''
# Low speed not yet verified
self.error = DecodeError.UnimplementedPacket
logger.warning("Decoder error: Unimplimented packet")
return self.error
def decode_ext(self):
'''EXT packet - Changes the interpretation of the subsequent packet
Sync PID ADDR 7 ENDP CRC5 EOP
KJKJKJKK XXXXXXXX XXXXXXX XXXX XXXXX 00J
'''
# This is easy to decode but is only an indicator that the following
# packet has a SubPID and that it should not be taken as a real PID.
# Issue a warning that the next packet will not be decoded correctly.
self.error = DecodeError.UnimplementedPacket
logger.warning("Decoder error: Unimplimented packet")
return self.error
'''Transaction
OUT transaction: OUT, DATAx, ACK
IN transaction: IN, DATAx, ACK
SETUP transaction: SETUP, DATA0, ACK
'''
class DataPacket(USBPacket):
''' Interpretation for Data packets sent as part of Setup transaction '''
def decode_setup_data(self):
''' 8-byte data packet followup to a SETUP request '''
bmRequestTypeRecipient = (
# bmRequestType bits 0โ4 Recipient
# Only 4 of 32 values are defined. Show (reserved) for 4โ31
"Device", # 0
"Interface", # 1
"Endpoint", # 2
"Other") # 3
bmRequestTypeType = (
# bmRequestType bits 5–6 Type: Used with bRequest byte
"Standard", # 0
"Class", # 1
"Vendor", # 2
"Reserved") # 3
bmRequestTypeDirection = (
# bmRequestType bit 7 Direction
"Host to device", # 0 # or when wLength=0 indicating no data needed
"Device to host") # 1 # wLength bytes of status expected
bRequestStandardSetup = (
# Standard Setup command: Recipient is Device and Type is Standard
"GET_STATUS", # 0 2-byte read
"CLEAR_FEATURE", # 1 0-byte write, feature selected by wValue
"RFU_2", # 2
"SET_FEATURE", # 3 0-byte write, feature selected by wValue
"RFU_4", # 4
"SET_ADDRESS", # 5 0-byte write, address in wValue
"GET_DESCRIPTOR", # 6 wLength-byte read, type & index in wValue
"SET_DESCRIPTOR", # 7 wLength-byte write, type & index in wValue
"GET_CONFIGURATION", # 8 1-byte read, config selected by wValue
"SET_CONFIGURATION", # 9 0-byte write, config selected by wValue
"GET_INTERFACE", # 10 returns alternate setting for interface
"SET_INTERFACE", # 11
"SYNCH_FRAME", # 12 Frame number in data
# Requests below this point are USB 3 specific
# TODO Not implemented in current version
"SET_ENCRYPTION", # 13
"GET_ENCRYPTION", # 14
"SET_HANDSHAKE", # 15
"GET_HANDSHAKE", # 16
"SET_CONNECTION", # 17
"SET_SECURITY_DATA", # 18
"GET_SECURITY_DATA", # 19
"SET_WUSB_DATA", # 20
"LOOPBACK_DATA_WRITE",#21
"LOOPBACK_DATA_READ",# 22
"SET_INTERFACE_DS", # 23
# Structure should be changed to dict for the following additions
#"SET_SEL", # 48
#"SET_ISOCH_DELAY", # 49
)
# promote generic packet to data sub-class
self.__class__ = DataPacket
self.fields.update({
"Recipient" : bmRequestTypeRecipient[self.byte[1]&0x1f],
"Type" : bmRequestTypeType[(self.byte[1]>>5)&3],
"Direction" : bmRequestTypeDirection[(self.byte[1]>>7)&1],
"Command" : bRequestStandardSetup[self.byte[2]],
"Value" : self.byte[3]+(self.byte[4]<<8), # See Command defn
"Index" : self.byte[5]+(self.byte[6]<<8),
"Length" : self.byte[7]+(self.byte[8]<<8), # of followup Data
})
if self.fields["Command"][4:]=="DESCRIPTOR": # GET and SET
self.fields.update({"Descriptor Type": self.byte[4],
"Descriptor Index": self.byte[3]})
logger.debug(f"{self.pid} error={self.error}")
#if self.fields["Type"]=="Standard" and self.fields["Recipient"]=="Device":
# USBPacket.currentTransfer=self.fields["Command"]
# if self.fields["Command"]=="GET_STATUS":
# USBPacket.currentTransfer+="_"+self.fields["Recipient"]
#else:
# # Only Standard commands are restricted to standard formats
# USBPacket.currentTransfer=None
return self.error
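# Worked example (illustrative; assumes self.byte[1:9] holds the eight setup
# bytes that follow the PID): the standard request 80 06 00 01 00 00 40 00
# decodes to Recipient="Device", Type="Standard", Direction="Device to host",
# Command="GET_DESCRIPTOR", Value=0x0100 (device descriptor), Index=0, Length=64.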
def decode_get_status_device(self):
# Status has 14 bits of reserved (0) TODO Pedantic should verify
self.fields.update({
"Self Powered" : self.byte[1]&1,
"Remote Wakeup": self.byte[1]&2})
return self.error
def decode_get_status_interface(self):
# Status is 16 bits of reserved (0) TODO Pedantic should verify
return self.error
def decode_get_status_endpoint(self):
# Status is 15 bits of reserved (0) TODO Pedantic should verify
self.fields.update({
"Halt": self.byte[1]&1})
return self.error
def decode_get_descriptor(self):
if isinstance(self.currentTransfer, USBPacket):
self.descriptor=descriptor_data(self.data,
self.currentTransfer.fields)
return self.error
def decode_get_configuration(self):
self.error = DecodeError.UnimplementedPacket
return self.error
def decode_get_interface(self):
self.error = DecodeError.UnimplementedPacket
return self.error
def decode_synch_frame(self):
self.fields.update({
"Frame Number": self.byte[1]+(self.byte[2]<<8)})
return self.error
PacketDecoder = {
"ACK": USBPacket.decode_handshake,
"NAK": USBPacket.decode_handshake,
"STALL": USBPacket.decode_handshake,
"NYET": USBPacket.decode_handshake,
"ERR": USBPacket.decode_handshake,
"IN": USBPacket.decode_token,
"OUT": USBPacket.decode_token,
"SETUP": USBPacket.decode_token,
"PING": USBPacket.decode_token,
"MDATA": USBPacket.decode_data,
"DATA0": USBPacket.decode_data,
"DATA1": USBPacket.decode_data,
"DATA2": USBPacket.decode_data,
"SPLIT": USBPacket.decode_split,
"SOF": USBPacket.decode_sof,
"PRE": USBPacket.decode_pre,
"EXT": USBPacket.decode_ext,
}
DataDecoder = {
#"SETUP": DataPacket.decode_setup_data,
"GET_STATUS_Device": DataPacket.decode_get_status_device,
"GET_STATUS_Interface": DataPacket.decode_get_status_interface,
"GET_STATUS_Endpoint": DataPacket.decode_get_status_endpoint,
"GET_DESCRIPTOR": DataPacket.decode_get_descriptor,
"SET_DESCRIPTOR": DataPacket.decode_get_descriptor,
"GET_CONFIGURATION": DataPacket.decode_get_configuration,
"GET_INTERFACE": DataPacket.decode_get_interface,
"SET_INTERFACE": DataPacket.decode_get_interface,
"SYNCH_FRAME": DataPacket.decode_synch_frame,
"CLEAR_FEATURE": None,
"SET_FEATURE": None,
"SET_ADDRESS": None,
"SET_CONFIGURATION": None,
#"get_device_descriptor": DataPacket.decode_get_descriptor,
#"get_configuration_descriptor": DataPacket.decode_get_descriptor,
#"get_string_descriptor": DataPacket.decode_get_descriptor,
}
# Decode USB where LSB is sent first and 0 a state transition and 1 is no change
def unpp_packet(dump):
""" Recover packet from cocotb_usb pretty print.
>>> # Only the first word of each line is used.
>>> # This assumes the pp_packet output is captured as a python string.
The following is a doctest statement that expects cocotb_usb to be installed:
packet=unpp_packet(pp_packet(wrap_packet(handshake_packet(PID.ACK))))
"""
#logger.debug(f"Recovering packet from: {dump}")
nrzi=""
i=0
for line in dump.splitlines():
i+=1
words=re.findall(r"\S+", line)
if len(words)==0:
continue # ignore blank lines
signal=words[0]
if signal=="JJJJ" or signal=="J":
nrzi+='J'
elif signal=="KKKK" or signal=="K":
nrzi+='K'
elif signal=="----" or signal=="-":
pass
elif signal=="____" or signal=="_":
nrzi+='0'
else:
logger.error(f"Unexpected token '{signal}' in line {i}: {line}")
return DecodeError.InvalidToken
return nrzi
def differential2nrzi(dpi, dmi):
''' Join two iterables (strings or arrays) containing D+ and D- signals
>>> # Examples of proper usage:
>>> differential2nrzi([0,0,1,1], [0,1,0,1])
'0KJ1'
>>> differential2nrzi('0011', '0101')
'0KJ1'
'''
if len(dpi)!=len(dmi):
logger.critical(f"Length mismatch between inputs ({dpi}', {dmi})")
return DecodeError.InvalidToken
nrzi=""
for (dp,dm) in zip(dpi, dmi):
if dp and dp!='0':
if dm and dm!='0':
nrzi+='1'
else:
nrzi+='J'
else:
if dm and dm!='0':
nrzi+='K'
else:
nrzi+='0'
return nrzi
def nrzi2bytes(received, wrapped=True):
""" Convert NRZI string of JK tokens to list of bytes
Parameters:
received: String containing J, K and SE0 tokens. The SE0 (Single Ended Zero)
may appear as '0' or '_' in the bitstring.
wrapped: Packet must start with Sync (KJKJKJKK) and end with EOP (__J) which
are removed from the returned value. If False, these must have already
been stripped from the bitstream.
Return value:
โข List of bytes. Exception: The value -1 is used for EOP if not unwraped.
โข Error code from DecodeError if an invalid character is encountered or a
verification check fails.
The following checks are performed to ensure USB 2.0 compliance:
โข Verify bit stuffing is inserted after 6 consecutive 1s.
โข Verify that a multiple of 8 bits is recevied.
The following checks are not perfomed as these are consessions by the
standard for lower quality 1990s devices:
โข SE0 may be seen as a glitch briefly during differential transition.
Max duration of this event (TFST) is 14ns.
โข Valid SE0 for transmission of EOP (TFEOPT) is 160-175ns.
โข Receiver must accept an SEO as short as 82ns (TFEOPR) as a valid EOP.
โข The last data bit before EOP can become stretched (dribble) into a 6th bit
that does not require a bit stuff. The receiver must accept this as valid.
>>> # Examples of proper usage:
>>> nrzi2bytes('KJKJKJKKJJKJJKKK__J', wrapped=True)
[210]
>>> nrzi2bytes('JJKJJKKK', wrapped=False)
[210]
>>> # Verification of error handling:
>>> nrzi2bytes('KJKJKJKKJJKJJKKK__J', wrapped=False)
<DecodeError.FalseEOP: 'False EOP detected'>
>>> nrzi2bytes('JJKJJKKK', wrapped=True)
<DecodeError.InvalidSync: 'Packet does not start with Sync pattern'>
"""
SYNC=128
EOP=-1
state=('J' if wrapped else 'K') # SOP from idle pattern vs end of Sync
consecutive=0
bits=""
data=[]
received = received.upper()
for bit in received:
if bit=='J' or bit=='K' or bit=='_':
pass
elif bit=='0':
bit='_'
else:
logger.error(f"Unexpected character '{bit}' in NRZI bitstream")
return DecodeError.InvalidToken
if len(bits)==8:
byte = int(bits[::-1], 2)
#print("{} {:02x} {}".format(bits, byte, byte))
data.append(byte)
bits=""
if bit==state:
if bit=='J' or bit=='K':
bits+='1'
elif bit=='_':
bits+='_'
consecutive+=1
if consecutive>6:
logger.error("Bit stuffing violation")
return DecodeError.BitStuffViolation
else:
if state == '_':
# End of packet
if bits!="__" or bit!='J' or not wrapped:
logger.error("Invalid EOP detected")
return DecodeError.FalseEOP
data.append(EOP)
bits=""
else:
if consecutive==6:
state=bit
consecutive=0
continue
if bit=='J' or bit=='K':
bits+='0'
elif bit=='_':
bits+='_'
else:
logger.error(f"Invalid character '{bit}' in NRZI string")
return DecodeError.InvalidToken
consecutive=0
state=bit
if bits!="":
if len(bits)==8:
byte = int(bits[::-1], 2)
data.append(byte)
bits=""
else:
logger.error(f"Incomplete byte '{bits}' received")
return DecodeError.FalseEOP
if not wrapped:
return data
if data[0]!=SYNC:
logger.error("SYNC not detected")
return DecodeError.InvalidSync
elif data[-1]!=EOP:
logger.error("EOP not detected")
return DecodeError.InvalidEOP
return data[1:-1]
def separate_packets(bitstream):
''' Break continuous JK bitstream in sequence of packets
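Illustrative example (hypothetical single wrapped packet; the trailing empty
string is an artifact of the split and is filtered out by callers):
>>> separate_packets('KJKJKJKKJJKJJKKK__J')
['KJKJKJKKJJKJJKKK__J', '']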
'''
# Use EOP (00J or __J) as packet delimiter
fields=re.split(r'([0_]+.)', bitstream)
# Join EOP back onto packet
pairs = iter(fields)
return [c+next(pairs, '') for c in pairs]
def test_packet_decode():
''' Replay poor-man's USB tcpdump file. Useful for testing new decoders.
'''
bitstream=(fileinput.input())
for line in bitstream:
words=re.findall(r"\S+", line)
# Default format is:
# [time] [direction] packet
if len(words)>1:
if len(words)>=3:
time=words[0]
direction=words[1]
else:
if re.search(r'^\d+$', words[0]):
time=words[0]
direction=""
else:
time=""
direction=words[0]
else:
time=""
direction=""
packet=words[-1]
if re.search(r'^[JKjk01_]+$', packet):
print("------------", time, direction)
captured=USBPacket(packet)
def test_stream_decode():
''' Read and display packet details from a Pretty Printed dump.
'''
# Read packet details from a Pretty Printed dump as continuous bitstream
bitstream=unpp_packet((fileinput.input()))
# Separate bitstream into packets
packets=separate_packets(bitstream)
# display details of each packet
for packet in packets:
# Ignore potential empty packets caused by artifacts of pretty printing
if len(packet)>0:
packet=USBPacket(packet)
print(packet)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
py | b415eaee489d10063c23801f92856a7c6f033b27 | #! /usr/bin/env python
from xmlrpclib import Server
server=Server('http://user:[email protected]:9001')
info=server.supervisor.getProcessInfo('test')
if 'STOPPED'==info['statename']:
print 'server is stopped, we start it!'
server.supervisor.startProcess('test')
elif 'RUNNING'==info['statename']:
print 'server is running, we stop it!'
server.supervisor.stopProcess('test')
print 'test process state is', server.supervisor.getProcessInfo('test')
|
py | b415eafb5b10741a77ba3450ecd0f858dbb2377c | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/vehicle/component/shared_reactor_unit.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | b415ec6f502b19fff1c2271cb3b45fd44a2d6c3a | """
Django settings for intmed project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o7k3mx^wh386yjj1jw1ne2@3sp7y#b*ajsp#j-41-dazkew(vo'
REFRESH_TOKEN_SECRET='%4^+-l+j_aa8w9v5$#khtq4622vlccg1n6!xtpv17fd8gs_9#g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'clinica.apps.ClinicaConfig',
'phonenumber_field',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_simplejwt',
'django_filters',
'corsheaders',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'intmed.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'intmed.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.oracle',
'NAME': 'xe',
'USER': 'joanna',
'PASSWORD': 'joanna',
'HOST': 'localhost',
'PORT': '1521',
}
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
'rest_framework.permissions.IsAuthenticated',
'rest_framework.permissions.AllowAny',
],
'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend'],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_simplejwt.authentication.JWTAuthentication',
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
],
}
AUTH_USER_MODEL = 'clinica.User'
CORS_ORIGIN_WHITELIST = [
"http://localhost:4200",
]
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=60),
'AUTH_HEADER_TYPES': ('Token',),
'ROTATE_REFRESH_TOKENS': True,
}
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
PHONENUMBER_DEFAULT_REGION='BR'
PHONENUMBER_DB_FORMAT='NATIONAL'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
py | b415ecb93af2512b7116509df0b88b55d779bf6a | from __future__ import division
from abc import ABCMeta
from collections import Iterable, OrderedDict
from copy import deepcopy
from functools import partial
from numbers import Real, Integral
from xml.etree import ElementTree as ET
from math import sqrt
from six import add_metaclass, string_types
import numpy as np
from openmc.checkvalue import check_type, check_value, check_greater_than
from openmc.region import Region, Intersection, Union
from openmc.mixin import IDManagerMixin
# A static variable for auto-generated Surface IDs
AUTO_SURFACE_ID = 10000
_BOUNDARY_TYPES = ['transmission', 'vacuum', 'reflective', 'periodic']
class Surface(IDManagerMixin):
"""An implicit surface with an associated boundary condition.
An implicit surface is defined as the set of zeros of a function of the
three Cartesian coordinates. Surfaces in OpenMC are limited to a set of
algebraic surfaces, i.e., surfaces that are polynomial in x, y, and z.
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface. Note that periodic boundary conditions
can only be applied to x-, y-, and z-planes, and only axis-aligned
periodicity is supported.
name : str, optional
Name of the surface. If not specified, the name will be the empty
string.
Attributes
----------
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}
Boundary condition that defines the behavior for particles hitting the
surface.
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
next_id = 1
used_ids = set()
def __init__(self, surface_id=None, boundary_type='transmission', name=''):
self.id = surface_id
self.name = name
self._type = ''
self.boundary_type = boundary_type
# A dictionary of the quadratic surface coefficients
# Key - coefficient name
# Value - coefficient value
self._coefficients = {}
# An ordered list of the coefficient names to export to XML in the
# proper order
self._coeff_keys = []
def __neg__(self):
return Halfspace(self, '-')
def __pos__(self):
return Halfspace(self, '+')
def __hash__(self):
return hash(repr(self))
def __repr__(self):
string = 'Surface\n'
string += '{0: <16}{1}{2}\n'.format('\tID', '=\t', self._id)
string += '{0: <16}{1}{2}\n'.format('\tName', '=\t', self._name)
string += '{0: <16}{1}{2}\n'.format('\tType', '=\t', self._type)
string += '{0: <16}{1}{2}\n'.format('\tBoundary', '=\t', self._boundary_type)
coefficients = '{0: <16}'.format('\tCoefficients') + '\n'
for coeff in self._coefficients:
coefficients += '{0: <16}{1}{2}\n'.format(
coeff, '=\t', self._coefficients[coeff])
string += coefficients
return string
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def boundary_type(self):
return self._boundary_type
@property
def coefficients(self):
return self._coefficients
@name.setter
def name(self, name):
if name is not None:
check_type('surface name', name, string_types)
self._name = name
else:
self._name = ''
@boundary_type.setter
def boundary_type(self, boundary_type):
check_type('boundary type', boundary_type, string_types)
check_value('boundary type', boundary_type, _BOUNDARY_TYPES)
self._boundary_type = boundary_type
def bounding_box(self, side):
"""Determine an axis-aligned bounding box.
An axis-aligned bounding box for surface half-spaces is represented by
its lower-left and upper-right coordinates. If the half-space is
unbounded in a particular direction, numpy.inf is used to represent
infinity.
Parameters
----------
side : {'+', '-'}
Indicates the negative or positive half-space
Returns
-------
numpy.ndarray
Lower-left coordinates of the axis-aligned bounding box for the
desired half-space
numpy.ndarray
Upper-right coordinates of the axis-aligned bounding box for the
desired half-space
"""
return (np.array([-np.inf, -np.inf, -np.inf]),
np.array([np.inf, np.inf, np.inf]))
def clone(self, memo=None):
"""Create a copy of this surface with a new unique ID.
Parameters
----------
memo : dict or None
A nested dictionary of previously cloned objects. This parameter
is used internally and should not be specified by the user.
Returns
-------
clone : openmc.Surface
The clone of this surface
"""
if memo is None:
memo = {}
# If no memoized clone exists, instantiate one
if self not in memo:
clone = deepcopy(self)
clone.id = None
# Memoize the clone
memo[self] = clone
return memo[self]
def to_xml_element(self):
"""Return XML representation of the surface
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing source data
"""
element = ET.Element("surface")
element.set("id", str(self._id))
if len(self._name) > 0:
element.set("name", str(self._name))
element.set("type", self._type)
if self.boundary_type != 'transmission':
element.set("boundary", self.boundary_type)
element.set("coeffs", ' '.join([str(self._coefficients.setdefault(key, 0.0))
for key in self._coeff_keys]))
return element
@staticmethod
def from_hdf5(group):
"""Create surface from HDF5 group
Parameters
----------
group : h5py.Group
Group in HDF5 file
Returns
-------
openmc.Surface
Instance of surface subclass
"""
surface_id = int(group.name.split('/')[-1].lstrip('surface '))
name = group['name'].value.decode() if 'name' in group else ''
surf_type = group['type'].value.decode()
bc = group['boundary_type'].value.decode()
coeffs = group['coefficients'][...]
# Create the Surface based on its type
if surf_type == 'x-plane':
x0 = coeffs[0]
surface = XPlane(surface_id, bc, x0, name)
elif surf_type == 'y-plane':
y0 = coeffs[0]
surface = YPlane(surface_id, bc, y0, name)
elif surf_type == 'z-plane':
z0 = coeffs[0]
surface = ZPlane(surface_id, bc, z0, name)
elif surf_type == 'plane':
A, B, C, D = coeffs
surface = Plane(surface_id, bc, A, B, C, D, name)
elif surf_type == 'x-cylinder':
y0, z0, R = coeffs
surface = XCylinder(surface_id, bc, y0, z0, R, name)
elif surf_type == 'y-cylinder':
x0, z0, R = coeffs
surface = YCylinder(surface_id, bc, x0, z0, R, name)
elif surf_type == 'z-cylinder':
x0, y0, R = coeffs
surface = ZCylinder(surface_id, bc, x0, y0, R, name)
elif surf_type == 'sphere':
x0, y0, z0, R = coeffs
surface = Sphere(surface_id, bc, x0, y0, z0, R, name)
elif surf_type in ['x-cone', 'y-cone', 'z-cone']:
x0, y0, z0, R2 = coeffs
if surf_type == 'x-cone':
surface = XCone(surface_id, bc, x0, y0, z0, R2, name)
elif surf_type == 'y-cone':
surface = YCone(surface_id, bc, x0, y0, z0, R2, name)
elif surf_type == 'z-cone':
surface = ZCone(surface_id, bc, x0, y0, z0, R2, name)
elif surf_type == 'quadric':
a, b, c, d, e, f, g, h, j, k = coeffs
surface = Quadric(surface_id, bc, a, b, c, d, e, f, g,
h, j, k, name)
return surface
class Plane(Surface):
"""An arbitrary plane of the form :math:`Ax + By + Cz = D`.
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface.
A : float, optional
The 'A' parameter for the plane. Defaults to 1.
B : float, optional
The 'B' parameter for the plane. Defaults to 0.
C : float, optional
The 'C' parameter for the plane. Defaults to 0.
D : float, optional
The 'D' parameter for the plane. Defaults to 0.
name : str, optional
Name of the plane. If not specified, the name will be the empty string.
Attributes
----------
a : float
The 'A' parameter for the plane
b : float
The 'B' parameter for the plane
c : float
The 'C' parameter for the plane
d : float
The 'D' parameter for the plane
boundary_type : {'transmission', 'vacuum', 'reflective'}
Boundary condition that defines the behavior for particles hitting the
surface.
periodic_surface : openmc.Surface
If a periodic boundary condition is used, the surface with which this
one is periodic with
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
A=1., B=0., C=0., D=0., name=''):
super(Plane, self).__init__(surface_id, boundary_type, name=name)
self._type = 'plane'
self._coeff_keys = ['A', 'B', 'C', 'D']
self._periodic_surface = None
self.a = A
self.b = B
self.c = C
self.d = D
@property
def a(self):
return self.coefficients['A']
@property
def b(self):
return self.coefficients['B']
@property
def c(self):
return self.coefficients['C']
@property
def d(self):
return self.coefficients['D']
@property
def periodic_surface(self):
return self._periodic_surface
@a.setter
def a(self, A):
check_type('A coefficient', A, Real)
self._coefficients['A'] = A
@b.setter
def b(self, B):
check_type('B coefficient', B, Real)
self._coefficients['B'] = B
@c.setter
def c(self, C):
check_type('C coefficient', C, Real)
self._coefficients['C'] = C
@d.setter
def d(self, D):
check_type('D coefficient', D, Real)
self._coefficients['D'] = D
@periodic_surface.setter
def periodic_surface(self, periodic_surface):
check_type('periodic surface', periodic_surface, Plane)
self._periodic_surface = periodic_surface
periodic_surface._periodic_surface = self
def evaluate(self, point):
"""Evaluate the surface equation at a given point.
Parameters
----------
point : 3-tuple of float
The Cartesian coordinates, :math:`(x',y',z')`, at which the surface
equation should be evaluated.
Returns
-------
float
:math:`Ax' + By' + Cz' - D`
"""
x, y, z = point
return self.a*x + self.b*y + self.c*z - self.d
def to_xml_element(self):
"""Return XML representation of the surface
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing source data
"""
element = super(Plane, self).to_xml_element()
# Add periodic surface pair information
if self.boundary_type == 'periodic':
if self.periodic_surface is not None:
element.set("periodic_surface_id", str(self.periodic_surface.id))
return element
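# Usage sketch (illustrative, not part of the original module): a plane
# satisfying x + 2y = 3 evaluates to zero at any point lying on it, e.g.
#   p = Plane(A=1., B=2., C=0., D=3.)
#   p.evaluate((1., 1., 5.))   # -> 0.0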
class XPlane(Plane):
"""A plane perpendicular to the x axis of the form :math:`x - x_0 = 0`
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface. Only axis-aligned periodicity is
supported, i.e., x-planes can only be paired with x-planes.
x0 : float, optional
Location of the plane. Defaults to 0.
name : str, optional
Name of the plane. If not specified, the name will be the empty string.
Attributes
----------
x0 : float
Location of the plane
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}
Boundary condition that defines the behavior for particles hitting the
surface.
periodic_surface : openmc.Surface
If a periodic boundary condition is used, the surface with which this
one is periodic with
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
x0=0., name=''):
super(XPlane, self).__init__(surface_id, boundary_type, name=name)
self._type = 'x-plane'
self._coeff_keys = ['x0']
self.x0 = x0
@property
def x0(self):
return self.coefficients['x0']
@x0.setter
def x0(self, x0):
check_type('x0 coefficient', x0, Real)
self._coefficients['x0'] = x0
def bounding_box(self, side):
"""Determine an axis-aligned bounding box.
An axis-aligned bounding box for surface half-spaces is represented by
its lower-left and upper-right coordinates. For the x-plane surface, the
half-spaces are unbounded in their y- and z- directions. To represent
infinity, numpy.inf is used.
Parameters
----------
side : {'+', '-'}
Indicates the negative or positive half-space
Returns
-------
numpy.ndarray
Lower-left coordinates of the axis-aligned bounding box for the
desired half-space
numpy.ndarray
Upper-right coordinates of the axis-aligned bounding box for the
desired half-space
"""
if side == '-':
return (np.array([-np.inf, -np.inf, -np.inf]),
np.array([self.x0, np.inf, np.inf]))
elif side == '+':
return (np.array([self.x0, -np.inf, -np.inf]),
np.array([np.inf, np.inf, np.inf]))
def evaluate(self, point):
"""Evaluate the surface equation at a given point.
Parameters
----------
point : 3-tuple of float
The Cartesian coordinates, :math:`(x',y',z')`, at which the surface
equation should be evaluated.
Returns
-------
float
:math:`x' - x_0`
"""
return point[0] - self.x0
class YPlane(Plane):
"""A plane perpendicular to the y axis of the form :math:`y - y_0 = 0`
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface. Only axis-aligned periodicity is
supported, i.e., y-planes can only be paired with y-planes.
y0 : float, optional
Location of the plane
name : str, optional
Name of the plane. If not specified, the name will be the empty string.
Attributes
----------
y0 : float
Location of the plane
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}
Boundary condition that defines the behavior for particles hitting the
surface.
periodic_surface : openmc.Surface
If a periodic boundary condition is used, the surface with which this
one is periodic with
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
y0=0., name=''):
# Initialize YPlane class attributes
super(YPlane, self).__init__(surface_id, boundary_type, name=name)
self._type = 'y-plane'
self._coeff_keys = ['y0']
self.y0 = y0
@property
def y0(self):
return self.coefficients['y0']
@y0.setter
def y0(self, y0):
check_type('y0 coefficient', y0, Real)
self._coefficients['y0'] = y0
def bounding_box(self, side):
"""Determine an axis-aligned bounding box.
An axis-aligned bounding box for surface half-spaces is represented by
its lower-left and upper-right coordinates. For the y-plane surface, the
half-spaces are unbounded in their x- and z- directions. To represent
infinity, numpy.inf is used.
Parameters
----------
side : {'+', '-'}
Indicates the negative or positive half-space
Returns
-------
numpy.ndarray
Lower-left coordinates of the axis-aligned bounding box for the
desired half-space
numpy.ndarray
Upper-right coordinates of the axis-aligned bounding box for the
desired half-space
"""
if side == '-':
return (np.array([-np.inf, -np.inf, -np.inf]),
np.array([np.inf, self.y0, np.inf]))
elif side == '+':
return (np.array([-np.inf, self.y0, -np.inf]),
np.array([np.inf, np.inf, np.inf]))
def evaluate(self, point):
"""Evaluate the surface equation at a given point.
Parameters
----------
point : 3-tuple of float
The Cartesian coordinates, :math:`(x',y',z')`, at which the surface
equation should be evaluated.
Returns
-------
float
:math:`y' - y_0`
"""
return point[1] - self.y0
class ZPlane(Plane):
"""A plane perpendicular to the z axis of the form :math:`z - z_0 = 0`
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface. Only axis-aligned periodicity is
supported, i.e., z-planes can only be paired with z-planes.
z0 : float, optional
Location of the plane. Defaults to 0.
name : str, optional
Name of the plane. If not specified, the name will be the empty string.
Attributes
----------
z0 : float
Location of the plane
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}
Boundary condition that defines the behavior for particles hitting the
surface.
periodic_surface : openmc.Surface
If a periodic boundary condition is used, the surface with which this
one is periodic with
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
z0=0., name=''):
# Initialize ZPlane class attributes
super(ZPlane, self).__init__(surface_id, boundary_type, name=name)
self._type = 'z-plane'
self._coeff_keys = ['z0']
self.z0 = z0
@property
def z0(self):
return self.coefficients['z0']
@z0.setter
def z0(self, z0):
check_type('z0 coefficient', z0, Real)
self._coefficients['z0'] = z0
def bounding_box(self, side):
"""Determine an axis-aligned bounding box.
An axis-aligned bounding box for surface half-spaces is represented by
its lower-left and upper-right coordinates. For the z-plane surface, the
half-spaces are unbounded in their x- and y- directions. To represent
infinity, numpy.inf is used.
Parameters
----------
side : {'+', '-'}
Indicates the negative or positive half-space
Returns
-------
numpy.ndarray
Lower-left coordinates of the axis-aligned bounding box for the
desired half-space
numpy.ndarray
Upper-right coordinates of the axis-aligned bounding box for the
desired half-space
"""
if side == '-':
return (np.array([-np.inf, -np.inf, -np.inf]),
np.array([np.inf, np.inf, self.z0]))
elif side == '+':
return (np.array([-np.inf, -np.inf, self.z0]),
np.array([np.inf, np.inf, np.inf]))
def evaluate(self, point):
"""Evaluate the surface equation at a given point.
Parameters
----------
point : 3-tuple of float
The Cartesian coordinates, :math:`(x',y',z')`, at which the surface
equation should be evaluated.
Returns
-------
float
:math:`z' - z_0`
"""
return point[2] - self.z0
@add_metaclass(ABCMeta)
class Cylinder(Surface):
"""A cylinder whose length is parallel to the x-, y-, or z-axis.
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface.
R : float, optional
Radius of the cylinder. Defaults to 1.
name : str, optional
Name of the cylinder. If not specified, the name will be the empty
string.
Attributes
----------
r : float
Radius of the cylinder
boundary_type : {'transmission', 'vacuum', 'reflective'}
Boundary condition that defines the behavior for particles hitting the
surface.
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
R=1., name=''):
super(Cylinder, self).__init__(surface_id, boundary_type, name=name)
self._coeff_keys = ['R']
self.r = R
@property
def r(self):
return self.coefficients['R']
@r.setter
def r(self, R):
check_type('R coefficient', R, Real)
self._coefficients['R'] = R
class XCylinder(Cylinder):
"""An infinite cylinder whose length is parallel to the x-axis of the form
:math:`(y - y_0)^2 + (z - z_0)^2 = R^2`.
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface.
y0 : float, optional
y-coordinate of the center of the cylinder. Defaults to 0.
z0 : float, optional
z-coordinate of the center of the cylinder. Defaults to 0.
R : float, optional
Radius of the cylinder. Defaults to 1.
name : str, optional
Name of the cylinder. If not specified, the name will be the empty
string.
Attributes
----------
y0 : float
y-coordinate of the center of the cylinder
z0 : float
z-coordinate of the center of the cylinder
boundary_type : {'transmission', 'vacuum', 'reflective'}
Boundary condition that defines the behavior for particles hitting the
surface.
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
y0=0., z0=0., R=1., name=''):
super(XCylinder, self).__init__(surface_id, boundary_type, R, name=name)
self._type = 'x-cylinder'
self._coeff_keys = ['y0', 'z0', 'R']
self.y0 = y0
self.z0 = z0
@property
def y0(self):
return self.coefficients['y0']
@property
def z0(self):
return self.coefficients['z0']
@y0.setter
def y0(self, y0):
check_type('y0 coefficient', y0, Real)
self._coefficients['y0'] = y0
@z0.setter
def z0(self, z0):
check_type('z0 coefficient', z0, Real)
self._coefficients['z0'] = z0
def bounding_box(self, side):
"""Determine an axis-aligned bounding box.
An axis-aligned bounding box for surface half-spaces is represented by
its lower-left and upper-right coordinates. For the x-cylinder surface,
the negative half-space is unbounded in the x- direction and the
positive half-space is unbounded in all directions. To represent
infinity, numpy.inf is used.
Parameters
----------
side : {'+', '-'}
Indicates the negative or positive half-space
Returns
-------
numpy.ndarray
Lower-left coordinates of the axis-aligned bounding box for the
desired half-space
numpy.ndarray
Upper-right coordinates of the axis-aligned bounding box for the
desired half-space
"""
if side == '-':
return (np.array([-np.inf, self.y0 - self.r, self.z0 - self.r]),
np.array([np.inf, self.y0 + self.r, self.z0 + self.r]))
elif side == '+':
return (np.array([-np.inf, -np.inf, -np.inf]),
np.array([np.inf, np.inf, np.inf]))
def evaluate(self, point):
"""Evaluate the surface equation at a given point.
Parameters
----------
point : 3-tuple of float
The Cartesian coordinates, :math:`(x',y',z')`, at which the surface
equation should be evaluated.
Returns
-------
float
:math:`(y' - y_0)^2 + (z' - z_0)^2 - R^2`
"""
y = point[1] - self.y0
z = point[2] - self.z0
return y**2 + z**2 - self.r**2
class YCylinder(Cylinder):
"""An infinite cylinder whose length is parallel to the y-axis of the form
:math:`(x - x_0)^2 + (z - z_0)^2 = R^2`.
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface.
x0 : float, optional
x-coordinate of the center of the cylinder. Defaults to 0.
z0 : float, optional
z-coordinate of the center of the cylinder. Defaults to 0.
R : float, optional
Radius of the cylinder. Defaults to 1.
name : str, optional
Name of the cylinder. If not specified, the name will be the empty
string.
Attributes
----------
x0 : float
x-coordinate of the center of the cylinder
z0 : float
z-coordinate of the center of the cylinder
boundary_type : {'transmission', 'vacuum', 'reflective'}
Boundary condition that defines the behavior for particles hitting the
surface.
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
x0=0., z0=0., R=1., name=''):
super(YCylinder, self).__init__(surface_id, boundary_type, R, name=name)
self._type = 'y-cylinder'
self._coeff_keys = ['x0', 'z0', 'R']
self.x0 = x0
self.z0 = z0
@property
def x0(self):
return self.coefficients['x0']
@property
def z0(self):
return self.coefficients['z0']
@x0.setter
def x0(self, x0):
check_type('x0 coefficient', x0, Real)
self._coefficients['x0'] = x0
@z0.setter
def z0(self, z0):
check_type('z0 coefficient', z0, Real)
self._coefficients['z0'] = z0
def bounding_box(self, side):
"""Determine an axis-aligned bounding box.
An axis-aligned bounding box for surface half-spaces is represented by
its lower-left and upper-right coordinates. For the y-cylinder surface,
the negative half-space is unbounded in the y- direction and the
positive half-space is unbounded in all directions. To represent
infinity, numpy.inf is used.
Parameters
----------
side : {'+', '-'}
Indicates the negative or positive half-space
Returns
-------
numpy.ndarray
Lower-left coordinates of the axis-aligned bounding box for the
desired half-space
numpy.ndarray
Upper-right coordinates of the axis-aligned bounding box for the
desired half-space
"""
if side == '-':
return (np.array([self.x0 - self.r, -np.inf, self.z0 - self.r]),
np.array([self.x0 + self.r, np.inf, self.z0 + self.r]))
elif side == '+':
return (np.array([-np.inf, -np.inf, -np.inf]),
np.array([np.inf, np.inf, np.inf]))
def evaluate(self, point):
"""Evaluate the surface equation at a given point.
Parameters
----------
point : 3-tuple of float
The Cartesian coordinates, :math:`(x',y',z')`, at which the surface
equation should be evaluated.
Returns
-------
float
:math:`(x' - x_0)^2 + (z' - z_0)^2 - R^2`
"""
x = point[0] - self.x0
z = point[2] - self.z0
return x**2 + z**2 - self.r**2
class ZCylinder(Cylinder):
"""An infinite cylinder whose length is parallel to the z-axis of the form
:math:`(x - x_0)^2 + (y - y_0)^2 = R^2`.
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface.
x0 : float, optional
x-coordinate of the center of the cylinder. Defaults to 0.
y0 : float, optional
y-coordinate of the center of the cylinder. Defaults to 0.
R : float, optional
Radius of the cylinder. Defaults to 1.
name : str, optional
Name of the cylinder. If not specified, the name will be the empty
string.
Attributes
----------
x0 : float
x-coordinate of the center of the cylinder
y0 : float
y-coordinate of the center of the cylinder
boundary_type : {'transmission', 'vacuum', 'reflective'}
Boundary condition that defines the behavior for particles hitting the
surface.
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
x0=0., y0=0., R=1., name=''):
super(ZCylinder, self).__init__(surface_id, boundary_type, R, name=name)
self._type = 'z-cylinder'
self._coeff_keys = ['x0', 'y0', 'R']
self.x0 = x0
self.y0 = y0
@property
def x0(self):
return self.coefficients['x0']
@property
def y0(self):
return self.coefficients['y0']
@x0.setter
def x0(self, x0):
check_type('x0 coefficient', x0, Real)
self._coefficients['x0'] = x0
@y0.setter
def y0(self, y0):
check_type('y0 coefficient', y0, Real)
self._coefficients['y0'] = y0
def bounding_box(self, side):
"""Determine an axis-aligned bounding box.
An axis-aligned bounding box for surface half-spaces is represented by
its lower-left and upper-right coordinates. For the z-cylinder surface,
the negative half-space is unbounded in the z- direction and the
positive half-space is unbounded in all directions. To represent
infinity, numpy.inf is used.
Parameters
----------
side : {'+', '-'}
Indicates the negative or positive half-space
Returns
-------
numpy.ndarray
Lower-left coordinates of the axis-aligned bounding box for the
desired half-space
numpy.ndarray
Upper-right coordinates of the axis-aligned bounding box for the
desired half-space
"""
if side == '-':
return (np.array([self.x0 - self.r, self.y0 - self.r, -np.inf]),
np.array([self.x0 + self.r, self.y0 + self.r, np.inf]))
elif side == '+':
return (np.array([-np.inf, -np.inf, -np.inf]),
np.array([np.inf, np.inf, np.inf]))
def evaluate(self, point):
"""Evaluate the surface equation at a given point.
Parameters
----------
point : 3-tuple of float
The Cartesian coordinates, :math:`(x',y',z')`, at which the surface
equation should be evaluated.
Returns
-------
float
:math:`(x' - x_0)^2 + (y' - y_0)^2 - R^2`
"""
x = point[0] - self.x0
y = point[1] - self.y0
return x**2 + y**2 - self.r**2
class Sphere(Surface):
"""A sphere of the form :math:`(x - x_0)^2 + (y - y_0)^2 + (z - z_0)^2 = R^2`.
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface.
x0 : float, optional
x-coordinate of the center of the sphere. Defaults to 0.
y0 : float, optional
y-coordinate of the center of the sphere. Defaults to 0.
z0 : float, optional
z-coordinate of the center of the sphere. Defaults to 0.
R : float, optional
Radius of the sphere. Defaults to 1.
name : str, optional
Name of the sphere. If not specified, the name will be the empty string.
Attributes
----------
x0 : float
x-coordinate of the center of the sphere
y0 : float
y-coordinate of the center of the sphere
z0 : float
z-coordinate of the center of the sphere
r : float
Radius of the sphere
boundary_type : {'transmission', 'vacuum', 'reflective'}
Boundary condition that defines the behavior for particles hitting the
surface.
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
x0=0., y0=0., z0=0., R=1., name=''):
super(Sphere, self).__init__(surface_id, boundary_type, name=name)
self._type = 'sphere'
self._coeff_keys = ['x0', 'y0', 'z0', 'R']
self.x0 = x0
self.y0 = y0
self.z0 = z0
self.r = R
@property
def x0(self):
return self.coefficients['x0']
@property
def y0(self):
return self.coefficients['y0']
@property
def z0(self):
return self.coefficients['z0']
@property
def r(self):
return self.coefficients['R']
@x0.setter
def x0(self, x0):
check_type('x0 coefficient', x0, Real)
self._coefficients['x0'] = x0
@y0.setter
def y0(self, y0):
check_type('y0 coefficient', y0, Real)
self._coefficients['y0'] = y0
@z0.setter
def z0(self, z0):
check_type('z0 coefficient', z0, Real)
self._coefficients['z0'] = z0
@r.setter
def r(self, R):
check_type('R coefficient', R, Real)
self._coefficients['R'] = R
def bounding_box(self, side):
"""Determine an axis-aligned bounding box.
An axis-aligned bounding box for surface half-spaces is represented by
its lower-left and upper-right coordinates. The positive half-space of a
sphere is unbounded in all directions. To represent infinity, numpy.inf
is used.
Parameters
----------
side : {'+', '-'}
Indicates the negative or positive half-space
Returns
-------
numpy.ndarray
Lower-left coordinates of the axis-aligned bounding box for the
desired half-space
numpy.ndarray
Upper-right coordinates of the axis-aligned bounding box for the
desired half-space
"""
if side == '-':
return (np.array([self.x0 - self.r, self.y0 - self.r,
self.z0 - self.r]),
np.array([self.x0 + self.r, self.y0 + self.r,
self.z0 + self.r]))
elif side == '+':
return (np.array([-np.inf, -np.inf, -np.inf]),
np.array([np.inf, np.inf, np.inf]))
def evaluate(self, point):
"""Evaluate the surface equation at a given point.
Parameters
----------
point : 3-tuple of float
The Cartesian coordinates, :math:`(x',y',z')`, at which the surface
equation should be evaluated.
Returns
-------
float
:math:`(x' - x_0)^2 + (y' - y_0)^2 + (z' - z_0)^2 - R^2`
"""
x = point[0] - self.x0
y = point[1] - self.y0
z = point[2] - self.z0
return x**2 + y**2 + z**2 - self.r**2
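# Usage sketch (illustrative): the negative half-space of a sphere is bounded by
# a box of half-width R centered on (x0, y0, z0), e.g.
#   s = Sphere(x0=1., y0=2., z0=3., R=4.)
#   s.bounding_box('-')   # -> (array([-3., -2., -1.]), array([5., 6., 7.]))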
@add_metaclass(ABCMeta)
class Cone(Surface):
"""A conical surface parallel to the x-, y-, or z-axis.
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface.
x0 : float, optional
x-coordinate of the apex. Defaults to 0.
y0 : float
y-coordinate of the apex. Defaults to 0.
z0 : float
z-coordinate of the apex. Defaults to 0.
R2 : float
Parameter related to the aperture. Defaults to 1.
name : str
Name of the cone. If not specified, the name will be the empty string.
Attributes
----------
x0 : float
x-coordinate of the apex
y0 : float
y-coordinate of the apex
z0 : float
z-coordinate of the apex
r2 : float
Parameter related to the aperture
boundary_type : {'transmission', 'vacuum', 'reflective'}
Boundary condition that defines the behavior for particles hitting the
surface.
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
x0=0., y0=0., z0=0., R2=1., name=''):
super(Cone, self).__init__(surface_id, boundary_type, name=name)
self._coeff_keys = ['x0', 'y0', 'z0', 'R2']
self.x0 = x0
self.y0 = y0
self.z0 = z0
self.r2 = R2
@property
def x0(self):
return self.coefficients['x0']
@property
def y0(self):
return self.coefficients['y0']
@property
def z0(self):
return self.coefficients['z0']
@property
def r2(self):
return self.coefficients['R2']
@x0.setter
def x0(self, x0):
check_type('x0 coefficient', x0, Real)
self._coefficients['x0'] = x0
@y0.setter
def y0(self, y0):
check_type('y0 coefficient', y0, Real)
self._coefficients['y0'] = y0
@z0.setter
def z0(self, z0):
check_type('z0 coefficient', z0, Real)
self._coefficients['z0'] = z0
@r2.setter
def r2(self, R2):
check_type('R^2 coefficient', R2, Real)
self._coefficients['R2'] = R2
class XCone(Cone):
"""A cone parallel to the x-axis of the form :math:`(y - y_0)^2 + (z - z_0)^2 =
R^2 (x - x_0)^2`.
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface.
x0 : float, optional
x-coordinate of the apex. Defaults to 0.
y0 : float, optional
y-coordinate of the apex. Defaults to 0.
z0 : float, optional
z-coordinate of the apex. Defaults to 0.
R2 : float, optional
Parameter related to the aperture. Defaults to 1.
name : str, optional
Name of the cone. If not specified, the name will be the empty string.
Attributes
----------
x0 : float
x-coordinate of the apex
y0 : float
y-coordinate of the apex
z0 : float
z-coordinate of the apex
R2 : float
Parameter related to the aperture
boundary_type : {'transmission', 'vacuum', 'reflective'}
Boundary condition that defines the behavior for particles hitting the
surface.
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
x0=0., y0=0., z0=0., R2=1., name=''):
super(XCone, self).__init__(surface_id, boundary_type, x0, y0,
z0, R2, name=name)
self._type = 'x-cone'
def evaluate(self, point):
"""Evaluate the surface equation at a given point.
Parameters
----------
point : 3-tuple of float
The Cartesian coordinates, :math:`(x',y',z')`, at which the surface
equation should be evaluated.
Returns
-------
float
:math:`(y' - y_0)^2 + (z' - z_0)^2 - R^2(x' - x_0)^2`
"""
x = point[0] - self.x0
y = point[1] - self.y0
z = point[2] - self.z0
return y**2 + z**2 - self.r2*x**2
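# Usage sketch (illustrative): with the apex at the origin and R2=1, the point
# (2, 2, 0) lies on the x-cone since (y**2 + z**2) - R2*x**2 = 4 + 0 - 4 = 0, e.g.
#   XCone(R2=1.).evaluate((2., 2., 0.))   # -> 0.0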
class YCone(Cone):
"""A cone parallel to the y-axis of the form :math:`(x - x_0)^2 + (z - z_0)^2 =
R^2 (y - y_0)^2`.
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface.
x0 : float, optional
x-coordinate of the apex. Defaults to 0.
y0 : float, optional
y-coordinate of the apex. Defaults to 0.
z0 : float, optional
z-coordinate of the apex. Defaults to 0.
R2 : float, optional
Parameter related to the aperture. Defaults to 1.
name : str, optional
Name of the cone. If not specified, the name will be the empty string.
Attributes
----------
x0 : float
x-coordinate of the apex
y0 : float
y-coordinate of the apex
z0 : float
z-coordinate of the apex
R2 : float
Parameter related to the aperture
boundary_type : {'transmission', 'vacuum', 'reflective'}
Boundary condition that defines the behavior for particles hitting the
surface.
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
x0=0., y0=0., z0=0., R2=1., name=''):
super(YCone, self).__init__(surface_id, boundary_type, x0, y0, z0,
R2, name=name)
self._type = 'y-cone'
def evaluate(self, point):
"""Evaluate the surface equation at a given point.
Parameters
----------
point : 3-tuple of float
The Cartesian coordinates, :math:`(x',y',z')`, at which the surface
equation should be evaluated.
Returns
-------
float
:math:`(x' - x_0)^2 + (z' - z_0)^2 - R^2(y' - y_0)^2`
"""
x = point[0] - self.x0
y = point[1] - self.y0
z = point[2] - self.z0
return x**2 + z**2 - self.r2*y**2
class ZCone(Cone):
"""A cone parallel to the x-axis of the form :math:`(x - x_0)^2 + (y - y_0)^2 =
R^2 (z - z_0)^2`.
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface.
x0 : float, optional
x-coordinate of the apex. Defaults to 0.
y0 : float, optional
y-coordinate of the apex. Defaults to 0.
z0 : float, optional
z-coordinate of the apex. Defaults to 0.
R2 : float, optional
Parameter related to the aperture. Defaults to 1.
name : str, optional
Name of the cone. If not specified, the name will be the empty string.
Attributes
----------
x0 : float
x-coordinate of the apex
y0 : float
y-coordinate of the apex
z0 : float
z-coordinate of the apex
R2 : float
Parameter related to the aperture
boundary_type : {'transmission', 'vacuum', 'reflective'}
Boundary condition that defines the behavior for particles hitting the
surface.
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
x0=0., y0=0., z0=0., R2=1., name=''):
super(ZCone, self).__init__(surface_id, boundary_type, x0, y0, z0,
R2, name=name)
self._type = 'z-cone'
def evaluate(self, point):
"""Evaluate the surface equation at a given point.
Parameters
----------
point : 3-tuple of float
The Cartesian coordinates, :math:`(x',y',z')`, at which the surface
equation should be evaluated.
Returns
-------
float
:math:`(x' - x_0)^2 + (y' - y_0)^2 - R^2(z' - z_0)^2`
"""
x = point[0] - self.x0
y = point[1] - self.y0
z = point[2] - self.z0
return x**2 + y**2 - self.r2*z**2
class Quadric(Surface):
"""A surface of the form :math:`Ax^2 + By^2 + Cz^2 + Dxy + Eyz + Fxz + Gx + Hy +
Jz + K = 0`.
Parameters
----------
surface_id : int, optional
Unique identifier for the surface. If not specified, an identifier will
automatically be assigned.
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}, optional
Boundary condition that defines the behavior for particles hitting the
surface. Defaults to transmissive boundary condition where particles
freely pass through the surface.
a, b, c, d, e, f, g, h, j, k : float, optional
coefficients for the surface. All default to 0.
name : str, optional
Name of the sphere. If not specified, the name will be the empty string.
Attributes
----------
a, b, c, d, e, f, g, h, j, k : float
coefficients for the surface
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}
Boundary condition that defines the behavior for particles hitting the
surface.
coefficients : dict
Dictionary of surface coefficients
id : int
Unique identifier for the surface
name : str
Name of the surface
type : str
Type of the surface
"""
def __init__(self, surface_id=None, boundary_type='transmission',
a=0., b=0., c=0., d=0., e=0., f=0., g=0.,
h=0., j=0., k=0., name=''):
super(Quadric, self).__init__(surface_id, boundary_type, name=name)
self._type = 'quadric'
self._coeff_keys = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k']
self.a = a
self.b = b
self.c = c
self.d = d
self.e = e
self.f = f
self.g = g
self.h = h
self.j = j
self.k = k
@property
def a(self):
return self.coefficients['a']
@property
def b(self):
return self.coefficients['b']
@property
def c(self):
return self.coefficients['c']
@property
def d(self):
return self.coefficients['d']
@property
def e(self):
return self.coefficients['e']
@property
def f(self):
return self.coefficients['f']
@property
def g(self):
return self.coefficients['g']
@property
def h(self):
return self.coefficients['h']
@property
def j(self):
return self.coefficients['j']
@property
def k(self):
return self.coefficients['k']
@a.setter
def a(self, a):
check_type('a coefficient', a, Real)
self._coefficients['a'] = a
@b.setter
def b(self, b):
check_type('b coefficient', b, Real)
self._coefficients['b'] = b
@c.setter
def c(self, c):
check_type('c coefficient', c, Real)
self._coefficients['c'] = c
@d.setter
def d(self, d):
check_type('d coefficient', d, Real)
self._coefficients['d'] = d
@e.setter
def e(self, e):
check_type('e coefficient', e, Real)
self._coefficients['e'] = e
@f.setter
def f(self, f):
check_type('f coefficient', f, Real)
self._coefficients['f'] = f
@g.setter
def g(self, g):
check_type('g coefficient', g, Real)
self._coefficients['g'] = g
@h.setter
def h(self, h):
check_type('h coefficient', h, Real)
self._coefficients['h'] = h
@j.setter
def j(self, j):
check_type('j coefficient', j, Real)
self._coefficients['j'] = j
@k.setter
def k(self, k):
check_type('k coefficient', k, Real)
self._coefficients['k'] = k
def evaluate(self, point):
"""Evaluate the surface equation at a given point.
Parameters
----------
point : 3-tuple of float
The Cartesian coordinates, :math:`(x',y',z')`, at which the surface
equation should be evaluated.
Returns
-------
float
:math:`Ax'^2 + By'^2 + Cz'^2 + Dx'y' + Ey'z' + Fx'z' + Gx' + Hy' +
Jz' + K = 0`
"""
x, y, z = point
return x*(self.a*x + self.d*y + self.g) + \
y*(self.b*y + self.e*z + self.h) + \
z*(self.c*z + self.f*x + self.j) + self.k
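# Illustrative sketch (not part of the original module): a sphere of radius r
# centered at the origin is the quadric x^2 + y^2 + z^2 - r^2 = 0, i.e.
# a = b = c = 1 and k = -r^2 with all other coefficients zero. Assuming the
# constructor above:
#
#     >>> q = Quadric(a=1., b=1., c=1., k=-4.)   # sphere of radius 2
#     >>> q.evaluate((0., 0., 0.))               # negative -> inside
#     -4.0
#     >>> q.evaluate((2., 0., 0.))               # zero -> on the surface
#     0.0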
class Halfspace(Region):
"""A positive or negative half-space region.
A half-space is either of the two parts into which a two-dimension surface
divides the three-dimensional Euclidean space. If the equation of the
surface is :math:`f(x,y,z) = 0`, the region for which :math:`f(x,y,z) < 0`
is referred to as the negative half-space and the region for which
:math:`f(x,y,z) > 0` is referred to as the positive half-space.
Instances of Halfspace are generally not instantiated directly. Rather, they
can be created from an existing Surface through the __neg__ and __pos__
operators, as the following example demonstrates:
>>> sphere = openmc.Sphere(surface_id=1, R=10.0)
>>> inside_sphere = -sphere
>>> outside_sphere = +sphere
>>> type(inside_sphere)
<class 'openmc.surface.Halfspace'>
Parameters
----------
surface : openmc.Surface
Surface which divides Euclidean space.
side : {'+', '-'}
Indicates whether the positive or negative half-space is used.
Attributes
----------
surface : openmc.Surface
Surface which divides Euclidean space.
side : {'+', '-'}
Indicates whether the positive or negative half-space is used.
bounding_box : tuple of numpy.ndarray
Lower-left and upper-right coordinates of an axis-aligned bounding box
"""
def __init__(self, surface, side):
self.surface = surface
self.side = side
def __and__(self, other):
if isinstance(other, Intersection):
return Intersection([self] + other[:])
else:
return Intersection((self, other))
def __or__(self, other):
if isinstance(other, Union):
return Union([self] + other[:])
else:
return Union((self, other))
def __invert__(self):
return -self.surface if self.side == '+' else +self.surface
def __contains__(self, point):
"""Check whether a point is contained in the half-space.
Parameters
----------
point : 3-tuple of float
Cartesian coordinates, :math:`(x',y',z')`, of the point
Returns
-------
bool
Whether the point is in the half-space
"""
val = self.surface.evaluate(point)
return val >= 0. if self.side == '+' else val < 0.
@property
def surface(self):
return self._surface
@surface.setter
def surface(self, surface):
check_type('surface', surface, Surface)
self._surface = surface
@property
def side(self):
return self._side
@side.setter
def side(self, side):
check_value('side', side, ('+', '-'))
self._side = side
@property
def bounding_box(self):
return self.surface.bounding_box(self.side)
def __str__(self):
return '-' + str(self.surface.id) if self.side == '-' \
else str(self.surface.id)
def get_surfaces(self, surfaces=None):
"""
Returns the surface that this is a halfspace of.
Parameters
----------
surfaces: collections.OrderedDict, optional
Dictionary mapping surface IDs to :class:`openmc.Surface` instances
Returns
-------
surfaces: collections.OrderedDict
Dictionary mapping surface IDs to :class:`openmc.Surface` instances
"""
if surfaces is None:
surfaces = OrderedDict()
surfaces[self.surface.id] = self.surface
return surfaces
def clone(self, memo=None):
"""Create a copy of this halfspace, with a cloned surface with a
unique ID.
Parameters
----------
memo : dict or None
A nested dictionary of previously cloned objects. This parameter
is used internally and should not be specified by the user.
Returns
-------
clone : openmc.Halfspace
The clone of this halfspace
"""
if memo is None:
memo = dict()
clone = deepcopy(self)
clone.surface = self.surface.clone(memo)
return clone
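# Illustrative sketch (not part of the original module): half-spaces compose
# into regions with the operators defined above, e.g. the shell between two
# concentric spheres, assuming a Sphere surface class with a radius argument R:
#
#     >>> inner = openmc.Sphere(R=5.0)
#     >>> outer = openmc.Sphere(R=10.0)
#     >>> shell = +inner & -outer     # outside the inner, inside the outer
#     >>> complement = ~(-outer)      # __invert__ flips the half-space to +outer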
def get_rectangular_prism(width, height, axis='z', origin=(0., 0.),
boundary_type='transmission', corner_radius=0.):
"""Get an infinite rectangular prism from four planar surfaces.
Parameters
----------
width: float
Prism width in units of cm. The width is aligned with the y, x,
or x axes for prisms parallel to the x, y, or z axis, respectively.
height: float
Prism height in units of cm. The height is aligned with the z, z,
or y axes for prisms parallel to the x, y, or z axis, respectively.
axis : {'x', 'y', 'z'}
Axis with which the infinite length of the prism should be aligned.
Defaults to 'z'.
origin: Iterable of two floats
Origin of the prism. The two floats correspond to (y,z), (x,z) or
(x,y) for prisms parallel to the x, y or z axis, respectively.
Defaults to (0., 0.).
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}
Boundary condition that defines the behavior for particles hitting the
surfaces comprising the rectangular prism (default is 'transmission').
corner_radius: float
Prism corner radius in units of cm. Defaults to 0.
Returns
-------
openmc.Region
The inside of a rectangular prism
"""
check_type('width', width, Real)
check_type('height', height, Real)
check_type('corner_radius', corner_radius, Real)
check_value('axis', axis, ['x', 'y', 'z'])
check_type('origin', origin, Iterable, Real)
# Define function to create a plane on given axis
def plane(axis, name, value):
cls = globals()['{}Plane'.format(axis.upper())]
return cls(name='{} {}'.format(name, axis),
boundary_type=boundary_type,
**{axis + '0': value})
if axis == 'x':
x1, x2 = 'y', 'z'
elif axis == 'y':
x1, x2 = 'x', 'z'
else:
x1, x2 = 'x', 'y'
# Get cylinder class corresponding to given axis
cyl = globals()['{}Cylinder'.format(axis.upper())]
# Create rectangular region
min_x1 = plane(x1, 'minimum', -width/2 + origin[0])
max_x1 = plane(x1, 'maximum', width/2 + origin[0])
min_x2 = plane(x2, 'minimum', -height/2 + origin[1])
max_x2 = plane(x2, 'maximum', height/2 + origin[1])
prism = +min_x1 & -max_x1 & +min_x2 & -max_x2
# Handle rounded corners if given
if corner_radius > 0.:
args = {'R': corner_radius, 'boundary_type': boundary_type}
args[x1 + '0'] = origin[0] - width/2 + corner_radius
args[x2 + '0'] = origin[1] - height/2 + corner_radius
x1_min_x2_min = cyl(name='{} min {} min'.format(x1, x2), **args)
args[x1 + '0'] = origin[0] - width/2 + corner_radius
args[x2 + '0'] = origin[1] + height/2 - corner_radius
x1_min_x2_max = cyl(name='{} min {} max'.format(x1, x2), **args)
args[x1 + '0'] = origin[0] + width/2 - corner_radius
args[x2 + '0'] = origin[1] - height/2 + corner_radius
x1_max_x2_min = cyl(name='{} max {} min'.format(x1, x2), **args)
args[x1 + '0'] = origin[0] + width/2 - corner_radius
args[x2 + '0'] = origin[1] + height/2 - corner_radius
x1_max_x2_max = cyl(name='{} max {} max'.format(x1, x2), **args)
x1_min = plane(x1, 'min', -width/2 + origin[0] + corner_radius)
x1_max = plane(x1, 'max', width/2 + origin[0] - corner_radius)
x2_min = plane(x2, 'min', -height/2 + origin[1] + corner_radius)
x2_max = plane(x2, 'max', height/2 + origin[1] - corner_radius)
corners = (+x1_min_x2_min & -x1_min & -x2_min) | \
(+x1_min_x2_max & -x1_min & +x2_max) | \
(+x1_max_x2_min & +x1_max & -x2_min) | \
(+x1_max_x2_max & +x1_max & +x2_max)
prism = prism & ~corners
return prism
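# Illustrative sketch (not part of the original module): a 2 cm x 3 cm
# z-aligned prism with reflective walls and slightly rounded corners could be
# requested as
#
#     >>> prism = get_rectangular_prism(width=2., height=3.,
#     ...                               boundary_type='reflective',
#     ...                               corner_radius=0.1)
#
# The returned object is a Region (an intersection of half-spaces minus the
# corner regions), so it can be assigned directly to a cell's region.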
def get_hexagonal_prism(edge_length=1., orientation='y', origin=(0., 0.),
boundary_type='transmission', corner_radius=0.):
"""Create a hexagon region from six surface planes.
Parameters
----------
edge_length : float
Length of a side of the hexagon in cm
orientation : {'x', 'y'}
An 'x' orientation means that two sides of the hexagon are parallel to
the x-axis and a 'y' orientation means that two sides of the hexagon are
parallel to the y-axis.
origin: Iterable of two floats
Origin of the prism. Defaults to (0., 0.).
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}
Boundary condition that defines the behavior for particles hitting the
surfaces comprising the hexagonal prism (default is 'transmission').
corner_radius: float
Prism corner radius in units of cm. Defaults to 0.
Returns
-------
openmc.Region
The inside of a hexagonal prism
"""
l = edge_length
x, y = origin
if orientation == 'y':
right = XPlane(x0=x + sqrt(3.)/2*l, boundary_type=boundary_type)
left = XPlane(x0=x - sqrt(3.)/2*l, boundary_type=boundary_type)
c = sqrt(3.)/3.
# y = -x/sqrt(3) + a
upper_right = Plane(A=c, B=1., D=l+x*c+y, boundary_type=boundary_type)
# y = x/sqrt(3) + a
upper_left = Plane(A=-c, B=1., D=l-x*c+y, boundary_type=boundary_type)
# y = x/sqrt(3) - a
lower_right = Plane(A=-c, B=1., D=-l-x*c+y, boundary_type=boundary_type)
# y = -x/sqrt(3) - a
lower_left = Plane(A=c, B=1., D=-l+x*c+y, boundary_type=boundary_type)
prism = -right & +left & -upper_right & -upper_left & \
+lower_right & +lower_left
if boundary_type == 'periodic':
right.periodic_surface = left
upper_right.periodic_surface = lower_left
lower_right.periodic_surface = upper_left
elif orientation == 'x':
top = YPlane(y0=y + sqrt(3.)/2*l, boundary_type=boundary_type)
bottom = YPlane(y0=y - sqrt(3.)/2*l, boundary_type=boundary_type)
c = sqrt(3.)
# y = -sqrt(3)*(x - a)
upper_right = Plane(A=c, B=1., D=c*l+x*c+y, boundary_type=boundary_type)
# y = sqrt(3)*(x - a)
lower_right = Plane(A=-c, B=1., D=-c*l-x*c+y,
boundary_type=boundary_type)
# y = -sqrt(3)*(x + a)
lower_left = Plane(A=c, B=1., D=-c*l+x*c+y, boundary_type=boundary_type)
# y = sqrt(3)*(x + a)
upper_left = Plane(A=-c, B=1., D=c*l-x*c+y, boundary_type=boundary_type)
prism = -top & +bottom & -upper_right & +lower_right & \
+lower_left & -upper_left
if boundary_type == 'periodic':
top.periodic_surface = bottom
upper_right.periodic_surface = lower_left
lower_right.periodic_surface = upper_left
# Handle rounded corners if given
if corner_radius > 0.:
if boundary_type == 'periodic':
raise ValueError('Periodic boundary conditions not permitted when '
'rounded corners are used.')
c = sqrt(3.)/2
t = l - corner_radius/c
# Cylinder with corner radius and boundary type pre-applied
cyl1 = partial(ZCylinder, R=corner_radius, boundary_type=boundary_type)
cyl2 = partial(ZCylinder, R=corner_radius/(2*c),
boundary_type=boundary_type)
if orientation == 'x':
x_min_y_min_in = cyl1(name='x min y min in', x0=x-t/2, y0=y-c*t)
x_min_y_max_in = cyl1(name='x min y max in', x0=x+t/2, y0=y-c*t)
x_max_y_min_in = cyl1(name='x max y min in', x0=x-t/2, y0=y+c*t)
x_max_y_max_in = cyl1(name='x max y max in', x0=x+t/2, y0=y+c*t)
x_min_in = cyl1(name='x min in', x0=x-t, y0=y)
x_max_in = cyl1(name='x max in', x0=x+t, y0=y)
x_min_y_min_out = cyl2(name='x min y min out', x0=x-l/2, y0=y-c*l)
x_min_y_max_out = cyl2(name='x min y max out', x0=x+l/2, y0=y-c*l)
x_max_y_min_out = cyl2(name='x max y min out', x0=x-l/2, y0=y+c*l)
x_max_y_max_out = cyl2(name='x max y max out', x0=x+l/2, y0=y+c*l)
x_min_out = cyl2(name='x min out', x0=x-l, y0=y)
x_max_out = cyl2(name='x max out', x0=x+l, y0=y)
corners = (+x_min_y_min_in & -x_min_y_min_out |
+x_min_y_max_in & -x_min_y_max_out |
+x_max_y_min_in & -x_max_y_min_out |
+x_max_y_max_in & -x_max_y_max_out |
+x_min_in & -x_min_out |
+x_max_in & -x_max_out)
elif orientation == 'y':
x_min_y_min_in = cyl1(name='x min y min in', x0=x-c*t, y0=y-t/2)
x_min_y_max_in = cyl1(name='x min y max in', x0=x-c*t, y0=y+t/2)
x_max_y_min_in = cyl1(name='x max y min in', x0=x+c*t, y0=y-t/2)
x_max_y_max_in = cyl1(name='x max y max in', x0=x+c*t, y0=y+t/2)
y_min_in = cyl1(name='y min in', x0=x, y0=y-t)
y_max_in = cyl1(name='y max in', x0=x, y0=y+t)
x_min_y_min_out = cyl2(name='x min y min out', x0=x-c*l, y0=y-l/2)
x_min_y_max_out = cyl2(name='x min y max out', x0=x-c*l, y0=y+l/2)
x_max_y_min_out = cyl2(name='x max y min out', x0=x+c*l, y0=y-l/2)
x_max_y_max_out = cyl2(name='x max y max out', x0=x+c*l, y0=y+l/2)
y_min_out = cyl2(name='y min out', x0=x, y0=y-l)
y_max_out = cyl2(name='y max out', x0=x, y0=y+l)
corners = (+x_min_y_min_in & -x_min_y_min_out |
+x_min_y_max_in & -x_min_y_max_out |
+x_max_y_min_in & -x_max_y_min_out |
+x_max_y_max_in & -x_max_y_max_out |
+y_min_in & -y_min_out |
+y_max_in & -y_max_out)
prism = prism & ~corners
return prism
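# Illustrative sketch (not part of the original module): a hexagonal channel
# with 1.2 cm sides, two faces parallel to the y-axis, and periodic boundaries
# on opposite faces:
#
#     >>> hexagon = get_hexagonal_prism(edge_length=1.2, orientation='y',
#     ...                               boundary_type='periodic')
#
# Note that, as enforced above, 'periodic' cannot be combined with a nonzero
# corner_radius.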
|
py | b415ed6a411c43bc2a8babcbf1694dab7587f88c | import config
import fabric.api as fab
from tool.log import warn
cfg = config.config()
fab.use_ssh_config = True
fab.env.host_string = cfg.get('deploy', 'prod.host')
def deploy(update_env=None):
code_dir = cfg.get('dev.code.dir')
with fab.cd(code_dir):
fab.run('git pull --ff-only origin master')
if update_env:
fab.run('./dev update_env')
fab.run('./dev migrate')
restart_uwsgi_server('pub')
restart_uwsgi_server('op')
def restart_uwsgi_server(name):
try:
fab.run("pgrep -lf uwsgi-{}.sock | awk {'print $1'} | xargs kill -9".format(name))
except:
warn('unable to stop the uwsgi-{} process...'.format(name))
fab.run('./{}_up'.format(name))
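# Illustrative usage sketch (not part of the original script), assuming Fabric 1.x
# command-line task invocation:
#
#     fab deploy                 # pull master, migrate, restart both uwsgi servers
#     fab deploy:update_env=1    # additionally refresh the environment first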
|
py | b415ee17ff02b9d0fe0ef07215b8f85d1eefd633 | __all__ = ["arr","txt"]
|
py | b415ef08458c6c89050ff1143df416044067ce80 | import torch
import importlib
import json
from types import SimpleNamespace
__all__ = ['configure', 'config_string', 'get_dataloaders']
def configure(config_file):
'''Load a JSON configuration file and return a _Configuration object.
'''
cfg = _Configuration(config_file)
return cfg
class _Configuration(object):
'''Container for the model, dataset, loss, optimizer, trainer, and
hyperparameter settings parsed from a JSON configuration file.
'''
def __init__(self, config_file):
self.load(config_file)
def load(self, config_file):
cf = json.load(open(config_file))
# Network
model_def = cf['network']
model_pkg = model_def.get('package', None)
model_mod = importlib.import_module(model_def['module'], model_pkg)
model_class = getattr(model_mod, model_def['model'])
model_kwargs = model_def['kwargs']
# Dataset
data_def = cf['dataset']
if data_def['module']:
data_pkg = data_def.get('package', None)
data_mod = importlib.import_module(data_def['module'], data_pkg)
dataset = getattr(data_mod, data_def['dataset'])
data_kwargs = data_def['kwargs']
else:
dataset = None
data_kwargs = {}
# Loss
loss_def = cf['loss']
if loss_def['module']:
loss_pkg = loss_def.get('package', None)
loss_mod = importlib.import_module(loss_def['module'], loss_pkg)
loss = getattr(loss_mod, loss_def['loss'])
loss_kwargs = loss_def['kwargs']
else:
loss = None
loss_kwargs = {}
# Optimizer
optim_def = cf.get('optimizer', False)
if optim_def:
opt_pkg = optim_def.get('package', None)
opt_mod = importlib.import_module(optim_def['module'], opt_pkg)
opt = getattr(opt_mod, optim_def['optim'])
opt_kwargs = optim_def['kwargs']
else:
opt = None
opt_kwargs = {}
# Trainer
trainer_def = cf['trainer']
if trainer_def['module']:
t_pkg = trainer_def.get('package', None)
trainer_mod = importlib.import_module(trainer_def['module'], t_pkg)
trainer = getattr(trainer_mod, trainer_def['trainer'])
else:
trainer = None
# Hyperparameters
params = cf['hyperparams']
# Other
other = cf.get('other', {})
self.model = SimpleNamespace(model=model_class, args=model_kwargs)
self.data = SimpleNamespace(data=dataset, args=data_kwargs)
self.loss = SimpleNamespace(loss=loss, args=loss_kwargs)
self.optim = SimpleNamespace(optim=opt, args=opt_kwargs)
self.trainer = trainer
self.hp = params
self.other = other
def __repr__(self):
s = 'Configuration{{\n{}\n}}'.format(
'\n'.join('{}: {!r}'.format(k, v) for k, v in self.__dict__.items()))
return s
def __str__(self):
return repr(self)
def config_string(config):
'''Return a (description, details) tuple summarizing a configuration file
path or an already-loaded configuration dict.
'''
if type(config) is str:
config = json.load(open(config))
j = config
description = j.pop('description')
details = ''
hp = j.pop('hyperparams', None)
for k, v in j.items():
v.pop('package', None)
v.pop('module', None)
kwargs = v.pop('kwargs', None)
others = list(v.values())
if others and others[0]:
details += f' {k}: {others[0]} {kwargs}\n'
details += f' {hp}\n'
return description, details
def get_dataloaders(data, batch_size, num_workers=8, collate=None):
'''Build training and test DataLoaders from a dataset container exposing
train() and test() methods.
'''
args = dict(
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
)
if collate is not None:
args['collate_fn'] = collate
trl = torch.utils.data.DataLoader(data.train(), shuffle=True, **args)
tre = torch.utils.data.DataLoader(data.test(), **args)
return trl, tre
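# Illustrative sketch (not part of the original module): typical usage, assuming
# an 'experiment.json' config file exists and its hyperparams section contains a
# 'batch_size' key:
#
#     >>> cfg = configure('experiment.json')
#     >>> dataset = cfg.data.data(**cfg.data.args)
#     >>> train_loader, test_loader = get_dataloaders(dataset, cfg.hp['batch_size'])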
|
py | b415f0473b71775a411e30ce3e025eae690735d2 | from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for URI value used to import a resource into
the local data store.
NOTE: the actual import logic is handled by the edit form handler:
the renderer just ensures appropriate values are returned.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_field_edit_value,
get_field_view_value
)
from django.template import Template, Context
# ----------------------------------------------------------------------------
#
# Link URI value mapping
#
# ----------------------------------------------------------------------------
class URIImportValueMapper(RenderBase):
"""
Value mapper class for token list
"""
@classmethod
def encode(cls, data_value):
"""
Extracts import URL from value structure, for field display.
"""
return (data_value or {}).get('import_url', "")
@classmethod
def decode(cls, field_value):
"""
Returns textual link value from import URL field value
"""
return field_value or ""
def decode_store(self, field_value, entityvals, property_uri):
"""
Decodes a supplied value and uses it to update the 'import_url'
field of an URI import field.
"""
u = self.decode(field_value)
v = entityvals.get(property_uri, {})
if isinstance(v, dict):
v['import_url'] = u
else:
v = {'import_url': u}
entityvals[property_uri] = v
return v
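# Illustrative sketch (not part of the original module): the mapper keeps the
# link inside an {'import_url': ...} structure and pulls it back out for display:
#
#     >>> URIImportValueMapper.encode({'import_url': "http://example.org/data.csv"})
#     'http://example.org/data.csv'
#     >>> URIImportValueMapper.decode("http://example.org/data.csv")
#     'http://example.org/data.csv'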
# ----------------------------------------------------------------------------
#
# Import value templates
#
# ----------------------------------------------------------------------------
view_import = (
"""<a href="%s" target="_blank">%s</a>""")
edit_import = (
"""<!-- fields.uri_import_edit_renderer -->
<div class="row">
<div class="small-10 columns view-value view-subfield less-import-button">
<input type="text" size="64" name="{{repeat_prefix}}{{field.description.field_name}}"
placeholder="{{field.description.field_placeholder}}"
value="{{encoded_field_value}}" />
</div>
<div class="small-2 columns view-value view-subfield import-button left small-text-right">
<input type="submit" name="{{repeat_prefix}}{{field.description.field_name}}__import" value="Import" />
</div>
</div>
""")
# ----------------------------------------------------------------------------
#
# Link URI field renderers
#
# ----------------------------------------------------------------------------
class uri_import_view_renderer(object):
def render(self, context):
"""
Render import link for viewing.
"""
linkval = URIImportValueMapper.encode(get_field_view_value(context, ""))
common_prefixes = (
[ "http://", "https://"
, "file:///", "file://localhost/", "file://"
, "mailto:"]
)
textval = linkval
for p in common_prefixes:
if linkval.startswith(p):
textval = linkval[len(p):]
break
return view_import%(linkval, textval)
class uri_import_edit_renderer(object):
def __init__(self):
self._template = Template(edit_import)
return
def render(self, context):
"""
Render import link for editing
"""
val = URIImportValueMapper.encode(get_field_edit_value(context, None))
with context.push(encoded_field_value=val):
result = self._template.render(context)
return result
def get_uri_import_renderer():
"""
Return field renderer object for uri import value
"""
return RenderFieldValue("uri_import",
view_renderer=uri_import_view_renderer(),
edit_renderer=uri_import_edit_renderer(),
)
# End.
|
py | b415f0963e7e70c7573160f2033d8088a4bc7286 | import json
from exopy.tasks.api import (InstrumentTask)
from atom.api import Float, Unicode, Str, set_default
from qm.qua import *
class GetResultsTask(InstrumentTask):
""" Retrieves the variable results from the opx into the exopy database under 'variables'.
The returned value will be a dictionary, with the key being the name of the variable saved in qua and the value
being a dictionary with the saved variable data (see the qua documentation for more information)
"""
database_entries = set_default({'variables': {}, 'raw': {}})
results_file_path = Unicode().tag(pref=True)
def __init__(self, **kwargs):
super().__init__(**kwargs)
def perform(self):
results = self.driver.get_results()
var_dict = {}
for k in results.variable_results.__dict__:
data = getattr(results.variable_results, k).data
ts_in_ns = getattr(results.variable_results, k).ts_in_ns
possible_data_loss = getattr(results.variable_results, k).possible_data_loss
var_dict[k] = {
"data": data,
"ts_in_ns": ts_in_ns,
"possible_data_loss": possible_data_loss
}
self.write_in_database('variables', var_dict)
raw_dict = {}
for k in results.raw_results.__dict__:
input1_data = getattr(results.raw_results, k).input1_data
input2_data = getattr(results.raw_results, k).input2_data
ts_in_ns = getattr(results.raw_results, k).ts_in_ns
data_loss = getattr(results.raw_results, k).data_loss
raw_dict[k] = {
"input1_data": input1_data,
"input2_data": input2_data,
"ts_in_ns": ts_in_ns,
"data_loss": data_loss
}
self.write_in_database('raw', raw_dict)
all_dict = {'raw': raw_dict, 'variables': var_dict}
json_str = json.dumps(all_dict)
with open(self.results_file_path, 'w') as writer:
writer.write(json_str)
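# Illustrative note (not part of the original module): the 'variables' database
# entry written above maps each saved QUA variable name to a dict of its fields,
# so a consumer of the database (the variable name 'I' below is hypothetical)
# would see something like
#
#     {'I': {'data': [...], 'ts_in_ns': [...], 'possible_data_loss': [...]}}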
|
py | b415f0b971f514f69c2cb3dbce03baec405e84cf | #!/usr/bin/env python
import fileinput
import re
import numpy as np
num = re.compile(r"\d+")
m = np.zeros((50, 6), dtype=np.int)
for line in fileinput.input():
a, b = map(int, num.findall(line))
if "rect" in line:
m[:a, :b] = 1
elif "column" in line:
m[a] = np.roll(m[a], b)
else:
m[:, a] = np.roll(m[:, a], b)
assert int(np.sum(m)) == 106
print(
"\n".join(map(lambda x: "".join(map(str, x)), m.T))
.replace("0", " ")
.replace("1", "@")
)
"""
@@ @@@@ @ @@@@ @ @@ @ @@@@@ @@ @@@
@ @ @ @ @ @ @ @ @ @@ @ @ @
@ @@@ @ @@@ @ @ @ @ @ @@@ @ @
@ @ @ @ @ @ @ @ @ @ @@
@ @ @ @ @ @ @ @ @ @ @ @ @
@@ @ @@@@ @@@@ @@@@ @@ @ @ @@ @@@
"""
|
py | b415f0d6b8b5a0cf78c3cbf7805ef7a1e3013aee | # -*- coding: utf-8 -*-
import sqlite3
from telebot import *
import telebot
import random
import threading
import requests
import json
lock = threading.Lock()
# database connect
global db, sql
db = sqlite3.connect('newbot.db', check_same_thread=False)
sql = db.cursor()
sql.execute("""CREATE TABLE IF NOT EXISTS files (
usid BIGINT,
pass TEXT,
channel BIGINT,
info TEXT,
fake TEXT,
type TEXT
)""")
db.commit()
# telegram api connect
with open('config.json', 'r') as config_file:
config = json.load(config_file)
token = config["token"]
bot = telebot.TeleBot(token)
# handlers
# start message
@bot.message_handler(commands=["start"])
def start(message):
info = telebot.util.extract_arguments(message.text)
sql.execute(f"SELECT pass FROM files WHERE pass = '{info}'")
if sql.fetchone() is None:
bot.send_message(message.chat.id, "<b>Hi!</b>\n\n<i>I can hide the text of a message from anyone who is not subscribed to your channel.</i>\n\n<b>All you need to do is:</b>\n\n1. Add me to your channel and make me an admin with the rights 'Change channel info' and 'Add members'\n2. Get your channel's ID. You can do this by forwarding a message from your channel to the bot @myidbot\n3. Send the /new command and follow the instructions\nI also support <b>Audio files and Voice messages!</b>\nTo upload an audio file to this bot, just send me the audio file or voice message and follow the instructions (no need to type /new)!\n\nDeveloper: @kirillsaint_info, https://kirillsaint.xyz\nContact the developer: @saintfukk2, @kirillsaint_bot\nDeveloper's blog: @kirillsaint", parse_mode="HTML")
else:
try:
for ci in sql.execute(f"SELECT channel FROM files WHERE pass = '{info}'"):
CHAT_ID = ci[0]
USER_ID = message.chat.id
response = requests.get(f'https://api.telegram.org/bot{token}/getChatMember?chat_id={CHAT_ID}&user_id={USER_ID}')
status = json.loads(response.text)["result"]["status"]
if status == 'left':
sub = False
else:
sub = True
except:
sub = False
try:
if sub == True:
for t in sql.execute(f"SELECT type FROM files WHERE pass = '{info}'"):
typ = t[0]
print(typ)
if typ == 'text':
for i in sql.execute(f"SELECT info FROM files WHERE pass = '{info}'"):
info = i[0]
bot.send_message(message.chat.id, info)
elif typ == 'audio':
bot.send_audio(message.chat.id, open(f"{info}-audio.mp3", "rb"))
elif typ == 'voice':
bot.send_voice(message.chat.id, open(f"{info}-voice.ogg", "rb"))
else:
for f in sql.execute(f"SELECT fake FROM files WHERE pass = '{info}'"):
fake = f[0]
bot.send_message(message.chat.id, fake)
except:
try:
for f in sql.execute(f"SELECT fake FROM files WHERE pass = '{info}'"):
fake = f[0]
bot.send_message(message.chat.id, fake)
except:
bot.send_message(message.chat.id, "Some error occurred!")
# new message
@bot.message_handler(commands=["new"])
def new1(message):
bot.send_message(message.chat.id, "*ะะฒะตะดะธัะต ID ะบะฐะฝะฐะปะฐ:* ", parse_mode="Markdown")
bot.register_next_step_handler(message, new2)
# functions
def new2(message):
try:
global ch
ch = int(message.text)
bot.send_message(message.chat.id, "*ะะฒะตะดะธัะต ัะตะบัั ัะตะบัะตัะฝะพะณะพ ัะพะพะฑัะตะฝะธั:* ", parse_mode="Markdown")
bot.register_next_step_handler(message, new3)
except:
bot.send_message(message.chat.id, "That is not a channel ID! ", parse_mode="Markdown")
def new3(message):
global info
info = message.text
bot.send_message(message.chat.id, "*ะะฒะตะดะธัะต ัะตะบัั ัะพะพะฑัะตะฝะธั, ะบะพัะพัะพะต ะฑัะดะตั ะพัะฟัะฐะฒะปััััั ะฒ ัะปััะฐะต ะตัะปะธ ะฟะพะปัะทะพะฒะฐัะตะปั ะฝะต ะฟะพะดะฟะธัะฐะฝ ะฝะฐ ะบะฐะฝะฐะป:* ", parse_mode="Markdown")
bot.register_next_step_handler(message, new4)
def new4(message):
global fake
fake = message.text
try:
chars = 'abcdefghyjklmnopqrstuvwxyz'
chars += chars.upper()
nums = str(1234567890)
chars += nums
length = 8
pas = "".join(random.sample(chars, length))
finfo = [message.chat.id, pas, ch, info, fake, 'text']
sql.execute(f"INSERT INTO files VALUES (?, ?, ?, ?, ?, ?)", finfo)
db.commit()
bot.send_message(message.chat.id, f"*ะขะฒะพะน ัะตะบัั ะดะพัััะฟะตะฝ ะฟะพ ัััะปะบะต:* https://t.me/channeltextbot?start={pas}", parse_mode="Markdown")
except:
bot.send_message(message.chat.id, f"ะัะธะฑะบะฐ!")
@bot.message_handler(content_types=["audio"])
def audio(message):
try:
chars = 'abcdefghyjklmnopqrstuvwxyz'
chars += chars.upper()
nums = str(1234567890)
chars += nums
length = 9
global pas
global f
global fa
pas = "".join(random.sample(chars, length))
file_info = bot.get_file(message.audio.file_id)
file = requests.get('https://api.telegram.org/file/bot{0}/{1}'.format(token, file_info.file_path))
with open(f'{pas}-audio.mp3','wb') as f:
f.write(file.content)
fa = f'{pas}-audio.mp3'
bot.send_message(message.chat.id, "*ะะฒะตะดะธัะต ัะตะบัั ัะพะพะฑัะตะฝะธั, ะบะพัะพัะพะต ะฑัะดะตั ะพัะฟัะฐะฒะปััััั ะฒ ัะปััะฐะต ะตัะปะธ ะฟะพะปัะทะพะฒะฐัะตะปั ะฝะต ะฟะพะดะฟะธัะฐะฝ ะฝะฐ ะบะฐะฝะฐะป:* ", parse_mode="Markdown")
bot.register_next_step_handler(message, newfile1)
except:
bot.send_message(message.chat.id, 'Error!')
def newfile1(message):
try:
global fake
fake = message.text
bot.send_message(message.chat.id, "*ะะฒะตะดะธัะต ID ะบะฐะฝะฐะปะฐ:* ", parse_mode="Markdown")
bot.register_next_step_handler(message, newfile)
except:
bot.send_message(message.chat.id, 'Error!')
def newfile(message):
try:
ch = message.text
finfo = [message.chat.id, pas, ch, fa, fake, 'audio']
sql.execute(f"INSERT INTO files VALUES (?, ?, ?, ?, ?, ?)", finfo)
db.commit()
bot.send_message(message.chat.id, f"*ะขะฒะพะน ัะฐะนะป ะดะพัััะฟะตะฝ ะฟะพ ัััะปะบะต:* https://t.me/channeltextbot?start={pas}", parse_mode="Markdown")
except:
bot.send_message(message.chat.id, 'Error!')
@bot.message_handler(content_types=["voice"])
def voice(message):
try:
chars = 'abcdefghyjklmnopqrstuvwxyz'
chars += chars.upper()
nums = str(1234567890)
chars += nums
length = 9
global pas
global f
global fa
pas = "".join(random.sample(chars, length))
file_info = bot.get_file(message.voice.file_id)
file = requests.get('https://api.telegram.org/file/bot{0}/{1}'.format(token, file_info.file_path))
with open(f'{pas}-voice.ogg','wb') as f:
f.write(file.content)
fa = f'{pas}-voice.ogg'
bot.send_message(message.chat.id, "*ะะฒะตะดะธัะต ัะตะบัั ัะพะพะฑัะตะฝะธั, ะบะพัะพัะพะต ะฑัะดะตั ะพัะฟัะฐะฒะปััััั ะฒ ัะปััะฐะต ะตัะปะธ ะฟะพะปัะทะพะฒะฐัะตะปั ะฝะต ะฟะพะดะฟะธัะฐะฝ ะฝะฐ ะบะฐะฝะฐะป:* ", parse_mode="Markdown")
bot.register_next_step_handler(message, newvoice1)
except:
bot.send_message(message.chat.id, 'Error!')
def newvoice1(message):
try:
global fake
fake = message.text
bot.send_message(message.chat.id, "*ะะฒะตะดะธัะต ID ะบะฐะฝะฐะปะฐ:* ", parse_mode="Markdown")
bot.register_next_step_handler(message, newvoice)
except:
bot.send_message(message.chat.id, 'Error!')
def newvoice(message):
try:
ch = message.text
finfo = [message.chat.id, pas, ch, fa, fake, 'voice']
sql.execute(f"INSERT INTO files VALUES (?, ?, ?, ?, ?, ?)", finfo)
db.commit()
bot.send_message(message.chat.id, f"*ะขะฒะพะต ะณะพะปะพัะพะฒะพะต ัะพะพะฑัะตะฝะธะต ะดะพัััะฟะตะฝ ะฟะพ ัััะปะบะต:* https://t.me/channeltextbot?start={pas}", parse_mode="Markdown")
except:
bot.send_message(message.chat.id, 'Error!')
# polling
bot.polling(none_stop=True)
# by @kirillsaint / https://github.com/kirillsaint/
|
py | b415f1adfa1e4f6179b407404085118930103dac | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.tpu.experimental namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.tpu.device_assignment import DeviceAssignment
from tensorflow.python.tpu.feature_column_v2 import embedding_column_v2 as embedding_column
from tensorflow.python.tpu.feature_column_v2 import shared_embedding_columns_v2 as shared_embedding_columns
from tensorflow.python.tpu.tpu_embedding import AdagradParameters
from tensorflow.python.tpu.tpu_embedding import AdamParameters
from tensorflow.python.tpu.tpu_embedding import StochasticGradientDescentParameters
from tensorflow.python.tpu.tpu_strategy_util import initialize_tpu_system
del _print_function
from tensorflow.python.util import module_wrapper as _module_wrapper
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
_sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
_sys.modules[__name__], "compat.v1.tpu.experimental", public_apis=None, deprecation=False,
has_lite=False)
|
py | b415f240090fc57995b3b96eee5b517c55a36301 | from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from parameterized import parameterized
from tests import utils
class SimpleClampModel(torch.nn.Module):
def __init__(self, min, max):
super(SimpleClampModel, self).__init__()
self.min = min
self.max = max
def forward(self, input):
return torch.clamp(input, self.min, self.max)
class TestClamp(unittest.TestCase):
@parameterized.expand(
[
("basic", 0.0, 0.8),
("no_min", None, 0.8),
("no_max", 0.0, None),
]
)
def test_clamp(self, _, min, max):
"""Test of the PyTorch clamp Node on Glow."""
utils.compare_tracing_methods(
SimpleClampModel(min, max), torch.randn(7), fusible_ops={"aten::clamp"}
)
|
py | b415f3d781ea11a63a5dd8580b0b204156296613 | from apps.common.permissions import (IsObjectOwner, ReadOnly)
class CVitaeViewsetBasicsMixin(object):
def get_username(self):
return self.kwargs.get('username')
def get_permissions(self):
if self.request.user.is_authenticated \
and self.get_username()== self.request.user.username:
permission_classes = [IsObjectOwner]
else:
permission_classes = [ReadOnly]
return [permission() for permission in permission_classes]
def perform_create(self, serializer):
serializer.save(created_by=self.request.user)
def get_serializer_context(self):
return {'request': self.request}
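# Illustrative sketch (not part of the original module): a viewset would mix this
# in ahead of a DRF ModelViewSet so the ownership-based permissions apply
# (the serializer, model, and viewset names below are hypothetical):
#
#     class CVitaeViewSet(CVitaeViewsetBasicsMixin, viewsets.ModelViewSet):
#         serializer_class = CVitaeSerializer
#
#         def get_queryset(self):
#             return CVitae.objects.filter(created_by__username=self.get_username())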
|
py | b415f47bdc8eb5a3053616a2f4c87d99cbe93437 | """Market underlying the BLP model."""
import functools
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from .. import exceptions, options
from ..configurations.iteration import ContractionResults, Iteration
from ..economies.economy import Economy
from ..moments import EconomyMoments, MarketMoments, FirstChoiceCovarianceMoment
from ..parameters import (
LinearCoefficient, NonlinearCoefficient, Parameter, Parameters, PiParameter, RhoParameter, SigmaParameter
)
from ..primitives import Container
from ..utilities.algebra import approximately_invert, approximately_solve
from ..utilities.basics import Array, RecArray, Error, Groups, SolverStats, update_matrices
class Market(Container):
"""A market underlying the BLP model."""
groups: Groups
costs_type: str
J: int
I: int
K1: int
K2: int
K3: int
D: int
H: int
sigma: Array
pi: Array
beta: Optional[Array]
rho_size: int
group_rho: Array
rho: Array
delta: Optional[Array]
mu: Array
parameters: Parameters
moments: Optional[MarketMoments]
def __init__(
self, economy: Economy, t: Any, parameters: Parameters, sigma: Array, pi: Array, rho: Array,
beta: Optional[Array] = None, delta: Optional[Array] = None, moments: Optional[EconomyMoments] = None,
data_override: Optional[Dict[str, Array]] = None, agents_override: Optional[RecArray] = None) -> None:
"""Store or compute information about formulations, data, parameters, and utility."""
# structure relevant data
super().__init__(
economy.products[economy._product_market_indices[t]],
economy.agents[economy._agent_market_indices[t]] if agents_override is None else agents_override
)
# drop unneeded product data fields to save memory
products_update_mapping = {}
for key in ['market_ids', 'demand_ids', 'supply_ids', 'clustering_ids', 'X1', 'X3', 'ZD', 'ZS']:
products_update_mapping[key] = (None, self.products[key].dtype)
self.products = update_matrices(self.products, products_update_mapping)
# drop unneeded agent data fields and fill missing columns of integration nodes (associated with zeros in sigma)
# with zeros
agents_update_mapping = {'market_ids': (None, self.agents.market_ids.dtype)}
if not parameters.nonzero_sigma_index.all():
nodes = np.zeros((self.agents.shape[0], economy.K2), self.agents.nodes.dtype)
nodes[:, parameters.nonzero_sigma_index] = self.agents.nodes[:, :parameters.nonzero_sigma_index.sum()]
agents_update_mapping['nodes'] = (nodes, nodes.dtype)
self.agents = update_matrices(self.agents, agents_update_mapping)
# create nesting groups
self.groups = Groups(self.products.nesting_ids)
# store other configuration information
self.costs_type = economy.costs_type
# count dimensions
self.J = self.products.shape[0]
self.I = self.agents.shape[0]
self.K1 = economy.K1
self.K2 = economy.K2
self.K3 = economy.K3
self.D = economy.D
self.H = self.groups.group_count
# override any data
if data_override is not None:
for name, variable in data_override.items():
self.products[name][:] = variable[economy._product_market_indices[t]]
for index, formulation in enumerate(self._X2_formulations):
if any(n in formulation.names for n in data_override):
self.products.X2[:, [index]] = formulation.evaluate(self.products)
# store parameters (expand rho to all groups and all products)
self.parameters = parameters
self.sigma = sigma
self.pi = pi
self.beta = beta
self.rho_size = rho.size
if self.rho_size == 1:
self.group_rho = np.full((self.H, 1), float(rho))
self.rho = np.full((self.J, 1), float(rho))
else:
self.group_rho = rho[np.searchsorted(economy.unique_nesting_ids, self.groups.unique)]
self.rho = self.groups.expand(self.group_rho)
# store delta and compute mu
self.delta = None if delta is None else delta[economy._product_market_indices[t]]
self.mu = self.compute_mu()
# store moments relevant to this market
self.moments = None if moments is None else MarketMoments(moments, t)
def get_membership_matrix(self) -> Array:
"""Build a membership matrix from nesting IDs."""
tiled_ids = np.tile(self.products.nesting_ids, self.J)
return np.where(tiled_ids == tiled_ids.T, 1, 0)
def get_ownership_matrix(self, firm_ids: Optional[Array] = None, ownership: Optional[Array] = None) -> Array:
"""Get a pre-computed ownership matrix or build one. By default, use unchanged firm IDs."""
if ownership is not None:
return ownership[:, :self.J]
if firm_ids is not None:
tiled_ids = np.tile(firm_ids, self.J)
return np.where(tiled_ids == tiled_ids.T, 1, 0)
if self.products.ownership.shape[1] > 0:
return self.products.ownership[:, :self.J]
tiled_ids = np.tile(self.products.firm_ids, self.J)
return np.where(tiled_ids == tiled_ids.T, 1, 0)
def compute_random_coefficients(self, sigma: Optional[Array] = None, pi: Optional[Array] = None) -> Array:
"""Compute the random coefficients by weighting agent characteristics with nonlinear parameters. By default, use
unchanged parameters.
"""
if sigma is None:
sigma = self.sigma
if pi is None:
pi = self.pi
coefficients = sigma @ self.agents.nodes.T
if self.D > 0:
coefficients += pi @ self.agents.demographics.T
return coefficients
def compute_mu(
self, X2: Optional[Array] = None, sigma: Optional[Array] = None, pi: Optional[Array] = None) -> Array:
"""Compute mu. By default, use unchanged X2 and parameters."""
if X2 is None:
X2 = self.products.X2
return X2 @ self.compute_random_coefficients(sigma, pi)
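    # Illustrative note (not part of the original class): with J products, I agents,
    # and K2 nonlinear characteristics, X2 is J x K2 and the random coefficients
    # (sigma @ nodes' + pi @ demographics') are K2 x I, so mu is the J x I matrix
    # with elements mu_ji = x2_j' (sigma v_i + pi d_i) used throughout the
    # probability computations below.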
def compute_default_bounds(self, parameters: List[Parameter]) -> List[Tuple[Array, Array]]:
"""Compute default bounds for nonlinear parameters."""
# define a function to normalize bounds
def normalize(x: float) -> float:
"""Reduce an initial parameter bound by 1% and round it to two significant figures."""
if not np.isfinite(x) or x == 0:
return x
reduced = 0.99 * x
return np.round(reduced, 1 + int(reduced < 1) - int(np.log10(reduced)))
# compute common components of default bounds
mu_norm = np.abs(self.mu).max()
mu_max = np.log(np.finfo(np.float64).max)
bounds: List[Tuple[Array, Array]] = []
# compute the bounds parameter-by-parameter
with np.errstate(divide='ignore', invalid='ignore'):
for parameter in parameters:
if isinstance(parameter, SigmaParameter):
sigma = self.sigma.copy()
sigma[parameter.location] = 0
additional_mu_norm = np.abs(self.compute_mu(sigma=sigma)).max()
v_norm = np.abs(parameter.get_agent_characteristic(self)).max()
x_norm = np.abs(parameter.get_product_characteristic(self)).max()
bound = normalize(max(0, mu_max - additional_mu_norm) / v_norm / x_norm)
bounds.append((-bound if parameter.location[0] != parameter.location[1] else 0, bound))
elif isinstance(parameter, PiParameter):
pi = self.pi.copy()
pi[parameter.location] = 0
additional_mu_norm = np.abs(self.compute_mu(pi=pi)).max()
v_norm = np.abs(parameter.get_agent_characteristic(self)).max()
x_norm = np.abs(parameter.get_product_characteristic(self)).max()
bound = normalize(max(0, mu_max - additional_mu_norm) / v_norm / x_norm)
bounds.append((-bound, bound))
else:
assert isinstance(parameter, RhoParameter)
bounds.append((0, normalize(1 - min(1, mu_norm / mu_max))))
return bounds
def update_delta_with_variable(self, name: str, variable: Array) -> Array:
"""Update delta to reflect a changed variable by adding any parameter-weighted characteristic changes to X1."""
assert self.beta is not None and self.delta is not None
# if the variable does not contribute to X1, delta remains unchanged
if not any(name in f.names for f in self._X1_formulations):
return self.delta
# if the variable does contribute to X1, delta may change
delta = self.delta.copy()
override = {name: variable}
for index, formulation in enumerate(self._X1_formulations):
if name in formulation.names:
delta += self.beta[index] * (formulation.evaluate(self.products, override) - self.products[name])
return delta
def update_mu_with_variable(self, name: str, variable: Array) -> Array:
"""Update mu to reflect a changed variable by re-computing mu under the changed X2."""
# if the variable does not contribute to X2, mu remains unchanged
if not any(name in f.names for f in self._X2_formulations):
return self.mu
# if the variable does contribute to X2, mu may change
X2 = self.products.X2.copy()
override = {name: variable}
for index, formulation in enumerate(self._X2_formulations):
if name in formulation.names:
X2[:, [index]] = formulation.evaluate(self.products, override)
return self.compute_mu(X2)
def compute_X1_derivatives(self, name: str, variable: Optional[Array] = None) -> Array:
"""Compute derivatives of X1 with respect to a variable. By default, use unchanged variable values."""
override = None if variable is None else {name: variable}
derivatives = np.zeros((self.J, self.K1), options.dtype)
for index, formulation in enumerate(self._X1_formulations):
if name in formulation.names:
derivatives[:, [index]] = formulation.evaluate_derivative(name, self.products, override)
return derivatives
def compute_X2_derivatives(self, name: str, variable: Optional[Array] = None) -> Array:
"""Compute derivatives of X2 with respect to a variable. By default, use unchanged variable values."""
override = None if variable is None else {name: variable}
derivatives = np.zeros((self.J, self.K2), options.dtype)
for index, formulation in enumerate(self._X2_formulations):
if name in formulation.names:
derivatives[:, [index]] = formulation.evaluate_derivative(name, self.products, override)
return derivatives
def compute_utility_derivatives(self, name: str, variable: Optional[Array] = None) -> Array:
"""Compute derivatives of utility with respect to a variable. By default, use unchanged variable values."""
assert self.beta is not None
derivatives = np.tile(self.compute_X1_derivatives(name, variable) @ np.nan_to_num(self.beta), self.I)
if self.K2 > 0:
derivatives += self.compute_X2_derivatives(name, variable) @ self.compute_random_coefficients()
return derivatives
def compute_probabilities(
self, delta: Array = None, mu: Optional[Array] = None, linear: bool = True, safe: bool = True,
utility_reduction: Optional[Array] = None, numerator: Optional[Array] = None,
eliminate_outside: bool = False, eliminate_product: Optional[int] = None) -> Tuple[Array, Optional[Array]]:
"""Compute choice probabilities. By default, use unchanged delta and mu values. If linear is False, delta and mu
must be specified and already be exponentiated. If safe is True, scale the logit equation by the exponential of
negative the maximum utility for each agent, and if utility_reduction is specified, it should be values that
have already been subtracted from the specified utility for each agent. If the numerator is specified, it will
be used as the numerator in the non-nested logit expression. If eliminate_outside is True, eliminate the outside
option from the choice set. If eliminate_product is specified, eliminate the product associated with the
specified index from the choice set.
"""
if delta is None:
assert self.delta is not None
delta = self.delta
if mu is None:
mu = self.mu
if self.K2 == 0:
mu = int(not linear)
# compute exponentiated utilities, optionally re-scaling the logit expression
if not linear:
exp_utilities = np.array(delta * mu)
if self.H > 0:
exp_utilities **= 1 / (1 - self.rho)
else:
utilities = delta + mu
if self.H > 0:
utilities /= 1 - self.rho
if safe:
utility_reduction = np.clip(utilities.max(axis=0, keepdims=True), 0, None)
utilities -= utility_reduction
exp_utilities = np.exp(utilities)
# compute any components used to re-scale the logit expression
scale = scale_weights = 1
if utility_reduction is not None:
if self.H == 0:
scale = np.exp(-utility_reduction)
else:
scale = np.exp(-utility_reduction * (1 - self.group_rho))
if self.rho_size > 1:
scale_weights = np.exp(-utility_reduction[None] * (self.group_rho.T - self.group_rho)[..., None])
# optionally eliminate the outside option from the choice set
if eliminate_outside:
scale = 0
# optionally eliminate a product from the choice set
if eliminate_product is not None:
exp_utilities[eliminate_product] = 0
# compute standard probabilities
if self.H == 0:
if numerator is None:
numerator = exp_utilities
probabilities = numerator / (scale + exp_utilities.sum(axis=0, keepdims=True))
return probabilities, None
# compute nested probabilities
exp_inclusives = self.groups.sum(exp_utilities)
with np.errstate(divide='ignore', invalid='ignore'):
exp_weighted_inclusives = np.exp(np.log(exp_inclusives) * (1 - self.group_rho))
conditionals = exp_utilities / self.groups.expand(exp_inclusives)
exp_weighted_inclusives[~np.isfinite(exp_weighted_inclusives)] = 0
conditionals[~np.isfinite(conditionals)] = 0
marginals = exp_weighted_inclusives / (scale + (scale_weights * exp_weighted_inclusives[None]).sum(axis=1))
probabilities = conditionals * self.groups.expand(marginals)
return probabilities, conditionals
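    # Illustrative note (not part of the original class): in the simple non-nested,
    # linear case the code above reduces to the standard logit expression
    #
    #     s_ji = exp(delta_j + mu_ji) / (1 + sum_k exp(delta_k + mu_ki)),
    #
    # where the optional utility_reduction only re-scales numerator and denominator
    # for numerical stability; the nested branch additionally factors s_ji into a
    # conditional (within-group) probability times a marginal (group) probability.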
def compute_delta(
self, initial_delta: Array, iteration: Iteration, fp_type: str) -> Tuple[Array, SolverStats, List[Error]]:
"""Compute the mean utility for this market that equates market shares to observed values by solving a fixed
point problem.
"""
errors: List[Error] = []
# if there is no heterogeneity, use the closed-form solution
if self.K2 == 0:
log_shares = np.log(self.products.shares)
log_outside_share = np.log(1 - self.products.shares.sum())
delta = log_shares - log_outside_share
if self.H > 0:
log_group_shares = np.log(self.groups.expand(self.groups.sum(self.products.shares)))
delta -= self.rho * (log_shares - log_group_shares)
return delta, SolverStats(), errors
# solve for delta with a linear fixed point
if 'linear' in fp_type:
log_shares = np.log(self.products.shares)
compute_probabilities = functools.partial(self.compute_probabilities, safe='safe' in fp_type)
# define the linear contraction
if self.H == 0:
def contraction(x: Array) -> ContractionResults:
"""Compute the next linear delta and optionally its Jacobian."""
probabilities = compute_probabilities(x)[0]
shares = probabilities @ self.agents.weights
x = x + log_shares - np.log(shares)
if not iteration._compute_jacobian:
return x, None, None
weighted_probabilities = self.agents.weights * probabilities.T
jacobian = (probabilities @ weighted_probabilities) / shares
return x, None, jacobian
else:
# pre-compute additional components for the nested contraction
dampener = 1 - self.rho
rho_membership = self.rho * self.get_membership_matrix()
# define the nested contraction
def contraction(x: Array) -> ContractionResults:
"""Compute the next linear delta and optionally its Jacobian under nesting."""
probabilities, conditionals = compute_probabilities(x)
shares = probabilities @ self.agents.weights
x = x + (log_shares - np.log(shares)) * dampener
if not iteration._compute_jacobian:
return x, None, None
weighted_probabilities = self.agents.weights * probabilities.T
probabilities_part = dampener * (probabilities @ weighted_probabilities)
conditionals_part = rho_membership * (conditionals @ weighted_probabilities)
jacobian = (probabilities_part + conditionals_part) / shares
return x, None, jacobian
# solve the linear fixed point problem
delta, stats = iteration._iterate(initial_delta, contraction)
return delta, stats, errors
# solve for delta with a nonlinear fixed point
assert 'nonlinear' in fp_type
if 'safe' in fp_type:
utility_reduction = np.clip(self.mu.max(axis=0, keepdims=True), 0, None)
exp_mu = np.exp(self.mu - utility_reduction)
compute_probabilities = functools.partial(
self.compute_probabilities, mu=exp_mu, utility_reduction=utility_reduction, linear=False
)
else:
exp_mu = np.exp(self.mu)
compute_probabilities = functools.partial(self.compute_probabilities, mu=exp_mu, linear=False)
# define the nonlinear contraction
if self.H == 0:
def contraction(x: Array) -> ContractionResults:
"""Compute the next exponentiated delta and optionally its Jacobian."""
probability_ratios = compute_probabilities(x, numerator=exp_mu)[0]
share_ratios = probability_ratios @ self.agents.weights
x0, x = x, self.products.shares / share_ratios
if not iteration._compute_jacobian:
return x, None, None
shares = x0 * share_ratios
probabilities = x0 * probability_ratios
weighted_probabilities = self.agents.weights * probabilities.T
jacobian = x / x0.T * (probabilities @ weighted_probabilities) / shares
return x, None, jacobian
else:
# pre-compute additional components for the nested contraction
dampener = 1 - self.rho
rho_membership = self.rho * self.get_membership_matrix()
# define the nested contraction
def contraction(x: Array) -> ContractionResults:
"""Compute the next exponentiated delta and optionally its Jacobian under nesting."""
probabilities, conditionals = compute_probabilities(x)
shares = probabilities @ self.agents.weights
x0, x = x, x * (self.products.shares / shares) ** dampener
if not iteration._compute_jacobian:
return x, None, None
weighted_probabilities = self.agents.weights * probabilities.T
probabilities_part = dampener * (probabilities @ weighted_probabilities)
conditionals_part = rho_membership * (conditionals @ weighted_probabilities)
jacobian = x / x0.T * (probabilities_part + conditionals_part) / shares
return x, None, jacobian
# solve the nonlinear fixed point problem
exp_delta, stats = iteration._iterate(np.exp(initial_delta), contraction)
delta = np.log(exp_delta)
return delta, stats, errors
def compute_capital_lamda(self, value_derivatives: Array) -> Array:
"""Compute the diagonal capital lambda matrix used to decompose markups."""
diagonal = value_derivatives @ self.agents.weights
if self.H > 0:
diagonal /= 1 - self.rho
return np.diagflat(diagonal)
def compute_capital_gamma(
self, value_derivatives: Array, probabilities: Array, conditionals: Optional[Array]) -> Array:
"""Compute the dense capital gamma matrix used to decompose markups."""
weighted_value_derivatives = self.agents.weights * value_derivatives.T
capital_gamma = probabilities @ weighted_value_derivatives
if self.H > 0:
membership = self.get_membership_matrix()
capital_gamma += self.rho / (1 - self.rho) * membership * (conditionals @ weighted_value_derivatives)
return capital_gamma
def compute_eta(
self, ownership_matrix: Optional[Array] = None, delta: Optional[Array] = None) -> Tuple[Array, List[Error]]:
"""Compute the markup term in the BLP-markup equation. By default, get an unchanged ownership matrix and use
the delta with which this market was initialized.
"""
errors: List[Error] = []
if ownership_matrix is None:
ownership_matrix = self.get_ownership_matrix()
if delta is None:
assert self.delta is not None
delta = self.delta
utility_derivatives = self.compute_utility_derivatives('prices')
probabilities, conditionals = self.compute_probabilities(delta)
shares = self.products.shares
jacobian = self.compute_shares_by_variable_jacobian(utility_derivatives, probabilities, conditionals)
intra_firm_jacobian = ownership_matrix * jacobian
eta, replacement = approximately_solve(intra_firm_jacobian, -shares)
if replacement:
errors.append(exceptions.IntraFirmJacobianInversionError(intra_firm_jacobian, replacement))
return eta, errors
def compute_zeta(
self, costs: Array, ownership_matrix: Optional[Array] = None, utility_derivatives: Optional[Array] = None,
prices: Optional[Array] = None) -> Tuple[Array, Array]:
"""Compute the markup term in the zeta-markup equation. By default, get an unchanged ownership matrix, compute
derivatives of utilities with respect to prices, and use unchanged prices. Also return the intermediate
diagonal of the capital lambda matrix, which is used for weighting during fixed point iteration.
"""
if ownership_matrix is None:
ownership_matrix = self.get_ownership_matrix()
if utility_derivatives is None:
utility_derivatives = self.compute_utility_derivatives('prices')
if prices is None:
probabilities, conditionals = self.compute_probabilities()
shares = self.products.shares
else:
delta = self.update_delta_with_variable('prices', prices)
mu = self.update_mu_with_variable('prices', prices)
probabilities, conditionals = self.compute_probabilities(delta, mu)
shares = probabilities @ self.agents.weights
value_derivatives = probabilities * utility_derivatives
capital_lamda_diagonal = self.compute_capital_lamda(value_derivatives).diagonal()
capital_lamda_inverse = np.diag(1 / capital_lamda_diagonal)
capital_gamma = self.compute_capital_gamma(value_derivatives, probabilities, conditionals)
tilde_capital_omega = capital_lamda_inverse @ (ownership_matrix * capital_gamma).T
zeta = tilde_capital_omega @ (prices - costs) - capital_lamda_inverse @ shares
return zeta, capital_lamda_diagonal
def compute_equilibrium_prices(
self, costs: Array, iteration: Iteration, prices: Optional[Array] = None,
ownership_matrix: Optional[Array] = None) -> Tuple[Array, SolverStats]:
"""Compute equilibrium prices by iterating over the zeta-markup equation. By default, use unchanged firm IDs
and use unchanged prices as initial values.
"""
if ownership_matrix is None:
ownership_matrix = self.get_ownership_matrix()
if prices is None:
prices = self.products.prices
# derivatives of utilities with respect to prices change during iteration only if they depend on prices
formulations = self._X1_formulations + self._X2_formulations
if any(s.name == 'prices' for f in formulations for s in f.differentiate('prices').free_symbols):
get_derivatives = lambda p: self.compute_utility_derivatives('prices', p)
else:
derivatives = self.compute_utility_derivatives('prices')
get_derivatives = lambda _: derivatives
# define the contraction
def contraction(x: Array) -> ContractionResults:
"""Compute the next equilibrium prices."""
zeta, capital_lamda_diagonal = self.compute_zeta(costs, ownership_matrix, get_derivatives(x), x)
x = costs + zeta
return x, capital_lamda_diagonal, None
# solve the fixed point problem
prices, stats = iteration._iterate(prices, contraction)
return prices, stats
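    # Illustrative note (not part of the original class): the contraction above
    # iterates the zeta-markup fixed point p <- c + zeta(p), so at convergence the
    # returned prices satisfy the firms' first-order conditions p = c + zeta(p),
    # with the capital lambda diagonal returned by compute_zeta used as the
    # iteration weights.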
def compute_shares(self, prices: Optional[Array] = None) -> Array:
"""Compute shares evaluated at specific prices. By default, use unchanged prices."""
if prices is None:
prices = self.products.prices
delta = self.update_delta_with_variable('prices', prices)
mu = self.update_mu_with_variable('prices', prices)
shares = self.compute_probabilities(delta, mu)[0] @ self.agents.weights
return shares
def compute_utility_derivatives_by_parameter_tangent(
self, parameter: Parameter, X1_derivatives: Array, X2_derivatives: Array) -> Array:
"""Compute the tangent with respect to a parameter of derivatives of utility with respect to a variable."""
tangent = np.zeros((self.J, self.I), options.dtype)
if isinstance(parameter, LinearCoefficient):
tangent += X1_derivatives[:, [parameter.location[0]]]
elif isinstance(parameter, NonlinearCoefficient):
v = parameter.get_agent_characteristic(self)
tangent += X2_derivatives[:, [parameter.location[0]]] @ v.T
return tangent
def compute_probabilities_by_parameter_tangent(
self, parameter: Parameter, probabilities: Array, conditionals: Optional[Array],
delta: Optional[Array] = None, mu: Optional[Array] = None) -> Tuple[Array, Optional[Array]]:
"""Compute the tangent of probabilities with respect to a parameter. By default, use unchanged delta and mu."""
if delta is None:
assert self.delta is not None
delta = self.delta
if mu is None:
mu = self.mu
# without nesting, compute only the tangent of probabilities with respect to the parameter
if self.H == 0:
if isinstance(parameter, LinearCoefficient):
x = parameter.get_product_characteristic(self)
probabilities_tangent = probabilities * (x - x.T @ probabilities)
else:
assert isinstance(parameter, NonlinearCoefficient)
v = parameter.get_agent_characteristic(self)
x = parameter.get_product_characteristic(self)
probabilities_tangent = probabilities * v.T * (x - x.T @ probabilities)
return probabilities_tangent, None
# marginal probabilities are needed to compute tangents with nesting
marginals = self.groups.sum(probabilities)
# compute the tangent of conditional and marginal probabilities with respect to the parameter
if isinstance(parameter, LinearCoefficient):
x = parameter.get_product_characteristic(self)
# compute the tangent of conditional probabilities with respect to the parameter
A = conditionals * x
A_sums = self.groups.sum(A)
conditionals_tangent = conditionals * (x - self.groups.expand(A_sums)) / (1 - self.rho)
# compute the tangent of marginal probabilities with respect to the parameter
B = marginals * A_sums
marginals_tangent = B - marginals * B.sum(axis=0, keepdims=True)
elif isinstance(parameter, NonlinearCoefficient):
v = parameter.get_agent_characteristic(self)
x = parameter.get_product_characteristic(self)
# compute the tangent of conditional probabilities with respect to the parameter
A = conditionals * x
A_sums = self.groups.sum(A)
conditionals_tangent = conditionals * v.T * (x - self.groups.expand(A_sums)) / (1 - self.rho)
# compute the tangent of marginal probabilities with respect to the parameter
B = marginals * A_sums * v.T
marginals_tangent = B - marginals * B.sum(axis=0, keepdims=True)
else:
assert isinstance(parameter, RhoParameter)
group_associations = parameter.get_group_associations(self.groups)
associations = self.groups.expand(group_associations)
# utilities are needed to compute tangents with respect to rho
utilities = (delta + mu) / (1 - self.rho)
# compute the tangent of conditional probabilities with respect to the parameter
A = conditionals * utilities / (1 - self.rho)
A_sums = self.groups.sum(A)
conditionals_tangent = associations * (A - conditionals * self.groups.expand(A_sums))
# compute the tangent of marginal probabilities with respect to the parameter (re-scale for robustness)
utility_reduction = np.clip(utilities.max(axis=0, keepdims=True), 0, None)
with np.errstate(divide='ignore', invalid='ignore'):
B = marginals * (
A_sums * (1 - self.group_rho) -
(np.log(self.groups.sum(np.exp(utilities - utility_reduction))) + utility_reduction)
)
marginals_tangent = group_associations * B - marginals * (group_associations.T @ B)
marginals_tangent[~np.isfinite(marginals_tangent)] = 0
# compute the tangent of probabilities with respect to the parameter
probabilities_tangent = (
conditionals_tangent * self.groups.expand(marginals) +
conditionals * self.groups.expand(marginals_tangent)
)
return probabilities_tangent, conditionals_tangent
def compute_probabilities_by_xi_tensor(
self, probabilities: Array, conditionals: Optional[Array]) -> Tuple[Array, Optional[Array]]:
"""Use choice probabilities to compute their tensor derivatives with respect to xi (equivalently, to delta),
indexed with the first axis.
"""
probabilities_tensor = -probabilities[None] * probabilities[None].swapaxes(0, 1)
probabilities_tensor[np.diag_indices(self.J)] += probabilities
conditionals_tensor = None
if self.H > 0:
assert conditionals is not None
membership = self.get_membership_matrix()
multiplied_probabilities = self.rho / (1 - self.rho) * probabilities
multiplied_conditionals = 1 / (1 - self.rho) * conditionals
probabilities_tensor -= membership[..., None] * (
conditionals[None] * multiplied_probabilities[None].swapaxes(0, 1)
)
conditionals_tensor = -membership[..., None] * (
conditionals[None] * multiplied_conditionals[None].swapaxes(0, 1)
)
probabilities_tensor[np.diag_indices(self.J)] += multiplied_probabilities
conditionals_tensor[np.diag_indices(self.J)] += multiplied_conditionals
return probabilities_tensor, conditionals_tensor
def compute_shares_by_variable_jacobian(
self, utility_derivatives: Array, probabilities: Optional[Array] = None,
conditionals: Optional[Array] = None) -> Array:
"""Compute the Jacobian of market shares with respect to a variable. By default, compute unchanged choice
probabilities.
"""
if probabilities is None:
probabilities, conditionals = self.compute_probabilities()
value_derivatives = probabilities * utility_derivatives
capital_lamda = self.compute_capital_lamda(value_derivatives)
capital_gamma = self.compute_capital_gamma(value_derivatives, probabilities, conditionals)
return capital_lamda - capital_gamma
def compute_shares_by_xi_jacobian(self, probabilities: Array, conditionals: Optional[Array]) -> Array:
"""Compute the Jacobian of shares with respect to xi (equivalently, to delta)."""
diagonal_shares = np.diagflat(self.products.shares)
weighted_probabilities = self.agents.weights * probabilities.T
jacobian = diagonal_shares - probabilities @ weighted_probabilities
if self.H > 0:
membership = self.get_membership_matrix()
jacobian += self.rho / (1 - self.rho) * (
diagonal_shares - membership * (conditionals @ weighted_probabilities)
)
return jacobian
def compute_shares_by_theta_jacobian(
self, delta: Array, probabilities: Array, conditionals: Optional[Array]) -> Array:
"""Compute the Jacobian of shares with respect to theta."""
jacobian = np.zeros((self.J, self.parameters.P), options.dtype)
for p, parameter in enumerate(self.parameters.unfixed):
tangent, _ = self.compute_probabilities_by_parameter_tangent(parameter, probabilities, conditionals, delta)
jacobian[:, [p]] = tangent @ self.agents.weights
return jacobian
def compute_capital_lamda_by_parameter_tangent(
self, parameter: Parameter, value_derivatives: Array, value_derivatives_tangent: Array) -> Array:
"""Compute the tangent of the diagonal capital lambda matrix with respect to a parameter."""
diagonal = value_derivatives_tangent @ self.agents.weights
if self.H > 0:
diagonal /= 1 - self.rho
if isinstance(parameter, RhoParameter):
associations = self.groups.expand(parameter.get_group_associations(self.groups))
diagonal += associations / (1 - self.rho)**2 * (value_derivatives @ self.agents.weights)
return np.diagflat(diagonal)
def compute_capital_lamda_by_xi_tensor(self, value_derivatives_tensor: Array) -> Array:
"""Compute the tensor derivative of the diagonal capital lambda matrix with respect to xi, indexed by the first
axis.
"""
diagonal = value_derivatives_tensor @ self.agents.weights
if self.H > 0:
diagonal /= 1 - self.rho[None]
tensor = np.zeros((self.J, self.J, self.J), options.dtype)
tensor[:, np.arange(self.J), np.arange(self.J)] = np.squeeze(diagonal)
return tensor
def compute_capital_gamma_by_parameter_tangent(
self, parameter: Parameter, value_derivatives: Array, value_derivatives_tangent: Array,
probabilities: Array, probabilities_tangent: Array, conditionals: Optional[Array],
conditionals_tangent: Optional[Array]) -> Array:
"""Compute the tangent of the dense capital gamma matrix with respect to a parameter."""
weighted_value_derivatives = self.agents.weights * value_derivatives.T
weighted_value_derivatives_tangent = self.agents.weights * value_derivatives_tangent.T
tangent = (
probabilities_tangent @ weighted_value_derivatives +
probabilities @ weighted_value_derivatives_tangent
)
if self.H > 0:
assert conditionals is not None and conditionals_tangent is not None
membership = self.get_membership_matrix()
tangent += membership * self.rho / (1 - self.rho) * (
conditionals_tangent @ weighted_value_derivatives +
conditionals @ weighted_value_derivatives_tangent
)
if isinstance(parameter, RhoParameter):
associations = self.groups.expand(parameter.get_group_associations(self.groups))
tangent += associations * membership / (1 - self.rho)**2 * (conditionals @ weighted_value_derivatives)
return tangent
def compute_capital_gamma_by_xi_tensor(
self, value_derivatives: Array, value_derivatives_tensor: Array, probabilities: Array,
probabilities_tensor: Array, conditionals: Optional[Array], conditionals_tensor: Optional[Array]) -> Array:
"""Compute the tensor derivative of the dense capital gamma matrix with respect to xi, indexed with the first
axis.
"""
weighted_value_derivatives = self.agents.weights * value_derivatives.T
weighted_probabilities = self.agents.weights.T * probabilities
tensor = (
probabilities_tensor @ weighted_value_derivatives +
weighted_probabilities @ value_derivatives_tensor.swapaxes(1, 2)
)
if self.H > 0:
assert conditionals is not None and conditionals_tensor is not None
membership = self.get_membership_matrix()
weighted_conditionals = self.agents.weights.T * conditionals
tensor += membership[None] * self.rho[None] / (1 - self.rho[None]) * (
conditionals_tensor @ weighted_value_derivatives +
weighted_conditionals @ value_derivatives_tensor.swapaxes(1, 2)
)
return tensor
def compute_eta_by_theta_jacobian(self, xi_jacobian: Array) -> Tuple[Array, List[Error]]:
"""Compute the Jacobian of the markup term in the BLP-markup equation with respect to theta."""
errors: List[Error] = []
# compute derivatives of aggregate inclusive values with respect to prices
probabilities, conditionals = self.compute_probabilities()
utility_derivatives = self.compute_utility_derivatives('prices')
value_derivatives = probabilities * utility_derivatives
# compute the capital delta matrix, which, when inverted and multiplied by shares, gives eta
ownership = self.get_ownership_matrix()
capital_lamda = self.compute_capital_lamda(value_derivatives)
capital_gamma = self.compute_capital_gamma(value_derivatives, probabilities, conditionals)
capital_delta = -ownership * (capital_lamda - capital_gamma)
# compute the inverse of capital delta and use it to compute eta
capital_delta_inverse, replacement = approximately_invert(capital_delta)
if replacement:
errors.append(exceptions.IntraFirmJacobianInversionError(capital_delta, replacement))
eta = capital_delta_inverse @ self.products.shares
# compute the tensor derivative with respect to xi (equivalently, to delta), indexed with the first axis, of
# derivatives of aggregate inclusive values
probabilities_tensor, conditionals_tensor = self.compute_probabilities_by_xi_tensor(probabilities, conditionals)
value_derivatives_tensor = probabilities_tensor * utility_derivatives
# compute the tensor derivative of capital delta with respect to xi (equivalently, to delta)
capital_lamda_tensor = self.compute_capital_lamda_by_xi_tensor(value_derivatives_tensor)
capital_gamma_tensor = self.compute_capital_gamma_by_xi_tensor(
value_derivatives, value_derivatives_tensor, probabilities, probabilities_tensor, conditionals,
conditionals_tensor
)
capital_delta_tensor = -ownership[None] * (capital_lamda_tensor - capital_gamma_tensor)
# compute the product of the tensor and eta
capital_delta_tensor_times_eta = np.squeeze(capital_delta_tensor @ eta)
# compute derivatives of X1 and X2 with respect to prices
X1_derivatives = self.compute_X1_derivatives('prices')
X2_derivatives = self.compute_X2_derivatives('prices')
# fill the Jacobian of eta with respect to theta parameter-by-parameter
eta_jacobian = np.zeros((self.J, self.parameters.P), options.dtype)
for p, parameter in enumerate(self.parameters.unfixed):
# compute the tangent with respect to the parameter of derivatives of aggregate inclusive values
probabilities_tangent, conditionals_tangent = self.compute_probabilities_by_parameter_tangent(
parameter, probabilities, conditionals
)
utility_derivatives_tangent = self.compute_utility_derivatives_by_parameter_tangent(
parameter, X1_derivatives, X2_derivatives
)
value_derivatives_tangent = (
probabilities_tangent * utility_derivatives +
probabilities * utility_derivatives_tangent
)
# compute the tangent of capital delta with respect to the parameter
capital_lamda_tangent = self.compute_capital_lamda_by_parameter_tangent(
parameter, value_derivatives, value_derivatives_tangent
)
capital_gamma_tangent = self.compute_capital_gamma_by_parameter_tangent(
parameter, value_derivatives, value_derivatives_tangent, probabilities, probabilities_tangent,
conditionals, conditionals_tangent
)
capital_delta_tangent = -ownership * (capital_lamda_tangent - capital_gamma_tangent)
# extract the tangent of xi with respect to the parameter and compute the associated tangent of eta
eta_jacobian[:, [p]] = -capital_delta_inverse @ (
capital_delta_tangent @ eta + capital_delta_tensor_times_eta.T @ xi_jacobian[:, [p]]
)
# return the filled Jacobian
return eta_jacobian, errors
def compute_xi_by_theta_jacobian(self, delta: Optional[Array] = None) -> Tuple[Array, List[Error]]:
"""Use the Implicit Function Theorem to compute the Jacobian of xi (equivalently, of delta) with respect to
theta. By default, use unchanged delta values.
"""
errors: List[Error] = []
if delta is None:
assert self.delta is not None
delta = self.delta
probabilities, conditionals = self.compute_probabilities(delta)
shares_by_xi_jacobian = self.compute_shares_by_xi_jacobian(probabilities, conditionals)
shares_by_theta_jacobian = self.compute_shares_by_theta_jacobian(delta, probabilities, conditionals)
xi_by_theta_jacobian, replacement = approximately_solve(shares_by_xi_jacobian, -shares_by_theta_jacobian)
if replacement:
errors.append(exceptions.SharesByXiJacobianInversionError(shares_by_xi_jacobian, replacement))
return xi_by_theta_jacobian, errors
def compute_omega_by_theta_jacobian(self, tilde_costs: Array, xi_jacobian: Array) -> Tuple[Array, List[Error]]:
"""Compute the Jacobian of omega (equivalently, of transformed marginal costs) with respect to theta."""
errors: List[Error] = []
eta_jacobian, eta_jacobian_errors = self.compute_eta_by_theta_jacobian(xi_jacobian)
errors.extend(eta_jacobian_errors)
if self.costs_type == 'linear':
omega_jacobian = -eta_jacobian
else:
assert self.costs_type == 'log'
omega_jacobian = -eta_jacobian / np.exp(tilde_costs)
return omega_jacobian, errors
def compute_micro(self, delta: Optional[Array] = None) -> Tuple[Array, Array, Array]:
"""Compute micro moments. By default, use the delta with which this market was initialized. Also return the
probabilities with the outside option eliminated so they can be re-used when computing other things related to
micro moments.
"""
assert self.moments is not None
# compute probabilities with the outside option eliminated
micro_probabilities, micro_conditionals = self.compute_probabilities(delta, eliminate_outside=True)
# compute the micro moments
micro = np.zeros((self.moments.MM, 1), options.dtype)
for m, moment in enumerate(self.moments.micro_moments):
assert isinstance(moment, FirstChoiceCovarianceMoment)
z = micro_probabilities.T @ self.products.X2[:, [moment.X2_index]]
d = self.agents.demographics[:, [moment.demographics_index]]
demeaned_z = z - z.T @ self.agents.weights
demeaned_d = d - d.T @ self.agents.weights
micro[m] = demeaned_z.T @ (self.agents.weights * demeaned_d) - moment.value
return micro, micro_probabilities, micro_conditionals
|
py | b415f4b8bfca2e71ee7022f96182123b4d143142 | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ notification interface."""
import configparser
import os
import struct
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.mininode import CTransaction
from test_framework.util import (assert_equal,
bytes_to_hex_str,
hash256,
)
from io import BytesIO
class ZMQSubscriber:
def __init__(self, socket, topic):
self.sequence = 0
self.socket = socket
self.topic = topic
import zmq
self.socket.setsockopt(zmq.SUBSCRIBE, self.topic)
def receive(self):
topic, body, seq = self.socket.recv_multipart()
# Topic should match the subscriber topic.
assert_equal(topic, self.topic)
# Sequence should be incremental.
assert_equal(struct.unpack('<I', seq)[-1], self.sequence)
self.sequence += 1
return body
class ZMQTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_nodes(self):
# Try to import python3-zmq. Skip this test if the import fails.
try:
import zmq
except ImportError:
raise SkipTest("python3-zmq module not available.")
# Check that bitcoin has been built with ZMQ enabled.
config = configparser.ConfigParser()
if not self.options.configfile:
self.options.configfile = os.path.abspath(os.path.join(os.path.dirname(__file__), "../config.ini"))
config.read_file(open(self.options.configfile))
if not config["components"].getboolean("ENABLE_ZMQ"):
raise SkipTest("noodlyappendagecoind has not been built with zmq enabled.")
# Initialize ZMQ context and socket.
# All messages are received in the same socket which means
# that this test fails if the publishing order changes.
# Note that the publishing order is not defined in the documentation and
# is subject to change.
address = "tcp://127.0.0.1:28332"
self.zmq_context = zmq.Context()
socket = self.zmq_context.socket(zmq.SUB)
socket.set(zmq.RCVTIMEO, 60000)
socket.connect(address)
# Subscribe to all available topics.
self.hashblock = ZMQSubscriber(socket, b"hashblock")
self.hashtx = ZMQSubscriber(socket, b"hashtx")
self.rawblock = ZMQSubscriber(socket, b"rawblock")
self.rawtx = ZMQSubscriber(socket, b"rawtx")
self.extra_args = [["-zmqpub%s=%s" % (sub.topic.decode(), address) for sub in [self.hashblock, self.hashtx, self.rawblock, self.rawtx]], []]
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
def run_test(self):
try:
self._zmq_test()
finally:
# Destroy the ZMQ context.
self.log.debug("Destroying ZMQ context")
self.zmq_context.destroy(linger=None)
def _zmq_test(self):
num_blocks = 5
self.log.info("Generate %(n)d blocks (and %(n)d coinbase txes)" % {"n": num_blocks})
genhashes = self.nodes[0].generate(num_blocks)
self.sync_all()
for x in range(num_blocks):
# Should receive the coinbase txid.
txid = self.hashtx.receive()
# Should receive the coinbase raw transaction.
hex = self.rawtx.receive()
tx = CTransaction()
tx.deserialize(BytesIO(hex))
tx.calc_sha256()
assert_equal(tx.hash, bytes_to_hex_str(txid))
# Should receive the generated block hash.
hash = bytes_to_hex_str(self.hashblock.receive())
assert_equal(genhashes[x], hash)
# The block should only have the coinbase txid.
assert_equal([bytes_to_hex_str(txid)], self.nodes[1].getblock(hash)["tx"])
# Should receive the generated raw block.
block = self.rawblock.receive()
assert_equal(genhashes[x], bytes_to_hex_str(hash256(block[:80])))
self.log.info("Wait for tx from second node")
payment_txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# Should receive the broadcasted txid.
txid = self.hashtx.receive()
assert_equal(payment_txid, bytes_to_hex_str(txid))
# Should receive the broadcasted raw transaction.
hex = self.rawtx.receive()
assert_equal(payment_txid, bytes_to_hex_str(hash256(hex)))
if __name__ == '__main__':
ZMQTest().main()
|
py | b415f4f2a14c59d33dd55948bf3364b6f79376bf | import time
import ssl
import socketpool
import wifi
import golioth.golioth as Golioth
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
def connected(client):
print("Connected to Golioth!")
client.listen_hello()
client.log_debug("device connected from CircuitPython")
client.log_info({
'msg': "connected",
'module': "networking",
'hostname': wifi.radio.hostname,
})
def disconnected(client):
print("Disconnected from Golioth!")
def on_hello(client, message):
print(message)
# secrets dictionary must contain 'ssid' and 'password' at a minimum
print("Connecting...")
wifi.radio.connect(secrets["ssid"], secrets["password"])
print("IP address ", wifi.radio.ipv4_address)
pool = socketpool.SocketPool(wifi.radio)
golioth_client = Golioth.Client(
secrets["psk_id"], secrets["psk"], pool, ssl.create_default_context())
golioth_client.on_connect = connected
golioth_client.on_disconnect = disconnected
golioth_client.on_hello = on_hello
print("Connecting to Golioth...")
golioth_client.connect()
last_check = 0
i = 1
while True:
try:
golioth_client.loop()
now = time.monotonic()
if now - last_check > 5:
golioth_client.log_debug("Hello "+str(i))
i = i + 1
last_check = now
except (ValueError, RuntimeError) as e:
print("Failed to get data, retrying\n", e)
print("Reconnecting...")
wifi.radio.connect(secrets["ssid"], secrets["password"])
golioth_client.connect()
continue
|
py | b415f6168da0efe75836398da73564bf3a2e89c6 | import numpy as np
import theano
from theano import tensor
from . import util
def flatcat(arrays):
'''
Flattens arrays and concatenates them in order.
'''
return tensor.concatenate([a.flatten() for a in arrays])
def flatgrad(loss, vars_):
return flatcat(tensor.grad(loss, vars_))
def gaussian_kl(means1_N_D, stdevs1_N_D, means2_N_D, stdevs2_N_D):
'''
KL divergences between Gaussians with diagonal covariances
Covariances matrices are specified with square roots of the diagonal (standard deviations)
'''
D = tensor.shape(means1_N_D)[1]
return (
.5 * (tensor.sqr(stdevs1_N_D/stdevs2_N_D).sum(axis=1) +
tensor.sqr((means2_N_D-means1_N_D)/stdevs2_N_D).sum(axis=1) +
2.*(tensor.log(stdevs2_N_D).sum(axis=1) - tensor.log(stdevs1_N_D).sum(axis=1)) - D
))
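# Not part of the original source: the closed form implemented above is
# KL(N1 || N2) = 0.5 * ( sum(s1^2 / s2^2) + sum((m2 - m1)^2 / s2^2)
#                        + 2 * sum(log s2 - log s1) - D ),
# evaluated row-wise, where s are the standard deviations and D is the dimension.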
def gaussian_log_density(means_N_D, stdevs_N_D, x_N_D):
'''Log density of a Gaussian distribution with diagonal covariance (specified as standard deviations).'''
D = tensor.shape(means_N_D)[1]
lognormconsts_B = -.5*(D*np.log(2.*np.pi) + 2.*tensor.log(stdevs_N_D).sum(axis=1)) # log normalization constants
logprobs_B = -.5*tensor.sqr((x_N_D - means_N_D)/stdevs_N_D).sum(axis=1) + lognormconsts_B
return logprobs_B
def sigmoid_cross_entropy_with_logits(logits_B, labels_B):
return tensor.nnet.binary_crossentropy(tensor.nnet.sigmoid(logits_B), labels_B)
def logsigmoid(a):
'''Equivalent to tf.log(tf.sigmoid(a))'''
return -tensor.nnet.softplus(-a)
def logit_bernoulli_kl(logits1_B, logits2_B):
logp1_B, logp2_B = logsigmoid(logits1_B), logsigmoid(logits2_B)
logq1_B, logq2_B = logp1_B - logits1_B, logp2_B - logits2_B # these are log(1-p)
p1_B = tensor.nnet.sigmoid(logits1_B)
kl_B = p1_B*(logp1_B - logp2_B) + (1.-p1_B)*(logq1_B - logq2_B)
return kl_B
def logit_bernoulli_entropy(logits_B):
ent_B = (1.-tensor.nnet.sigmoid(logits_B))*logits_B - logsigmoid(logits_B)
return ent_B
def logsumexp(a, axis, name=None):
'''
Like scipy.misc.logsumexp with keepdims=True
(does NOT eliminate the singleton axis)
'''
amax = a.max(axis=axis, keepdims=True)
return amax + tensor.log(tensor.exp(a-amax).sum(axis=axis, keepdims=True))
def categorical_kl(logprobs1_B_A, logprobs2_B_A, name=None):
'''KL divergence between categorical distributions, specified as log probabilities'''
kl_B = (tensor.exp(logprobs1_B_A) * (logprobs1_B_A - logprobs2_B_A)).sum(axis=1)
return kl_B
def unflatten_into_tensors(flatparams_P, output_shapes, name=None):
'''
Unflattens a vector produced by flatcat into a list of tensors of the specified shapes.
'''
outputs = []
curr_pos = 0
for shape in output_shapes:
size = np.prod(shape)
flatval = flatparams_P[curr_pos:curr_pos+size]
outputs.append(flatval.reshape(shape))
curr_pos += size
# assert curr_pos == flatparams_P.get_shape().num_elements()
return outputs
# from http://arxiv.org/abs/1412.6980
# and https://gist.github.com/Newmu/acb738767acb4788bac3
# suggested lr 0.001
def adam(cost, params, lr, beta1=0.9, beta2=0.999, eps=1e-8):
updates = []
grads = tensor.grad(cost, params); assert len(params) == len(grads)
t0 = theano.shared(np.array(0., dtype=theano.config.floatX))
t = t0 + 1
corr1 = (1 - beta1**t)
corr2 = (1 - beta2**t)
alpha = lr * tensor.sqrt(corr2) / corr1
for p, g in zip(params, grads):
m = theano.shared(value=np.zeros(p.get_value().shape, dtype=theano.config.floatX), broadcastable=p.broadcastable)
v = theano.shared(value=np.zeros(p.get_value().shape, dtype=theano.config.floatX), broadcastable=p.broadcastable)
m_t = beta1 * m + (1 - beta1) * g
v_t = beta2 * v + (1 - beta2) * tensor.square(g)
p_t = p - alpha * m_t/(tensor.sqrt(v_t) + eps)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
updates.append((t0, t))
return updates
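# Hypothetical usage sketch (not in the original module): wiring the Adam updates
# into a compiled training step. `loss`, `params`, and `inputs` are placeholder
# symbolic variables assumed to be defined elsewhere.
#
#   train_step = theano.function(inputs, loss, updates=adam(loss, params, lr=0.001))
#   for batch in minibatches:
#       train_step(*batch)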
def function(inputs, outputs, **kwargs):
# Cache compiled function
f = theano.function(inputs, outputs, **kwargs)
def wrapper(*args):
# Execute
out = f(*args)
# Find output elements with shape == () and convert them to scalars
is_list = isinstance(out, (list,tuple))
out_as_list = list(out) if is_list else [out]
for i in xrange(len(out_as_list)):
if isinstance(out_as_list[i], np.ndarray) and out_as_list[i].shape == ():
out_as_list[i] = np.asscalar(out_as_list[i])
return out_as_list if is_list else out_as_list[0]
return wrapper
|
py | b415f6fa8ea254962431420442b5aed18eb541db | import sys
import typing
import mathutils
def area_tri(v1: 'mathutils.Vector', v2: 'mathutils.Vector',
v3: 'mathutils.Vector') -> float:
'''Returns the area size of the 2D or 3D triangle defined.
:param v1: Point1
:type v1: 'mathutils.Vector'
:param v2: Point2
:type v2: 'mathutils.Vector'
:param v3: Point3
:type v3: 'mathutils.Vector'
'''
pass
def barycentric_transform(
point: 'mathutils.Vector', tri_a1: 'mathutils.Vector',
tri_a2: 'mathutils.Vector', tri_a3: 'mathutils.Vector',
tri_b1: 'mathutils.Vector', tri_b2: 'mathutils.Vector',
tri_b3: 'mathutils.Vector'):
'''Return a transformed point, the transformation is defined by 2 triangles.
:param point: The point to transform.
:type point: 'mathutils.Vector'
:param tri_a1: source triangle vertex.
:type tri_a1: 'mathutils.Vector'
:param tri_a2: source triangle vertex.
:type tri_a2: 'mathutils.Vector'
:param tri_a3: source triangle vertex.
:type tri_a3: 'mathutils.Vector'
:param tri_b1: target triangle vertex.
:type tri_b1: 'mathutils.Vector'
:param tri_b2: target triangle vertex.
:type tri_b2: 'mathutils.Vector'
:param tri_b3: target triangle vertex.
:type tri_b3: 'mathutils.Vector'
:return: The transformed point
'''
pass
def box_fit_2d(points: list) -> float:
'''Returns an angle that best fits the points to an axis aligned rectangle
:param points: list of 2d points.
:type points: list
:return: angle
'''
pass
def box_pack_2d(boxes: list):
'''Pack a list of 2D boxes and return the width and height of the packed bounding box.
:param boxes: list of boxes, each box is a list where the first 4 items are [x, y, width, height, …]; other items are ignored.
:type boxes: list
:return: the width and height of the packed bounding box
'''
pass
def convex_hull_2d(points: list) -> list:
'''Returns a list of indices into the list given
:param points: list of 2d points.
:type points: list
:return: a list of indices
'''
pass
def distance_point_to_plane(pt: 'mathutils.Vector',
plane_co: 'mathutils.Vector',
plane_no: 'mathutils.Vector') -> float:
'''Returns the signed distance between a point and a plane (negative when below the normal).
:param pt: Point
:type pt: 'mathutils.Vector'
:param plane_co: A point on the plane
:type plane_co: 'mathutils.Vector'
:param plane_no: The direction the plane is facing
:type plane_no: 'mathutils.Vector'
'''
pass
def interpolate_bezier(knot1: 'mathutils.Vector', handle1: 'mathutils.Vector',
handle2: 'mathutils.Vector', knot2: 'mathutils.Vector',
resolution: int) -> list:
'''Interpolate a bezier spline segment.
:param knot1: First bezier spline point.
:type knot1: 'mathutils.Vector'
:param handle1: First bezier spline handle.
:type handle1: 'mathutils.Vector'
:param handle2: Second bezier spline handle.
:type handle2: 'mathutils.Vector'
:param knot2: Second bezier spline point.
:type knot2: 'mathutils.Vector'
:param resolution: Number of points to return.
:type resolution: int
:return: The interpolated points
'''
pass
def intersect_line_line(v1: 'mathutils.Vector', v2: 'mathutils.Vector',
v3: 'mathutils.Vector', v4: 'mathutils.Vector'):
'''Returns a tuple with the points on each line respectively closest to the other.
:param v1: First point of the first line
:type v1: 'mathutils.Vector'
:param v2: Second point of the first line
:type v2: 'mathutils.Vector'
:param v3: First point of the second line
:type v3: 'mathutils.Vector'
:param v4: Second point of the second line
:type v4: 'mathutils.Vector'
'''
pass
def intersect_line_line_2d(lineA_p1: 'mathutils.Vector',
lineA_p2: 'mathutils.Vector',
lineB_p1: 'mathutils.Vector',
lineB_p2: 'mathutils.Vector') -> 'mathutils.Vector':
'''Takes 2 segments (defined by 4 vectors) and returns a vector for their point of intersection or None.
:param lineA_p1: First point of the first line
:type lineA_p1: 'mathutils.Vector'
:param lineA_p2: Second point of the first line
:type lineA_p2: 'mathutils.Vector'
:param lineB_p1: First point of the second line
:type lineB_p1: 'mathutils.Vector'
:param lineB_p2: Second point of the second line
:type lineB_p2: 'mathutils.Vector'
:return: The point of intersection or None when not found
'''
pass
def intersect_line_plane(line_a: 'mathutils.Vector',
line_b: 'mathutils.Vector',
plane_co: 'mathutils.Vector',
plane_no: 'mathutils.Vector',
no_flip=False) -> 'mathutils.Vector':
'''Calculate the intersection between a line (as 2 vectors) and a plane. Returns a vector for the intersection or None.
:param line_a: First point of the first line
:type line_a: 'mathutils.Vector'
:param line_b: Second point of the first line
:type line_b: 'mathutils.Vector'
:param plane_co: A point on the plane
:type plane_co: 'mathutils.Vector'
:param plane_no: The direction the plane is facing
:type plane_no: 'mathutils.Vector'
:return: The point of intersection or None when not found
'''
pass
def intersect_line_sphere(line_a: 'mathutils.Vector',
line_b: 'mathutils.Vector',
sphere_co: 'mathutils.Vector',
sphere_radius,
clip=True) -> 'mathutils.Vector':
'''Takes a line (as 2 points) and a sphere (as a point and a radius) and returns the intersection
:param line_a: First point of the line
:type line_a: 'mathutils.Vector'
:param line_b: Second point of the line
:type line_b: 'mathutils.Vector'
:param sphere_co: The center of the sphere
:type sphere_co: 'mathutils.Vector'
:param sphere_radius: Radius of the sphere
:return: The intersection points as a pair of vectors or None when there is no intersection
'''
pass
def intersect_line_sphere_2d(line_a: 'mathutils.Vector',
line_b: 'mathutils.Vector',
sphere_co: 'mathutils.Vector',
sphere_radius,
clip=True) -> 'mathutils.Vector':
'''Takes a line (as 2 points) and a sphere (as a point and a radius) and returns the intersection
:param line_a: First point of the line
:type line_a: 'mathutils.Vector'
:param line_b: Second point of the line
:type line_b: 'mathutils.Vector'
:param sphere_co: The center of the sphere
:type sphere_co: 'mathutils.Vector'
:param sphere_radius: Radius of the sphere
:return: The intersection points as a pair of vectors or None when there is no intersection
'''
pass
def intersect_plane_plane(
plane_a_co: 'mathutils.Vector', plane_a_no: 'mathutils.Vector',
plane_b_co: 'mathutils.Vector',
plane_b_no: 'mathutils.Vector') -> 'mathutils.Vector':
'''Return the intersection between two planes
:param plane_a_co: Point on the first plane
:type plane_a_co: 'mathutils.Vector'
:param plane_a_no: Normal of the first plane
:type plane_a_no: 'mathutils.Vector'
:param plane_b_co: Point on the second plane
:type plane_b_co: 'mathutils.Vector'
:param plane_b_no: Normal of the second plane
:type plane_b_no: 'mathutils.Vector'
:return: The line of the intersection represented as a point and a vector
'''
pass
def intersect_point_line(pt: 'mathutils.Vector', line_p1: 'mathutils.Vector',
line_p2) -> float:
'''Takes a point and a line and returns a tuple with the closest point on the line and its distance from the first point of the line as a percentage of the length of the line.
:param pt: Point
:type pt: 'mathutils.Vector'
:param line_p1: First point of the line
:type line_p1: 'mathutils.Vector'
:param line_p2: Second point of the line
'''
pass
def intersect_point_quad_2d(
pt: 'mathutils.Vector', quad_p1: 'mathutils.Vector',
quad_p2: 'mathutils.Vector', quad_p3: 'mathutils.Vector',
quad_p4: 'mathutils.Vector') -> int:
'''Takes 5 vectors (using only the x and y coordinates): one is the point and the next 4 define the quad, only the x and y are used from the vectors. Returns 1 if the point is within the quad, otherwise 0. Works only with convex quads without singular edges.
:param pt: Point
:type pt: 'mathutils.Vector'
:param quad_p1: First point of the quad
:type quad_p1: 'mathutils.Vector'
:param quad_p2: Second point of the quad
:type quad_p2: 'mathutils.Vector'
:param quad_p3: Third point of the quad
:type quad_p3: 'mathutils.Vector'
:param quad_p4: Fourth point of the quad
:type quad_p4: 'mathutils.Vector'
'''
pass
def intersect_point_tri(pt: 'mathutils.Vector', tri_p1: 'mathutils.Vector',
tri_p2: 'mathutils.Vector',
tri_p3: 'mathutils.Vector') -> 'mathutils.Vector':
'''Takes 4 vectors: one is the point and the next 3 define the triangle.
:param pt: Point
:type pt: 'mathutils.Vector'
:param tri_p1: First point of the triangle
:type tri_p1: 'mathutils.Vector'
:param tri_p2: Second point of the triangle
:type tri_p2: 'mathutils.Vector'
:param tri_p3: Third point of the triangle
:type tri_p3: 'mathutils.Vector'
:return: Point on the triangles plane or None if its outside the triangle
'''
pass
def intersect_point_tri_2d(pt: 'mathutils.Vector', tri_p1: 'mathutils.Vector',
tri_p2: 'mathutils.Vector',
tri_p3: 'mathutils.Vector') -> int:
'''Takes 4 vectors (using only the x and y coordinates): one is the point and the next 3 define the triangle. Returns 1 if the point is within the triangle, otherwise 0.
:param pt: Point
:type pt: 'mathutils.Vector'
:param tri_p1: First point of the triangle
:type tri_p1: 'mathutils.Vector'
:param tri_p2: Second point of the triangle
:type tri_p2: 'mathutils.Vector'
:param tri_p3: Third point of the triangle
:type tri_p3: 'mathutils.Vector'
'''
pass
def intersect_ray_tri(v1: 'mathutils.Vector',
v2: 'mathutils.Vector',
v3: 'mathutils.Vector',
ray: 'mathutils.Vector',
orig: 'mathutils.Vector',
clip: bool = True) -> 'mathutils.Vector':
'''Returns the intersection between a ray and a triangle, if possible, returns None otherwise.
:param v1: Point1
:type v1: 'mathutils.Vector'
:param v2: Point2
:type v2: 'mathutils.Vector'
:param v3: Point3
:type v3: 'mathutils.Vector'
:param ray: Direction of the projection
:type ray: 'mathutils.Vector'
:param orig: Origin
:type orig: 'mathutils.Vector'
:param clip: When False, don't restrict the intersection to the area of the triangle, use the infinite plane defined by the triangle.
:type clip: bool
:return: The point of intersection or None if no intersection is found
'''
pass
def intersect_sphere_sphere_2d(p_a: 'mathutils.Vector', radius_a: float,
p_b: 'mathutils.Vector', radius_b: float):
'''Returns 2 points on between intersecting circles.
:param p_a: Center of the first circle
:type p_a: 'mathutils.Vector'
:param radius_a: Radius of the first circle
:type radius_a: float
:param p_b: Center of the second circle
:type p_b: 'mathutils.Vector'
:param radius_b: Radius of the second circle
:type radius_b: float
'''
pass
def normal(vectors) -> 'mathutils.Vector':
'''Returns the normal of a 3D polygon.
:param vectors: Vectors to calculate normals with
'''
pass
def points_in_planes(planes: typing.List['mathutils.Vector']):
'''Returns a list of points inside all planes given and a list of index values for the planes used.
:param planes: List of planes (4D vectors).
:type planes: typing.List['mathutils.Vector']
:return: two lists, once containing the vertices inside the planes, another containing the plane indices used
'''
pass
def tessellate_polygon(veclist_list) -> list:
'''Takes a list of polylines (each point a vector) and returns the point indices for a polyline filled with triangles.
:param veclist_list: list of polylines
'''
pass
def volume_tetrahedron(v1: 'mathutils.Vector', v2: 'mathutils.Vector',
v3: 'mathutils.Vector',
v4: 'mathutils.Vector') -> float:
'''Return the volume formed by a tetrahedron (points can be in any order).
:param v1: Point1
:type v1: 'mathutils.Vector'
:param v2: Point2
:type v2: 'mathutils.Vector'
:param v3: Point3
:type v3: 'mathutils.Vector'
:param v4: Point4
:type v4: 'mathutils.Vector'
'''
pass
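# Hypothetical usage sketch (not part of the stub): these functions live in
# mathutils.geometry inside Blender; a typical call intersects a line segment
# with a plane. The vectors below are illustrative values only.
#
#   from mathutils import Vector
#   from mathutils.geometry import intersect_line_plane
#   hit = intersect_line_plane(Vector((0, 0, 1)), Vector((0, 0, -1)),
#                              Vector((0, 0, 0)), Vector((0, 0, 1)))
#   # hit == Vector((0, 0, 0)) because the segment crosses the plane at the origin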
|
py | b415f7300b8a02197b0849fad6296dadbc240e23 | import itertools
from functools import partial
import threading
from robocode_ls_core.basic import implements
from robocode_ls_core.robotframework_log import get_logger
from robocode_ls_core.protocols import (
ILanguageServerClientBase,
Sentinel,
COMMUNICATION_DROPPED,
)
from typing import Any, Union, Optional
log = get_logger(__name__)
class _MessageMatcher(object):
def __init__(self):
self.event = threading.Event()
self.msg = None
def notify(self, msg):
# msg can be None if the communication was finished in the meanwhile.
self.msg = msg
self.event.set()
class _IdMessageMatcher(_MessageMatcher):
def __init__(self, message_id):
_MessageMatcher.__init__(self)
self.message_id = message_id
def __str__(self):
return "IdMatcher(%s)" % (self.message_id,)
__repr__ = __str__
class _PatternMessageMatcher(_MessageMatcher):
def __init__(self, message_pattern):
self.message_pattern = message_pattern
_MessageMatcher.__init__(self)
def matches(self, msg):
for key, val in self.message_pattern.items():
if msg.get(key) != val:
return False
return True
def __str__(self):
return "PatternMatcher(%s)" % (self.message_pattern,)
__repr__ = __str__
class _ReaderThread(threading.Thread):
def __init__(self, reader):
threading.Thread.__init__(self)
self.setDaemon(True)
self.reader = reader
self._lock = threading.Lock()
self._finished = False
# Message matchers.
self._id_message_matchers = {} # msg id-> matcher
self._pattern_message_matchers = {} # id(matcher) -> matcher
def run(self):
try:
self.reader.listen(self._on_message)
finally:
with self._lock:
self._finished = True
id_message_matchers = self._id_message_matchers
self._id_message_matchers = {}
pattern_message_matchers = self._pattern_message_matchers
self._pattern_message_matchers = {}
for message_matcher in id_message_matchers.values():
message_matcher.notify(None)
for message_matcher in pattern_message_matchers.values():
message_matcher.notify(None)
def _on_message(self, msg):
from robocode_ls_core.options import Setup
notify_matchers = []
log.debug("Will handle read message: %s" % (msg,))
with self._lock:
for message_matcher in self._pattern_message_matchers.values():
if message_matcher.matches(msg):
notify_matchers.append(message_matcher)
for message_matcher in notify_matchers:
del self._pattern_message_matchers[id(message_matcher)]
if "id" in msg:
message_matcher = self._id_message_matchers.pop(msg["id"], None)
if message_matcher is not None:
notify_matchers.append(message_matcher)
if Setup.options.DEBUG_MESSAGE_MATCHERS:
log.debug(
"Notify matchers: %s\nRemaining id matchers: %s\nRemaining pattern matchers: %s"
% (
notify_matchers,
self._id_message_matchers,
self._pattern_message_matchers,
)
)
for message_matcher in notify_matchers:
message_matcher.notify(msg)
def obtain_pattern_message_matcher(self, message_pattern):
"""
:param message_pattern:
Obtains a matcher which will be notified when the given message pattern is
returned.
:return:
None if it's already finished or the message matcher otherwise.
"""
with self._lock:
if self._finished:
return None
message_matcher = _PatternMessageMatcher(message_pattern)
self._pattern_message_matchers[id(message_matcher)] = message_matcher
return message_matcher
def obtain_id_message_matcher(self, message_id):
"""
:param message_id:
Obtains a matcher which will be notified when the given message id is
returned.
:return:
None if it's already finished or the message matcher otherwise.
"""
with self._lock:
if self._finished:
return None
message_matcher = _IdMessageMatcher(message_id)
self._id_message_matchers[message_id] = message_matcher
return message_matcher
class LanguageServerClientBase(object):
"""
A base implementation for talking with a process that implements the language
server.
"""
DEFAULT_TIMEOUT: Optional[
int
] = None # The default if not redefined is not having a timeout.
def __init__(self, writer, reader):
"""
:param JsonRpcStreamWriter writer:
:param JsonRpcStreamReader reader:
"""
self.writer = writer
self.reader = reader
t = _ReaderThread(reader)
self._reader_thread = t
t.start()
self.require_exit_messages = True
self.next_id = partial(next, itertools.count())
@implements(ILanguageServerClientBase.request_async)
def request_async(self, contents: dict):
message_id = contents["id"]
message_matcher = self._reader_thread.obtain_id_message_matcher(message_id)
if message_matcher is None:
return None
if not self.write(contents):
return None
return message_matcher
@implements(ILanguageServerClientBase.request)
def request(
self,
contents,
timeout: Union[int, Sentinel, None] = Sentinel.USE_DEFAULT_TIMEOUT,
default: Any = COMMUNICATION_DROPPED,
):
"""
:param contents:
:param timeout:
:return:
The returned message if everything goes ok.
`default` if the communication dropped in the meanwhile and timeout was None.
:raises:
TimeoutError if the timeout was given and no answer was given at the available time
(including if the communication was dropped).
"""
if timeout is Sentinel.USE_DEFAULT_TIMEOUT:
timeout = self.DEFAULT_TIMEOUT
message_id = contents["id"]
message_matcher = self._reader_thread.obtain_id_message_matcher(message_id)
if message_matcher is None:
if timeout:
raise TimeoutError(
"Request timed-out (%s) - no message matcher: %s"
% (timeout, contents)
)
return default
if not self.write(contents):
if timeout:
raise TimeoutError(
"Request timed-out (%s) - no write: %s" % (timeout, contents)
)
return default
if not message_matcher.event.wait(timeout=timeout):
raise TimeoutError("Request timed-out (%s): %s" % (timeout, contents))
return message_matcher.msg
@implements(ILanguageServerClientBase.obtain_pattern_message_matcher)
def obtain_pattern_message_matcher(self, message_pattern):
return self._reader_thread.obtain_pattern_message_matcher(message_pattern)
@implements(ILanguageServerClientBase.obtain_id_message_matcher)
def obtain_id_message_matcher(self, message_id):
return self._reader_thread.obtain_id_message_matcher(message_id)
@implements(ILanguageServerClientBase.write)
def write(self, contents):
return self.writer.write(contents)
@implements(ILanguageServerClientBase.shutdown)
def shutdown(self):
self.write({"jsonrpc": "2.0", "id": self.next_id(), "method": "shutdown"})
@implements(ILanguageServerClientBase.exit)
def exit(self):
self.write({"jsonrpc": "2.0", "id": self.next_id(), "method": "exit"})
|
py | b415f8911ff14da18af621c103440493a6703472 | import colour
import matplotlib.pyplot as plt
import numpy as np
COLOUR_STYLE = colour.plotting.colour_style()
COLOUR_STYLE.update(
{
"figure.figsize": (11, 11),
"legend.framealpha": colour.plotting.COLOUR_STYLE_CONSTANTS.opacity.low,
}
)
plt.style.use(COLOUR_STYLE)
plt.style.use("dark_background")
colour.utilities.describe_environment()
colour.utilities.filter_warnings(*[True] * 4)
def colour_wheel(samples=1024, clip_circle=True, method="Colour"):
xx, yy = np.meshgrid(
np.linspace(-1, 1, samples), np.linspace(-1, 1, samples)
)
S = np.sqrt(xx**2 + yy**2)
H = (np.arctan2(xx, yy) + np.pi) / (np.pi * 2)
HSV = colour.utilities.tstack([H, S, np.ones(H.shape)])
RGB = colour.HSV_to_RGB(HSV)
if clip_circle:
RGB[S > 1] = 0
A = np.where(S > 1, 0, 1)
else:
A = np.ones(S.shape)
if method.lower() == "matplotlib":
RGB = colour.utilities.orient(RGB, "90 CW")
elif method.lower() == "nuke":
RGB = colour.utilities.orient(RGB, "Flip")
RGB = colour.utilities.orient(RGB, "90 CW")
R, G, B = colour.utilities.tsplit(RGB)
return colour.utilities.tstack([R, G, B, A])
COLOUR_WHEEL = colour_wheel(method="Nuke")
colour.plotting.plot_image(COLOUR_WHEEL)
|
py | b415f8d741530f5fa111e83a29b4884276206e18 | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.custom_data import CustomData
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
class CustomdataApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(CustomdataApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def get(self, output_id, **kwargs):
# type: (string_types, dict) -> CustomData
"""Azure Output Custom Data
:param output_id: Id of the output
:type output_id: string_types, required
:return: Azure output custom data
:rtype: CustomData
"""
return self.api_client.get(
'/encoding/outputs/azure/{output_id}/customData',
path_params={'output_id': output_id},
type=CustomData,
**kwargs
)
|
py | b415f9427d4a39b3b8ead537651b81aa0b4ea330 | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from dace.transformation import transformation
from dace import memlet
from dace.sdfg import nodes, utils, graph as gr
from dace.sdfg import SDFGState
from dace.sdfg.propagation import propagate_memlet
class InMergeArrays(transformation.SingleStateTransformation, transformation.SimplifyPass):
""" Merge duplicate arrays connected to the same scope entry. """
array1 = transformation.PatternNode(nodes.AccessNode)
array2 = transformation.PatternNode(nodes.AccessNode)
map_entry = transformation.PatternNode(nodes.EntryNode)
@classmethod
def expressions(cls):
# Matching
# o o
# | |
# /======\
g = gr.OrderedMultiDiConnectorGraph()
g.add_node(cls.array1)
g.add_node(cls.array2)
g.add_node(cls.map_entry)
g.add_edge(cls.array1, None, cls.map_entry, None, memlet.Memlet())
g.add_edge(cls.array2, None, cls.map_entry, None, memlet.Memlet())
return [g]
def can_be_applied(self, graph: SDFGState, expr_index, sdfg, permissive=False):
# Ensure both arrays contain the same data
arr1 = self.array1
arr2 = self.array2
if arr1.data != arr2.data:
return False
# Ensure only arr1's node ID contains incoming edges
if graph.in_degree(arr2) > 0:
return False
# Ensure arr1 and arr2's node IDs are ordered (avoid duplicates)
arr1_id = graph.node_id(self.array1)
arr2_id = graph.node_id(self.array2)
if (graph.in_degree(arr1) == 0 and graph.in_degree(arr2) == 0 and arr1_id >= arr2_id):
return False
map = self.map_entry
# If array's connector leads directly to map, skip it
if all(e.dst_conn and not e.dst_conn.startswith('IN_') for e in graph.edges_between(arr1, map)):
return False
if all(e.dst_conn and not e.dst_conn.startswith('IN_') for e in graph.edges_between(arr2, map)):
return False
if (any(e.dst != map for e in graph.out_edges(arr1)) or any(e.dst != map for e in graph.out_edges(arr2))):
return False
# Ensure arr1 and arr2 are the first two incoming nodes (avoid further
# duplicates)
all_source_nodes = set(
graph.node_id(e.src) for e in graph.in_edges(map) if e.src != arr1 and e.src != arr2
and e.src.data == arr1.data and e.dst_conn and e.dst_conn.startswith('IN_') and graph.in_degree(e.src) == 0)
if any(nid < arr1_id or nid < arr2_id for nid in all_source_nodes):
return False
return True
def match_to_str(self, graph):
arr = self.array1
map = self.map_entry
nid1, nid2 = graph.node_id(self.array1), graph.node_id(self.array2)
return '%s (%d, %d) -> %s' % (arr.data, nid1, nid2, map.label)
def apply(self, graph, sdfg):
array = self.array1
map = self.map_entry
map_edge = next(e for e in graph.out_edges(array) if e.dst == map)
result_connector = map_edge.dst_conn[3:]
# Find all other incoming access nodes without incoming edges
source_edges = [
e for e in graph.in_edges(map) if isinstance(e.src, nodes.AccessNode) and e.src.data == array.data
and e.src != array and e.dst_conn and e.dst_conn.startswith('IN_') and graph.in_degree(e.src) == 0
]
# Modify connectors to point to first array
connectors_to_remove = set()
for e in source_edges:
connector = e.dst_conn[3:]
connectors_to_remove.add(connector)
for inner_edge in graph.out_edges(map):
if inner_edge.src_conn[4:] == connector:
inner_edge._src_conn = 'OUT_' + result_connector
# Remove other nodes from state
graph.remove_nodes_from(set(e.src for e in source_edges))
# Remove connectors from scope entry
for c in connectors_to_remove:
map.remove_in_connector('IN_' + c)
map.remove_out_connector('OUT_' + c)
# Re-propagate memlets
edge_to_propagate = next(e for e in graph.out_edges(map) if e.src_conn[4:] == result_connector)
map_edge._data = propagate_memlet(dfg_state=graph,
memlet=edge_to_propagate.data,
scope_node=map,
union_inner_edges=True)
class OutMergeArrays(transformation.SingleStateTransformation, transformation.SimplifyPass):
""" Merge duplicate arrays connected to the same scope entry. """
array1 = transformation.PatternNode(nodes.AccessNode)
array2 = transformation.PatternNode(nodes.AccessNode)
map_exit = transformation.PatternNode(nodes.ExitNode)
@classmethod
def expressions(cls):
# Matching
# \======/
# | |
# o o
g = gr.OrderedMultiDiConnectorGraph()
g.add_node(cls.array1)
g.add_node(cls.array2)
g.add_node(cls.map_exit)
g.add_edge(cls.map_exit, None, cls.array1, None, memlet.Memlet())
g.add_edge(cls.map_exit, None, cls.array2, None, memlet.Memlet())
return [g]
def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
arr1_id = self.subgraph[OutMergeArrays.array1]
arr2_id = self.subgraph[OutMergeArrays.array2]
# Ensure both arrays contain the same data
arr1 = self.array1
arr2 = self.array2
if arr1.data != arr2.data:
return False
# Ensure only arr1's node ID contains outgoing edges
if graph.out_degree(arr2) > 0:
return False
# Ensure arr1 and arr2's node IDs are ordered (avoid duplicates)
if (graph.out_degree(arr1) == 0 and graph.out_degree(arr2) == 0 and arr1_id >= arr2_id):
return False
map = self.map_exit
if (any(e.src != map for e in graph.in_edges(arr1)) or any(e.src != map for e in graph.in_edges(arr2))):
return False
# Ensure arr1 and arr2 are the first two sink nodes (avoid further
# duplicates)
all_sink_nodes = set(
graph.node_id(e.dst) for e in graph.out_edges(map)
if e.dst != arr1 and e.dst != arr2 and e.dst.data == arr1.data and e.src_conn
and e.src_conn.startswith('OUT_') and graph.out_degree(e.dst) == 0)
if any(nid < arr1_id or nid < arr2_id for nid in all_sink_nodes):
return False
return True
def match_to_str(self, graph):
arr = self.array1
map = self.map_exit
nid1, nid2 = graph.node_id(self.array1), graph.node_id(self.array2)
return '%s (%d, %d) -> %s' % (arr.data, nid1, nid2, map.label)
def apply(self, graph, sdfg):
array = self.array1
map = self.map_exit
map_edge = next(e for e in graph.in_edges(array) if e.src == map)
result_connector = map_edge.src_conn[4:]
# Find all other outgoing access nodes without outgoing edges
dst_edges = [
e for e in graph.out_edges(map) if isinstance(e.dst, nodes.AccessNode) and e.dst.data == array.data
and e.dst != array and e.src_conn and e.src_conn.startswith('OUT_') and graph.out_degree(e.dst) == 0
]
# Modify connectors to point to first array
connectors_to_remove = set()
for e in dst_edges:
connector = e.src_conn[4:]
connectors_to_remove.add(connector)
for inner_edge in graph.in_edges(map):
if inner_edge.dst_conn[3:] == connector:
inner_edge.dst_conn = 'IN_' + result_connector
# Remove other nodes from state
graph.remove_nodes_from(set(e.dst for e in dst_edges))
# Remove connectors from scope entry
for c in connectors_to_remove:
map.remove_in_connector('IN_' + c)
map.remove_out_connector('OUT_' + c)
# Re-propagate memlets
edge_to_propagate = next(e for e in graph.in_edges(map) if e.dst_conn[3:] == result_connector)
map_edge._data = propagate_memlet(dfg_state=graph,
memlet=edge_to_propagate.data,
scope_node=map,
union_inner_edges=True)
class MergeSourceSinkArrays(transformation.SingleStateTransformation, transformation.SimplifyPass):
""" Merge duplicate arrays that are source/sink nodes. """
array1 = transformation.PatternNode(nodes.AccessNode)
@classmethod
def expressions(cls):
# Matching
# o o
return [utils.node_path_graph(cls.array1)]
def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
arr1_id = self.subgraph[MergeSourceSinkArrays.array1]
arr1 = self.array1
# Ensure array is either a source or sink node
src_nodes = graph.source_nodes()
sink_nodes = graph.sink_nodes()
if arr1 in src_nodes:
nodes_to_consider = src_nodes
elif arr1 in sink_nodes:
nodes_to_consider = sink_nodes
else:
return False
# Ensure there are more nodes with the same data
other_nodes = [
graph.node_id(n) for n in nodes_to_consider
if isinstance(n, nodes.AccessNode) and n.data == arr1.data and n != arr1
]
if len(other_nodes) == 0:
return False
# Ensure arr1 is the first node to avoid further duplicates
nid = min(other_nodes)
if nid < arr1_id:
return False
return True
def apply(self, graph, sdfg):
array = self.array1
if array in graph.source_nodes():
src_node = True
nodes_to_consider = graph.source_nodes()
edges_to_consider = lambda n: graph.out_edges(n)
else:
src_node = False
nodes_to_consider = graph.sink_nodes()
edges_to_consider = lambda n: graph.in_edges(n)
for node in nodes_to_consider:
if node == array:
continue
if not isinstance(node, nodes.AccessNode):
continue
if node.data != array.data:
continue
for edge in list(edges_to_consider(node)):
if src_node:
graph.add_edge(array, edge.src_conn, edge.dst, edge.dst_conn, edge.data)
else:
graph.add_edge(edge.src, edge.src_conn, array, edge.dst_conn, edge.data)
graph.remove_edge(edge)
graph.remove_node(node)
|
py | b415fa9fcd55a58a8d5973ae279eb590c96d6abc | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/services/account_budget_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.resources import account_budget_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_account__budget__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/services/account_budget_service.proto',
package='google.ads.googleads.v6.services',
syntax='proto3',
serialized_options=b'\n$com.google.ads.googleads.v6.servicesB\031AccountBudgetServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v6/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V6.Services\312\002 Google\\Ads\\GoogleAds\\V6\\Services\352\002$Google::Ads::GoogleAds::V6::Services',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n=google/ads/googleads/v6/services/account_budget_service.proto\x12 google.ads.googleads.v6.services\x1a\x36google/ads/googleads/v6/resources/account_budget.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\"`\n\x17GetAccountBudgetRequest\x12\x45\n\rresource_name\x18\x01 \x01(\tB.\xe0\x41\x02\xfa\x41(\n&googleads.googleapis.com/AccountBudget2\xa9\x02\n\x14\x41\x63\x63ountBudgetService\x12\xc9\x01\n\x10GetAccountBudget\x12\x39.google.ads.googleads.v6.services.GetAccountBudgetRequest\x1a\x30.google.ads.googleads.v6.resources.AccountBudget\"H\x82\xd3\xe4\x93\x02\x32\x12\x30/v6/{resource_name=customers/*/accountBudgets/*}\xda\x41\rresource_name\x1a\x45\xca\x41\x18googleads.googleapis.com\xd2\x41\'https://www.googleapis.com/auth/adwordsB\x80\x02\n$com.google.ads.googleads.v6.servicesB\x19\x41\x63\x63ountBudgetServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v6/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V6.Services\xca\x02 Google\\Ads\\GoogleAds\\V6\\Services\xea\x02$Google::Ads::GoogleAds::V6::Servicesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_account__budget__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,])
_GETACCOUNTBUDGETREQUEST = _descriptor.Descriptor(
name='GetAccountBudgetRequest',
full_name='google.ads.googleads.v6.services.GetAccountBudgetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.services.GetAccountBudgetRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002\372A(\n&googleads.googleapis.com/AccountBudget', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=270,
serialized_end=366,
)
DESCRIPTOR.message_types_by_name['GetAccountBudgetRequest'] = _GETACCOUNTBUDGETREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAccountBudgetRequest = _reflection.GeneratedProtocolMessageType('GetAccountBudgetRequest', (_message.Message,), {
'DESCRIPTOR' : _GETACCOUNTBUDGETREQUEST,
'__module__' : 'google.ads.googleads.v6.services.account_budget_service_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.services.GetAccountBudgetRequest)
})
_sym_db.RegisterMessage(GetAccountBudgetRequest)
DESCRIPTOR._options = None
_GETACCOUNTBUDGETREQUEST.fields_by_name['resource_name']._options = None
_ACCOUNTBUDGETSERVICE = _descriptor.ServiceDescriptor(
name='AccountBudgetService',
full_name='google.ads.googleads.v6.services.AccountBudgetService',
file=DESCRIPTOR,
index=0,
serialized_options=b'\312A\030googleads.googleapis.com\322A\'https://www.googleapis.com/auth/adwords',
create_key=_descriptor._internal_create_key,
serialized_start=369,
serialized_end=666,
methods=[
_descriptor.MethodDescriptor(
name='GetAccountBudget',
full_name='google.ads.googleads.v6.services.AccountBudgetService.GetAccountBudget',
index=0,
containing_service=None,
input_type=_GETACCOUNTBUDGETREQUEST,
output_type=google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_account__budget__pb2._ACCOUNTBUDGET,
serialized_options=b'\202\323\344\223\0022\0220/v6/{resource_name=customers/*/accountBudgets/*}\332A\rresource_name',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_ACCOUNTBUDGETSERVICE)
DESCRIPTOR.services_by_name['AccountBudgetService'] = _ACCOUNTBUDGETSERVICE
# @@protoc_insertion_point(module_scope)
|
py | b415fadbabcdb433cd8d790013ec332d89baebbe | import uuid
from djoser.views import UserView, UserDeleteView
from djoser import serializers
from rest_framework import views, permissions, status, generics, filters
from rest_framework.response import Response
from . import models
from . import serializers
from .serializers import IdeaSerializer
from rest_framework.decorators import api_view
class IdeaView(generics.ListCreateAPIView):
"""Use this endpoint to add ideas in the backend."""
def get_queryset(self):
queryset = models.Idea.objects.all()
idea_id = self.request.query_params.get('id', None)
idea_cursor = self.request.query_params.get('idea_cursor', None)
if idea_id is None:
if idea_cursor is None:
return queryset
else:
return queryset[int(idea_cursor):int(idea_cursor)+5]
else:
return queryset.filter(id=idea_id)
permission_classes = [permissions.AllowAny]
serializer_class = serializers.IdeaSerializer
class UpvotesView(views.APIView):
permission_classes = [permissions.AllowAny]
def put(self, request, idea_id):
idea = models.Idea.objects.get(pk = idea_id)
user = models.User.objects.get(pk = request.user.id)
user_upvoted_ideas = user.upvoted_ideas.get_or_create(user=request.user.id)[0]
upvotes = idea.upvotes
if str(idea_id) in user_upvoted_ideas.idea_list:
upvotes -= 1
user_upvoted_ideas.idea_list.remove(str(idea_id))
user.upvoted_ideas.update(idea_list=user_upvoted_ideas.idea_list)
else:
upvotes += 1
user_upvoted_ideas.idea_list.append(str(idea_id))
user.upvoted_ideas.update(idea_list=user_upvoted_ideas.idea_list)
serializer = IdeaSerializer(idea, data = {'upvotes': upvotes}, partial = True)
if serializer.is_valid():
serializer.save()
response_dict = serializer.data.copy()
response_dict['user'] = False
if str(idea_id) in user_upvoted_ideas.idea_list:
response_dict['user'] = True
return Response(response_dict, status.HTTP_201_CREATED)
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class UserUpvotedIdeasView(generics.ListCreateAPIView):
"""Use this endpoint to fetch upvoted ideas from the backend."""
def get_queryset(self):
queryset = models.Upvoted_ideas.objects.all()
return queryset
model = models.Upvoted_ideas
permission_classes = [permissions.AllowAny]
serializer_class = serializers.UserUpvotedIdeasSerializer
class UserPinnedIdeasView(generics.ListCreateAPIView):
"""Use this endpoint to fetch upvoted ideas from the backend."""
def get_queryset(self):
queryset = models.Pinned_ideas.objects.all()
return queryset
model = models.Pinned_ideas
permission_classes = [permissions.AllowAny]
serializer_class = serializers.UserUpvotedIdeasSerializer
|
py | b415fb07102cee6c5e80b94d081125434cc603f1 | from .base import * # noqa: F403, F401
|
py | b415fb519df8e052ec074b515e9ea4b004e8210e | """photoshare URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('photos.urls')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
py | b415fb592de460ddae9b0f6346c9657e4de48843 | import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='ticklen',
parent_name='scatterpolargl.marker.colorbar',
**kwargs
):
super(TicklenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
min=kwargs.pop('min', 0),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
py | b415fb8ee2dd25393984265361d9c3f06347d5ac | # Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
from django.utils.translation import gettext as _
from aether.sdk.health.utils import (
check_external_app,
get_external_app_url,
get_external_app_auth_header,
)
from aether.sdk.multitenancy.utils import add_instance_realm_in_headers
from aether.sdk.utils import request
# list of messages that can be translated
MSG_KERNEL_CONNECTION_ERR = _(
'Connection with Aether Kernel server is not possible.'
)
MSG_KERNEL_RESPONSE_ERR = _(
'Unexpected response from Aether Kernel server '
'while trying to create/update the project artefacts "{project_id}".\n'
'Response: {content}'
)
NAMESPACE = 'org.ehealthafrica.aether.odk.xforms'
EXTERNAL_APP_KERNEL = 'aether-kernel'
class KernelSubmissionError(Exception):
pass
class KernelPropagationError(Exception):
pass
def check_kernel_connection():
return check_external_app(EXTERNAL_APP_KERNEL)
def get_kernel_url():
return get_external_app_url(EXTERNAL_APP_KERNEL)
def get_kernel_auth_header():
return get_external_app_auth_header(EXTERNAL_APP_KERNEL)
def get_submissions_url(submission_id=None):
'''
Returns Aether Kernel url for submissions
'''
return __get_type_url('submissions', submission_id)
def get_attachments_url(attachment_id=None):
'''
Returns Aether Kernel url for submission attachments
'''
return __get_type_url('attachments', attachment_id)
def propagate_kernel_project(project, family=None):
'''
Creates a copy of the indicated project in Aether Kernel
and creates/updates its linked artefacts based on the given AVRO schemas.
One AVRO schema should create/update in Kernel:
- one Project,
- one Mapping,
- one Schema and
- one SchemaDecorator.
'''
artefacts = {
'name': project.name,
'family': family,
'avro_schemas': [],
}
for xform in project.xforms.order_by('-modified_at'):
artefacts['avro_schemas'].append(__parse_xform(xform))
__upsert_kernel_artefacts(project, artefacts)
# indicates that the project and its artefacts are in Kernel
return True
def propagate_kernel_artefacts(xform, family=None):
'''
Creates/updates artefacts based on the indicated xForm in Aether Kernel.
'''
artefacts = {
'name': xform.project.name,
'family': family,
'avro_schemas': [__parse_xform(xform)],
}
__upsert_kernel_artefacts(xform.project, artefacts)
# indicates that the xform linked artefacts are in Kernel
return True
def __upsert_kernel_artefacts(project, artefacts={}):
'''
This method pushes the project artefacts to Aether Kernel.
'''
project_id = str(project.project_id)
auth_header = get_kernel_auth_header()
if not auth_header:
raise KernelPropagationError(MSG_KERNEL_CONNECTION_ERR)
headers = add_instance_realm_in_headers(project, auth_header)
kernel_url = get_kernel_url()
url = f'{kernel_url}/projects/{project_id}/avro-schemas/'
response = request(method='patch', url=url, json=artefacts, headers=headers)
if response.status_code != 200:
content = response.content.decode('utf-8')
raise KernelPropagationError(
MSG_KERNEL_RESPONSE_ERR.format(project_id=project_id, content=content)
)
return True
def __parse_xform(xform):
definition = copy.deepcopy(xform.avro_schema)
# assign namespace based on project name
definition['namespace'] = f'{NAMESPACE}.{__clean_name(xform.project.name)}'
return {
'id': str(xform.kernel_id),
'definition': definition,
}
def __clean_name(value):
'''
Replaces any non alphanumeric character with spaces
Converts to title case
Removes spaces
'''
return ''.join([c if c.isalnum() else ' ' for c in value]).title().replace(' ', '')
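# Worked example (illustrative input): __clean_name('aether kernel-2') -> 'AetherKernel2'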
def __get_type_url(model_type, id=None):
'''
Returns Aether Kernel url for type "XXX"
'''
kernel_url = get_kernel_url()
if not id:
return f'{kernel_url}/{model_type}/'
else:
return f'{kernel_url}/{model_type}/{id}/'
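# Usage sketch (objects and ids here are illustrative, not from this codebase):
# get_submissions_url('abc') -> '<kernel-url>/submissions/abc/' and
# propagate_kernel_project(project, family='odk') PATCHes the project's AVRO
# schemas to '<kernel-url>/projects/<project_id>/avro-schemas/'.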
|
py | b415fbac949df7cbcd1461dbb01ff0eebae59058 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyWarlock(PythonPackage):
"""Self-validating Python objects using JSON schema"""
homepage = "https://github.com/bcwaldon/warlock"
url = "https://github.com/bcwaldon/warlock/archive/1.3.3.tar.gz"
version('1.3.3', sha256='b77e4977d5dc54d47f88cbcc9ab2d716f5f10171d123138785dad96aeb2858d0')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:3', type=('build', 'run'))
depends_on('[email protected]:1', type=('build', 'run'))
depends_on('py-six@1:', type=('build', 'run'))
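# Installation sketch (assuming the conventional py-warlock package name):
#   spack install py-warlock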
|
py | b415fc06344766de78978b0bdd787235479ec5da | from flask import Blueprint, render_template
errors = Blueprint('errors', __name__)
# There's also a method called errorhandler(), but we aren't using it because it would only apply to this blueprint.
# Our goal is to enable the error handlers throughout the whole application.
@errors.app_errorhandler(404)
def error_404(error):
return render_template('errors/404.html'), 404 #404 is specified to give the correct error code response.
@errors.app_errorhandler(403)
def error_403(error):
return render_template('errors/403.html'), 403 # 403 is specified to give the correct error code response.
@errors.app_errorhandler(500)
def error_500(error):
return render_template('errors/500.html'), 500 # 500 is specified to give the correct error code response.
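# Registration sketch (the import path below is illustrative, not taken from this repo):
#   from yourapp.errors.handlers import errors
#   app.register_blueprint(errors)
# Because app_errorhandler() is used above, the handlers then apply application-wide.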
|
py | b415fc11ee43f8d8a841dac510edb1ed9852a483 | # Generated by Django 3.2.8 on 2021-10-25 18:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('catalog', '0002_auto_20211020_1301'),
]
operations = [
migrations.AddField(
model_name='bookinstance',
name='borrower',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='author',
name='date_of_death',
field=models.DateField(blank=True, default='', null=True, verbose_name='Year of Death'),
),
migrations.AlterField(
model_name='book',
name='genre',
field=models.ManyToManyField(help_text='Select a genre for this book.', to='catalog.Genre'),
),
migrations.AlterField(
model_name='language',
name='name_en',
field=models.CharField(help_text='Name of the language in English:', max_length=20),
),
migrations.AlterField(
model_name='language',
name='name_native',
field=models.CharField(help_text='Native name of the language:', max_length=20),
),
]
|
py | b415fe7f28e22bce5a20f1d857025fc031b7f1bf | """
This script is used to quickly evaluate a baseline for bottleneck0.
Baseline is no AVs; all vehicles are human-driven.
Bottleneck in which the actions specify a desired velocity in a segment
of space. (The AV_FRAC constant of 10% below is unused in this baseline.)
Action Dimension: (?, )
Observation Dimension: (?, )
Horizon: 1000 steps
"""
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \
InFlows
from flow.core.traffic_lights import TrafficLights
from flow.core.vehicles import Vehicles
from flow.controllers import ContinuousRouter
from flow.envs.bottleneck_env import DesiredVelocityEnv
from flow.core.experiment import SumoExperiment
from flow.scenarios.bottleneck.scenario import BottleneckScenario
from flow.scenarios.bottleneck.gen import BottleneckGenerator
import numpy as np
# time horizon of a single rollout
HORIZON = 1000
SCALING = 1
NUM_LANES = 4 * SCALING # number of lanes in the widest highway
DISABLE_TB = True
DISABLE_RAMP_METER = True
AV_FRAC = 0.10
vehicles = Vehicles()
vehicles.add(veh_id="human",
speed_mode=9,
routing_controller=(ContinuousRouter, {}),
lane_change_mode=0,
num_vehicles=1 * SCALING)
controlled_segments = [("1", 1, False), ("2", 2, True), ("3", 2, True),
("4", 2, True), ("5", 1, False)]
num_observed_segments = [("1", 1), ("2", 3), ("3", 3),
("4", 3), ("5", 1)]
additional_env_params = {
"target_velocity": 40,
"disable_tb": True,
"disable_ramp_metering": True,
"controlled_segments": controlled_segments,
"symmetric": False,
"observed_segments": num_observed_segments,
"reset_inflow": False,
"lane_change_duration": 5,
"max_accel": 3,
"max_decel": 3,
"inflow_range": [1000, 2000]
}
# flow rate
flow_rate = 1900 * SCALING
# percentage of flow coming out of each lane
inflow = InFlows()
inflow.add(veh_type="human", edge="1",
vehs_per_hour=flow_rate,
departLane="random", departSpeed=10)
traffic_lights = TrafficLights()
if not DISABLE_TB:
traffic_lights.add(node_id="2")
if not DISABLE_RAMP_METER:
traffic_lights.add(node_id="3")
additional_net_params = {"scaling": SCALING}
net_params = NetParams(in_flows=inflow,
no_internal_links=False,
additional_params=additional_net_params)
sumo_params = SumoParams(
sim_step=0.5,
sumo_binary="sumo-gui",
print_warnings=False,
restart_instance=False,
)
env_params = EnvParams(
evaluate=True, # Set to True to evaluate traffic metrics
warmup_steps=40,
sims_per_step=1,
horizon=HORIZON,
additional_params=additional_env_params,
)
initial_config = InitialConfig(
spacing="uniform",
min_gap=5,
lanes_distribution=float("inf"),
edges_distribution=["2", "3", "4", "5"],
)
scenario = BottleneckScenario(name="bay_bridge_toll",
generator_class=BottleneckGenerator,
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config,
traffic_lights=traffic_lights)
env = DesiredVelocityEnv(env_params, sumo_params, scenario)
exp = SumoExperiment(env, scenario)
num_runs = 2
results = exp.run(num_runs, HORIZON)
avg_outflow = np.mean([outflow[-1] for outflow in results["per_step_returns"]])
print('The average outflow over 500 seconds '
'across {} runs is {}'.format(num_runs, avg_outflow))
|
py | b415ffd51e60fc7fcf03adc2793a99571c12899b | import os
import argparse
import io
from fairseq.models.onemodel import OneModel
from sklearn.metrics import (
roc_curve,
auc,
average_precision_score,
accuracy_score,
mean_absolute_error,
mean_squared_error,
)
import torch
import torch.nn.functional as F
import math
from math import floor
from collections import Counter
def ret_rmse(targets, preds):
return math.sqrt(mean_squared_error(targets, preds))
def main(dataset, cktpath, subset):
roberta = OneModel.from_pretrained(
os.path.dirname(cktpath),
checkpoint_file=os.path.basename(cktpath),
data_name_or_path=dataset,
)
roberta.cuda()
roberta.eval()
roberta.load_data(subset)
output, target = roberta.inference(split=subset, classification_head_name="molecule_head")
if not isinstance(target, torch.cuda.FloatTensor):
output = F.softmax(output, dim=-1)
labels = target.tolist()
y_pred = output.argmax(dim=-1).tolist()
scores = output[:, 1].tolist()
fpr, tpr, thresholds = roc_curve(labels, scores)
auroc = auc(fpr, tpr)
auprc = average_precision_score(labels, scores)
acc = accuracy_score(labels, y_pred, normalize=True)
print("dataset: {} auroc: {} auprc: {} acc: {}".format(dataset, auroc, auprc, acc))
scores = [floor(x + 0.5) for x in scores]
scores_counter = Counter(scores)
print(scores_counter)
else:
y_pred = output.view(-1).tolist()
labels = target.tolist()
rmse = ret_rmse(labels, y_pred)
mae = mean_absolute_error(labels, y_pred)
print("dataset: {} rmse: {} mae: {}".format(dataset, rmse, mae))
print(max(labels), min(labels), max(y_pred), min(y_pred))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("dataset", type=str)
parser.add_argument("cktpath", type=str)
parser.add_argument("--subset", type=str, default="test")
args = parser.parse_args()
dataset = args.dataset
cktpath = args.cktpath
subset = args.subset
assert os.path.exists(cktpath)
main(dataset, cktpath, subset)
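# Usage sketch (paths are illustrative):
#   python this_script.py /path/to/data checkpoints/checkpoint_best.pt --subset test
# `dataset` and `cktpath` are positional arguments; --subset defaults to "test".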
|
py | b415fff458a356db2ddd6114ede38d873ca59d38 | """
This script saves a numpy ndarray (a detached torch tensor)
of single jet events (i.e. not jagged arrays)
back to a ROOT TTree, without ROOT or Athena.
TODO: Metadata? Compression types?
"""
import uproot
import numpy
#Specifies the 27D dataset. The available 'columns' can be read with ttree.keys()
prefix = 'HLT_xAOD__JetContainer_TrigHLTJetDSSelectorCollectionAuxDyn'
branches27D = [
# 4-momentum
(prefix + '.pt',numpy.float64),
(prefix + '.eta',numpy.float64),
(prefix + '.phi',numpy.float64),
(prefix + '.m',numpy.float64),
# Energy deposition in each calorimeter layer
# prefix + '.EnergyPerSampling',
# Area of jet,used for pile-up suppression (4-vector)
(prefix + '.ActiveArea',numpy.int32),
(prefix + '.ActiveArea4vec_eta',numpy.float64),
(prefix + '.ActiveArea4vec_m',numpy.float64),
(prefix + '.ActiveArea4vec_phi',numpy.float64),
(prefix + '.ActiveArea4vec_pt',numpy.float64),
# prefix + '.JetGhostArea',
# Variables related to quality of jet
(prefix + '.AverageLArQF',numpy.float64),
# prefix + '.BchCorrCell',
(prefix + '.NegativeE',numpy.float64),
(prefix + '.HECQuality',numpy.float64),
(prefix + '.LArQuality',numpy.float64),
# Shape and position, most energetic cluster
(prefix + '.Width',numpy.float64),
(prefix + '.WidthPhi',numpy.float64),
(prefix + '.CentroidR',numpy.float64),
(prefix + '.DetectorEta',numpy.float64),
(prefix + '.LeadingClusterCenterLambda',numpy.float64),
(prefix + '.LeadingClusterPt',numpy.float64),
(prefix + '.LeadingClusterSecondLambda',numpy.float64),
(prefix + '.LeadingClusterSecondR',numpy.float64),
(prefix + '.N90Constituents',numpy.int32),
# Energy released in each calorimeter
(prefix + '.EMFrac',numpy.float64),
(prefix + '.HECFrac',numpy.float64),
# Variables related to the time of arrival of a jet
(prefix + '.Timing',numpy.float64),
(prefix + '.OotFracClusters10',numpy.float64),
(prefix + '.OotFracClusters5',numpy.float64),
]
def ndarray_to_DxAOD(filename, array, branches=branches27D, compression=uproot.ZLIB):
f = uproot.recreate(filename)
branchdict = dict(branches)
print(branchdict)
f["CollectionTree"] = uproot.newtree(branchdict)
#for i,branch in enumerate(branches):
# data = array[:,i]
# print(branch[0])
f["CollectionTree"].extend(dict([(branch[0],array[:,i]) for (i,branch) in enumerate(branches)]))
|
py | b41600d29fc3ac258bb08760aa443c36875858ce | # Generated by Django 3.0.4 on 2020-07-24 13:08
from django.db import migrations
import modules.core.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
import wagtailnhsukfrontend.blocks
class Migration(migrations.Migration):
dependencies = [
("home", "0013_auto_20200713_1538"),
]
operations = [
migrations.AlterField(
model_name="homepage",
name="body",
field=wagtail.core.fields.StreamField(
[
("rich_text", wagtail.core.blocks.RichTextBlock(group=" Content")),
(
"block_quote",
wagtail.core.blocks.BlockQuoteBlock(group=" Content"),
),
("embed", modules.core.blocks.EmbedBlock(group=" Content")),
(
"captioned_embed",
wagtail.core.blocks.StructBlock(
[
("embed", modules.core.blocks.EmbedBlock()),
(
"title",
wagtail.core.blocks.CharBlock(required=False),
),
(
"sub_title",
wagtail.core.blocks.CharBlock(required=False),
),
],
group=" Content",
),
),
(
"latest_blog_posts",
wagtail.core.blocks.StructBlock(
[
(
"heading",
wagtail.core.blocks.CharBlock(required=True),
),
(
"number_of_posts",
wagtail.core.blocks.ChoiceBlock(
choices=[(1, "One"), (2, "Two"), (3, "Three")]
),
),
(
"tag_id",
wagtail.core.blocks.ChoiceBlock(
choices=modules.core.blocks.get_tag_list
),
),
],
group=" Content",
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
required=True
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
help_text="Only leave this blank if the image is decorative.",
required=False,
),
),
(
"caption",
wagtail.core.blocks.CharBlock(required=False),
),
],
group=" NHS Components",
),
),
(
"panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(required=False),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(required=True),
),
],
group=" NHS Components",
),
),
(
"promo",
wagtail.core.blocks.StructBlock(
[
(
"link_page",
wagtail.core.blocks.PageChooserBlock(
label="Page", required=False
),
),
(
"url",
wagtail.core.blocks.URLBlock(
label="URL", required=False
),
),
(
"heading",
wagtail.core.blocks.CharBlock(required=True),
),
(
"description",
wagtail.core.blocks.CharBlock(required=False),
),
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
label="Image", required=False
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(required=False),
),
(
"size",
wagtail.core.blocks.ChoiceBlock(
choices=[("", "Default"), ("small", "Small")],
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
],
group=" NHS Components",
),
),
(
"expander",
wagtail.core.blocks.StructBlock(
[
("title", wagtail.core.blocks.CharBlock(required=True)),
(
"body",
wagtail.core.blocks.StreamBlock(
[
(
"richtext",
wagtail.core.blocks.RichTextBlock(),
),
(
"action_link",
wagtail.core.blocks.StructBlock(
[
(
"text",
wagtail.core.blocks.CharBlock(
label="Link text",
required=True,
),
),
(
"external_url",
wagtail.core.blocks.URLBlock(
label="URL",
required=True,
),
),
(
"new_window",
wagtail.core.blocks.BooleanBlock(
label="Open in new window",
required=False,
),
),
]
),
),
(
"inset_text",
wagtail.core.blocks.StructBlock(
[
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
)
]
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
required=True
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
help_text="Only leave this blank if the image is decorative.",
required=False,
),
),
(
"caption",
wagtail.core.blocks.CharBlock(
required=False
),
),
]
),
),
(
"grey_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
label="heading",
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"warning_callout",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
default="Important",
required=True,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
required=True,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"summary_list",
wagtail.core.blocks.StructBlock(
[
(
"rows",
wagtail.core.blocks.ListBlock(
wagtailnhsukfrontend.blocks.SummaryListRowBlock
),
),
(
"no_border",
wagtail.core.blocks.BooleanBlock(
default=False,
required=False,
),
),
]
),
),
("table", modules.core.blocks.TableBlock()),
],
required=True,
),
),
],
group=" NHS Components",
),
),
(
"grey_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
label="heading", required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(required=True),
),
],
group=" NHS Components",
),
),
(
"inset_text",
wagtail.core.blocks.StructBlock(
[
(
"body",
wagtail.core.blocks.RichTextBlock(required=True),
)
],
group=" NHS Components",
),
),
(
"panel_list",
wagtail.core.blocks.StructBlock(
[
(
"panels",
wagtail.core.blocks.ListBlock(
wagtail.core.blocks.StructBlock(
[
(
"left_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"right_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
]
)
),
)
],
group=" NHS Components",
),
),
(
"promo_group",
wagtail.core.blocks.StructBlock(
[
(
"column",
wagtail.core.blocks.ChoiceBlock(
choices=[
("one-half", "One-half"),
("one-third", "One-third"),
]
),
),
(
"size",
wagtail.core.blocks.ChoiceBlock(
choices=[("", "Default"), ("small", "Small")],
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"promos",
wagtail.core.blocks.ListBlock(
modules.core.blocks.BasePromoBlock
),
),
],
group=" NHS Components",
),
),
(
"warning_callout",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
default="Important", required=True
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
required=True,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(required=True),
),
],
group=" NHS Components",
),
),
("table", modules.core.blocks.TableBlock(group=" NHS Components")),
(
"panel_table",
wagtail.core.blocks.StructBlock(
[
("title", wagtail.core.blocks.CharBlock()),
("table", modules.core.blocks.TableBlock()),
],
group=" NHS Components",
),
),
(
"action_link",
wagtail.core.blocks.StructBlock(
[
(
"text",
wagtail.core.blocks.CharBlock(
label="Link text", required=True
),
),
(
"external_url",
wagtail.core.blocks.URLBlock(
label="URL", required=True
),
),
(
"new_window",
wagtail.core.blocks.BooleanBlock(
label="Open in new window", required=False
),
),
],
group=" NHS Components",
),
),
],
blank=True,
verbose_name="Body blocks",
),
),
]
|
py | b4160225d7214e7669962dd7936295d5fc4e89e5 | import itertools
import json
import random
from pathlib import Path
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset as TorchDataset
from typing import Callable, List, Iterable, Tuple
from deepproblog.dataset import Dataset
from deepproblog.query import Query
from problog.logic import Term, list2term, Constant
_DATA_ROOT = Path(__file__).parent
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
)
datasets = {
"train": torchvision.datasets.MNIST(
root=str(_DATA_ROOT), train=True, download=True, transform=transform
),
"test": torchvision.datasets.MNIST(
root=str(_DATA_ROOT), train=False, download=True, transform=transform
),
}
def digits_to_number(digits: Iterable[int]) -> int:
number = 0
for d in digits:
number *= 10
number += d
return number
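# Worked example: digits_to_number([3, 8]) == 38 and digits_to_number([0, 7]) == 7.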
class MNIST_Images(object):
def __init__(self, subset):
self.subset = subset
def __getitem__(self, item):
return datasets[self.subset][int(item[0])][0]
MNIST_train = MNIST_Images("train")
MNIST_test = MNIST_Images("test")
class MNIST(Dataset):
def __len__(self):
return len(self.data)
def to_query(self, i):
l = Constant(self.data[i][1])
return Query(
Term("digit", Term("tensor", Term(self.dataset, Term("a"))), l),
substitution={Term("a"): Constant(i)},
)
def __init__(self, dataset):
self.dataset = dataset
self.data = datasets[dataset]
def addition(n: int, dataset: str, seed=None):
"""Returns a dataset for binary addition"""
return MNISTOperator(
dataset_name=dataset,
function_name="addition" if n == 1 else "multi_addition",
operator=sum,
size=n,
arity=2,
seed=seed,
)
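# Usage sketch: addition(1, "train") yields queries of the form
# addition(p0_0, p1_0, <sum>) with p0_0/p1_0 substituted by tensor(train(<index>))
# terms; with n > 1 the functor is multi_addition and each argument is a list of
# digit variables (see MNISTOperator.to_query below).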
class MNISTOperator(Dataset, TorchDataset):
def __getitem__(self, index: int) -> Tuple[list, list, int]:
l1, l2 = self.data[index]
label = self._get_label(index)
l1 = [self.dataset[x][0] for x in l1]
l2 = [self.dataset[x][0] for x in l2]
return l1, l2, label
def __init__(
self,
dataset_name: str,
function_name: str,
operator: Callable[[List[int]], int],
size=1,
arity=2,
seed=None,
):
"""Generic dataset for operator(img, img) style datasets.
:param dataset_name: Dataset to use (train, val, test)
:param function_name: Name of Problog function to query.
:param operator: Operator to generate correct examples
:param size: Size of numbers (number of digits)
:param arity: Number of arguments for the operator
:param seed: Seed for RNG
"""
super(MNISTOperator, self).__init__()
assert size >= 1
assert arity >= 1
self.dataset_name = dataset_name
self.dataset = datasets[self.dataset_name]
self.function_name = function_name
self.operator = operator
self.size = size
self.arity = arity
self.seed = seed
mnist_indices = list(range(len(self.dataset)))
if seed is not None:
rng = random.Random(seed)
rng.shuffle(mnist_indices)
dataset_iter = iter(mnist_indices)
# Build list of examples (mnist indices)
self.data = []
try:
while dataset_iter:
self.data.append(
[
[next(dataset_iter) for _ in range(self.size)]
for _ in range(self.arity)
]
)
except StopIteration:
pass
def to_file_repr(self, i):
"""Old file represenation dump. Not a very clear format as multi-digit arguments are not separated"""
return f"{tuple(itertools.chain(*self.data[i]))}\t{self._get_label(i)}"
def to_json(self):
"""
Convert to JSON, for easy comparisons with other systems.
Format is [EXAMPLE, ...]
EXAMPLE :- [ARGS, expected_result]
ARGS :- [MULTI_DIGIT_NUMBER, ...]
MULTI_DIGIT_NUMBER :- [mnist_img_id, ...]
"""
data = [(self.data[i], self._get_label(i)) for i in range(len(self))]
return json.dumps(data)
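# Illustrative entry (mnist indices are made up): [[[12], [40]], 9] would mean a
# 1-digit addition whose images are dataset items 12 and 40 and whose digits sum to 9.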
def to_query(self, i: int) -> Query:
"""Generate queries"""
mnist_indices = self.data[i]
expected_result = self._get_label(i)
# Build substitution dictionary for the arguments
subs = dict()
var_names = []
for i in range(self.arity):
inner_vars = []
for j in range(self.size):
t = Term(f"p{i}_{j}")
subs[t] = Term(
"tensor",
Term(
self.dataset_name,
Constant(mnist_indices[i][j]),
),
)
inner_vars.append(t)
var_names.append(inner_vars)
# Build query
if self.size == 1:
return Query(
Term(
self.function_name,
*(e[0] for e in var_names),
Constant(expected_result),
),
subs,
)
else:
return Query(
Term(
self.function_name,
*(list2term(e) for e in var_names),
Constant(expected_result),
),
subs,
)
def _get_label(self, i: int):
mnist_indices = self.data[i]
# Figure out what the ground truth is, first map each parameter to the value:
ground_truth = [
digits_to_number(self.dataset[j][1] for j in i) for i in mnist_indices
]
# Then compute the expected value:
expected_result = self.operator(ground_truth)
return expected_result
def __len__(self):
return len(self.data)
|
py | b41602c13410c315b8c45c55b5d4aa0953dfb1ee | ##########################################################################
#
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
## The VectorDataWidget provides a table view for the contents of
# one or more IECore VectorData instances.
class VectorDataWidget( GafferUI.Widget ) :
## data may either be a VectorData instance or a list of VectorData instances
# of identical length.
#
# header may be False for no header, True for a default header, or a list of
# strings to specify a custom header per column.
#
# minimumVisibleRows specifies a number of rows after which a vertical scroll bar
# may become visible - before this all rows should be directly visible with no need
# for scrolling.
#
# columnToolTips may be specified as a list of strings to provide a tooltip for
# each column.
#
# sizeEditable specifies whether or not items may be added and removed
# from the data (assuming it is editable).
#
# columnEditability may be specified as a list of booleans, providing per-column
# editability.
def __init__(
self,
data=None,
editable=True,
header=False,
showIndices=True,
minimumVisibleRows=8,
columnToolTips=None,
sizeEditable=True,
columnEditability=None,
**kw
) :
self.__column = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
GafferUI.Widget.__init__( self, self.__column, **kw )
# table view
self.__tableView = _TableView( minimumVisibleRows = minimumVisibleRows )
self.__tableView.horizontalHeader().setVisible( bool( header ) )
self.__tableView.horizontalHeader().setMinimumSectionSize( 70 )
self.__tableView.verticalHeader().setVisible( showIndices )
self.__tableView.verticalHeader().setResizeMode( QtGui.QHeaderView.Fixed )
self.__tableView.verticalHeader().setObjectName( "vectorDataWidgetVerticalHeader" )
self.__tableView.setHorizontalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
self.__tableView.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarAsNeeded )
self.__tableView.setSelectionBehavior( QtGui.QAbstractItemView.SelectItems )
self.__tableView.setCornerButtonEnabled( False )
self.__tableView.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.__tableView.customContextMenuRequested.connect( Gaffer.WeakMethod( self.__contextMenu ) )
self.__tableView.verticalHeader().setDefaultSectionSize( 20 )
self.__tableViewHolder = GafferUI.Widget( self.__tableView )
self.__column.append( self.__tableViewHolder )
# buttons
self.__buttonRow = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
addButton = GafferUI.Button( image="plus.png", hasFrame=False )
self.__addButtonConnection = addButton.clickedSignal().connect( Gaffer.WeakMethod( self.__addRows ) )
self.__buttonRow.append( addButton )
removeButton = GafferUI.Button( image="minus.png", hasFrame=False )
self.__removeButtonConnection = removeButton.clickedSignal().connect( Gaffer.WeakMethod( self.__removeSelection ) )
self.__buttonRow.append( removeButton )
self.__buttonRow.append( GafferUI.Spacer( size = IECore.V2i( 0 ), maximumSize = IECore.V2i( 100000, 1 ) ), expand=1 )
self.__column.append( self.__buttonRow )
# stuff for drag enter/leave and drop
self.__dragEnterConnections = [
self.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) ),
addButton.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) ),
removeButton.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) ),
]
self.__dragLeaveConnections = [
self.dragLeaveSignal().connect( Gaffer.WeakMethod( self.__dragLeave ) ),
addButton.dragLeaveSignal().connect( Gaffer.WeakMethod( self.__dragLeave ) ),
removeButton.dragLeaveSignal().connect( Gaffer.WeakMethod( self.__dragLeave ) ),
]
self.__dropConnections = [
self.dropSignal().connect( Gaffer.WeakMethod( self.__drop ) ),
addButton.dropSignal().connect( Gaffer.WeakMethod( self.__drop ) ),
removeButton.dropSignal().connect( Gaffer.WeakMethod( self.__drop ) ),
]
self.__dragPointer = "values.png"
# stuff for drag begin
self.__borrowedButtonPress = None
self.__emittingButtonPress = False
self.__buttonPressConnection = self.__tableViewHolder.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ) )
self.__buttonReleaseConnection = self.__tableViewHolder.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__buttonRelease ) )
self.__mouseMoveConnection = self.__tableViewHolder.mouseMoveSignal().connect( Gaffer.WeakMethod( self.__mouseMove ) )
self.__dragBeginConnection = self.__tableViewHolder.dragBeginSignal().connect( Gaffer.WeakMethod( self.__dragBegin ) )
self.__dragEndConnection = self.__tableViewHolder.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ) )
# final setup
self.__dataChangedSignal = GafferUI.WidgetSignal()
self.__editSignal = Gaffer.Signal3()
if isinstance( header, list ) :
self.__headerOverride = header
else :
self.__headerOverride = None
self.__columnToolTips = columnToolTips
self.__columnEditability = columnEditability
self.__propagatingDataChangesToSelection = False
self.__sizeEditable = sizeEditable
self.setData( data )
self.setEditable( editable )
def setHighlighted( self, highlighted ) :
if highlighted == self.getHighlighted() :
return
self.__tableView.setProperty( "gafferHighlighted", GafferUI._Variant.toVariant( highlighted ) )
GafferUI.Widget.setHighlighted( self, highlighted )
def setData( self, data ) :
# it could be argued that we should early out here if data is self.getData(),
# but we can't right now as we're relying on setData() to update everything
# when the data has been modified in place by some external process, or
# by self.__removeSelection.
if data is not None :
if not isinstance( data, list ) :
data = [ data ]
self.__model = _Model( data, self.__tableView, self.getEditable(), self.__headerOverride, self.__columnToolTips, self.__columnEditability )
self.__model.dataChanged.connect( Gaffer.WeakMethod( self.__modelDataChanged ) )
self.__model.rowsInserted.connect( Gaffer.WeakMethod( self.__emitDataChangedSignal ) )
self.__model.rowsRemoved.connect( Gaffer.WeakMethod( self.__emitDataChangedSignal ) )
else :
self.__model = None
self.__tableView.setModel( self.__model )
if self.__model :
columnIndex = 0
haveResizeableContents = False
for accessor in self.__model.vectorDataAccessors() :
for i in range( 0, accessor.numColumns() ) :
delegate = _Delegate.create( accessor.data() )
delegate.setParent( self.__model )
self.__tableView.setItemDelegateForColumn( columnIndex, delegate )
canStretch = delegate.canStretch()
haveResizeableContents = haveResizeableContents or canStretch
columnIndex += 1
self.__tableView.horizontalHeader().setResizeMode(
QtGui.QHeaderView.ResizeToContents if haveResizeableContents else QtGui.QHeaderView.Fixed
)
self.__tableView.horizontalHeader().setStretchLastSection( canStretch )
self.__tableView.setSizePolicy(
QtGui.QSizePolicy(
QtGui.QSizePolicy.Expanding if canStretch else QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Maximum
)
)
# Somehow the QTableView can leave its header in a state where updates are disabled.
# If we didn't turn them back on, the header would disappear.
self.__tableView.verticalHeader().setUpdatesEnabled( True )
self.__tableView.updateGeometry()
## Returns the data being displayed. This is always returned as a list of
# VectorData instances, even if only one instance was passed to setData().
def getData( self ) :
return self.__model.vectorData()
def setEditable( self, editable ) :
# set object name so stylesheet can differentiate editable from
# non editable in terms of the style. hide the add/remove buttons
# if not editable.
if editable :
self.__tableView.setObjectName( "vectorDataWidgetEditable" )
self.__buttonRow.setVisible( self.__sizeEditable )
else :
self.__tableView.setObjectName( "vectorDataWidget" )
self.__buttonRow.setVisible( False )
# update the model
if self.__model is not None :
self.__model.setEditable( editable )
def getEditable( self ) :
return self.__tableView.objectName()=="vectorDataWidgetEditable"
def setSizeEditable( self, sizeEditable ) :
if sizeEditable == self.__sizeEditable :
return
self.__sizeEditable = sizeEditable
self.__buttonRow.setVisible( self.getEditable() and self.__sizeEditable )
def getSizeEditable( self ) :
return self.__sizeEditable
## Note that the number of columns is not necessarily the
# same as the length of the list returned by getData() - for
# instance a V3fVectorData in the list will generate 3 columns
# in the UI. The columnIndex is specified taking this into account,
# so there are actually 3 columns indexes relating to a single
# V3fVectorData, and each can be shown/hidden individually.
def setColumnVisible( self, columnIndex, visible ) :
self.__tableView.setColumnHidden( columnIndex, not visible )
def getColumnVisible( self, columnIndex ) :
return not self.__tableView.isColumnHidden( columnIndex )
def setColumnEditable( self, columnIndex, editable ) :
if columnIndex < 0 or not self.__model or columnIndex >= self.__model.columnCount() :
raise IndexError
if self.__columnEditability is None :
if editable :
return
else :
self.__columnEditability = [ True ] * self.__model.columnCount()
if self.__columnEditability[columnIndex] == editable :
return
self.__columnEditability[columnIndex] = editable
self.setData( self.getData() ) # update the model
def getColumnEditable( self, columnIndex ) :
if columnIndex < 0 or not self.__model or columnIndex >= self.__model.columnCount() :
raise IndexError
if self.__columnEditability is None :
return True
return self.__columnEditability[columnIndex]
def setDragPointer( self, dragPointer ) :
self.__dragPointer = dragPointer
def getDragPointer( self ) :
return self.__dragPointer
## Returns a tuple of ( columnIndex, rowIndex ) for the
# index at the specified position in local space. Note that because
# compound types like V3fVectorData are represented as more than one
# column, this index is not suitable for indexing directly into the
# data returned by getData().
def indexAt( self, position ) :
index = self.__tableView.indexAt( QtCore.QPoint( position[0], position[1] ) )
return ( index.column(), index.row() )
## Returns a list of ( columnIndex, rowIndex ) for the currently
# selected cells.
def selectedIndices( self ) :
return [ ( x.column(), x.row() ) for x in self.__tableView.selectedIndexes() ]
## Maps from the index of a column to a tuple of ( dataIndex, componentIndex )
# which can be used to index into the data as follows :
#
# getData()[dataIndex][rowIndex][componentIndex]
#
# Where the data in a column is not of a compound type, the returned
# componentIndex will be -1.
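# For example (illustrative data), with getData() == [ V3fVectorData, IntVectorData ] :
# columnToDataIndex( 1 ) == ( 0, 1 ) and columnToDataIndex( 3 ) == ( 1, -1 ).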
def columnToDataIndex( self, columnIndex ) :
c = 0
for dataIndex, accessor in enumerate( self.__model.vectorDataAccessors() ) :
nc = accessor.numColumns()
if c + nc > columnIndex :
if nc == 1 :
return ( dataIndex, -1 )
else :
return ( dataIndex, columnIndex - c )
c += nc
raise IndexError( columnIndex )
## Performs the reverse of columnToDataIndex.
def dataToColumnIndex( self, dataIndex, componentIndex ) :
accessors = self.__model.vectorDataAccessors()
if dataIndex < 0 or dataIndex >= len( accessors ) :
raise IndexError( dataIndex )
columnIndex = 0
for d in range( 0, dataIndex ) :
columnIndex += accessors[d].numColumns()
return columnIndex + max( 0, componentIndex )
## Returns a signal which is emitted whenever the data is edited.
# The signal is /not/ emitted when setData() is called.
def dataChangedSignal( self ) :
return self.__dataChangedSignal
## A signal emitted when the user clicks to edit a cell.
# Slots should accept ( vectorDataWidget, columnIndex, rowIndex )
# arguments and return a Widget which will be used to perform the
# editing. The Widget must have setValue()/getValue() methods which
# accept the appropriate type for the column being edited - these will
# be used to transfer values to and from the Widget. By default, the Enter
# and Escape keys will be intercepted to complete editing, but the Widget
# may also be hidden to signify that editing has been completed by some
# other means.
def editSignal( self ) :
return self.__editSignal
## Returns a definition for the popup menu - this is called each time
# the menu is displayed to allow menus to be built dynamically. May be
# overridden in derived classes to modify the menu.
## \todo We should remove this as part of implementing #217, and just
# let everything hook onto contextMenuSignal() instead.
def _contextMenuDefinition( self, selectedRows ) :
m = IECore.MenuDefinition()
m.append( "/Select All", { "command" : self.__selectAll } )
m.append( "/Clear Selection", { "command" : self.__clearSelection } )
if self.getEditable() and self.getSizeEditable() :
m.append( "/divider", { "divider" : True } )
m.append( "/Remove Selected Rows", { "command" : IECore.curry( Gaffer.WeakMethod( self.__removeRows ), selectedRows ) } )
return m
## This function is used by the ui to create new data to append. It may be overridden
# in derived classes to customise data creation. The return value should be a list of
# VectorData instances in the same format as that returned by getData(), or None to
# cancel the operation.
def _createRows( self ) :
newData = []
data = self.getData()
accessors = self.__model.vectorDataAccessors()
for d, a in zip( data, accessors ) :
nd = d.__class__()
nd.append( a.defaultElement() )
newData.append( nd )
return newData
def __modelDataChanged( self, topLeft, bottomRight ) :
if self.__propagatingDataChangesToSelection :
return
if topLeft == bottomRight and self.__tableView.selectionModel().isSelected( topLeft ) :
self.__propagatingDataChangesToSelection = True
valueToPropagate = self.__model.data( topLeft, QtCore.Qt.EditRole )
for index in self.__tableView.selectedIndexes() :
if index == topLeft :
continue
# we have to ignore exceptions, as the items we're setting might
# have a different data type than the value we're passing.
with IECore.IgnoredExceptions( Exception ) :
self.__model.setData( index, valueToPropagate, QtCore.Qt.EditRole )
self.__propagatingDataChangesToSelection = False
self.__emitDataChangedSignal()
def __emitDataChangedSignal( self, *unusedArgs ) :
self.dataChangedSignal()( self )
def __contextMenu( self, pos ) :
# build the menu and pop it up
m = self._contextMenuDefinition( self.__selectedRows() )
self.__popupMenu = GafferUI.Menu( m )
self.__popupMenu.popup( self )
def __selectAll( self ) :
self.__tableView.selectAll()
def __clearSelection( self ) :
self.__tableView.clearSelection()
def __selectedRows( self ) :
selectedRows = [ x.row() for x in self.__tableView.selectedIndexes() ]
selectedRows = list( set( selectedRows ) ) # remove duplicates
selectedRows.sort()
return selectedRows
def __removeSelection( self, button ) :
self.__removeRows( self.__selectedRows() )
def __removeRows( self, rows ) :
data = self.getData()
# delete the rows from data
for i in range( len( rows )-1, -1, -1 ) :
for d in data :
del d[rows[i]]
# tell the world
self.setData( data )
self.__emitDataChangedSignal()
def __addRows( self, button ) :
if self.__model is None :
return
newData = self._createRows()
if not newData :
return
data = self.getData()
assert( len( data ) == len( newData ) )
for i in range( 0, len( data ) ) :
data[i].extend( newData[i] )
self.setData( data )
self.__tableView.scrollToBottom()
self.__emitDataChangedSignal()
def __dragEnter( self, widget, event ) :
if event.sourceWidget is self.__tableViewHolder and widget is not self.__buttonRow[1]:
# we don't accept drags from ourself unless the target is the remove button
return False
data = self.getData()
if len( data ) == 1 and event.data.isInstanceOf( data[0].typeId() ) :
widget.setHighlighted( True )
return True
return False
def __dragLeave( self, widget, event ) :
widget.setHighlighted( False )
return True
def __drop( self, widget, event ) :
# dragEnter checked that we only had one data array
data = self.getData()[0]
if widget is self.__buttonRow[1] :
# remove
s = set( event.data )
newData = data.__class__()
for d in data :
if d not in s :
newData.append( d )
data = newData
else :
# add, but avoid creating duplicates
s = set( data )
for d in event.data :
if d not in s :
data.append( d )
s.add( d )
self.setData( [ data ] )
self.dataChangedSignal()( self )
widget.setHighlighted( False )
return True
def __buttonPress( self, widget, event ) :
assert( widget is self.__tableViewHolder )
if len( self.getData() ) != 1 :
# we only act as a drag source when we have a single vector of data
return False
if self.__emittingButtonPress :
return False
self.__borrowedButtonPress = None
if event.buttons == event.Buttons.Left and event.modifiers == event.Modifiers.None :
# We want to implement drag and drop of the selected items, which means borrowing
# mouse press events that the QTableView needs to perform selection.
# This makes things a little tricky. There are are two cases :
#
# 1) There is an existing selection, and it's been clicked on. We borrow the event
# so we can get a dragBeginSignal(), and to prevent the QTableView reducing a current
# multi-selection down to the single clicked item. If a drag doesn't materialise we'll
# re-emit the event straight to the QTableView in __buttonRelease so the QTableView can
# do its thing.
#
# 2) There is no existing selection. We pass the event to the QTableView
# to see if it will select something which we can subsequently drag.
#
# This is further complicated by the fact that the button presses we simulate for Qt
# will end up back in this function, so we have to be careful to ignore those.
index = self.__tableView.indexAt( QtCore.QPoint( event.line.p0.x, event.line.p0.y ) )
if self.__tableView.selectionModel().isSelected( index ) :
# case 1 : existing selection.
self.__borrowedButtonPress = event
return True
else :
# case 2 : no existing selection.
# allow qt to update the selection first.
self.__emitButtonPress( event )
# we must always return True to prevent the event getting passed
# to the QTreeView again, and so we get a chance to start a drag.
return True
return False
def __buttonRelease( self, widget, event ) :
if self.__borrowedButtonPress is not None :
self.__emitButtonPress( self.__borrowedButtonPress )
self.__borrowedButtonPress = None
return False
def __mouseMove( self, widget, event ) :
if event.buttons :
# take the event so that the underlying QTableView doesn't
# try to do drag-selection, which would ruin our own upcoming drag.
return True
return False
def __dragBegin( self, widget, event ) :
self.__borrowedButtonPress = None
selectedRows = self.__selectedRows()
if len( selectedRows ) :
data = self.getData()[0]
result = IECore.Object.create( data.typeId() )
for i in selectedRows :
result.append( data[i] )
GafferUI.Pointer.setFromFile( self.__dragPointer )
return result
return None
def __dragEnd( self, widget, event ) :
GafferUI.Pointer.set( None )
def __emitButtonPress( self, event ) :
qEvent = QtGui.QMouseEvent(
QtCore.QEvent.MouseButtonPress,
QtCore.QPoint( event.line.p0.x, event.line.p0.y ),
QtCore.Qt.LeftButton,
QtCore.Qt.LeftButton,
QtCore.Qt.NoModifier
)
try :
self.__emittingButtonPress = True
# really i think we should be using QApplication::sendEvent()
# here, but it doesn't seem to be working. it works with the qObject
# in the Widget event filter, but for some reason that differs from
# Widget._owner( qObject )._qtWidget() which is what we have here.
self.__tableView.mousePressEvent( qEvent )
finally :
self.__emittingButtonPress = False
# Private implementation - a QTableView with custom size behaviour.
class _TableView( QtGui.QTableView ) :
def __init__( self, minimumVisibleRows ) :
QtGui.QTableView.__init__( self )
self.__minimumVisibleRows = minimumVisibleRows
def setModel( self, model ) :
prevModel = self.model()
if prevModel :
prevModel.rowsInserted.disconnect( self.__sizeShouldChange )
prevModel.rowsRemoved.disconnect( self.__sizeShouldChange )
prevModel.dataChanged.disconnect( self.__sizeShouldChange )
QtGui.QTableView.setModel( self, model )
if model :
model.rowsInserted.connect( self.__sizeShouldChange )
model.rowsRemoved.connect( self.__sizeShouldChange )
model.dataChanged.connect( self.__sizeShouldChange )
def minimumSizeHint( self ) :
# compute the minimum height to be the size of the header plus
# a minimum number of rows specified in self.__minimumVisibleRows
margins = self.contentsMargins()
minimumHeight = margins.top() + margins.bottom()
if not self.horizontalHeader().isHidden() :
minimumHeight += self.horizontalHeader().sizeHint().height()
numRows = self.verticalHeader().count()
if numRows :
minimumHeight += self.verticalHeader().sectionSize( 0 ) * min( numRows, self.__minimumVisibleRows )
# horizontal direction doesn't matter, as we don't allow shrinking
# in that direction anyway.
return QtCore.QSize( 1, minimumHeight )
def sizeHint( self ) :
# this seems to be necessary to nudge the header into calculating
# the correct size - otherwise the length() below comes out wrong
# sometimes. in other words it's a hack.
for i in range( 0, self.horizontalHeader().count() ) :
self.horizontalHeader().sectionSize( i )
margins = self.contentsMargins()
w = self.horizontalHeader().length() + margins.left() + margins.right()
if not self.verticalHeader().isHidden() :
w += self.verticalHeader().sizeHint().width()
# always allow room for a scrollbar even though we don't always need one. we
# make sure the background in the stylesheet is transparent so that when the
# scrollbar is hidden we don't draw an empty gap where it otherwise would be.
w += self.verticalScrollBar().sizeHint().width()
h = self.verticalHeader().length() + margins.top() + margins.bottom()
if not self.horizontalHeader().isHidden() :
h += self.horizontalHeader().sizeHint().height()
return QtCore.QSize( w, h )
def __sizeShouldChange( self, *unusedArgs ) :
self.updateGeometry()
# Internal implementation detail - a qt model which wraps
# around the VectorData.
class _Model( QtCore.QAbstractTableModel ) :
__addValueText = "Add..."
def __init__( self, data, parent=None, editable=True, header=None, columnToolTips=None, columnEditability=None ) :
QtCore.QAbstractTableModel.__init__( self, parent )
self.__data = data
self.__editable = editable
self.__header = header
self.__columnToolTips = columnToolTips
self.__columnEditability = columnEditability
self.__columns = []
self.__accessors = []
for d in self.__data :
accessor = _DataAccessor.create( d )
assert( accessor is not None )
for i in range( 0, accessor.numColumns() ) :
self.__columns.append( IECore.Struct( accessor=accessor, relativeColumnIndex=i ) )
self.__accessors.append( accessor )
if self.__columnToolTips is not None :
assert( len( self.__columns ) == len( self.__columnToolTips ) )
if self.__columnEditability is not None :
assert( len( self.__columns ) == len( self.__columnEditability ) )
## Methods specific to this model first
def vectorData( self ) :
return self.__data
def vectorDataAccessors( self ) :
return self.__accessors
def setEditable( self, editable ) :
if self.__editable == editable :
return
self.__editable = editable
def getEditable( self ) :
return self.__editable
## Then overrides for methods inherited from QAbstractModel
def rowCount( self, parent = QtCore.QModelIndex() ) :
if parent.isValid() :
return 0
return len( self.__data[0] )
def columnCount( self, parent = QtCore.QModelIndex() ) :
if parent.isValid() :
return 0
return len( self.__columns )
def headerData( self, section, orientation, role ) :
if QtCore is None :
# it seems that this is sometimes getting called during python shutdown.
# during shutdown python makes all module globals reference None, so
# QtCore becomes None, and we can't do anything. just return None and
# hope for the best.
return None
if role == QtCore.Qt.DisplayRole :
if orientation == QtCore.Qt.Horizontal :
if self.__header is not None :
return GafferUI._Variant.toVariant( self.__header[section] )
else :
column = self.__columns[section]
return GafferUI._Variant.toVariant( column.accessor.headerLabel( column.relativeColumnIndex ) )
else :
return GafferUI._Variant.toVariant( section )
elif role == QtCore.Qt.ToolTipRole :
if orientation == QtCore.Qt.Horizontal and self.__columnToolTips is not None :
return GafferUI._Variant.toVariant( self.__columnToolTips[section] )
return GafferUI._Variant.toVariant( None )
def flags( self, index ) :
result = (
QtCore.Qt.ItemIsSelectable |
QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsDragEnabled
)
if self.__editable :
if self.__columnEditability is None or self.__columnEditability[index.column()] :
result |= QtCore.Qt.ItemIsEditable
return result
def data( self, index, role ) :
if (
role == QtCore.Qt.DisplayRole or
role == QtCore.Qt.EditRole
) :
column = self.__columns[index.column()]
return column.accessor.getElement( index.row(), column.relativeColumnIndex )
elif role == QtCore.Qt.ToolTipRole and self.__columnToolTips is not None :
return GafferUI._Variant.toVariant( self.__columnToolTips[index.column()] )
return GafferUI._Variant.toVariant( None )
def setData( self, index, value, role ) :
if role == QtCore.Qt.EditRole :
column = self.__columns[index.column()]
column.accessor.setElement( index.row(), column.relativeColumnIndex, value )
self.dataChanged.emit( index, index )
return True
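# Illustrative sketch (not part of the original module): the model wraps a list of VectorData
# objects and builds one accessor per element, so a two-column table could be backed by e.g.
#
#   _Model( [ IECore.IntVectorData( [ 1, 2, 3 ] ), IECore.StringVectorData( [ "a", "b", "c" ] ) ] )
#
# where each data object contributes accessor.numColumns() columns to the view.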
# The _DataAccessor classes are responsible for converting from the Cortex data representation to the
# Qt (QVariant) representation.
class _DataAccessor() :
def __init__( self, data ) :
self.__data = data
def data( self ) :
return self.__data
def numColumns( self ) :
return 1
def headerLabel( self, columnIndex ) :
return [ "X", "Y", "Z" ][columnIndex]
def defaultElement( self ) :
elementType = IECore.DataTraits.valueTypeFromSequenceType( type( self.data() ) )
return elementType( 0 )
def setElement( self, rowIndex, columnIndex, value ) :
self.data()[rowIndex] = GafferUI._Variant.fromVariant( value )
def getElement( self, rowIndex, columnIndex ) :
return GafferUI._Variant.toVariant( self.data()[rowIndex] )
# Factory methods
#################################
@classmethod
def create( cls, data ) :
typeIds = [ data.typeId() ] + IECore.RunTimeTyped.baseTypeIds( data.typeId() )
for typeId in typeIds :
creator = cls.__typesToCreators.get( typeId, None )
if creator is not None :
return creator( data )
return None
@classmethod
def registerType( cls, typeId, creator ) :
cls.__typesToCreators[typeId] = creator
__typesToCreators = {}
_DataAccessor.registerType( IECore.BoolVectorData.staticTypeId(), _DataAccessor )
_DataAccessor.registerType( IECore.HalfVectorData.staticTypeId(), _DataAccessor )
_DataAccessor.registerType( IECore.FloatVectorData.staticTypeId(), _DataAccessor )
_DataAccessor.registerType( IECore.DoubleVectorData.staticTypeId(), _DataAccessor )
_DataAccessor.registerType( IECore.IntVectorData.staticTypeId(), _DataAccessor )
_DataAccessor.registerType( IECore.UIntVectorData.staticTypeId(), _DataAccessor )
_DataAccessor.registerType( IECore.Int64VectorData.staticTypeId(), _DataAccessor )
_DataAccessor.registerType( IECore.UInt64VectorData.staticTypeId(), _DataAccessor )
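# Illustrative sketch (not part of the original module): create() walks the data's type id and
# its RunTimeTyped base type ids, so any of the vector types registered above resolves to the
# plain _DataAccessor, e.g.
#
#   accessor = _DataAccessor.create( IECore.FloatVectorData( [ 0.5, 1.5 ] ) )
#   accessor.numColumns()        # 1
#   accessor.getElement( 0, 0 )  # variant wrapping 0.5
#
# Unregistered types simply return None from create().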
class _CompoundDataAccessor( _DataAccessor ) :
def __init__( self, data ) :
_DataAccessor.__init__( self, data )
def numColumns( self ) :
v = IECore.DataTraits.valueTypeFromSequenceType( type( self.data() ) )
return v.dimensions()
def headerLabel( self, columnIndex ) :
if isinstance( self.data(), ( IECore.Color3fVectorData, IECore.Color4fVectorData ) ) :
return [ "R", "G", "B", "A" ][columnIndex]
else :
return [ "X", "Y", "Z", "W" ][columnIndex]
def setElement( self, rowIndex, columnIndex, value ) :
element = self.data()[rowIndex]
element[columnIndex] = GafferUI._Variant.fromVariant( value )
self.data()[rowIndex] = element
def getElement( self, rowIndex, columnIndex ) :
return GafferUI._Variant.toVariant( self.data()[rowIndex][columnIndex] )
_DataAccessor.registerType( IECore.Color3fVectorData.staticTypeId(), _CompoundDataAccessor )
_DataAccessor.registerType( IECore.Color4fVectorData.staticTypeId(), _CompoundDataAccessor )
_DataAccessor.registerType( IECore.V3fVectorData.staticTypeId(), _CompoundDataAccessor )
class _StringDataAccessor( _DataAccessor ) :
def __init__( self, data ) :
_DataAccessor.__init__( self, data )
def defaultElement( self ) :
return ""
_DataAccessor.registerType( IECore.StringVectorData.staticTypeId(), _StringDataAccessor )
class _InternedStringDataAccessor( _StringDataAccessor ) :
def __init__( self, data ) :
_DataAccessor.__init__( self, data )
def getElement( self, rowIndex, columnIndex ) :
return GafferUI._Variant.toVariant( self.data()[rowIndex].value() )
_DataAccessor.registerType( IECore.InternedStringVectorData.staticTypeId(), _InternedStringDataAccessor )
# The _Delegate classes are used to decide how the different types of data are
# displayed. They derive from QStyledItemDelegate for drawing and event handling,
# but also have additional methods to specify sizing.
class _Delegate( QtGui.QStyledItemDelegate ) :
def __init__( self ) :
QtGui.QStyledItemDelegate.__init__( self )
# The closeEditor signal is used to tell the view that editing is complete,
# at which point it will destroy the QWidget used for editing.
# It is emitted from QtGui.QAbstractItemDelegate.eventFilter() and also from
# our own eventFilter() to stop editing. We connect to it here so that we can
# drop our reference to self.__editor (a GafferUI.Widget) when editing finishes -
# otherwise it would live on but with its Qt half already destroyed, which would
# likely give rise to errors.
self.closeEditor.connect( self.__closeEditor )
# Qt methods we override
########################
def createEditor( self, parent, option, index ) :
# see if editSignal() has been connected up to provide a custom editor.
vectorDataWidget = GafferUI.Widget._owner( parent ).ancestor( VectorDataWidget )
self.__editor = vectorDataWidget.editSignal()( vectorDataWidget, index.column(), index.row() )
# if it hasn't, then see if a derived class can provide a custom editor.
if self.__editor is None :
self.__editor = self._createEditorInternal( index )
# set up the custom editor if we have one, otherwise
# fall through to the base class which will provide a default
# editor.
if self.__editor is not None :
self.__editor._qtWidget().setParent( parent )
return self.__editor._qtWidget()
else :
return QtGui.QStyledItemDelegate.createEditor( self, parent, option, index )
def setEditorData( self, editor, index ) :
if self.__editor is not None :
self.__editor.setValue( GafferUI._Variant.fromVariant( index.data() ) )
else :
QtGui.QStyledItemDelegate.setEditorData( self, editor, index )
def setModelData( self, editor, model, index ) :
if self.__editor is not None :
model.setData( index, GafferUI._Variant.toVariant( self.__editor.getValue() ), QtCore.Qt.EditRole )
else :
QtGui.QStyledItemDelegate.setModelData( self, editor, model, index )
def eventFilter( self, object, event ) :
if QtGui.QStyledItemDelegate.eventFilter( self, object, event ) :
return True
if event.type() == event.Hide and self.__editor is not None :
# custom editors may hide themselves to indicate that editing
# is complete. when this happens we are responsible for carrying
# out this completion.
self.commitData.emit( self.__editor._qtWidget() )
self.closeEditor.emit( self.__editor._qtWidget(), self.NoHint )
return False
# Methods we define for our own purposes
########################################
def canStretch( self ) :
return False
# Called by createEditor() if editSignal() doesn't provide a custom widget.
# Derived classes may override this to return a GafferUI.Widget to be used
# for editing if they wish to override the default behaviour.
def _createEditorInternal( self, index ) :
return None
@classmethod
def create( cls, data ) :
typeIds = [ data.typeId() ] + IECore.RunTimeTyped.baseTypeIds( data.typeId() )
for typeId in typeIds :
creator = cls.__typesToCreators.get( typeId, None )
if creator is not None :
return creator()
return None
@classmethod
def registerType( cls, typeId, creator ) :
cls.__typesToCreators[typeId] = creator
__typesToCreators = {}
def __closeEditor( self ) :
# the QWidget for the editor is being destroyed - also destroy
# the GafferUI.Widget that wrapped it.
self.__editor = None
# A delegate to ensure that numeric editing is performed by our NumericWidget
# class, complete with cursor increments and virtual sliders, rather than the
# built in qt one.
class _NumericDelegate( _Delegate ) :
def __init__( self ) :
_Delegate.__init__( self )
def _createEditorInternal( self, index ) :
return GafferUI.NumericWidget( GafferUI._Variant.fromVariant( index.data() ) )
_Delegate.registerType( IECore.HalfVectorData.staticTypeId(), _NumericDelegate )
_Delegate.registerType( IECore.FloatVectorData.staticTypeId(), _NumericDelegate )
_Delegate.registerType( IECore.DoubleVectorData.staticTypeId(), _NumericDelegate )
_Delegate.registerType( IECore.IntVectorData.staticTypeId(), _NumericDelegate )
_Delegate.registerType( IECore.UIntVectorData.staticTypeId(), _NumericDelegate )
_Delegate.registerType( IECore.Int64VectorData.staticTypeId(), _NumericDelegate )
_Delegate.registerType( IECore.UInt64VectorData.staticTypeId(), _NumericDelegate )
_Delegate.registerType( IECore.Color3fVectorData.staticTypeId(), _NumericDelegate )
_Delegate.registerType( IECore.Color4fVectorData.staticTypeId(), _NumericDelegate )
_Delegate.registerType( IECore.V3fVectorData.staticTypeId(), _NumericDelegate )
class _BoolDelegate( _Delegate ) :
def __init__( self ) :
_Delegate.__init__( self )
def paint( self, painter, option, index ) :
# in PyQt, option is passed to us correctly as a QStyleOptionViewItemV4,
# but in PySide it is merely a QStyleOptionViewItem and we must "cast" it.
option = QtGui.QStyleOptionViewItemV4( option )
# in PyQt, we can access the widget with option.widget, but in PySide it
# is always None for some reason, so we jump through some hoops to get the
# widget via our parent.
widget = QtCore.QObject.parent( self.parent() )
# draw the background
widget.style().drawControl( QtGui.QStyle.CE_ItemViewItem, option, painter, widget )
# draw the checkbox.
styleOption = QtGui.QStyleOptionButton()
styleOption.state = option.state
styleOption.state |= QtGui.QStyle.State_Enabled
if self.__toBool( index ) :
styleOption.state |= QtGui.QStyle.State_On
else :
styleOption.state |= QtGui.QStyle.State_Off
styleOption.rect = self.__checkBoxRect( widget, option.rect )
widget.style().drawControl( QtGui.QStyle.CE_CheckBox, styleOption, painter, widget )
def createEditor( self, parent, option, index ) :
return None
def editorEvent( self, event, model, option, index ) :
if not ( index.flags() & QtCore.Qt.ItemIsEditable ) :
return False
if event.type()==QtCore.QEvent.MouseButtonDblClick :
# eat event so an editor doesn't get created
return True
elif event.type()==QtCore.QEvent.MouseButtonPress :
# eat event so row isn't selected
widget = QtCore.QObject.parent( self.parent() )
rect = self.__checkBoxRect( widget, option.rect )
if event.button() == QtCore.Qt.LeftButton and rect.contains( event.pos() ) :
checked = self.__toBool( index )
model.setData( index, not checked, QtCore.Qt.EditRole )
return True
elif event.type()==QtCore.QEvent.KeyPress :
if event.key() in ( QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter ) :
checked = self.__toBool( index )
model.setData( index, not checked, QtCore.Qt.EditRole )
return True
return False
def __checkBoxRect( self, widget, viewItemRect ) :
checkBoxStyleOption = QtGui.QStyleOptionButton()
r = widget.style().subElementRect( QtGui.QStyle.SE_CheckBoxIndicator, checkBoxStyleOption )
return QtCore.QRect(
viewItemRect.center() - ( QtCore.QPoint( r.width(), r.height() ) / 2 ),
r.size()
)
def __toBool( self, index ) :
return GafferUI._Variant.fromVariant( index.model().data( index, QtCore.Qt.DisplayRole ) )
_Delegate.registerType( IECore.BoolVectorData.staticTypeId(), _BoolDelegate )
class _StringDelegate( _Delegate ) :
def __init__( self ) :
_Delegate.__init__( self )
def canStretch( self ) :
return True
_Delegate.registerType( IECore.StringVectorData.staticTypeId(), _StringDelegate )
_Delegate.registerType( IECore.InternedStringVectorData.staticTypeId(), _StringDelegate )
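# Illustrative sketch (hypothetical, mirroring the registrations above): a delegate for an
# additional vector data type would subclass _Delegate, optionally provide a custom editor via
# _createEditorInternal(), and register itself against the data's type id, e.g.
#
#   class _MyDelegate( _Delegate ) :
#       def _createEditorInternal( self, index ) :
#           return GafferUI.TextWidget( GafferUI._Variant.fromVariant( index.data() ) )
#
#   _Delegate.registerType( IECore.MyVectorData.staticTypeId(), _MyDelegate )
#
# IECore.MyVectorData is a placeholder name used only for this example.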
|
py | b416030f333b31dec80b0b33a4d405bac7cdb570 | # Copyright (C) July 2018: TF TECH NV in Belgium see https://www.threefold.tech/
# In case TF TECH NV ceases to exist (e.g. because of bankruptcy)
# then Incubaid NV also in Belgium will get the Copyright & Authorship for all changes made since July 2018
# and the license will automatically become Apache v2 for all code related to Jumpscale & DigitalMe
# This file is part of jumpscale at <https://github.com/threefoldtech>.
# jumpscale is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jumpscale is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License v3 for more details.
#
# You should have received a copy of the GNU General Public License
# along with jumpscale or jumpscale derived works. If not, see <http://www.gnu.org/licenses/>.
# LICENSE END
from Jumpscale import j
class NAMESPACE(j.data.bcdb._BCDBModelClass):
def _schema_get(self):
return j.data.schema.get_from_url("jumpscale.bcdb.meta.namespace.2")
@property
def acl(self):
raise j.exceptions.Base("cannot modify acl object in acl object")
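# Illustrative note (hypothetical usage, not part of the original file): models like this are
# normally obtained through a BCDB instance rather than instantiated directly, e.g. something like
#
#   model = bcdb.model_get(url="jumpscale.bcdb.meta.namespace.2")
#
# where `bcdb` is an existing j.data.bcdb instance; the exact accessor name is an assumption here.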
|
py | b416038c397e52940efb39c0663fb2b49502ad86 | from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import numpy as np
import time
import glob
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
import etw_pytorch_utils as pt_utils
import pprint
import os.path as osp
import os,sys
import argparse
sys.path.append('../..')
from pointnet2.models import Pointnet2ClsMSG as Pointnet
from pointnet2.models.pointnet2_msg_cls import model_fn_decorator
from pointnet2.data import ScanNet
import pointnet2.data.data_utils as d_utils
if __name__ == "__main__":
# model
model = Pointnet(input_channels=3, num_classes=21, use_xyz=True)
print('#parameters %d' % sum([x.nelement() for x in model.parameters()])) |
py | b41604ada14ba9295afc267d863d7ba94a4315ad | from datetime import datetime
import os
import requests
from blogist.types import Post, User, Comment, WrongUsername, List
class Gist:
def __init__(self, username: str, prefix: str, form: str):
self.username = username
self.API = 'https://api.github.com'
self.TIME = '%Y-%m-%dT%H:%M:%SZ'
self.session = requests.Session()
self.session.headers.update({
'Accept': 'application/vnd.github.v3+json'
})
if not self.check_username():
raise WrongUsername(f'{username} is not found in GitHub.')
self.user = None
self.prefix = prefix
self.format = form
self.posts = []
def check_username(self) -> bool:
resp = self.session.get(f'{self.API}/users/{self.username}')
if resp.ok:
data = resp.json()
if data.get('type', '') == 'User':
self.user = User(
data.get('login'),
data.get('name'),
data.get('url'),
data.get('html_url'),
data.get('avatar_url'),
)
return True
return False
def fetch_all_posts(self):
url = f'{self.API}/users/{self.username}/gists'
while True:
resp = self.session.get(url)
if resp.ok:
data = resp.json()
for gist in data:
files = gist.get('files')
for file in files:
if file.startswith(self.prefix) and file.endswith(f'.{self.format}'):
self.posts.append(Post(
self.user,
file.replace(self.prefix, '', 1),
datetime.strptime(gist.get('created_at'), self.TIME),
datetime.strptime(gist.get('updated_at'), self.TIME),
gist.get('description'),
self.get_post_body(files[file].get('raw_url')),
self.get_comments(gist.get('comments_url')) if gist.get('comments', 0) > 0 else []
))
break # only one file will be added to posts
next_url = resp.links.get('next')
if next_url:
url = next_url.get('url')
continue
break
def generate_file_name(self, post: Post) -> str:
date = post.created_at
return f'{date.year}-{date.month}-{date.day}-{post.title}'
def get_post_body(self, url: str) -> str:
if url is None:
return ''
resp = self.session.get(url)
if resp.ok:
return resp.text
return ''
def get_comments(self, url: str) -> List[Comment]:
if url is None:
return []
resp = self.session.get(url)
comments = []
if resp.ok:
data = resp.json()
for comment in data:
user = comment.get('user', {})
comments.append(Comment(
User(
user.get('login'),
'',
user.get('url'),
user.get('html_url'),
user.get('avatar_url'),
),
comment.get('body'),
comment.get('created_at'),
comment.get('updated_at'),
))
return comments
def store(self, path: str):
if not os.path.isdir(path):
os.mkdir(path)
for post in self.posts:
with open(os.path.join(path, self.generate_file_name(post)), 'w', encoding='utf-8') as f:
f.write(f'---\ntitle: {post.title.rsplit(".", 1)[0]}\n---\n')
f.write(f'\n{post.description}\n\n<!--more-->\n\n')
f.write(post.body)
if post.comments:
f.write('\n\n')
for com in post.comments:
f.write(f'[comment@{com.user.login}] [{com.created_at}] ({com.body})\n')
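# Illustrative usage (hypothetical values, not part of the original module): fetch the matching
# gists for a user and write them out as post files.
#
#   gist = Gist('octocat', prefix='post-', form='md')
#   gist.fetch_all_posts()
#   gist.store('./_posts')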
|
py | b41604ca994f96daadbb38880061a1cad2a65404 | import FWCore.ParameterSet.Config as cms
from HeterogeneousCore.CUDACore.SwitchProducerCUDA import SwitchProducerCUDA
from RecoLocalTracker.SiStripRecHitConverter.StripCPEfromTrackAngle_cfi import *
from RecoLocalTracker.SiStripRecHitConverter.SiStripRecHitMatcher_cfi import *
from RecoTracker.TransientTrackingRecHit.TransientTrackingRecHitBuilder_cfi import *
import RecoTracker.TransientTrackingRecHit.TransientTrackingRecHitBuilder_cfi
myTTRHBuilderWithoutAngle = RecoTracker.TransientTrackingRecHit.TransientTrackingRecHitBuilder_cfi.ttrhbwr.clone(
StripCPE = 'Fake',
ComponentName = 'PixelTTRHBuilderWithoutAngle'
)
from RecoTracker.TkSeedingLayers.PixelLayerTriplets_cfi import *
from RecoTracker.TkSeedingLayers.TTRHBuilderWithoutAngle4PixelTriplets_cfi import *
from RecoPixelVertexing.PixelTrackFitting.pixelFitterByHelixProjections_cfi import pixelFitterByHelixProjections
from RecoPixelVertexing.PixelTrackFitting.pixelNtupletsFitter_cfi import pixelNtupletsFitter
from RecoPixelVertexing.PixelTrackFitting.pixelTrackFilterByKinematics_cfi import pixelTrackFilterByKinematics
from RecoPixelVertexing.PixelTrackFitting.pixelTrackCleanerBySharedHits_cfi import pixelTrackCleanerBySharedHits
from RecoPixelVertexing.PixelTrackFitting.pixelTracks_cfi import pixelTracks as _pixelTracks
from RecoTracker.TkTrackingRegions.globalTrackingRegion_cfi import globalTrackingRegion as _globalTrackingRegion
from RecoTracker.TkTrackingRegions.globalTrackingRegionFromBeamSpot_cfi import globalTrackingRegionFromBeamSpot as _globalTrackingRegionFromBeamSpot
from RecoTracker.TkHitPairs.hitPairEDProducer_cfi import hitPairEDProducer as _hitPairEDProducer
from RecoPixelVertexing.PixelTriplets.pixelTripletHLTEDProducer_cfi import pixelTripletHLTEDProducer as _pixelTripletHLTEDProducer
from RecoPixelVertexing.PixelLowPtUtilities.ClusterShapeHitFilterESProducer_cfi import *
import RecoPixelVertexing.PixelLowPtUtilities.LowPtClusterShapeSeedComparitor_cfi
from RecoTracker.FinalTrackSelectors.trackAlgoPriorityOrder_cfi import trackAlgoPriorityOrder
# Eras
from Configuration.Eras.Modifier_trackingLowPU_cff import trackingLowPU
from Configuration.Eras.Modifier_run3_common_cff import run3_common
# seeding layers
from RecoTracker.IterativeTracking.InitialStep_cff import initialStepSeedLayers, initialStepHitDoublets, _initialStepCAHitQuadruplets
# TrackingRegion
pixelTracksTrackingRegions = _globalTrackingRegion.clone()
trackingLowPU.toReplaceWith(pixelTracksTrackingRegions, _globalTrackingRegionFromBeamSpot.clone())
# Pixel quadruplets tracking
pixelTracksSeedLayers = initialStepSeedLayers.clone(
BPix = dict(HitProducer = "siPixelRecHitsPreSplitting"),
FPix = dict(HitProducer = "siPixelRecHitsPreSplitting")
)
pixelTracksHitDoublets = initialStepHitDoublets.clone(
clusterCheck = "",
seedingLayers = "pixelTracksSeedLayers",
trackingRegions = "pixelTracksTrackingRegions"
)
pixelTracksHitQuadruplets = _initialStepCAHitQuadruplets.clone(
doublets = "pixelTracksHitDoublets",
SeedComparitorPSet = dict(clusterShapeCacheSrc = 'siPixelClusterShapeCachePreSplitting')
)
pixelTracks = _pixelTracks.clone(
SeedingHitSets = "pixelTracksHitQuadruplets"
)
pixelTracksTask = cms.Task(
pixelTracksTrackingRegions,
pixelFitterByHelixProjections,
pixelTrackFilterByKinematics,
pixelTracksSeedLayers,
pixelTracksHitDoublets,
pixelTracksHitQuadruplets,
pixelTracks
)
pixelTracksSequence = cms.Sequence(pixelTracksTask)
# Pixel triplets for trackingLowPU
pixelTracksHitTriplets = _pixelTripletHLTEDProducer.clone(
doublets = "pixelTracksHitDoublets",
produceSeedingHitSets = True,
SeedComparitorPSet = RecoPixelVertexing.PixelLowPtUtilities.LowPtClusterShapeSeedComparitor_cfi.LowPtClusterShapeSeedComparitor.clone(
clusterShapeCacheSrc = "siPixelClusterShapeCachePreSplitting"
)
)
trackingLowPU.toModify(pixelTracks,
SeedingHitSets = "pixelTracksHitTriplets"
)
_pixelTracksTask_lowPU = pixelTracksTask.copy()
_pixelTracksTask_lowPU.replace(pixelTracksHitQuadruplets, pixelTracksHitTriplets)
trackingLowPU.toReplaceWith(pixelTracksTask, _pixelTracksTask_lowPU)
# "Patatrack" pixel ntuplets, fishbone cleaning, Broken Line fit, and density-based vertex reconstruction
from Configuration.ProcessModifiers.pixelNtupletFit_cff import pixelNtupletFit
from RecoPixelVertexing.PixelTriplets.pixelTracksCUDA_cfi import pixelTracksCUDA as _pixelTracksCUDA
# SwitchProducer providing the pixel tracks in SoA format on the CPU
pixelTracksSoA = SwitchProducerCUDA(
# build pixel ntuplets and pixel tracks in SoA format on the CPU
cpu = _pixelTracksCUDA.clone(
pixelRecHitSrc = "siPixelRecHitsPreSplittingSoA",
idealConditions = False,
onGPU = False
)
)
# use quality cuts tuned for Run 2 ideal conditions for all Run 3 workflows
run3_common.toModify(pixelTracksSoA.cpu,
idealConditions = True
)
# convert the pixel tracks from SoA to legacy format
from RecoPixelVertexing.PixelTrackFitting.pixelTrackProducerFromSoA_cfi import pixelTrackProducerFromSoA as _pixelTrackProducerFromSoA
from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker
(pixelNtupletFit & ~phase2_tracker).toReplaceWith(pixelTracks, _pixelTrackProducerFromSoA.clone(
pixelRecHitLegacySrc = "siPixelRecHitsPreSplitting",
))
(pixelNtupletFit & ~phase2_tracker).toReplaceWith(pixelTracksTask, cms.Task(
#pixelTracksTrackingRegions,
#pixelFitterByHelixProjections,
#pixelTrackFilterByKinematics,
#pixelTracksSeedLayers,
#pixelTracksHitDoublets,
#pixelTracksHitQuadruplets,
# build the pixel ntuplets and the pixel tracks in SoA format on the GPU
pixelTracksSoA,
# convert the pixel tracks from SoA to legacy format
pixelTracks
))
# "Patatrack" sequence running on GPU
from Configuration.ProcessModifiers.gpu_cff import gpu
# build the pixel ntuplets and pixel tracks in SoA format on the GPU
pixelTracksCUDA = _pixelTracksCUDA.clone(
pixelRecHitSrc = "siPixelRecHitsPreSplittingCUDA",
idealConditions = False,
onGPU = True
)
# use quality cuts tuned for Run 2 ideal conditions for all Run 3 workflows
run3_common.toModify(pixelTracksCUDA,
idealConditions = True
)
# SwitchProducer providing the pixel tracks in SoA format on the CPU
from RecoPixelVertexing.PixelTrackFitting.pixelTracksSoA_cfi import pixelTracksSoA as _pixelTracksSoA
gpu.toModify(pixelTracksSoA,
# transfer the pixel tracks in SoA format to the host
cuda = _pixelTracksSoA.clone()
)
from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker
(pixelNtupletFit & gpu & ~phase2_tracker).toReplaceWith(pixelTracksTask, cms.Task(
# build the pixel ntuplets and pixel tracks in SoA format on the GPU
pixelTracksCUDA,
# transfer the pixel tracks in SoA format to the CPU, and convert them to legacy format
pixelTracksTask.copy()
))
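# Illustrative note (not part of the original config): a job would typically schedule the
# sequence defined above on its process after loading this fragment, along the lines of
#
#   process.pixelTracksPath = cms.Path(process.pixelTracksSequence)
#
# where the attribute name pixelTracksPath is an arbitrary example.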
|
py | b41605a2b3508851e2ec3e91ca5e3260066a616a | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# pylint:disable=too-many-lines
from datetime import datetime
from ._shared import parse_vault_id
from ._shared._generated.v7_0 import models
from .enums import ActionType, KeyUsageType, KeyCurveName, KeyType, SecretContentType
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Any, Dict, Optional
class AdministratorDetails(object):
"""Details of the organization administrator of the certificate issuer.
:param str first_name: First name of the issuer.
:param str last_name: Last name of the issuer.
:param str email: email of the issuer.
:param str phone: phone number of the issuer.
"""
def __init__(self, first_name=None, last_name=None, email=None, phone=None):
# type: (Optional[str], Optional[str], Optional[str], Optional[str]) -> None
self._first_name = first_name
self._last_name = last_name
self._phone = phone
self._email = email
@classmethod
def _from_admin_details_bundle(cls, admin_details_bundle):
# type: (models.AdministratorDetails) -> AdministratorDetails
"""Construct a AdministratorDetails from an autorest-generated AdministratorDetailsBundle"""
return cls(
email=admin_details_bundle.email_address,
first_name=admin_details_bundle.first_name,
last_name=admin_details_bundle.last_name,
phone=admin_details_bundle.phone
)
@property
def email(self):
# type: () -> str
""":rtype: str"""
return self._email
@property
def first_name(self):
# type: () -> str
""":rtype: str"""
return self._first_name
@property
def last_name(self):
# type: () -> str
""":rtype: str"""
return self._last_name
@property
def phone(self):
# type: () -> str
""":rtype: str"""
return self._phone
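# Illustrative example (hypothetical values): administrator details as supplied for an issuer
#
#   admin = AdministratorDetails(first_name="Jane", last_name="Doe", email="[email protected]", phone="555-0100")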
class Error(object):
"""The key vault server error.
:param str code: The error code.
:param str message: The error message.
:param inner_error: The error object itself
:type inner_error: ~azure.keyvault.certificates.Error
"""
def __init__(self, code, message, inner_error):
# type: (str, str, models.Error) -> None
self._code = code
self._message = message
self._inner_error = inner_error
@property
def code(self):
# type: () -> str
"""The error code.
:rtype: str
"""
return self._code
@property
def message(self):
# type: () -> str
"""The error message.
:rtype: str
"""
return self._message
@property
def inner_error(self):
# type: () -> Error
"""The error itself
:return models.Error:
"""
return self._inner_error
class CertificateBase(object):
"""Certificate base consists of a certificates metadata.
:param attributes: The certificate management attributes.
:type attributes: ~azure.keyvault.certificates.CertificateAttributes
:param str cert_id: The certificate id.
:param bytes thumbprint: Thumbprint of the certificate
"""
def __init__(self, attributes=None, cert_id=None, thumbprint=None, **kwargs):
# type: (Optional[models.CertificateAttributes], Optional[str], Optional[bytes], **Any) -> None
self._attributes = attributes
self._id = cert_id
self._vault_id = parse_vault_id(cert_id)
self._thumbprint = thumbprint
self._tags = kwargs.get("tags", None)
@classmethod
def _from_certificate_item(cls, certificate_item):
# type: (models.CertificateItem) -> CertificateBase
"""Construct a CertificateBase from an autorest-generated CertificateItem"""
return cls(
attributes=certificate_item.attributes,
cert_id=certificate_item.id,
thumbprint=certificate_item.x509_thumbprint,
tags=certificate_item.tags,
)
@property
def id(self):
# type: () -> str
"""Certificate identifier.
:rtype: str
"""
return self._id
@property
def name(self):
# type: () -> str
"""The name of the certificate.
:rtype: str
"""
return self._vault_id.name
@property
def enabled(self):
# type: () -> bool
"""Whether the certificate is enabled or not.
:rtype: bool
"""
return self._attributes.enabled if self._attributes else None
@property
def not_before(self):
# type: () -> datetime
"""The datetime before which the certificate is not valid.
:rtype: datetime
"""
return self._attributes.not_before if self._attributes else None
@property
def expires(self):
# type: () -> datetime
"""The datetime when the certificate expires.
:rtype: datetime
"""
return self._attributes.expires if self._attributes else None
@property
def created(self):
# type: () -> datetime
"""The datetime when the certificate is created.
:rtype: datetime
"""
return self._attributes.created if self._attributes else None
@property
def updated(self):
# type: () -> datetime
"""The datetime when the certificate was last updated.
:rtype: datetime
"""
return self._attributes.updated if self._attributes else None
@property
def recovery_level(self):
# type: () -> models.DeletionRecoveryLevel
"""The deletion recovery level currently in effect for the certificate.
:rtype: models.DeletionRecoveryLevel
"""
return self._attributes.recovery_level if self._attributes else None
@property
def vault_url(self):
# type: () -> str
"""The name of the vault that the certificate is created in.
:rtype: str
"""
return self._vault_id.vault_url
@property
def thumbprint(self):
# type: () -> bytes
"""Thumbprint of the certificate.
:rtype: bytes
"""
return self._thumbprint
@property
def tags(self):
# type: () -> Dict[str, str]
"""Application specific metadata in the form of key-value pairs.
:rtype: str
"""
return self._tags
@property
def version(self):
# type: () -> str
"""The version of the certificate
:rtype: str
"""
return self._vault_id.version
class Certificate(CertificateBase):
"""Consists of a certificate and its attributes
:param policy: The management policy for the certificate.
:type policy: ~azure.keyvault.certificates.CertificatePolicy
:param str cert_id: The certificate id.
:param bytes thumbprint: Thumbprint of the certificate
:param str key_id: The key id.
:param str secret_id: The secret id.
:param attributes: The certificate attributes.
:type attributes: ~azure.keyvault.certificates.CertificateAttributes
:param bytearray cer: CER contents of the X509 certificate.
"""
def __init__(
self,
policy, # type: models.CertificatePolicy
cert_id, # type: Optional[str]
thumbprint=None, # type: Optional[bytes]
key_id=None, # type: Optional[str]
secret_id=None, # type: Optional[str]
attributes=None, # type: Optional[CertificateAttributes]
cer=None, # type: Optional[bytes]
**kwargs # type: **Any
):
# type: (...) -> None
super(Certificate, self).__init__(attributes=attributes, cert_id=cert_id, thumbprint=thumbprint, **kwargs)
self._key_id = key_id
self._secret_id = secret_id
self._policy = policy
self._cer = cer
@classmethod
def _from_certificate_bundle(cls, certificate_bundle):
# type: (models.CertificateBundle) -> Certificate
"""Construct a certificate from an autorest-generated certificateBundle"""
# pylint:disable=protected-access
return cls(
attributes=certificate_bundle.attributes,
cert_id=certificate_bundle.id,
thumbprint=certificate_bundle.x509_thumbprint,
key_id=certificate_bundle.kid,
secret_id=certificate_bundle.sid,
policy=CertificatePolicy._from_certificate_policy_bundle(certificate_bundle.policy),
cer=certificate_bundle.cer,
tags=certificate_bundle.tags,
)
@property
def key_id(self):
# type: () -> str
""":rtype: str"""
return self._key_id
@property
def secret_id(self):
# type: () -> str
""":rtype: str"""
return self._secret_id
@property
def policy(self):
# type: () -> CertificatePolicy
"""The management policy of the certificate.
:rtype: ~azure.keyvault.certificates.CertificatePolicy
"""
return self._policy
@property
def cer(self):
# type: () -> bytes
"""The CER contents of the certificate.
:rtype: bytes
"""
return self._cer
class CertificateOperation(object):
# pylint:disable=too-many-instance-attributes
"""A certificate operation is returned in case of asynchronous requests.
:param str cert_operation_id: The certificate id.
:param str issuer_name: Name of the operation's issuer object or reserved names;
for example, 'Self' or 'Unknown'.
:param str certificate_type: Type of certificate requested from the issuer provider.
:param bool certificate_transparency: Indicates if the certificate this operation is
running for is published to certificate transparency logs.
:param bytearray csr: The certificate signing request (CSR) that is being used in the certificate
operation.
:param bool cancellation_requested: Indicates if cancellation was requested on the certificate
operation.
:param str status: Status of the certificate operation.
:param str status_details: The status details of the certificate operation
:param error: Error encountered, if any, during the certificate operation.
:type error: ~azure.keyvault.certificates.Error
:param str target: Location which contains the result of the certificate operation.
:param str request_id: Identifier for the certificate operation.
"""
def __init__(
self,
cert_operation_id=None, # type: Optional[str]
issuer_name=None, # type: Optional[str]
certificate_type=None, # type: Optional[str]
certificate_transparency=False, # type: Optional[bool]
csr=None, # type: Optional[bytes]
cancellation_requested=False, # type: Optional[bool]
status=None, # type: Optional[str]
status_details=None, # type: Optional[str]
error=None, # type: Optional[models.Error]
target=None, # type: Optional[str]
request_id=None # type: Optional[str]
):
# type: (...) -> None
self._id = cert_operation_id
self._vault_id = parse_vault_id(cert_operation_id)
self._issuer_name = issuer_name
self._certificate_type = certificate_type
self._certificate_transparency = certificate_transparency
self._csr = csr
self._cancellation_requested = cancellation_requested
self._status = status
self._status_details = status_details
self._error = error
self._target = target
self._request_id = request_id
@classmethod
def _from_certificate_operation_bundle(cls, certificate_operation_bundle):
# type: (models.CertificateOperation) -> CertificateOperation
"""Construct a CertificateOperation from an autorest-generated CertificateOperation"""
return cls(
cert_operation_id=certificate_operation_bundle.id,
issuer_name=(certificate_operation_bundle.issuer_parameters.name
if certificate_operation_bundle.issuer_parameters else None),
certificate_type=(certificate_operation_bundle.issuer_parameters.certificate_type
if certificate_operation_bundle.issuer_parameters else None),
certificate_transparency=(certificate_operation_bundle.issuer_parameters.certificate_transparency
if certificate_operation_bundle.issuer_parameters else None),
csr=certificate_operation_bundle.csr,
cancellation_requested=certificate_operation_bundle.cancellation_requested,
status=certificate_operation_bundle.status,
status_details=certificate_operation_bundle.status_details,
error=certificate_operation_bundle.error,
target=certificate_operation_bundle.target,
request_id=certificate_operation_bundle.request_id,
)
@property
def id(self):
# type: () -> str
""":rtype: str"""
return self._id
@property
def name(self):
# type: () -> str
""":rtype: str"""
return self._vault_id.name
@property
def issuer_name(self):
# type: () -> str
"""The name of the issuer of the certificate.
:rtype: str
"""
return self._issuer_name
@property
def certificate_type(self):
# type: () -> str
"""Type of certificate to be requested from the issuer provider.
:rtype: str
"""
return self._certificate_type
@property
def certificate_transparency(self):
# type: () -> bool
"""Whether certificates generated under this policy should be published to certificate
transparency logs.
:rtype: bool
"""
return self._certificate_transparency
@property
def csr(self):
# type: () -> bytes
"""The certificate signing request that is being used in this certificate operation.
:rtype: bytes
"""
return self._csr
@property
def cancellation_requested(self):
# type: () -> bool
"""Whether cancellation was requested on the certificate operation.
:rtype: bool
"""
return self._cancellation_requested
@property
def status(self):
# type: () -> str
""":rtype: str"""
return self._status
@property
def status_details(self):
# type: () -> str
""":rtype: str"""
return self._status_details
@property
def error(self):
# type: () -> models.Error
""":rtype: models.Error"""
return self._error
@property
def target(self):
# type: () -> str
"""Location which contains the result of the certificate operation.
:rtype: str
"""
return self._target
@property
def request_id(self):
# type: () -> str
"""Identifier for the certificate operation.
:rtype: str
"""
return self._request_id
class CertificatePolicy(object):
"""Management policy for a certificate.
:param attributes: the certificate attributes.
:type attributes: ~azure.keyvault.certificates.models.CertificateAttributes
:param str cert_policy_id: The certificate id.
:param key_properties: Properties of the key backing the certificate.
:type key_properties: ~azure.keyvault.certificates.models.KeyProperties
:param content_type: The media type (MIME type) of the secret backing the certificate.
:type content_type: ~azure.keyvault.certificates.enums.SecretContentType or str
:param str subject_name: The subject name of the certificate. Should be a valid X509
distinguished name.
:param int validity_in_months: The duration that the certificate is valid in months.
:param lifetime_actions: Actions that will be performed by Key Vault over the lifetime
of a certificate
:type lifetime_actions: Iterable[~azure.keyvault.certificates.LifetimeAction]
:param str issuer_name: Name of the referenced issuer object or reserved names; for example,
'Self' or 'Unknown"
:param str certificate_type: Type of certificate to be requested from the issuer provider.
:param bool certificate_transparency: Indicates if the certificates generated under this policy
should be published to certificate transparency logs.
:param san_emails: Subject alternative emails of the X509 object. Only one out of san_emails,
san_dns_names, and san_upns may be set.
:type san_emails: Iterable[str]
:param san_dns_names: Subject alternative DNS names of the X509 object. Only one out of
san_emails, san_dns_names, and san_upns may be set.
:type san_dns_names: Iterable[str]
:param san_upns: Subject alternative user principal names. Only one out of san_emails,
san_dns_names, and san_upns may be set.
:type san_upns: Iterable[str]
"""
# pylint:disable=too-many-instance-attributes
def __init__(
self,
attributes=None, # type: Optional[models.CertificateAttributes]
cert_policy_id=None, # type: Optional[str]
key_properties=None, # type: Optional[KeyProperties]
content_type=None, # type: Optional[models.SecretContentType] or str
subject_name=None, # type: Optional[str]
validity_in_months=None, # type: Optional[int]
lifetime_actions=None, # type: Optional[list[LifetimeAction]]
issuer_name=None, # type: Optional[str]
certificate_type=None, # type: Optional[str]
certificate_transparency=None, # type: Optional[bool]
**kwargs # type: **Any
):
# type: (...) -> None
self._attributes = attributes
self._id = cert_policy_id
self._key_properties = key_properties
self._content_type = content_type
self._subject_name = subject_name
self._validity_in_months = validity_in_months
self._lifetime_actions = lifetime_actions
self._issuer_name = issuer_name
self._certificate_type = certificate_type
self._certificate_transparency = certificate_transparency
self._san_emails = kwargs.pop('san_emails', None)
self._san_dns_names = kwargs.pop('san_dns_names', None)
self._san_upns = kwargs.pop('san_upns', None)
sans = [self._san_emails, self._san_upns, self._san_dns_names]
if len([x for x in sans if x is not None]) > 1:
raise ValueError("You can only set at most one of san_emails, san_dns_names, and san_upns")
def _to_certificate_policy_bundle(self):
# type: (CertificatePolicy) -> models.CertificatePolicy
"""Construct a version emulating the generated CertificatePolicy from a wrapped CertificatePolicy"""
if self.issuer_name or self.certificate_type or self.certificate_transparency:
issuer_parameters = models.IssuerParameters(
name=self.issuer_name,
certificate_type=self.certificate_type,
certificate_transparency=self.certificate_transparency
)
else:
issuer_parameters = None
# pylint:disable=too-many-boolean-expressions
if (self.enabled is not None or
self.not_before is not None or
self.expires is not None or
self.created is not None or
self.updated is not None
or self.recovery_level):
attributes = models.CertificateAttributes(
enabled=self.enabled,
not_before=self.not_before,
expires=self.expires,
created=self.created,
updated=self.updated,
recovery_level=self.recovery_level
)
else:
attributes = None
if self.lifetime_actions:
lifetime_actions = []
for lifetime_action in self.lifetime_actions:
lifetime_actions.append(
models.LifetimeAction(
trigger=models.Trigger(
lifetime_percentage=lifetime_action.lifetime_percentage,
days_before_expiry=lifetime_action.days_before_expiry
),
action=models.Action(action_type=lifetime_action.action_type.value
if not isinstance(lifetime_action.action_type, str)
and lifetime_action.action_type
else lifetime_action.action_type)
)
)
else:
lifetime_actions = None
# pylint:disable=too-many-boolean-expressions
if(self.subject_name or
(self.key_properties and self.key_properties.ekus) or
(self.key_properties and self.key_properties.key_usage) or
self.san_emails or
self.san_upns or
self.san_dns_names or
self.validity_in_months):
if self.key_properties and self.key_properties.key_usage:
key_usage = [k.value if not isinstance(k, str) else k for k in self.key_properties.key_usage]
else:
key_usage = None
sans = [self._san_emails, self._san_upns, self._san_dns_names]
if len([x for x in sans if x is not None]) > 1:
raise ValueError("You can only set at most one of san_emails, san_dns_names, and san_upns")
x509_certificate_properties = models.X509CertificateProperties(
subject=self.subject_name,
ekus=self.key_properties.ekus if self.key_properties else None,
subject_alternative_names=models.SubjectAlternativeNames(
emails=self.san_emails,
upns=self.san_upns,
dns_names=self.san_dns_names
),
key_usage=key_usage,
validity_in_months=self.validity_in_months
)
else:
x509_certificate_properties = None
if (self.key_properties and
(self.key_properties.exportable or
self.key_properties.key_type or
self.key_properties.key_size or
self.key_properties.reuse_key or
self.key_properties.curve)):
key_properties = models.KeyProperties(
exportable=self.key_properties.exportable,
key_type=(self.key_properties.key_type.value
if not isinstance(self.key_properties.key_type, str) and self.key_properties.key_type
else self.key_properties.key_type),
key_size=self.key_properties.key_size,
reuse_key=self.key_properties.reuse_key,
curve=(self.key_properties.curve.value
if not isinstance(self.key_properties.curve, str) and self.key_properties.curve
else self.key_properties.curve)
)
else:
key_properties = None
if self.content_type:
secret_properties = models.SecretProperties(content_type=self.content_type.value
if not isinstance(self.content_type, str) and self.content_type
else self.content_type)
else:
secret_properties = None
policy_bundle = models.CertificatePolicy(
id=self.id,
key_properties=key_properties,
secret_properties=secret_properties,
x509_certificate_properties=x509_certificate_properties,
lifetime_actions=lifetime_actions,
issuer_parameters=issuer_parameters,
attributes=attributes
)
return policy_bundle
@classmethod
def _from_certificate_policy_bundle(cls, certificate_policy_bundle):
# type: (models.CertificatePolicy) -> CertificatePolicy
"""Construct a CertificatePolicy from an autorest-generated CertificatePolicy"""
if certificate_policy_bundle.lifetime_actions:
lifetime_actions = [
LifetimeAction(
action_type=(ActionType(item.action.action_type)
if item.action.action_type else None),
lifetime_percentage=item.trigger.lifetime_percentage,
days_before_expiry=item.trigger.days_before_expiry,
)
for item in certificate_policy_bundle.lifetime_actions
]
else:
lifetime_actions = None
key_properties_bundle = certificate_policy_bundle.key_properties
# pylint:disable=too-many-boolean-expressions
if key_properties_bundle:
if certificate_policy_bundle.x509_certificate_properties and \
certificate_policy_bundle.x509_certificate_properties.key_usage:
key_usage = [KeyUsageType(k) for k in certificate_policy_bundle.x509_certificate_properties.key_usage]
else:
key_usage = None
key_properties = KeyProperties(
exportable=certificate_policy_bundle.key_properties.exportable,
key_type=(KeyType(certificate_policy_bundle.key_properties.key_type)
if certificate_policy_bundle.key_properties.key_type else None),
key_size=certificate_policy_bundle.key_properties.key_size,
reuse_key=certificate_policy_bundle.key_properties.reuse_key,
curve=(KeyCurveName(certificate_policy_bundle.key_properties.curve)
if certificate_policy_bundle.key_properties.curve else None),
ekus=(certificate_policy_bundle.x509_certificate_properties.ekus
if certificate_policy_bundle.x509_certificate_properties else None),
key_usage=key_usage,
)
else:
key_properties = None
return cls(
attributes=certificate_policy_bundle.attributes,
cert_policy_id=certificate_policy_bundle.id,
issuer_name=(certificate_policy_bundle.issuer_parameters.name
if certificate_policy_bundle.issuer_parameters else None),
certificate_type=(certificate_policy_bundle.issuer_parameters.certificate_type
if certificate_policy_bundle.issuer_parameters else None),
certificate_transparency=(certificate_policy_bundle.issuer_parameters.certificate_transparency
if certificate_policy_bundle.issuer_parameters else None),
lifetime_actions=lifetime_actions,
subject_name=(certificate_policy_bundle.x509_certificate_properties.subject
if certificate_policy_bundle.x509_certificate_properties else None),
key_properties=key_properties,
content_type=(SecretContentType(certificate_policy_bundle.secret_properties.content_type)
if certificate_policy_bundle.secret_properties else None),
san_emails=(certificate_policy_bundle.x509_certificate_properties.subject_alternative_names.emails
if certificate_policy_bundle.x509_certificate_properties and
certificate_policy_bundle.x509_certificate_properties.subject_alternative_names else None),
san_upns=(certificate_policy_bundle.x509_certificate_properties.subject_alternative_names.upns
if certificate_policy_bundle.x509_certificate_properties and
certificate_policy_bundle.x509_certificate_properties.subject_alternative_names else None),
san_dns_names=(certificate_policy_bundle.x509_certificate_properties.subject_alternative_names.dns_names
if certificate_policy_bundle.x509_certificate_properties and
certificate_policy_bundle.x509_certificate_properties.subject_alternative_names else None),
validity_in_months=(certificate_policy_bundle.x509_certificate_properties.validity_in_months
if certificate_policy_bundle.x509_certificate_properties else None)
)
@property
def id(self):
# type: () -> str
""":rtype: str"""
return self._id
@property
def key_properties(self):
# type: () -> KeyProperties
"""Properties of the key backing the certificate.
:rtype: ~azure.keyvault.certificates.models.KeyProperties
"""
return self._key_properties
@property
def content_type(self):
# type: () -> SecretContentType
"""The media type (MIME type).
:rtype: ~azure.keyvault.certificates.enums.SecretContentType
"""
return self._content_type
@property
def subject_name(self):
# type: () -> str
""":rtype: str"""
return self._subject_name
@property
def san_emails(self):
# type: () -> list[str]
"""The subject alternative email addresses.
:rtype: list[str]
"""
return self._san_emails
@property
def san_dns_names(self):
# type: () -> list[str]
"""The subject alternative domain names.
:rtype: list[str]
"""
return self._san_dns_names
@property
def san_upns(self):
# type: () -> list[str]
"""The subject alternative user principal names.
:rtype: list[str]
"""
return self._san_upns
@property
def validity_in_months(self):
# type: () -> int
"""The duration that the certificate is valid for in months.
:rtype: int
"""
return self._validity_in_months
@property
def lifetime_actions(self):
# type: () -> list[LifetimeAction]
"""Actions and their triggers that will be performed by Key Vault over
the lifetime of the certificate.
:rtype: list[~azure.keyvault.certificates.models.LifetimeAction]
"""
return self._lifetime_actions
@property
def issuer_name(self):
# type: () -> str
"""Name of the referenced issuer object or reserved names for the issuer
of the certificate.
:rtype: str
"""
return self._issuer_name
@property
def certificate_type(self):
# type: () -> str
"""Type of certificate requested from the issuer provider.
:rtype: str
"""
return self._certificate_type
@property
def certificate_transparency(self):
# type: () -> bool
"""Whether the certificates generated under this policy should be published
to certificate transparency logs.
:rtype: bool
"""
return self._certificate_transparency
@property
def enabled(self):
# type: () -> bool
"""Whether the certificate is enabled or not.
:rtype: bool
"""
return self._attributes.enabled if self._attributes else None
@property
def not_before(self):
# type: () -> datetime
"""The datetime before which the certificate is not valid.
:rtype: datetime
"""
return self._attributes.not_before if self._attributes else None
@property
def expires(self):
# type: () -> datetime
"""The datetime when the certificate expires.
:rtype: datetime
"""
return self._attributes.expires if self._attributes else None
@property
def created(self):
# type: () -> datetime
"""The datetime when the certificate is created.
:rtype: datetime
"""
return self._attributes.created if self._attributes else None
@property
def updated(self):
# type: () -> datetime
"""The datetime when the certificate was last updated.
:rtype: datetime
"""
return self._attributes.updated if self._attributes else None
@property
def recovery_level(self):
# type: () -> models.DeletionRecoveryLevel
"""The deletion recovery level currently in effect for the certificate.
:rtype: DeletionRecoveryLevel
"""
return self._attributes.recovery_level if self._attributes else None
class Contact(object):
"""The contact information for the vault certificates.
:param str email: Email address of a contact for the certificate.
:param str name: Name of a contact for the certificate.
:param str phone: phone number of a contact for the certificate.
"""
def __init__(self, email=None, name=None, phone=None):
# type: (Optional[str], Optional[str], Optional[str]) -> None
self._email = email
self._name = name
self._phone = phone
def _to_certificate_contacts_item(self):
# type: (Contact) -> models.Contact
return models.Contact(
email_address=self.email,
name=self.name,
phone=self.phone
)
@classmethod
def _from_certificate_contacts_item(cls, contact_item):
# type: (models.Contact) -> Contact
"""Construct a Contact from an autorest-generated ContactItem."""
return cls(email=contact_item.email_address, name=contact_item.name, phone=contact_item.phone)
@property
def email(self):
# type: () -> str
""":rtype: str"""
return self._email
@property
def name(self):
# type: () -> str
""":rtype: str"""
return self._name
@property
def phone(self):
# type: () -> str
""":rtype: str"""
return self._phone
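# Illustrative example (hypothetical values): a certificate contact built with this class
#
#   contact = Contact(email="[email protected]", name="Security Admin", phone="555-0100")
#   contact._to_certificate_contacts_item()  # converts to the generated models.Contact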
class IssuerBase(object):
"""The base for the issuer containing the issuer metadata.
:param str issuer_id: the ID of the issuer.
"""
def __init__(self, issuer_id=None, provider=None):
# type: (Optional[str], Optional[str]) -> None
self._id = issuer_id
self._vault_id = parse_vault_id(issuer_id)
self._provider = provider
@classmethod
def _from_issuer_item(cls, issuer_item):
# type: (models.CertificateIssuerItem) -> IssuerBase
"""Construct a IssuerBase from an autorest-generated CertificateIssuerItem"""
return cls(issuer_id=issuer_item.id, provider=issuer_item.provider)
@property
def id(self):
# type: () -> str
""":rtype: str"""
return self._id
@property
def name(self):
# type: () -> str
# Issuer name is listed under version under vault_id
""":rtype: str"""
return self._vault_id.version
@property
def provider(self):
# type: () -> str
""":rtype: str"""
return self._provider
@property
def vault_url(self):
# type: () -> str
"""The name of the vault with this issuer.
:rtype: str
"""
return self._vault_id.vault_url
class Issuer(IssuerBase):
"""The issuer for a Key Vault certificate.
:param attributes: Attributes of the issuer object. Only populated by server.
:type attributes: ~azure.keyvault.v7_0.models.IssuerAttributes
:param str provider: The issuer provider.
:param str issuer_id: The ID of the issuer.
:param str account_id: The username / account name / account id.
:param str password: The password / secret / account key.
:param str organization_id: The ID of the organization.
:param admin_details: Details of the organization administrator.
:type admin_details: list[~azure.keyvault.certificates.AdministratorDetails]
"""
def __init__(
self,
attributes=None, # type: Optional[models.IssuerAttributes]
provider=None, # type: Optional[str]
issuer_id=None, # type: Optional[str]
account_id=None, # type: Optional[str]
password=None, # type: Optional[str]
organization_id=None, # type: Optional[str]
admin_details=None, # type: Optional[List[AdministratorDetails]]
**kwargs # type: **Any
):
# type: (...) -> None
super(Issuer, self).__init__(issuer_id=issuer_id, provider=provider, **kwargs)
self._attributes = attributes
self._account_id = account_id
self._password = password
self._organization_id = organization_id
self._admin_details = admin_details
@classmethod
def _from_issuer_bundle(cls, issuer_bundle):
# type: (models.IssuerBundle) -> Issuer
"""Construct a Issuer from an autorest-generated IssuerBundle"""
admin_details = []
admin_details_service = (issuer_bundle.organization_details.admin_details
if issuer_bundle.organization_details else None)
if admin_details_service:
# pylint:disable=protected-access
for admin_detail in admin_details_service:
admin_details.append(AdministratorDetails._from_admin_details_bundle(admin_detail))
return cls(
attributes=issuer_bundle.attributes,
issuer_id=issuer_bundle.id,
provider=issuer_bundle.provider,
account_id=issuer_bundle.credentials.account_id if issuer_bundle.credentials else None,
password=issuer_bundle.credentials.password if issuer_bundle.credentials else None,
organization_id=issuer_bundle.organization_details.id if issuer_bundle.organization_details else None,
admin_details=admin_details
)
@property
def enabled(self):
# type: () -> bool
"""Whether the certificate is enabled or not.
:rtype: bool
"""
return self._attributes.enabled if self._attributes else None
@property
def created(self):
# type: () -> datetime
"""The datetime when the certificate is created.
:rtype: datetime
"""
return self._attributes.created if self._attributes else None
@property
def updated(self):
# type: () -> datetime
"""The datetime when the certificate was last updated.
:rtype: datetime
"""
return self._attributes.updated if self._attributes else None
@property
def account_id(self):
# type: () -> str
"""The username/ account name/ account id.
:rtype: str
"""
return self._account_id
@property
def password(self):
# type: () -> str
"""The password / secret / account key.
:rtype: str
"""
return self._password
@property
def organization_id(self):
# type: () -> str
""":rtype: str"""
return self._organization_id
@property
def admin_details(self):
# type: () -> List[AdministratorDetails]
"""Details of the organization administrator of this issuer.
:rtype: list[~azure.keyvault.certificates.models.AdministratorDetails]
"""
return self._admin_details
class KeyProperties(object):
"""Properties of the key pair backing a certificate.
:param bool exportable: Indicates if the private key can be exported.
:param key_type: The type of key pair to be used for the certificate.
Possible values include: 'EC', 'EC-HSM', 'RSA', 'RSA-HSM', 'oct'
:type key_type: str or ~azure.keyvault.certificates.enums.KeyType
:param int key_size: The key size in bits. For example: 2048, 3072, or 4096
for RSA.
:param bool reuse_key: Indicates if the same key pair will be used on certificate
renewal.
:param curve: Elliptic curve name. For valid values, see KeyCurveName.
Possible values include: 'P-256', 'P-384', 'P-521', 'P-256K'
:type curve: str or ~azure.keyvault.certificates.enums.KeyCurveName
:param ekus: The enhanced key usages.
:type ekus: list[str]
:param key_usage: List of key usages.
:type key_usage: list[str or ~azure.keyvault.certificates.enums.KeyUsageType]
"""
def __init__(
self,
exportable=None, # type: Optional[bool]
key_type=None, # type: Optional[KeyType]
key_size=None, # type: Optional[int]
reuse_key=None, # type: Optional[bool]
curve=None, # type: Optional[KeyCurveName]
ekus=None, # type: Optional[list[str]]
key_usage=None # type: Optional[list[KeyUsageType]]
):
# type: (...) -> None
self._exportable = exportable
self._key_type = key_type
self._key_size = key_size
self._reuse_key = reuse_key
self._curve = curve
self._ekus = ekus
self._key_usage = key_usage
@property
def exportable(self):
# type: () -> bool
"""Whether the private key can be exported.
:rtype: bool
"""
return self._exportable
@property
def key_type(self):
# type: () -> KeyType
"""The type of key pair to be used for the certificate.
:rtype: ~azure.keyvault.certificates.enums.KeyType
"""
return self._key_type
@property
def key_size(self):
# type: () -> int
"""The key size in bits.
:rtype: int
"""
return self._key_size
@property
def reuse_key(self):
# type: () -> bool
"""Whether the same key pair will be used on certificate renewal.
:rtype: bool
"""
return self._reuse_key
@property
def curve(self):
# type: () -> KeyCurveName
"""Elliptic curve name.
:rtype: ~azure.keyvault.certificates.enums.KeyCurveName
"""
return self._curve
@property
def ekus(self):
# type: () -> list[str]
"""The enhanced key usage.
:rtype: list[str]
"""
return self._ekus
@property
def key_usage(self):
# type: () -> list[KeyUsageType]
"""List of key usages.
:rtype: list[~azure.keyvault.certificates.enums.KeyUsageType]
"""
return self._key_usage
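# Usage sketch (illustrative values, not returned by the service): build key
# properties for a non-exportable RSA 2048 key pair; `curve` is only meaningful
# for the elliptic-curve key types ("EC", "EC-HSM").
#
#     rsa_key_properties = KeyProperties(
#         exportable=False,
#         key_type="RSA",
#         key_size=2048,
#         reuse_key=True,
#     )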
class LifetimeAction(object):
"""Action and its trigger that will be performed by certificate Vault over the
lifetime of a certificate.
:param action_type: The type of the action. Possible values include: 'EmailContacts',
'AutoRenew'
:type action_type: str or ~azure.keyvault.certificates.enums.ActionType
:param int lifetime_percentage: Percentage of lifetime at which to trigger. Value
should be between 1 and 99.
:param int days_before_expiry: Days before expiry to attempt renewal. Value should be between
1 and validity_in_months multiplied by 27. I.e., if validity_in_months is 36, then value
should be between 1 and 972 (36 * 27).
"""
def __init__(self, action_type, lifetime_percentage=None, days_before_expiry=None):
# type: (ActionType, Optional[int], Optional[int]) -> None
self._lifetime_percentage = lifetime_percentage
self._days_before_expiry = days_before_expiry
self._action_type = action_type
@property
def lifetime_percentage(self):
# type: () -> int
"""Percentage of lifetime at which to trigger.
:rtype: int
"""
return self._lifetime_percentage
@property
def days_before_expiry(self):
# type: () -> int
"""Days before expiry to attempt renewal.
:rtype: int
"""
return self._days_before_expiry
@property
def action_type(self):
# type: () -> str
"""The type of the action that will be executed.
Valid values are "EmailContacts" and "AutoRenew"
:rtype: str or ~azure.keyvault.certificates.enums.ActionType
"""
return self._action_type
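# Usage sketch (illustrative values): trigger an automatic renewal once 80% of
# the certificate's lifetime has elapsed. Normally only one of
# `lifetime_percentage` or `days_before_expiry` would be supplied.
#
#     renew_at_80_percent = LifetimeAction(
#         action_type="AutoRenew",
#         lifetime_percentage=80,
#     )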
class DeletedCertificate(Certificate):
"""A Deleted Certificate consisting of its previous id, attributes and its
tags, as well as information on when it will be purged.
:param attributes: The certificate attributes
:type attributes: ~azure.keyvault.certificates.CertificateAttributes
:param str cert_id: The certificate id.
:param bytes thumbprint: Thumbprint of the certificate.
:param str key_id: The key id.
:param str secret_id: The secret id.
:param policy: The management policy of the deleted certificate.
:type policy: ~azure.keyvault.certificates.CertificatePolicy
:param bytearray cer: CER contents of the X509 certificate.
:param datetime deleted_date: The time when the certificate was deleted, in UTC
:param str recovery_id: The url of the recovery object, used to identify and
recover the deleted certificate.
:param datetime scheduled_purge_date: The time when the certificate is scheduled to
be purged, in UTC
"""
def __init__(
self,
attributes=None, # type: Optional[CertificateAttributes]
cert_id=None, # type: Optional[str]
thumbprint=None, # type: Optional[bytes]
key_id=None, # type: Optional[str]
secret_id=None, # type: Optional[str]
policy=None, # type: Optional[CertificatePolicy]
cer=None, # type: Optional[bytes]
deleted_date=None, # type: Optional[datetime]
recovery_id=None, # type: Optional[str]
scheduled_purge_date=None, # type: Optional[datetime]
**kwargs # type: **Any
):
# type: (...) -> None
super(DeletedCertificate, self).__init__(
policy=policy,
cert_id=cert_id,
thumbprint=thumbprint,
key_id=key_id,
secret_id=secret_id,
attributes=attributes,
cer=cer,
**kwargs
)
self._deleted_date = deleted_date
self._recovery_id = recovery_id
self._scheduled_purge_date = scheduled_purge_date
@classmethod
def _from_deleted_certificate_item(cls, deleted_certificate_item):
# type: (models.DeletedCertificateItem) -> DeletedCertificate
"""Construct a DeletedCertificate from an autorest-generated DeletedCertificateItem"""
return cls(
attributes=deleted_certificate_item.attributes,
cert_id=deleted_certificate_item.id,
thumbprint=deleted_certificate_item.x509_thumbprint,
key_id=None,
secret_id=None,
policy=None,
cer=None,
deleted_date=deleted_certificate_item.deleted_date,
recovery_id=deleted_certificate_item.recovery_id,
scheduled_purge_date=deleted_certificate_item.scheduled_purge_date,
tags=deleted_certificate_item.tags,
)
@classmethod
def _from_deleted_certificate_bundle(cls, deleted_certificate_bundle):
# type: (models.DeletedCertificateBundle) -> DeletedCertificate
"""Construct a DeletedCertificate from an autorest-generated DeletedCertificateItem"""
# pylint:disable=protected-access
return cls(
attributes=deleted_certificate_bundle.attributes,
cert_id=deleted_certificate_bundle.id,
thumbprint=deleted_certificate_bundle.x509_thumbprint,
key_id=deleted_certificate_bundle.kid,
secret_id=deleted_certificate_bundle.sid,
policy=CertificatePolicy._from_certificate_policy_bundle(deleted_certificate_bundle.policy),
cer=deleted_certificate_bundle.cer,
deleted_date=deleted_certificate_bundle.deleted_date,
recovery_id=deleted_certificate_bundle.recovery_id,
scheduled_purge_date=deleted_certificate_bundle.scheduled_purge_date,
tags=deleted_certificate_bundle.tags,
)
@property
def deleted_date(self):
# type: () -> datetime
"""The datetime that the certificate was deleted.
:rtype: datetime
"""
return self._deleted_date
@property
def recovery_id(self):
# type: () -> str
"""The url of the recovery object, used to identify and recover the deleted certificate.
:rtype: str
"""
return self._recovery_id
@property
def scheduled_purge_date(self):
# type: () -> datetime
"""The datetime when the certificate is scheduled to be purged.
:rtype: datetime
"""
return self._scheduled_purge_date
|
py | b416062de98e6965399c277f882ca87ea6f46ef2 | import os
import logging
import shutil
import zipfile
def clear_and_init_folder(folder: str):
"""
:param folder:
:return:
"""
if os.path.isdir(folder):
shutil.rmtree(folder)
logging.debug("Folder already exist, removing {}".format(folder))
if not os.path.isdir(folder):
os.makedirs(folder, exist_ok=True)
def uzip_all_files(src: str, dst: str):
"""
Unzip all the zip files in the src folder to the dst folder
:param src:
:param dst:
:return:
"""
for f in os.listdir(src):
if f.endswith('.zip'):
zip_ref = zipfile.ZipFile(os.path.join(src, f), 'r')
zip_ref.extractall(dst)
zip_ref.close()
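# Usage sketch (hypothetical paths): wipe the destination folder, then unpack
# every .zip archive found in the source directory into it.
#
#     clear_and_init_folder("/tmp/extracted")
#     uzip_all_files("/tmp/archives", "/tmp/extracted")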
|
py | b41606a07aeabf69ea2eb1672e3accd689e731b1 | from django.db.models.loading import get_model
from django.core.management.base import LabelCommand
from calaccess_raw.management.commands import CalAccessCommand
class Command(CalAccessCommand, LabelCommand):
help = 'Compare the number of records in a model against its source CSV'
args = '<model name>'
def handle_label(self, label, **options):
self.log(" Verifying %s" % label)
# Get the model
model = get_model("calaccess_raw", label)
# Get the db total
cnt = model.objects.count()
# Get the CSV total
csv_path = model.objects.get_csv_path()
infile = open(csv_path)
csv_record_cnt = len(infile.readlines()) - 1
infile.close()
# Report back on how we did
if cnt == csv_record_cnt:
self.success(" Table record count matches CSV")
else:
self.failure(' Table Record count doesn\'t match CSV. \
Table: %s\tCSV: %s' % (
cnt,
csv_record_cnt,
))
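# Invocation sketch: as a LabelCommand, each positional label names a
# calaccess_raw model to verify (the command name itself comes from this
# module's filename, which is not shown here):
#
#     python manage.py <command_name> <ModelName> [<ModelName> ...]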
|
py | b41606eed9d74d68446481b950a7e823f52ceae1 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.sensors.base import BaseSensorOperator
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class TimeDeltaSensor(BaseSensorOperator):
"""
Waits for a timedelta after the task's execution_date + schedule_interval.
In Airflow, the daily task stamped with ``execution_date``
2016-01-01 can only start running on 2016-01-02. The timedelta here
represents the time after the execution period has closed.
:param delta: time length to wait after execution_date before succeeding
:type delta: datetime.timedelta
"""
@apply_defaults
def __init__(self, *, delta, **kwargs):
super().__init__(**kwargs)
self.delta = delta
def poke(self, context):
dag = context['dag']
target_dttm = dag.following_schedule(context['execution_date'])
target_dttm += self.delta
self.log.info('Checking if the time (%s) has come', target_dttm)
return timezone.utcnow() > target_dttm
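# Usage sketch (assumes an existing DAG object named `dag`; illustrative only):
#
#     from datetime import timedelta
#
#     wait_two_hours = TimeDeltaSensor(
#         task_id="wait_two_hours",
#         delta=timedelta(hours=2),
#         dag=dag,
#     )
#
# With a daily schedule, the instance for execution_date 2016-01-01 succeeds
# only after wall-clock time passes 2016-01-02 00:00 plus two hours.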
|
py | b41609b7648275ec93c2d685f17e61ab97558db0 | #!/usr/bin/env python
import asyncio
from os.path import join, dirname
import logging
import argparse
from eth_account.local import LocalAccount
import pandas as pd
import platform
import re
from six import string_types
from typing import (
List,
Dict,
Optional,
Tuple,
Any,
Set,
Callable,
)
from wings.clock import (
Clock,
ClockMode
)
from wings.ethereum_chain import EthereumChain
from wings.market.binance_market import BinanceMarket
from wings.market.coinbase_pro_market import CoinbaseProMarket
from wings.market.ddex_market import DDEXMarket
from wings.market.market_base import MarketBase
from wings.market.radar_relay_market import RadarRelayMarket
from wings.network_iterator import NetworkStatus
from wings.order_book_tracker import OrderBookTrackerDataSourceType
from wings.trade import Trade
from wings.wallet.web3_wallet import Web3Wallet
from hummingbot import init_logging
from hummingbot.cli.ui.keybindings import load_key_bindings
from hummingbot.cli.ui.parser import load_parser, ThrowingArgumentParser
from hummingbot.cli.ui.hummingbot_cli import HummingbotCLI
from hummingbot.cli.ui.completer import load_completer
from hummingbot.cli.utils.symbol_splitter import SymbolSplitter
from hummingbot.cli.utils.wallet_setup import (
create_and_save_wallet,
import_and_save_wallet,
list_wallets,
unlock_wallet
)
from hummingbot.cli.errors import (
InvalidCommandError,
ArgumentParserError
)
from hummingbot.cli.config.config_var import ConfigVar
from hummingbot.cli.config.in_memory_config_map import in_memory_config_map
from hummingbot.cli.config.global_config_map import global_config_map
from hummingbot.cli.config.config_helpers import (
get_strategy_config_map,
write_config_to_yml,
load_required_configs,
parse_cvar_value,
copy_strategy_template,
get_erc20_token_addresses,
)
from hummingbot.cli.settings import EXCHANGES
from hummingbot.logger.report_aggregator import ReportAggregator
from hummingbot.strategy.cross_exchange_market_making import (
CrossExchangeMarketMakingStrategy,
CrossExchangeMarketPair,
)
from hummingbot.strategy.arbitrage import (
ArbitrageStrategy,
ArbitrageMarketPair
)
from hummingbot.strategy.discovery import (
DiscoveryStrategy,
DiscoveryMarketPair
)
from hummingbot.cli.utils.exchange_rate_conversion import ExchangeRateConversion
from hummingbot.cli.utils.ethereum import check_web3
from hummingbot.data_feed.data_feed_base import DataFeedBase
from hummingbot.data_feed.coin_cap_data_feed import CoinCapDataFeed
from hummingbot.cli.utils.stop_loss_tracker import StopLossTracker
s_logger = None
class HummingbotApplication:
KILL_TIMEOUT = 5.0
@classmethod
def logger(cls) -> logging.Logger:
global s_logger
if s_logger is None:
s_logger = logging.getLogger(__name__)
return s_logger
def __init__(self):
self.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
self.parser: ThrowingArgumentParser = load_parser(self)
self.app = HummingbotCLI(
input_handler=self._handle_command,
bindings=load_key_bindings(self),
completer=load_completer(self))
self.acct: Optional[LocalAccount] = None
self.markets: Dict[str, MarketBase] = {}
self.wallet: Optional[Web3Wallet] = None
self.strategy_task: Optional[asyncio.Task] = None
self.strategy: Optional[CrossExchangeMarketMakingStrategy] = None
self.market_pair: Optional[CrossExchangeMarketPair] = None
self.clock: Optional[Clock] = None
self.assets: Optional[Set[str]] = set()
self.starting_balances = {}
self.placeholder_mode = False
self.log_queue_listener: Optional[logging.handlers.QueueListener] = None
self.reporting_module: Optional[ReportAggregator] = None
self.data_feed: Optional[DataFeedBase] = None
self.stop_loss_tracker: Optional[StopLossTracker] = None
def init_reporting_module(self):
if not self.reporting_module:
self.reporting_module = ReportAggregator(
self,
report_aggregation_interval=global_config_map["reporting_aggregation_interval"].value,
log_report_interval=global_config_map["reporting_log_interval"].value)
self.reporting_module.start()
def _handle_command(self, raw_command: str):
raw_command = raw_command.lower().strip()
try:
if self.placeholder_mode:
pass
else:
logging.getLogger("hummingbot.command_history").info(raw_command)
args = self.parser.parse_args(args=raw_command.split())
kwargs = vars(args)
if not hasattr(args, "func"):
return
f = args.func
del kwargs['func']
f(**kwargs)
except InvalidCommandError as e:
self.app.log("Invalid command: %s" % (str(e),))
except ArgumentParserError as e:
self.app.log(str(e))
except EnvironmentError as e:
# Handle order book too thin error more gracefully
if "no price quote is possible" in str(e):
self.logger().error(f"Order book error: Not enough volume on order book. Please consider choosing a "
f"trading pair with more volume or trade on a different exchange. {e}")
except NotImplementedError:
self.app.log("Command not yet implemented. This feature is currently under development.")
except Exception as e:
self.logger().error(e, exc_info=True)
async def _cancel_outstanding_orders(self) -> bool:
on_chain_cancel_on_exit = global_config_map.get("on_chain_cancel_on_exit").value
success = True
self.app.log("Cancelling outstanding orders...")
for market_name, market in self.markets.items():
# By default, the bot does not cancel orders on exit on Radar Relay, since all open orders will
# expire in a short window
if not on_chain_cancel_on_exit and market_name == "radar_relay":
continue
cancellation_results = await market.cancel_all(self.KILL_TIMEOUT)
uncancelled = list(filter(lambda cr: cr.success is False, cancellation_results))
if len(uncancelled) > 0:
success = False
uncancelled_order_ids = list(map(lambda cr: cr.order_id, uncancelled))
self.app.log("\nFailed to cancel the following orders on %s:\n%s" % (
market_name,
'\n'.join(uncancelled_order_ids)
))
if success:
self.app.log("All outstanding orders cancelled.")
return success
async def run(self):
await self.app.run()
@property
def config_complete(self):
config_map = load_required_configs()
for key in self._get_empty_configs():
cvar = config_map.get(key)
if cvar.value is None and cvar.required:
return False
return True
@staticmethod
def _get_empty_configs() -> List[str]:
config_map = load_required_configs()
return [key for key, config in config_map.items() if config.value is None]
def get_wallet_balance(self) -> pd.DataFrame:
return pd.DataFrame(data=list(self.wallet.get_all_balances().items()),
columns=["currency", "balance"]).set_index("currency")
def get_exchange_balance(self, exchange_name: str) -> pd.DataFrame:
market: MarketBase = self.markets[exchange_name]
raw_balance: pd.DataFrame = pd.DataFrame(data=list(market.get_all_balances().items()),
columns=["currency", "balance"]).set_index("currency")
return raw_balance[raw_balance.balance > 0]
def config(self, key: str = None):
self.app.clear_input()
if key is not None and key not in load_required_configs().keys():
self.app.log("Invalid config variable %s" % (key,))
return
if key is not None:
keys = [key]
else:
keys = self._get_empty_configs()
asyncio.ensure_future(self._config_loop(keys))
async def _create_or_import_wallet(self):
choice = await self.app.prompt(prompt=global_config_map.get("wallet").prompt)
if choice == "import":
private_key = await self.app.prompt(prompt="Your wallet private key >>> ", is_password=True)
password = await self.app.prompt(prompt="A password to protect your wallet key >>> ", is_password=True)
self.acct = import_and_save_wallet(password, private_key)
self.app.log("Wallet %s imported into hummingbot" % (self.acct.address,))
elif choice == "create":
password = await self.app.prompt(prompt="A password to protect your wallet key >>> ", is_password=True)
self.acct = create_and_save_wallet(password)
self.app.log("New wallet %s created" % (self.acct.address,))
else:
self.app.log('Invalid choice. Please enter "create" or "import".')
result = await self._create_or_import_wallet()
return result
return self.acct.address
async def _unlock_wallet(self):
choice = await self.app.prompt(prompt="Would you like to unlock your previously saved wallet? (y/n) >>> ")
if choice.lower() in {"y", "yes"}:
wallets = list_wallets()
self.app.log("Existing wallets:")
self.list(obj="wallets")
if len(wallets) == 1:
public_key = wallets[0]
else:
public_key = await self.app.prompt(prompt="Which wallet would you like to import ? >>> ")
password = await self.app.prompt(prompt="Enter your password >>> ", is_password=True)
try:
acct = unlock_wallet(public_key=public_key, password=password)
self.app.log("Wallet %s unlocked" % (acct.address,))
self.acct = acct
return self.acct.address
except Exception as e:
self.app.log("Cannot unlock wallet. Please try again.")
result = await self._unlock_wallet()
return result
else:
value = await self._create_or_import_wallet()
return value
async def _import_or_create_strategy_config(self):
current_strategy: str = in_memory_config_map.get("strategy").value
strategy_file_path_cv: ConfigVar = in_memory_config_map.get("strategy_file_path")
choice = await self.app.prompt(prompt="Import previous configs or create a new config file? "
"(import/create) >>> ")
if choice == "import":
strategy_path = await self.app.prompt(strategy_file_path_cv.prompt)
strategy_path = strategy_path
self.app.log(f"Loading previously saved config file from {strategy_path}...")
elif choice == "create":
strategy_path = await copy_strategy_template(current_strategy)
self.app.log(f"new config file at {strategy_path} created.")
else:
self.app.log('Invalid choice. Please enter "create" or "import".')
strategy_path = await self._import_or_create_strategy_config()
# Validate response
if not strategy_file_path_cv.validate(strategy_path):
self.app.log(f"Invalid path {strategy_path}. Please enter \"create\" or \"import\".")
strategy_path = await self._import_or_create_strategy_config()
return strategy_path
async def _config_loop(self, keys: List[str] = []):
self.app.log("Please follow the prompt to complete configurations: ")
self.placeholder_mode = True
self.app.toggle_hide_input()
single_key = len(keys) == 1
async def single_prompt(cvar: ConfigVar):
if cvar.required or single_key:
if cvar.key == "strategy_file_path":
val = await self._import_or_create_strategy_config()
elif cvar.key == "wallet":
wallets = list_wallets()
if len(wallets) > 0:
val = await self._unlock_wallet()
else:
val = await self._create_or_import_wallet()
logging.getLogger("hummingbot.public_eth_address").info(val)
else:
val = await self.app.prompt(prompt=cvar.prompt, is_password=cvar.is_secure)
if not cvar.validate(val):
self.app.log("%s is not a valid %s value" % (val, cvar.key))
val = await single_prompt(cvar)
else:
val = cvar.value
if val is None or (isinstance(val, string_types) and len(val) == 0):
val = cvar.default
return val
async def inner_loop(_keys: List[str]):
for key in _keys:
current_strategy: str = in_memory_config_map.get("strategy").value
strategy_cm: Dict[str, ConfigVar] = get_strategy_config_map(current_strategy)
if key in in_memory_config_map:
cv: ConfigVar = in_memory_config_map.get(key)
elif key in global_config_map:
cv: ConfigVar = global_config_map.get(key)
else:
cv: ConfigVar = strategy_cm.get(key)
value = await single_prompt(cv)
cv.value = parse_cvar_value(cv, value)
if single_key:
self.app.log(f"\nNew config saved:\n{key}: {str(value)}")
if not self.config_complete:
await inner_loop(self._get_empty_configs())
try:
await inner_loop(keys)
await write_config_to_yml()
if not single_key:
self.app.log("\nConfig process complete. Enter \"start\" to start market making.")
self.app.set_text("start")
except asyncio.TimeoutError:
self.logger().error("Prompt timeout")
except Exception as err:
self.logger().error("Unknown error while writing config. %s" % (err,), exc_info=True)
finally:
self.app.toggle_hide_input()
self.placeholder_mode = False
self.app.change_prompt(prompt=">>> ")
def _initialize_wallet(self, token_symbols: List[str]):
ethereum_rpc_url = global_config_map.get("ethereum_rpc_url").value
erc20_token_addresses = get_erc20_token_addresses(token_symbols)
if self.acct is not None:
self.wallet: Web3Wallet = Web3Wallet(private_key=self.acct.privateKey,
backend_urls=[ethereum_rpc_url],
erc20_token_addresses=erc20_token_addresses,
chain=EthereumChain.MAIN_NET)
def _initialize_markets(self, market_names: List[Tuple[str, List[str]]]):
ethereum_rpc_url = global_config_map.get("ethereum_rpc_url").value
for market_name, symbols in market_names:
if market_name == "ddex" and self.wallet:
market = DDEXMarket(wallet=self.wallet,
web3_url=ethereum_rpc_url,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
symbols=symbols)
elif market_name == "binance":
binance_api_key = global_config_map.get("binance_api_key").value
binance_api_secret = global_config_map.get("binance_api_secret").value
market = BinanceMarket(web3_url=ethereum_rpc_url,
binance_api_key=binance_api_key,
binance_api_secret=binance_api_secret,
order_book_tracker_data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
symbols=symbols)
elif market_name == "radar_relay" and self.wallet:
market = RadarRelayMarket(wallet=self.wallet,
web3_url=ethereum_rpc_url,
symbols=symbols)
elif market_name == "coinbase_pro":
coinbase_pro_api_key = global_config_map.get("coinbase_pro_api_key").value
coinbase_pro_secret_key = global_config_map.get("coinbase_pro_secret_key").value
coinbase_pro_passphrase = global_config_map.get("coinbase_pro_passphrase").value
market = CoinbaseProMarket(web3_url=ethereum_rpc_url,
coinbase_pro_api_key=coinbase_pro_api_key,
coinbase_pro_secret_key=coinbase_pro_secret_key,
coinbase_pro_passphrase=coinbase_pro_passphrase,
symbols=symbols)
else:
raise ValueError(f"Market name {market_name} is invalid.")
self.markets[market_name]: MarketBase = market
def status(self) -> bool:
self.app.log("\n Preliminary checks:")
if self.config_complete:
self.app.log(" - Config check: Config complete")
else:
self.app.log(' x Config check: Pending config. Please enter "config" before starting the bot.')
return False
eth_node_valid = check_web3(global_config_map.get("ethereum_rpc_url").value)
if eth_node_valid:
self.app.log(" - Node check: Ethereum node running and current")
else:
self.app.log(' x Node check: Bad ethereum rpc url. Your node may be syncing. '
'Please re-configure by entering "config ethereum_rpc_url"')
return False
if self.wallet is not None:
if self.wallet.network_status is NetworkStatus.CONNECTED:
has_minimum_eth = self.wallet.get_balance("ETH") > 0.01
if has_minimum_eth:
self.app.log(" - ETH wallet check: Minimum ETH requirement satisfied")
else:
self.app.log(" x ETH wallet check: Not enough ETH in wallet. "
"A small amount of Ether is required for sending transactions on "
"Decentralized Exchanges")
else:
self.app.log(" x ETH wallet check: ETH wallet is not connected.")
loading_markets: List[str] = []
for market_name, market in self.markets.items():
if not market.ready:
loading_markets.append(market_name)
if self.strategy is None:
self.app.log(" x initializing strategy.")
return True
elif len(loading_markets) > 0:
for loading_market in loading_markets:
self.app.log(f" x Market check: Waiting for {loading_market} market to get ready for trading. "
f"Please keep the bot running and try to start again in a few minutes")
return False
elif not all([market.network_status is NetworkStatus.CONNECTED for market in self.markets.values()]):
offline_markets: List[str] = [
market_name
for market_name, market
in self.markets.items()
if market.network_status is not NetworkStatus.CONNECTED
]
for offline_market in offline_markets:
self.app.log(f" x Market check: {offline_market} is currently offline.")
self.app.log(" - Market check: All markets ready")
self.app.log(self.strategy.format_status() + "\n")
return True
def help(self, command):
if command == 'all':
self.app.log(self.parser.format_help())
else:
subparsers_actions = [
action for action in self.parser._actions if isinstance(action, argparse._SubParsersAction)]
for subparsers_action in subparsers_actions:
subparser = subparsers_action.choices.get(command)
self.app.log(subparser.format_help())
def get_balance(self, currency: str = "WETH", wallet: bool = False, exchange: str = None):
if wallet:
if self.wallet is None:
self.app.log('Wallet not available. Please configure your wallet (Enter "config wallet")')
elif currency is None:
self.app.log(f"{self.get_wallet_balance()}")
else:
self.app.log(self.wallet.get_balance(currency.upper()))
elif exchange:
if exchange in self.markets:
if currency is None:
self.app.log(f"{self.get_exchange_balance(exchange)}")
else:
self.app.log(self.markets[exchange].get_balance(currency.upper()))
else:
self.app.log('The exchange you entered has not been initialized. '
'You may check your exchange balance after entering the "start" command.')
else:
self.help("get_balance")
def list(self, obj: str):
if obj == "wallets":
wallets = list_wallets()
if len(wallets) == 0:
self.app.log('Wallet not available. Please configure your wallet (Enter "config wallet")')
else:
self.app.log('\n'.join(wallets))
elif obj == "exchanges":
if len(EXCHANGES) == 0:
self.app.log("No exchanges available")
else:
self.app.log('\n'.join(EXCHANGES))
elif obj == "configs":
columns: List[str] = ["Key", "Current Value"]
global_cvs: List[ConfigVar] = list(in_memory_config_map.values()) + list(global_config_map.values())
global_data: List[List[str, Any]] = [
[cv.key, len(str(cv.value)) * "*" if cv.is_secure else str(cv.value)]
for cv in global_cvs]
global_df: pd.DataFrame = pd.DataFrame(data=global_data, columns=columns)
self.app.log("\nglobal configs:")
self.app.log(str(global_df))
strategy = in_memory_config_map.get("strategy").value
if strategy:
strategy_cvs: List[ConfigVar] = get_strategy_config_map(strategy).values()
strategy_data: List[List[str, Any]] = [
[cv.key, len(str(cv.value)) * "*" if cv.is_secure else str(cv.value)]
for cv in strategy_cvs]
strategy_df: pd.DataFrame = pd.DataFrame(data=strategy_data, columns=columns)
self.app.log(f"\n{strategy} strategy configs:")
self.app.log(str(strategy_df))
self.app.log("\n")
elif obj == "trades":
lines = []
if self.strategy is None:
self.app.log("No strategy available, cannot show past trades.")
else:
if len(self.strategy.trades) > 0:
df = Trade.to_pandas(self.strategy.trades)
df_lines = str(df).split("\n")
lines.extend(["", " Past trades:"] +
[" " + line for line in df_lines])
else:
lines.extend([" No past trades."])
self.app.log("\n".join(lines))
else:
self.help("list")
def describe(self, wallet: bool = False, exchange: str = None):
if wallet:
if self.wallet is None:
self.app.log('None available. Your wallet may not have been initialized. Enter "start" to initialize '
'your wallet.')
else:
self.app.log(self.wallet.address)
self.app.log(f"{self.get_wallet_balance()}")
elif exchange is not None:
if exchange in self.markets:
self.app.log(f"{self.get_exchange_balance(exchange)}")
else:
raise InvalidCommandError("The exchange you specified has not been initialized")
else:
self.help("describe")
def start(self, log_level: Optional[str] = None):
is_valid = self.status()
if not is_valid:
return
if log_level is not None:
init_logging("hummingbot_logs.yml", override_log_level=log_level.upper())
# If macOS, disable App Nap.
if platform.system() == "Darwin":
import appnope
appnope.nope()
# TODO add option to select data feed
self.data_feed: DataFeedBase = CoinCapDataFeed.get_instance()
ExchangeRateConversion.get_instance().start()
strategy_name = in_memory_config_map.get("strategy").value
self.init_reporting_module()
self.app.log(f"\n Status check complete. Starting '{strategy_name}' strategy...")
asyncio.ensure_future(self.start_market_making(strategy_name))
async def _run_clock(self):
with self.clock as clock:
await clock.run()
async def start_market_making(self, strategy_name: str):
strategy_cm = get_strategy_config_map(strategy_name)
if strategy_name == "cross_exchange_market_making":
maker_market = strategy_cm.get("maker_market").value.lower()
taker_market = strategy_cm.get("taker_market").value.lower()
raw_maker_symbol = strategy_cm.get("maker_market_symbol").value.upper()
raw_taker_symbol = strategy_cm.get("taker_market_symbol").value.upper()
min_profitability = strategy_cm.get("min_profitability").value
trade_size_override = strategy_cm.get("trade_size_override").value
strategy_report_interval = global_config_map.get("strategy_report_interval").value
limit_order_min_expiration = strategy_cm.get("limit_order_min_expiration").value
cancel_order_threshold = strategy_cm.get("cancel_order_threshold").value
active_order_canceling = strategy_cm.get("active_order_canceling").value
top_depth_tolerance_rules = [(re.compile(re_str), value)
for re_str, value
in strategy_cm.get("top_depth_tolerance").value]
top_depth_tolerance = 0.0
for regex, tolerance_value in top_depth_tolerance_rules:
if regex.match(raw_maker_symbol) is not None:
top_depth_tolerance = tolerance_value
try:
maker_assets: Tuple[str, str] = SymbolSplitter.split(maker_market, raw_maker_symbol)
taker_assets: Tuple[str, str] = SymbolSplitter.split(taker_market, raw_taker_symbol)
except ValueError as e:
self.app.log(str(e))
return
market_names: List[Tuple[str, List[str]]] = [
(maker_market, [raw_maker_symbol]),
(taker_market, [raw_taker_symbol])
]
self._initialize_wallet(token_symbols=list(set(maker_assets + taker_assets)))
self._initialize_markets(market_names)
self.assets = set(maker_assets + taker_assets)
self.market_pair = CrossExchangeMarketPair(*([self.markets[maker_market], raw_maker_symbol] +
list(maker_assets) +
[self.markets[taker_market], raw_taker_symbol] +
list(taker_assets) + [top_depth_tolerance]))
strategy_logging_options = (CrossExchangeMarketMakingStrategy.OPTION_LOG_CREATE_ORDER |
CrossExchangeMarketMakingStrategy.OPTION_LOG_ADJUST_ORDER |
CrossExchangeMarketMakingStrategy.OPTION_LOG_MAKER_ORDER_FILLED |
CrossExchangeMarketMakingStrategy.OPTION_LOG_REMOVING_ORDER |
CrossExchangeMarketMakingStrategy.OPTION_LOG_STATUS_REPORT |
CrossExchangeMarketMakingStrategy.OPTION_LOG_MAKER_ORDER_HEDGED)
self.strategy = CrossExchangeMarketMakingStrategy(market_pairs=[self.market_pair],
min_profitability=min_profitability,
status_report_interval=strategy_report_interval,
logging_options=strategy_logging_options,
trade_size_override=trade_size_override,
limit_order_min_expiration=limit_order_min_expiration,
cancel_order_threshold=cancel_order_threshold,
active_order_canceling=active_order_canceling)
elif strategy_name == "arbitrage":
primary_market = strategy_cm.get("primary_market").value.lower()
secondary_market = strategy_cm.get("secondary_market").value.lower()
raw_primary_symbol = strategy_cm.get("primary_market_symbol").value.upper()
raw_secondary_symbol = strategy_cm.get("secondary_market_symbol").value.upper()
min_profitability = strategy_cm.get("min_profitability").value
try:
primary_assets: Tuple[str, str] = SymbolSplitter.split(primary_market, raw_primary_symbol)
secondary_assets: Tuple[str, str] = SymbolSplitter.split(secondary_market, raw_secondary_symbol)
except ValueError as e:
self.app.log(str(e))
return
market_names: List[Tuple[str, List[str]]] = [(primary_market, [raw_primary_symbol]),
(secondary_market, [raw_secondary_symbol])]
self._initialize_wallet(token_symbols=list(set(primary_assets + secondary_assets)))
self._initialize_markets(market_names)
self.assets = set(primary_assets + secondary_assets)
self.market_pair = ArbitrageMarketPair(*([self.markets[primary_market], raw_primary_symbol] +
list(primary_assets) +
[self.markets[secondary_market], raw_secondary_symbol] +
list(secondary_assets)))
strategy_logging_options = ArbitrageStrategy.OPTION_LOG_ALL
self.strategy = ArbitrageStrategy(market_pairs=[self.market_pair],
min_profitability=min_profitability,
logging_options=strategy_logging_options)
elif strategy_name == "discovery":
try:
market_1 = strategy_cm.get("primary_market").value.lower()
market_2 = strategy_cm.get("secondary_market").value.lower()
target_symbol_1 = list(strategy_cm.get("target_symbol_1").value)
target_symbol_2 = list(strategy_cm.get("target_symbol_2").value)
target_profitability = float(strategy_cm.get("target_profitability").value)
target_amount = float(strategy_cm.get("target_amount").value)
equivalent_token: List[List[str]] = list(strategy_cm.get("equivalent_tokens").value)
market_names: List[Tuple[str, List[str]]] = [(market_1, target_symbol_1),
(market_2, target_symbol_2)]
target_base_quote_1: List[Tuple[str, str]] = [
SymbolSplitter.split(market_1, symbol) for symbol in target_symbol_1
]
target_base_quote_2: List[Tuple[str, str]] = [
SymbolSplitter.split(market_2, symbol) for symbol in target_symbol_2
]
for asset_tuple in (target_base_quote_1 + target_base_quote_2):
self.assets.add(asset_tuple[0])
self.assets.add(asset_tuple[1])
self._initialize_wallet(token_symbols=list(self.assets))
self._initialize_markets(market_names)
self.market_pair = DiscoveryMarketPair(
*([self.markets[market_1], self.markets[market_1].get_active_exchange_markets] +
[self.markets[market_2], self.markets[market_2].get_active_exchange_markets]))
self.strategy = DiscoveryStrategy(market_pairs=[self.market_pair],
target_symbols=target_base_quote_1 + target_base_quote_2,
equivalent_token=equivalent_token,
target_profitability=target_profitability,
target_amount=target_amount
)
except Exception as e:
self.app.log(str(e))
self.logger().error("Error initializing strategy.", exc_info=True)
else:
raise NotImplementedError
try:
self.clock = Clock(ClockMode.REALTIME)
if self.wallet is not None:
self.clock.add_iterator(self.wallet)
for market in self.markets.values():
if market is not None:
self.clock.add_iterator(market)
self.clock.add_iterator(self.strategy)
self.strategy_task: asyncio.Task = asyncio.ensure_future(self._run_clock())
self.app.log(f"\n '{strategy_name}' strategy started.\n"
f" You can use the `status` command to query the progress.")
self.starting_balances = await self.wait_till_ready(self.balance_snapshot)
self.stop_loss_tracker = StopLossTracker(self.data_feed,
list(self.assets),
list(self.markets.values()),
lambda *args, **kwargs: asyncio.ensure_future(
self.stop(*args, **kwargs)
))
await self.wait_till_ready(self.stop_loss_tracker.start)
except Exception as e:
self.logger().error(str(e), exc_info=True)
async def stop(self, skip_order_cancellation: bool = False):
self.app.log("\nWinding down...")
# Restore App Nap on macOS.
if platform.system() == "Darwin":
import appnope
appnope.nap()
if not skip_order_cancellation:
# Remove the strategy from clock before cancelling orders, to
# prevent race condition where the strategy tries to create more
# orders during cancellation.
self.clock.remove_iterator(self.strategy)
success = await self._cancel_outstanding_orders()
if success:
# Only erase markets when cancellation has been successful
self.markets = {}
if self.reporting_module:
self.reporting_module.stop()
if self.strategy_task is not None and not self.strategy_task.cancelled():
self.strategy_task.cancel()
if self.strategy:
self.strategy.stop()
ExchangeRateConversion.get_instance().stop()
self.stop_loss_tracker.stop()
self.wallet = None
self.strategy_task = None
self.strategy = None
self.market_pair = None
self.clock = None
async def exit(self, force: bool = False):
if self.strategy_task is not None and not self.strategy_task.cancelled():
self.strategy_task.cancel()
if self.strategy:
self.strategy.stop()
if force is False:
success = await self._cancel_outstanding_orders()
if not success:
self.app.log('Wind down process terminated: Failed to cancel all outstanding orders. '
'\nYou may need to manually cancel remaining orders by logging into your chosen exchanges'
'\n\nTo force exit the app, enter "exit -f"')
return
# Freeze screen 1 second for better UI
await asyncio.sleep(1)
ExchangeRateConversion.get_instance().stop()
self.app.exit()
async def export_private_key(self):
if self.acct is None:
self.app.log("Your wallet is currently locked. Please enter \"config\""
" to unlock your wallet first")
else:
self.placeholder_mode = True
self.app.toggle_hide_input()
ans = await self.app.prompt("Are you sure you want to print your private key in plain text? (y/n) >>> ")
if ans.lower() in {"y", "yes"}:
self.app.log("\nWarning: Never disclose this key. Anyone with your private keys can steal any assets "
"held in your account.\n")
self.app.log("Your private key:")
self.app.log(self.acct.privateKey.hex())
self.app.change_prompt(prompt=">>> ")
self.app.toggle_hide_input()
self.placeholder_mode = False
def export_trades(self, path: str = ""):
if not path:
fname = f"trades_{pd.Timestamp.now().strftime('%Y-%m-%d-%H-%M-%S')}.csv"
path = join(dirname(__file__), f"../../logs/{fname}")
if self.strategy is None:
self.app.log("No strategy available, cannot export past trades.")
else:
if len(self.strategy.trades) > 0:
df: pd.DataFrame = Trade.to_pandas(self.strategy.trades)
df.to_csv(path, header=True)
self.app.log(f"Successfully saved trades to {path}")
def history(self):
self.list("trades")
self.compare_balance_snapshots()
async def wait_till_ready(self, func: Callable, *args, **kwargs):
while True:
all_ready = all([market.ready for market in self.markets.values()])
if not all_ready:
await asyncio.sleep(0.5)
else:
return func(*args, **kwargs)
def balance_snapshot(self) -> Dict[str, Dict[str, float]]:
snapshot: Dict[str, Any] = {}
for market_name in self.markets:
balance_dict = self.markets[market_name].get_all_balances()
for c in self.assets:
if c not in snapshot:
snapshot[c] = {}
if c in balance_dict:
snapshot[c][market_name] = balance_dict[c]
else:
snapshot[c][market_name] = 0.0
return snapshot
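# The snapshot above is keyed by asset and then by market name, e.g.
# (illustrative values): {"WETH": {"binance": 1.5, "ddex": 0.0}, "DAI": {...}}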
def compare_balance_snapshots(self):
if len(self.starting_balances) == 0:
self.app.log(" Balance snapshots are not available before bot starts")
return
rows = []
for market_name in self.markets:
for asset in self.assets:
starting_balance = self.starting_balances.get(asset).get(market_name)
current_balance = self.balance_snapshot().get(asset).get(market_name)
rows.append([market_name, asset, starting_balance, current_balance, current_balance - starting_balance])
df = pd.DataFrame(rows, index=None, columns=["Market", "Asset", "Starting", "Current", "Delta"])
lines = ["", " Performance:"] + [" " + line for line in str(df).split("\n")]
self.app.log("\n".join(lines))
|
py | b41609c30e218c57ac22fc87dd0ae3c72eda7d1d | # This sample tests various logical expressions.
class Foo:
def do_something1(self):
pass
def do_something2(self):
pass
class Bar:
def do_something1(self):
pass
a = 0
foo = Foo()
bar = Bar()
b = a and foo or bar
# This should not be flagged as an error because
# the type of b should be type Foo.
b.do_something1()
# This should be flagged as an error because
# Bar doesn't define a do_something2 method.
b.do_something2()
|
py | b4160aad40d6c6c5b30c9bfd1a35dce9f87247b6 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.identity import identity_service
from openstack import resource2 as resource
class Extension(resource.Resource):
resource_key = 'extension'
resources_key = 'extensions'
base_path = '/extensions'
service = identity_service.IdentityService()
# capabilities
allow_list = True
allow_get = True
# Properties
#: A unique identifier, which will be used for accessing the extension
#: through a dedicated url ``/extensions/*alias*``. The extension
#: alias uniquely identifies an extension and is prefixed by a vendor
#: identifier. *Type: string*
alias = resource.Body('alias', alternate_id=True)
#: A description of the extension. *Type: string*
description = resource.Body('description')
#: Links to the documentation in various format. *Type: string*
links = resource.Body('links')
#: The name of the extension. *Type: string*
name = resource.Body('name')
#: The second unique identifier of the extension after the alias.
#: It is usually a URL which will be used. Example:
#: "http://docs.openstack.org/identity/api/ext/s3tokens/v1.0"
#: *Type: string*
namespace = resource.Body('namespace')
#: The last time the extension has been modified (update date).
updated_at = resource.Body('updated')
@classmethod
def list(cls, session, paginated=False, **params):
resp = session.get(cls.base_path, endpoint_filter=cls.service,
params=params)
resp = resp.json()
for data in resp[cls.resources_key]['values']:
yield cls.existing(**data)
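# Usage sketch (assumes an authenticated `session` obtained from an OpenStack
# connection/profile; illustrative only):
#
#     for ext in Extension.list(session):
#         print(ext.alias, ext.name)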
|
py | b4160ab576d0ab9c3923c91525b996b3e1e97032 | # -*- coding: utf-8 -*-
"""
This family file was auto-generated by generate_family_file.py script.
Configuration parameters:
url = http://wiki.projectgorgon.com/wiki/
name = projectgorgon
Please do not commit this to the Git repository!
"""
from __future__ import absolute_import, division, unicode_literals
from pywikibot import family
from pywikibot.tools import deprecated
class Family(family.Family): # noqa: D101
name = 'projectgorgon'
langs = {
'en': 'wiki.projectgorgon.com',
}
def scriptpath(self, code):
return {
'en': '/w',
}[code]
@deprecated('APISite.version()', since='20141225')
def version(self, code):
return {
'en': '1.29.2',
}[code]
def protocol(self, code):
return {
'en': 'http',
}[code]
|
py | b4160b3c8508196fa0536f9e1c0999d983a14d8f | import tkinter as tk
from tkinter import ttk, END
import math
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
matplotlib.use("TkAgg")
import numpy as np
LARGE_FONT =("Verdana", 12)
DEFAULT_FONT =("Verdana", 10)
voltou = False
def calculate():
global valor_1
global valor_2
global valor_3
value_list = [valor_1, valor_2, valor_3]
if "" in value_list:
return False
else:
delta = (int(valor_2)**2) - 4*int(valor_1)*int(valor_3)
if delta >= 0:
global delta_root
delta_root = math.sqrt(delta)
global bhask_pos
global bhask_neg
bhask_pos = (int(-valor_2) + delta_root) / (2 * int(valor_1))
bhask_neg = (int(-valor_2) - delta_root) / (2 * int(valor_1))
else:
pass
return True
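# calculate() above applies the quadratic (Bhaskara) formula: for
# a*x**2 + b*x + c = 0, delta = b**2 - 4*a*c and, when delta >= 0, the roots
# are x = (-b + sqrt(delta)) / (2*a) and x = (-b - sqrt(delta)) / (2*a).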
def render_graph():
eixo_x = []
eixo_y = []
zero = []
print(voltou)
global bhask_pos
global bhask_neg
if voltou is True:
variacao = abs(bhask_pos - bhask_neg)
if variacao < 3:
variacao = 3
print(variacao)
for x in np.arange(bhask_pos - variacao, bhask_neg + variacao, variacao / 100):
y = valor_1 * (x ** 2 ) + valor_2 * (x) + valor_3
eixo_x.append(x)
eixo_y.append(y)
zero.append(0.0)
plt.plot(eixo_x,eixo_y,color="blue")
plt.plot(eixo_x,zero,color="black")
plt.draw()
print("foi")
class App(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self,*args, **kwargs)
#self.geometry("720x360")
self.title("Bhaskara Solver")
self.valor_1 = ""
self.valor_2 = ""
self.valor_3 = ""
container = tk.Frame(self)
container.pack(side="top", fill="both", expand=True)
container.grid_columnconfigure(0, weight=1)
container.grid_rowconfigure(0, weight=1)
self.frames = {}
for F in (StartPage, PageOne, Graph):
frame = F(container, self)
self.frames[F] = frame
frame.grid(row=0, column=0, sticky="nsew")
self.show_frame(StartPage)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
class StartPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
global result
button = ttk.Button(self, text="Inserir valores", command=lambda: controller.show_frame(PageOne))
button.pack(side="top", padx=10, pady=20, expand=False)
# canvas = tk.Canvas(self, width=400, height=200, bg="#C0C0C0", bd="10")
# canvas.pack(side="bottom", padx=10, pady=20, expand=False)
global label
label = tk.Label(self, text="Valores ainda nรฃo definidos", bg="#D3D3D3", bd=10, width=80)
label.pack(side="bottom")
calculation_button = ttk.Button(self, text="Calcular raรญzes", command=calculate)
calculation_button.pack()
creditos = tk.Label(self, text="Bhaskara Solver by Paulo Jr 09/02/2019")
creditos.pack(padx=10, pady=20, side="right")
graph_button = ttk.Button(self, text="Ver grรกfico", command=lambda: controller.show_frame(Graph))
graph_button.pack()
class PageOne(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
def get_entry_data_a():
global valor_1
valor_1 = int(controller.valor_a.get())
entry_a.delete(0, END)
print(valor_1)
def get_entry_data_b():
global valor_2
valor_2 = int(controller.valor_b.get())
entry_b.delete(0, END)
print(valor_2)
def get_entry_data_c():
global valor_3
valor_3 = int(controller.valor_c.get())
entry_c.delete(0, END)
print(valor_3)
def event_data_a(event):
global valor_1
valor_1 = int(controller.valor_a.get())
entry_a.delete(0, END)
print(valor_1)
def event_data_b(event):
global valor_2
valor_2 = int(controller.valor_b.get())
entry_b.delete(0, END)
print(valor_2)
def event_data_c(event):
global valor_3
valor_3 = int(controller.valor_c.get())
entry_c.delete(0, END)
print(valor_3)
text_a = tk.Label(self, text="Valor de a:", padx=10, pady=10)
text_a.grid(row=1, column=1)
text_b = tk.Label(self, text="Valor de b:", padx=10, pady=10)
text_b.grid(row=2, column=1)
text_c = tk.Label(self, text="Valor de c", padx=10, pady=10)
text_c.grid(row=3, column=1)
controller.valor_a = tk.IntVar()
entry_a = tk.Entry(self, textvariable=controller.valor_a)
entry_a.grid(row=1, column=2)
entry_a.delete(0, END)
button_a = ttk.Button(self, text="Salvar valor", command=get_entry_data_a)
button_a.grid(row=1, column=3, padx=10, pady=10)
controller.valor_b = tk.IntVar()
entry_b = tk.Entry(self, textvariable=controller.valor_b)
entry_b.grid(row=2, column=2)
entry_b.delete(0, END)
button_b = ttk.Button(self, text="Salvar valor", command=get_entry_data_b)
button_b.grid(row=2, column=3, padx=10, pady=10)
controller.valor_c = tk.IntVar()
entry_c = tk.Entry(self, textvariable=controller.valor_c)
entry_c.grid(row=3, column=2)
entry_c.delete(0, END)
button_c = ttk.Button(self, text="Salvar valor", command=get_entry_data_c)
button_c.grid(row=3, column=3,padx=10, pady=10)
def backbutton_callback():
global voltou
voltou = True
controller.show_frame(StartPage)
print(voltou)
if voltou is True:
if calculate() is False:
result = tk.StringVar()
sentence = "Nรฃo foi possรญvel calcular as raรญzes pois o delta รฉ negativo."
result.set(str(sentence))
print(result.get())
print("erro")
elif calculate() is True:
global bhask_neg
global bhask_pos
result = tk.StringVar()
sentence = "A equaรงรฃo {0}xยฒ + {1}x + {2} tem como resultado as raรญzes {3} e {4}.".format(valor_1, valor_2, valor_3, bhask_neg, bhask_pos)
result.set(str(sentence))
label.config(text=result.get())
print(result.get())
print("certo")
else:
pass
return True
entry_a.bind("<Return>", event_data_a)
entry_b.bind("<Return>", event_data_b)
entry_c.bind("<Return>", event_data_c)
back_button = ttk.Button(self, text="Retornar ร pรกgina principal", command=backbutton_callback)
back_button.grid(row=5, column=2, padx=20, pady=20)
class Graph(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Teste")
if voltou is True:
render_graph()
print(voltou)
app = App()
app.mainloop()
|
py | b4160b93ad093d647bebb5e5de96b637910cd83b | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_group_facts
short_description: Retrieve information about one or more oVirt/RHV groups
author: "Ondra Machacek (@machacekondra)"
deprecated:
removed_in: "2.10"
why: When migrating to collection we decided to use only _info modules.
alternative: Use M(ovirt_group_info) instead
description:
- "Retrieve information about one or more oVirt/RHV groups."
- This module was called C(ovirt_group_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(ovirt_group_info) module no longer returns C(ansible_facts)!
notes:
- "This module returns a variable C(ovirt_groups), which
contains a list of groups. You need to register the result with
the I(register) keyword to use it."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search group X use following pattern: name=X"
extends_documentation_fragment:
- ovirt.ovirt.ovirt_info
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather information about all groups which names start with C(admin):
- ovirt_group_info:
pattern: name=admin*
register: result
- debug:
msg: "{{ result.ovirt_groups }}"
'''
RETURN = '''
ovirt_groups:
description: "List of dictionaries describing the groups. Group attributes are mapped to dictionary keys,
all groups attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.common.removed import removed_module
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_info_full_argument_spec,
)
def main():
argument_spec = ovirt_info_full_argument_spec(
pattern=dict(default='', required=False),
)
module = AnsibleModule(argument_spec)
is_old_facts = module._name == 'ovirt_group_facts'
if is_old_facts:
module.deprecate("The 'ovirt_group_facts' module has been renamed to 'ovirt_group_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
groups_service = connection.system_service().groups_service()
groups = groups_service.list(search=module.params['pattern'])
result = dict(
ovirt_groups=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in groups
],
)
if is_old_facts:
module.exit_json(changed=False, ansible_facts=result)
else:
module.exit_json(changed=False, **result)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
removed_module("2.10")
|
py | b4160bd5ae135b41d2d7940caa520909e8523e99 | from .render_volume import RenderVolume
|
py | b4160e0ee6c1086282476e9f4c00cfb8389db225 | import heapq
class Solution(object):
def getLeastNumbers(self, arr, k):
"""
:type arr: List[int]
:type k: int
:rtype: List[int]
"""
return heapq.nsmallest(k, arr) |
py | b4160ea1fe960724af1f24cd2875b77da99f67ab | #!/usr/bin/env python3
# ******************************************************************************
# $Id: gdalchksum.py ffccab1ee20b5151e9bd45f1a2c46245a74f1f56 2021-04-23 14:05:42 +0300 Idan Miara $
#
# Project: GDAL
# Purpose: Application to checksum a GDAL image file.
# Author: Frank Warmerdam, [email protected]
#
# ******************************************************************************
# Copyright (c) 2003, Frank Warmerdam <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import sys
from osgeo import gdal
def Usage():
print('Usage: gdalchksum.py [-b band] [-srcwin xoff yoff xsize ysize] file')
return 1
def main(argv):
srcwin = None
bands = []
filename = None
argv = gdal.GeneralCmdLineProcessor(argv)
if argv is None:
return 0
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-b':
i = i + 1
bands.append(int(argv[i]))
elif arg == '-srcwin':
srcwin = [int(argv[i + 1]), int(argv[i + 2]),
int(argv[i + 3]), int(argv[i + 4])]
i = i + 4
elif filename is None:
filename = argv[i]
else:
return Usage()
i = i + 1
if filename is None:
return Usage()
# Open source file
ds = gdal.Open(filename)
if ds is None:
print('Unable to open %s' % filename)
return 1
# Default values
if srcwin is None:
srcwin = [0, 0, ds.RasterXSize, ds.RasterYSize]
if not bands:
bands = list(range(1, (ds.RasterCount + 1)))
# Generate checksums
for band_num in bands:
oBand = ds.GetRasterBand(band_num)
result = oBand.Checksum(srcwin[0], srcwin[1], srcwin[2], srcwin[3])
print(result)
ds = None
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
py | b4160f9440e2a13973e9cd18688ff8f5ed5e3c9e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.Participant import Participant
from alipay.aop.api.domain.TransOrderDetail import TransOrderDetail
class AlipayFundBatchCreateModel(object):
def __init__(self):
self._biz_scene = None
self._business_params = None
self._order_title = None
self._out_batch_no = None
self._passback_params = None
self._payer_info = None
self._product_code = None
self._remark = None
self._time_expire = None
self._total_count = None
self._total_trans_amount = None
self._trans_order_list = None
@property
def biz_scene(self):
return self._biz_scene
@biz_scene.setter
def biz_scene(self, value):
self._biz_scene = value
@property
def business_params(self):
return self._business_params
@business_params.setter
def business_params(self, value):
self._business_params = value
@property
def order_title(self):
return self._order_title
@order_title.setter
def order_title(self, value):
self._order_title = value
@property
def out_batch_no(self):
return self._out_batch_no
@out_batch_no.setter
def out_batch_no(self, value):
self._out_batch_no = value
@property
def passback_params(self):
return self._passback_params
@passback_params.setter
def passback_params(self, value):
self._passback_params = value
@property
def payer_info(self):
return self._payer_info
@payer_info.setter
def payer_info(self, value):
if isinstance(value, Participant):
self._payer_info = value
else:
self._payer_info = Participant.from_alipay_dict(value)
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
@property
def time_expire(self):
return self._time_expire
@time_expire.setter
def time_expire(self, value):
self._time_expire = value
@property
def total_count(self):
return self._total_count
@total_count.setter
def total_count(self, value):
self._total_count = value
@property
def total_trans_amount(self):
return self._total_trans_amount
@total_trans_amount.setter
def total_trans_amount(self, value):
self._total_trans_amount = value
@property
def trans_order_list(self):
return self._trans_order_list
@trans_order_list.setter
def trans_order_list(self, value):
if isinstance(value, list):
self._trans_order_list = list()
for i in value:
if isinstance(i, TransOrderDetail):
self._trans_order_list.append(i)
else:
self._trans_order_list.append(TransOrderDetail.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.biz_scene:
if hasattr(self.biz_scene, 'to_alipay_dict'):
params['biz_scene'] = self.biz_scene.to_alipay_dict()
else:
params['biz_scene'] = self.biz_scene
if self.business_params:
if hasattr(self.business_params, 'to_alipay_dict'):
params['business_params'] = self.business_params.to_alipay_dict()
else:
params['business_params'] = self.business_params
if self.order_title:
if hasattr(self.order_title, 'to_alipay_dict'):
params['order_title'] = self.order_title.to_alipay_dict()
else:
params['order_title'] = self.order_title
if self.out_batch_no:
if hasattr(self.out_batch_no, 'to_alipay_dict'):
params['out_batch_no'] = self.out_batch_no.to_alipay_dict()
else:
params['out_batch_no'] = self.out_batch_no
if self.passback_params:
if hasattr(self.passback_params, 'to_alipay_dict'):
params['passback_params'] = self.passback_params.to_alipay_dict()
else:
params['passback_params'] = self.passback_params
if self.payer_info:
if hasattr(self.payer_info, 'to_alipay_dict'):
params['payer_info'] = self.payer_info.to_alipay_dict()
else:
params['payer_info'] = self.payer_info
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.time_expire:
if hasattr(self.time_expire, 'to_alipay_dict'):
params['time_expire'] = self.time_expire.to_alipay_dict()
else:
params['time_expire'] = self.time_expire
if self.total_count:
if hasattr(self.total_count, 'to_alipay_dict'):
params['total_count'] = self.total_count.to_alipay_dict()
else:
params['total_count'] = self.total_count
if self.total_trans_amount:
if hasattr(self.total_trans_amount, 'to_alipay_dict'):
params['total_trans_amount'] = self.total_trans_amount.to_alipay_dict()
else:
params['total_trans_amount'] = self.total_trans_amount
if self.trans_order_list:
if isinstance(self.trans_order_list, list):
for i in range(0, len(self.trans_order_list)):
element = self.trans_order_list[i]
if hasattr(element, 'to_alipay_dict'):
self.trans_order_list[i] = element.to_alipay_dict()
if hasattr(self.trans_order_list, 'to_alipay_dict'):
params['trans_order_list'] = self.trans_order_list.to_alipay_dict()
else:
params['trans_order_list'] = self.trans_order_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayFundBatchCreateModel()
if 'biz_scene' in d:
o.biz_scene = d['biz_scene']
if 'business_params' in d:
o.business_params = d['business_params']
if 'order_title' in d:
o.order_title = d['order_title']
if 'out_batch_no' in d:
o.out_batch_no = d['out_batch_no']
if 'passback_params' in d:
o.passback_params = d['passback_params']
if 'payer_info' in d:
o.payer_info = d['payer_info']
if 'product_code' in d:
o.product_code = d['product_code']
if 'remark' in d:
o.remark = d['remark']
if 'time_expire' in d:
o.time_expire = d['time_expire']
if 'total_count' in d:
o.total_count = d['total_count']
if 'total_trans_amount' in d:
o.total_trans_amount = d['total_trans_amount']
if 'trans_order_list' in d:
o.trans_order_list = d['trans_order_list']
return o
|
py | b416104152b471a666478862d295ee91c0974db6 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import random
while 1:
s = int(random.randint(1, 3))
if s == 1:
ind = "Rock"
elif s == 2:
ind = "Scissors"
elif s == 3:
ind = "Paper"
m = raw_input('Input Rock, Paper, Scissors to start, Input "end" to exit:')
blist = ['Rock', "Scissors", "Paper"]
if (m not in blist) and (m != 'end'):
print "Please input again"
elif (m not in blist) and (m == 'end'):
print "\nExiting..."
break
elif m == ind :
print "AI: " + ind + ", Draw!"
elif (m == 'Rock' and ind =='Scissors') or (m == 'Scissors' and ind =='Paper') or (m == 'Paper' and ind =='Rock'):
print "AI: " + ind +", You Win"
elif (m == 'Rock' and ind =='Paper') or (m == 'Scissors' and ind =='Rock') or (m == 'Paper' and ind =='Scissors'):
print "AI: " + ind +", You Lose"
|
py | b41611a91e0dff583c1cc109787bbbc61ca0195a | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test DistributionStrategy in the zero batch case."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class NormalizationTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.one_device_strategy,
],
mode=["graph"],
fused=[True, False]))
def testBNWithZeroBatchInputGraph(self, distribution, fused):
distribution.extended.experimental_enable_get_next_as_optional = True
with distribution.scope(), self.cached_session() as sess:
bn_list = []
inputs = np.random.random((0, 4, 4, 3)) + 100
targets = np.random.random((0, 4, 4, 3))
inputs_placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 4, 4, 3])
targets_placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 4, 4, 3])
def step_fn(is_training, inputs, targets=None):
bn = normalization.BatchNormalization(
axis=3, epsilon=1e-3, momentum=0.9, fused=fused)
bn_list.append(bn)
outputs = bn.apply(inputs, training=is_training)
if not is_training:
return outputs
loss = losses.mean_squared_error(targets, outputs)
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
train_op = optimizer.minimize(loss)
with ops.control_dependencies([train_op]):
return array_ops.identity(loss)
train_op = distribution.extended.call_for_each_replica(
step_fn, args=(True, inputs_placeholder, targets_placeholder))
predict_op = distribution.extended.call_for_each_replica(
step_fn, args=(False, inputs_placeholder))
bn = bn_list[0]
self.evaluate(variables.global_variables_initializer())
# Check for initial statistics and weights.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
self.assertAllEqual([0, 0, 0], moving_mean)
self.assertAllEqual([1, 1, 1], moving_var)
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
self.assertAllEqual([1, 1, 1], np_gamma)
self.assertAllEqual([0, 0, 0], np_beta)
for _ in range(100):
np_output, _, _ = sess.run([train_op] + bn.updates, {
inputs_placeholder: inputs,
targets_placeholder: targets
})
self.assertEqual(0.0, np_output)
# Verify that the statistics and weights are not changed after training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
self.assertAllEqual([0, 0, 0], moving_mean)
self.assertAllEqual([1, 1, 1], moving_var)
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
self.assertAllEqual([1, 1, 1], np_gamma)
self.assertAllEqual([0, 0, 0], np_beta)
# Test inference.
np_output = sess.run(predict_op, {inputs_placeholder: inputs})
self.assertEqual([], np_output.tolist())
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.one_device_strategy,
],
mode=["eager"],
fused=[True, False]))
def testBNWithZeroBatchInput(self, distribution, fused):
distribution.extended.experimental_enable_get_next_as_optional = True
with distribution.scope():
inputs = np.random.random((0, 4, 4, 3)).astype(np.float32) + 100
targets = np.random.random((0, 4, 4, 3)).astype(np.float32)
bn = normalization.BatchNormalization(
axis=3, epsilon=1e-3, momentum=0.9, fused=fused)
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
@def_function.function
def train_step():
def step_fn(inputs, targets):
with backprop.GradientTape() as tape:
outputs = bn.apply(inputs, training=True)
loss = losses.mean_squared_error(targets, outputs)
grads = tape.gradient(loss, bn.variables)
optimizer.apply_gradients(zip(grads, bn.variables))
return loss
return distribution.run(step_fn, args=(inputs, targets))
for _ in range(100):
np_output = train_step().numpy()
self.assertEqual(0.0, np_output)
# Verify that the statistics and weights are not changed after training.
self.assertAllEqual([0, 0, 0], bn.moving_mean.numpy())
self.assertAllEqual([1, 1, 1], bn.moving_variance.numpy())
self.assertAllEqual([1, 1, 1], bn.gamma.numpy())
self.assertAllEqual([0, 0, 0], bn.beta.numpy())
@def_function.function
def test_step():
def step_fn(inputs):
outputs = bn.apply(inputs, training=False)
return outputs
return distribution.run(step_fn, args=(inputs,))
# Test inference.
self.assertAllEqual(np.zeros(shape=(0, 4, 4, 3), dtype=np.float32),
test_step().numpy())
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.one_device_strategy,
],
mode=["eager"],
fused=[True, False]))
def testBNWithDynamicBatchInputEager(self, distribution, fused):
distribution.extended.experimental_enable_get_next_as_optional = True
with distribution.scope():
# Explicitly create dataset with drop_remainder=False.
# This would make batch size unknown.
inputs = np.random.random((11, 4, 4, 3)).astype(np.float32) + 100
targets = np.random.random((11, 4, 4, 3)).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)).batch(
10, drop_remainder=False).repeat()
dataset_iterator = iter(
distribution.experimental_distribute_dataset(dataset))
bn = normalization.BatchNormalization(
axis=-1, epsilon=1e-3, momentum=0.9, fused=fused)
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
@def_function.function
def train_step(iterator):
def step_fn(inputs):
features, targets = inputs
with backprop.GradientTape() as tape:
outputs = bn(features, training=True)
loss = losses.mean_squared_error(targets, outputs)
grads = tape.gradient(loss, bn.variables)
optimizer.apply_gradients(zip(grads, bn.variables))
return loss
return distribution.run(step_fn, args=(next(iterator),))
for _ in range(100):
train_step(dataset_iterator).numpy()
# Verify that the statistics and weights are updated.
      self.assertNotAllEqual(np.zeros(3), bn.moving_mean.numpy())
      self.assertNotAllEqual(np.ones(3), bn.moving_variance.numpy())
      self.assertNotAllEqual(np.ones(3), bn.gamma.numpy())
      self.assertNotAllEqual(np.zeros(3), bn.beta.numpy())
if __name__ == "__main__":
test.main()
|
py | b41611ebf8860c96e622a24e962086a455877cf4 | #-*-coding:utf8-*-
import json,urllib,urllib2
from qqbot import QQBotSlot as qqbotslot, RunBot
def talk(content,userid):
url = 'http://www.tuling123.com/openapi/api'
s = urllib2.Request(url)
da = {"key":"่พๅ
ฅๅพ็ตๆบๅจไบบ็ป็keyๅผ","info":content,"userid":userid}
da = urllib.urlencode(da)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
j = eval(opener.open(s,da).read())
# r = s.post(url,data = data)
# j = eval(r.text)
code = j['code']
if code == 100000:
recontent = j['text']
elif code == 200000:
recontent = j['text']+j['url']
elif code == 302000 or code ==308000:
recontent = j['text']+j['list'][0]['info']+j['list'][0]['detailurl']
elif code == 40004:
        recontent = 'Little V can only answer 5000 questions a day and is already worn out; Little V is off to rest now~~'
elif code == 40002:
        recontent = 'Is there anything you would like to say to Little V?~'
elif code == 40007:
        recontent = 'The data you entered is in an invalid format, please enter it again!'
else:
        recontent = 'This bot has not learned how to reply to that yet'
return recontent
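# Illustrative call (the QQ number is hypothetical): talk('hello', '123456789') returns the
# Tuling123 reply text for that user's conversation.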
@qqbotslot
def onQQMessage(bot, contact, member, content):
if content == '--stop':
        bot.SendTo(contact, 'Quietly I take my leave, as quietly as I came; I roll up my sleeves and take all my splendor with me~~')
bot.Stop()
else:
        if getattr(member, 'uin', None) == bot.conf.qq:
            return
        else:
            if member is None:
                bot.SendTo(contact, talk(content, contact.qq))
            elif '@ME' in content:
                bot.SendTo(contact, '@' + member.name + ' ' + talk(content, contact.qq))
            else:
                return
RunBot()
|
py | b41611f5d7a1ec0b568fb2c05e5964823f797e2b | import instaloader, glob, telebot, shutil
# YOUR BOT TOKEN HERE
bot_token = '******************************************'
bot = telebot.TeleBot(bot_token, parse_mode=None) # You can set parse_mode by default. HTML or MARKDOWN
# YOUR IG USERNAME & PASSWORD HERE
ig_user = 'USERNAME_HERE'
ig_pass = 'PASSWORD_HERE'
# DEFINE A FUNCTION TO LOGIN ONCE YOU RUN THE SCRIPT
def login_insta(ig_user, ig_pass):
L = instaloader.Instaloader()
L.login(ig_user, ig_pass)
print('\n\n ****Login Successfull.****\n\n\n')
return L
# /start & /help commands
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
bot.reply_to(message, 'Howdy, how are you doing? \nIt is a profile loader bot. (: \nJust send me the ID with or without @')
# TO PROCESS USER REQUESTS
@bot.message_handler()
def profile_get(message):
id = message.text.lower().replace('@', '')
bot.reply_to(message, f'Fetching profile picture for @{id}, wait...')
print('ID: ',id)
mod.download_profile(id, profile_pic_only=True)
query = f'{id}/*.jpg'
for file in glob.glob(query): pic = file
pic = open(pic, 'rb')
user_id = message.from_user.id
bot.send_photo(user_id, pic)
shutil.rmtree(id)
# HAVE FUN (:
if (__name__ == '__main__'):
mod = login_insta(ig_user, ig_pass)
bot.infinity_polling()
|
py | b4161227ae768914e15a0ccadfca2f77c4c78a95 | from stix_shifter_utils.modules.base.stix_transmission.base_sync_connector import BaseSyncConnector
from .carbonblack_api_client import APIClient
import json
from stix_shifter_utils.utils.error_response import ErrorResponder
class UnexpectedResponseException(Exception):
pass
class Connector(BaseSyncConnector):
def __init__(self, connection, configuration):
self.api_client = APIClient(connection, configuration)
def _handle_errors(self, response, return_obj):
response_code = response.code
response_txt = response.read().decode('utf-8')
if 200 <= response_code < 300:
return_obj['success'] = True
if response_txt:
response_json = json.loads(response_txt)
if 'results' in response_json:
return_obj['data'] = response_json['results']
elif ErrorResponder.is_plain_string(response_txt):
ErrorResponder.fill_error(return_obj, message=response_txt)
elif ErrorResponder.is_json_string(response_txt):
response_json = json.loads(response_txt)
ErrorResponder.fill_error(return_obj, response_json, ['reason'])
else:
raise UnexpectedResponseException
return return_obj
def ping_connection(self):
response_txt = None
return_obj = {}
try:
response = self.api_client.ping_box()
return self._handle_errors(response, return_obj)
except Exception as e:
if response_txt is not None:
ErrorResponder.fill_error(return_obj, message='unexpected exception')
print('can not parse response: ' + str(response_txt))
else:
raise e
def create_results_connection(self, search_id, offset, length):
response_txt = None
return_obj = {}
try:
search_id = json.loads(search_id)
query = search_id["query"]
dialect = search_id["dialect"]
response = self.api_client.run_search(query, dialect, start=offset, rows=length)
return self._handle_errors(response, return_obj)
except Exception as e:
if response_txt is not None:
ErrorResponder.fill_error(return_obj, message='unexpected exception')
print('can not parse response: ' + str(response_txt))
else:
raise e
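# Illustrative search_id payload consumed by create_results_connection above
# (the field names come from the code; the values are hypothetical):
#   '{"query": "process_name:chrome.exe", "dialect": "process"}'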
|
py | b416133c0b858ffaaf559e130c71d8d6dc3e11b8 | from ..base import InlineQueryResult, Field, InputMessageContent
from ..inline_keyboard_markup import InlineKeyboardMarkup
class InlineQueryResultCachedAudio(InlineQueryResult):
audio_file_id = Field()
caption = Field()
parse_mode = Field()
reply_markup = Field()
input_message_content = Field()
def __init__(self,
id: str,
audio_file_id: str,
caption: str = None,
parse_mode: str = None,
reply_markup: InlineKeyboardMarkup = None,
input_message_content: InputMessageContent = None
):
super().__init__(id, 'audio')
self.audio_file_id = \
Field(audio_file_id, [str])
self.caption = \
Field(caption, [str])
self.parse_mode = \
Field(parse_mode, [str])
self.reply_markup = \
Field(reply_markup, [InlineKeyboardMarkup])
self.input_message_content = \
Field(input_message_content, [InputMessageContent])
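# Illustrative usage (the audio_file_id value is hypothetical):
#   result = InlineQueryResultCachedAudio(id='1', audio_file_id='CQACAgIAAx0...', caption='demo')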
|
py | b4161426feafcf77c27cec70746518f0dfe73641 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################################
# Author: jonyqin
# Created Time: Thu 11 Sep 2014 03:55:41 PM CST
# File Name: Sample.py
# Description: WXBizMsgCrypt usage demo file
#########################################################################
from WXBizMsgCrypt import WXBizMsgCrypt
import xml.etree.cElementTree as ET
import sys
if __name__ == "__main__":
    # Assume the enterprise has configured the following parameters on the WeChat work platform
sToken = "QDG6eK"
sEncodingAESKey = "jWmYm7qr5nMoAUwZRjGtBxmz3KA1tkAj3ykkR6q2B2C"
sCorpID = "wx5823bf96d3bd56c7"
    '''
    ------------ Usage example 1: verify the callback URL ---------------
    * When the enterprise enables callback mode, the WeChat work platform sends a GET
      request to the verification URL. When verification is triggered, the enterprise
      receives a request similar to:
    * GET /cgi-bin/wxpush?msg_signature=5c45ff5e21c57e6ad56bac8758b79b1d9ac89fd3&timestamp=1409659589&nonce=263014780&echostr=P9nAzCzyDtyTWESHep1vC5X9xho%2FqYX3Zpb4yKa9SKld1DsH3Iyt3tP3zNdtp%2B4RPcs8TgAE7OaBO%2BFZXvnaqQ%3D%3D
    * HTTP/1.1 Host: qy.weixin.qq.com
    On receiving this request the enterprise should:
    1. Parse the GET parameters: the message signature (msg_signature), the timestamp,
       the nonce, and the encrypted random string pushed by the platform (echostr);
       remember to URL-decode them.
    2. Verify the message signature.
    3. Decrypt echostr and return the plaintext as the response to the GET request.
    Steps 2 and 3 can be implemented with the VerifyURL helper provided by the platform.
    '''
wxcpt=WXBizMsgCrypt(sToken,sEncodingAESKey,sCorpID)
#sVerifyMsgSig=HttpUtils.ParseUrl("msg_signature")
sVerifyMsgSig="5c45ff5e21c57e6ad56bac8758b79b1d9ac89fd3"
#sVerifyTimeStamp=HttpUtils.ParseUrl("timestamp")
sVerifyTimeStamp="1409659589"
#sVerifyNonce=HttpUitls.ParseUrl("nonce")
sVerifyNonce="263014780"
#sVerifyEchoStr=HttpUtils.ParseUrl("echostr")
sVerifyEchoStr="P9nAzCzyDtyTWESHep1vC5X9xho/qYX3Zpb4yKa9SKld1DsH3Iyt3tP3zNdtp+4RPcs8TgAE7OaBO+FZXvnaqQ=="
ret,sEchoStr=wxcpt.VerifyURL(sVerifyMsgSig, sVerifyTimeStamp,sVerifyNonce,sVerifyEchoStr)
if(ret!=0):
print "ERR: VerifyURL ret: " + ret
sys.exit(1)
    # URL verification succeeded; return sEchoStr to the WeChat work platform
#HttpUtils.SetResponse(sEchoStr)
    '''
    ------------ Usage example 2: decrypt a message replied by the user ---------------
    When a user replies to a message or a click event fires, the enterprise receives a
    callback message that the platform has encrypted and POSTed to the enterprise; see
    the official documentation for the ciphertext format.
    Assume the enterprise receives a callback like the following (the ciphertext XML
    body is the same string assigned to sReqData below):
    POST /cgi-bin/wxpush?msg_signature=477715d11cdb4164915debcba66cb864d751f3e6&timestamp=1409659813&nonce=1372623149 HTTP/1.1
    Host: qy.weixin.qq.com
    Content-Length: 613
    After receiving the POST request the enterprise should:
    1. Parse the URL parameters: the message signature (msg_signature), the timestamp
       and the nonce.
    2. Verify the message signature.
    3. Parse the POST body as XML and decrypt the content of its <Encrypt> element; the
       decrypted plaintext is the user's reply (see the official documentation for the
       plaintext format).
    Steps 2 and 3 can be implemented with the DecryptMsg helper provided by the platform.
    '''
# sReqMsgSig = HttpUtils.ParseUrl("msg_signature")
sReqMsgSig = "477715d11cdb4164915debcba66cb864d751f3e6"
sReqTimeStamp = "1409659813"
sReqNonce = "1372623149"
sReqData = "<xml><ToUserName><![CDATA[wx5823bf96d3bd56c7]]></ToUserName><Encrypt><![CDATA[RypEvHKD8QQKFhvQ6QleEB4J58tiPdvo+rtK1I9qca6aM/wvqnLSV5zEPeusUiX5L5X/0lWfrf0QADHHhGd3QczcdCUpj911L3vg3W/sYYvuJTs3TUUkSUXxaccAS0qhxchrRYt66wiSpGLYL42aM6A8dTT+6k4aSknmPj48kzJs8qLjvd4Xgpue06DOdnLxAUHzM6+kDZ+HMZfJYuR+LtwGc2hgf5gsijff0ekUNXZiqATP7PF5mZxZ3Izoun1s4zG4LUMnvw2r+KqCKIw+3IQH03v+BCA9nMELNqbSf6tiWSrXJB3LAVGUcallcrw8V2t9EL4EhzJWrQUax5wLVMNS0+rUPA3k22Ncx4XXZS9o0MBH27Bo6BpNelZpS+/uh9KsNlY6bHCmJU9p8g7m3fVKn28H3KDYA5Pl/T8Z1ptDAVe0lXdQ2YoyyH2uyPIGHBZZIs2pDBS8R07+qN+E7Q==]]></Encrypt><AgentID><![CDATA[218]]></AgentID></xml>"
ret,sMsg=wxcpt.DecryptMsg( sReqData, sReqMsgSig, sReqTimeStamp, sReqNonce)
if( ret!=0 ):
print "ERR: DecryptMsg ret: " + ret
sys.exit(1)
    # Decryption succeeded; sMsg is the plaintext message in XML format
    # TODO: process the plaintext
# For example:
xml_tree = ET.fromstring(sMsg)
content = xml_tree.find("Content").text
# ...
# ...
    '''
    ------------ Usage example 3: encrypt the enterprise's reply to the user ---------------
    A message that the enterprise sends back to the user must also be encrypted and
    packed into the ciphertext XML format.
    Assume the enterprise wants to reply with the following plaintext:
    <xml>
    <ToUserName><![CDATA[mycreate]]></ToUserName>
    <FromUserName><![CDATA[wx5823bf96d3bd56c7]]></FromUserName>
    <CreateTime>1348831860</CreateTime>
    <MsgType><![CDATA[text]]></MsgType>
    <Content><![CDATA[this is a test]]></Content>
    <MsgId>1234567890123456</MsgId>
    <AgentID>128</AgentID>
    </xml>
    To send this plaintext back to the user the enterprise should:
    1. Generate a timestamp and a nonce for the message signature (the values parsed
       from the platform's POST URL can also be reused directly).
    2. Encrypt the plaintext to obtain the ciphertext.
    3. Compute the message signature from the ciphertext, the timestamp and nonce from
       step 1, and the token configured on the platform.
    4. Pack the ciphertext, signature, timestamp and nonce into an XML string and send
       it to the WeChat work platform.
    Steps 2, 3 and 4 can be implemented with the EncryptMsg helper provided by the platform.
    '''
sRespData = "<xml><ToUserName><![CDATA[mycreate]]></ToUserName><FromUserName><![CDATA[wx5823bf96d3bd56c7]]></FromUserName><CreateTime>1348831860</CreateTime><MsgType><![CDATA[text]]></MsgType><Content><![CDATA[this is a test]]></Content><MsgId>1234567890123456</MsgId><AgentID>128</AgentID></xml>"
ret,sEncryptMsg=wxcpt.EncryptMsg(sRespData, sReqNonce, sReqTimeStamp)
if( ret!=0 ):
print "ERR: EncryptMsg ret: " + ret
sys.exit(1)
    # ret == 0 means encryption succeeded; the enterprise should return sEncryptMsg to the WeChat work platform
#TODO:
#HttpUitls.SetResponse(sEncryptMsg)
|
py | b41614be2630f8d7426f394cfc7dc0f5234270f4 | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
# Written by Hu Di
import cv2
import argparse
import os
import numpy as np
import time
import tqdm
import json
import torch
import sys
sys.path.append('./IPU')
from torchvision.ops import nms
from models.get_model import make_model
from utils.utils import load_onnx
from config import cfg
from datasets.factory import get_imdb
from yaml_parser import change_cfg_by_yaml_file
from ipu_tensor import gcop
from gc_session import Session
from utils.utils import load_from_pth_with_mappin
from utils import logger
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(
description='evaluate a Fast R-CNN network')
parser.add_argument('yaml', type=str, help='path of yaml')
parser.add_argument('--model-name',
dest='model_name',
help='model name',
default='',
type=str)
args = parser.parse_args()
return args
def clip_boxes_npy(boxes, im_shape):
"""
Clip boxes to image boundaries.
boxes must be tensor or Variable, im_shape can be anything but Variable
"""
boxes = boxes.reshape(boxes.shape[0], -1, 4)
boxes = np.stack([
boxes[:, :, 0].clip(0, im_shape[1] - 1), boxes[:, :, 1].clip(
0, im_shape[0] - 1), boxes[:, :, 2].clip(0, im_shape[1] - 1),
boxes[:, :, 3].clip(0, im_shape[0] - 1)
], 2).reshape(boxes.shape[0], -1)
return boxes
def bbox_transform_inv_npy(boxes, deltas):
# boxes: (n,4)
# deltas: (n,4)
# Input should be both tensor or both Variable and on the same device
if len(boxes) == 0:
return deltas * 0
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.concatenate([
_[:, :, np.newaxis] for _ in [
pred_ctr_x - 0.5 * pred_w, pred_ctr_y - 0.5 * pred_h, pred_ctr_x +
0.5 * pred_w, pred_ctr_y + 0.5 * pred_h
]
], 2).reshape(len(boxes), -1)
return pred_boxes
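# Illustrative behaviour of the decoding above (the box values are hypothetical): because
# widths and heights use the "+1" pixel convention, an all-zero delta re-centres the box, e.g.
# bbox_transform_inv_npy(np.array([[0., 0., 9., 9.]]), np.zeros((1, 4))) returns
# [[0., 0., 10., 10.]].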
args = parse_args()
change_cfg_by_yaml_file(args.yaml)
# init outputs dir
output_dir = cfg.output_dir
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# init log
log_prefix = args.model_name if args.model_name != '' else ''
log_prefix = '_inference' + log_prefix
logger.init_log(output_dir,
log_name=cfg.task_name,
post_fix=log_prefix,
resume=False,
tb_on=False,
wandb_on=False)
logger.log_str('output dir:', output_dir)
if cfg.TEST.DATASET == 'voc':
imdb = get_imdb('voc_2007_test')
elif cfg.TEST.DATASET == 'coco':
imdb = get_imdb('coco_2017_val')
else:
raise ValueError("Unknown dataset!")
imdb.competition_mode(False)
val_size = imdb.num_images
logger.log_str('{:d} roidb entries'.format(val_size))
IM_WIDTH, IM_HEIGHT = cfg.TEST.SCALES
INPUT_SHAPE = [1, 3, IM_HEIGHT, IM_WIDTH]
THRESH = cfg.TEST.SCORE_THRESH_TEST
MAX_PER_IMAGE = 100
total_iters = val_size
last_state_json = os.path.join(output_dir, 'state.json')
if cfg.TEST.MODEL == '':
with open(last_state_json, 'r') as f:
last_state = json.load(f)
iters = last_state['iters']
pretrained_weights_path = os.path.join(output_dir,
'iter{}.onnx'.format(iters))
else:
pretrained_weights_path = cfg.TEST.MODEL
if args.model_name != '':
pretrained_weights_path = os.path.join(output_dir,
'{}.onnx'.format(args.model_name))
# load resnet50 weights
init_weights_path = cfg.INIT_WEIGHTS_PATH
mappin_path = cfg.WEIGHTS_MAPPIN_PAtH
initializer = load_from_pth_with_mappin(init_weights_path, mappin_path)
# load faster-rcnn trained weights
logger.log_str('load weights from :', pretrained_weights_path)
if pretrained_weights_path.endswith('.pth'):
pretrained_weights = load_from_pth_with_mappin(pretrained_weights_path,
mappin_path)
elif pretrained_weights_path.endswith('.onnx'):
pretrained_weights = load_onnx(pretrained_weights_path)
else:
raise RuntimeError('wrong file format')
# merge them
pretrained_weights = {
**initializer,
**pretrained_weights
} # overwrite some weights in initializer by weights in pretrained_weights
gcop.enable_global_initializer(pretrained_weights)
# set IPU
gcop.safe_mode_on()
cfg.SESSION.COMMON.enableEngineCaching = False
gcop.set_options(cfg.SESSION, training=False)
gcop.set_memory_proportion(cfg.TRAIN.AVAILABLE_MEMORY_PROPORTION)
# build net
net = make_model(
cfg.MODEL_NAME,
input_im_shape=INPUT_SHAPE,
fp16_on=cfg.FLOAT16_ON,
classes=[1] * cfg.NUM_CLASSES,
training=False,
)
net.bulid_graph()
currentT = time.time()
sess = Session(net.outputs)
logger.log_str('model build time:', (time.time() - currentT) / 60,
' miniutes')
# gather results
all_boxes = [[[] for _ in range(imdb.num_images)]
for _ in range(imdb.num_classes)]
for im_id in tqdm.tqdm(list(range(total_iters))):
im = cv2.imread(imdb.image_path_at(im_id))
normalized_im = im - cfg.TEST.PIXEL_MEAN
normalized_im = normalized_im / cfg.TEST.PIXEL_STD
h, w, _ = im.shape
if cfg.TEST.KEEP_RATIO: # IM_WIDTH,IM_HEIGHT
x_scale = min(IM_HEIGHT / h, IM_WIDTH / w)
y_scale = x_scale
else:
x_scale = IM_WIDTH / w
y_scale = IM_HEIGHT / h
normalized_im = cv2.resize(normalized_im,
None,
None,
fx=x_scale,
fy=y_scale,
interpolation=cv2.INTER_LINEAR).astype(
np.float32)
im_data = np.zeros([IM_HEIGHT, IM_WIDTH, 3], np.float32)
im_data[:normalized_im.shape[0], :normalized_im.
shape[1], :] = normalized_im
im_data = np.transpose(
im_data[np.newaxis, :, :, :],
[0, 3, 1, 2]).astype(np.float32)
im_data = np.ascontiguousarray(im_data)
start = time.time()
feed_dict = {net.inputs[k]: n for k, n in zip(net.inputs, [im_data])}
results = sess.run(feed_dict)
scores = results['cls_prob'].data.astype(np.float32) # 300,21
bbox_deltas = results['bbox_pred'].data.astype(np.float32) # 300,84
rois = results['fixed_length_roi'].data.astype(np.float32) # 1,300,4
rois_keep = results['roi_keeps'].data.astype(np.float32) # 1,300
# collect valid results
valid_area_mask = results['valid_area_mask'].data # 256,1
valid_area_indices = np.where(valid_area_mask[:, 0] > 0)[0]
scores = scores[valid_area_indices, :]
bbox_deltas = bbox_deltas[valid_area_indices, :]
rois = rois[:, valid_area_indices, :]
boxes = rois[0] / np.array([x_scale, y_scale, x_scale, y_scale],
dtype=np.float32)
pred_boxes = bbox_transform_inv_npy(boxes, bbox_deltas)
pred_boxes = clip_boxes_npy(pred_boxes, [IM_HEIGHT, IM_WIDTH])
for j in range(1, imdb.num_classes):
inds = np.where(scores[:, j] > THRESH)[0]
cls_scores = scores[inds, j]
cls_boxes = pred_boxes[inds, j * 4:(j + 1) * 4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(torch.from_numpy(cls_boxes), torch.from_numpy(cls_scores),
cfg.TEST.NMS).numpy() if cls_dets.size > 0 else []
cls_dets = cls_dets[keep, :]
all_boxes[j][im_id] = cls_dets
# Limit to MAX_PER_IMAGE detections *over all classes*
if MAX_PER_IMAGE > 0:
image_scores = np.hstack(
[all_boxes[j][im_id][:, -1] for j in range(1, imdb.num_classes)])
if len(image_scores) > MAX_PER_IMAGE:
image_thresh = np.sort(image_scores)[-MAX_PER_IMAGE]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][im_id][:, -1] >= image_thresh)[0]
all_boxes[j][im_id] = all_boxes[j][im_id][keep, :]
eval_output_dir = os.path.join(output_dir, 'eval')
if not os.path.exists(eval_output_dir):
os.mkdir(eval_output_dir)
np.save(os.path.join(eval_output_dir, 'all_boxes.npy'), all_boxes)
mAP = imdb.evaluate_detections(all_boxes, eval_output_dir, logger=logger)
logger.log_str('end!!!')
|
py | b416159d27b47f25b14e5a54dbb369fee3e7daa2 | """
This file offers the methods to automatically retrieve the graph Trichophyton equinum.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def TrichophytonEquinum(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Trichophyton equinum graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Trichophyton equinum graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="TrichophytonEquinum",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
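# Illustrative usage (assumes network access to the STRING repository and an installed
# ensmallen backend):
#   graph = TrichophytonEquinum(directed=False, version="links.v11.5")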
|
py | b4161728ec8b1fb0c8867275bd057abf0c902623 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a creative wrapper to the 'OUTER' wrapping order.
To determine which creative wrappers exist, run get_all_creative_wrappers.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
# Set the ID of the creative wrapper to update.
CREATIVE_WRAPPER_ID = 'INSERT_CREATIVE_WRAPPER_ID_HERE'
def main(client, creative_wrapper_id):
# Initialize appropriate service.
creative_wrapper_service = client.GetService('CreativeWrapperService',
version='v201711')
# Create statement to get a creative wrapper by ID.
statement = (dfp.StatementBuilder()
.Where('id = :creativeWrapperId')
.WithBindVariable('creativeWrapperId',
long(creative_wrapper_id)))
# Get creative wrappers.
response = creative_wrapper_service.getCreativeWrappersByStatement(
statement.ToStatement())
if 'results' in response:
updated_creative_wrappers = []
for creative_wrapper in response['results']:
creative_wrapper['ordering'] = 'OUTER'
updated_creative_wrappers.append(creative_wrapper)
# Update the creative wrappers on the server.
creative_wrappers = creative_wrapper_service.updateCreativeWrappers(
updated_creative_wrappers)
# Display results.
for creative_wrapper in creative_wrappers:
print (('Creative wrapper with ID "%s" and wrapping order "%s" '
'was updated.') % (creative_wrapper['id'],
creative_wrapper['ordering']))
else:
print 'No creative wrappers found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CREATIVE_WRAPPER_ID)
|
py | b416177c9b25f36903afa65272d890dc5905a0c3 | # encoding: utf-8
# module PySide.QtCore
# from C:\Python27\lib\site-packages\PySide\QtCore.pyd
# by generator 1.147
# no doc
# imports
import Shiboken as __Shiboken
from QAbstractAnimation import QAbstractAnimation
class QVariantAnimation(QAbstractAnimation):
# no doc
def currentValue(self, *args, **kwargs): # real signature unknown
pass
def duration(self, *args, **kwargs): # real signature unknown
pass
def easingCurve(self, *args, **kwargs): # real signature unknown
pass
def endValue(self, *args, **kwargs): # real signature unknown
pass
def event(self, *args, **kwargs): # real signature unknown
pass
def interpolated(self, *args, **kwargs): # real signature unknown
pass
def keyValueAt(self, *args, **kwargs): # real signature unknown
pass
def keyValues(self, *args, **kwargs): # real signature unknown
pass
def setDuration(self, *args, **kwargs): # real signature unknown
pass
def setEasingCurve(self, *args, **kwargs): # real signature unknown
pass
def setEndValue(self, *args, **kwargs): # real signature unknown
pass
def setKeyValueAt(self, *args, **kwargs): # real signature unknown
pass
def setKeyValues(self, *args, **kwargs): # real signature unknown
pass
def setStartValue(self, *args, **kwargs): # real signature unknown
pass
def startValue(self, *args, **kwargs): # real signature unknown
pass
def updateCurrentTime(self, *args, **kwargs): # real signature unknown
pass
def updateCurrentValue(self, *args, **kwargs): # real signature unknown
pass
def updateState(self, *args, **kwargs): # real signature unknown
pass
def valueChanged(self, *args, **kwargs): # real signature unknown
""" Signal """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
staticMetaObject = None # (!) real value is '<PySide.QtCore.QMetaObject object at 0x0000000003E67D88>'
|
py | b4161a9950fd3d189b15d2cf3356ed7035a608f8 | # Copyright 2019 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to find runfiles location."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from dmlab2d import dmlab2d_pybind
def _find_sub_directory(path, sub_directory):
"""Reverse walks `path` to find `sub_directory`.
Args:
path: Path to look for sub_directory in.
sub_directory: Name of subdirectory to search for.
Returns:
Returns full path to `sub_directory` if found otherwise None.
"""
while path:
result = os.path.join(path, sub_directory)
if os.path.isdir(result):
return result
last_path = path
path = os.path.dirname(last_path)
if last_path == path:
return None # At root.
return None
def find():
"""Returns path to folder containing DMLab2D assets.
Raises:
FileNotFoundError: The assets could not be found.
"""
sub_directory = 'org_deepmind_lab2d/dmlab2d/lib'
path = find_directory(sub_directory)
if path is None:
raise FileNotFoundError(sub_directory)
return find_directory(sub_directory)[:-len(sub_directory)]
def find_directory(sub_directory):
"""Searches for `sub_directory` heuristically.
Searches for `sub_directory` folder in possible built-in data dependency
directories, sys.path, working directory and absolute path.
Args:
sub_directory: Name of subdirectory that must exist.
Returns:
A path to an existing directory with suffix `sub_directory` or None.
"""
sub_directory = sub_directory or ''
# Try using environment variable created when running tests.
data_directory = os.environ.get('TEST_SRCDIR')
if data_directory:
return os.path.join(data_directory, sub_directory)
# Try using environment variable created by bazel run.
data_directory = _find_sub_directory(
os.environ.get('RUNFILES_MANIFEST_FILE'), sub_directory)
if data_directory:
return data_directory
# Try using path to current executable.
data_directory = _find_sub_directory(sys.argv[0], sub_directory)
if data_directory:
return data_directory
# Try using path to module.
data_directory = _find_sub_directory(
os.path.dirname(dmlab2d_pybind.__file__), sub_directory)
if data_directory:
return data_directory
# Try using path to working directory.
data_directory = _find_sub_directory(os.getcwd(), sub_directory)
if data_directory:
return data_directory
# Try using relative path directly.
data_directory = os.path.join(os.getcwd(), sub_directory)
if os.path.isdir(data_directory):
return data_directory
# Try using search path.
for path in sys.path:
data_directory = _find_sub_directory(path, sub_directory)
if data_directory:
return data_directory
data_directory = os.path.join(path, sub_directory)
if os.path.isdir(data_directory):
return data_directory
# Try using absolute path.
if os.path.isdir(sub_directory):
return sub_directory
return None
|
py | b4161b28594f80fc4e1df5392eb6e5f992d4206b | import numpy as np
np.random.seed(206)
import scipy
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import theano
import theano.tensor as tt
import pymc3 as pm
from pymc3.gp.cov import Covariance
import scipy.stats as st
import random
# pymc3 gp helpers used by MultiMarginal's conditional/predict methods below
# (module paths assume pymc3 3.x)
from pymc3.gp.util import cholesky, infer_shape, solve_lower, stabilize
from pymc3.distributions.distribution import draw_values
class MultiMarginal(pm.gp.gp.Base):
R"""
MultiMarginal Gaussian process.
The `MultiMarginal` class is an implementation of the sum of a GP
prior and additive noise. It has `marginal_likelihood`, `conditional`
and `predict` methods. This GP implementation can be used to
implement regression on data that is normally distributed. For more
information on the `prior` and `conditional` methods, see their docstrings.
Parameters
----------
cov_func: None, 2D array, or instance of Covariance
The covariance function. Defaults to zero.
mean_func: None, instance of Mean
The mean function. Defaults to zero.
Examples
--------
.. code:: python
# A one dimensional column vector of inputs.
X = np.linspace(0, 1, 10)[:, None]
with pm.Model() as model:
# Specify the covariance function.
cov_func = pm.gp.cov.ExpQuad(1, ls=0.1)
# Specify the GP. The default mean function is `Zero`.
gp = pm.gp.Marginal(cov_func=cov_func)
# Place a GP prior over the function f.
sigma = pm.HalfCauchy("sigma", beta=3)
y_ = gp.marginal_likelihood("y", X=X, y=y, noise=sigma)
...
# After fitting or sampling, specify the distribution
# at new points with .conditional
Xnew = np.linspace(-1, 2, 50)[:, None]
with model:
fcond = gp.conditional("fcond", Xnew=Xnew)
"""
def _build_marginal_likelihood(self, X, y, noise):
mu = tt.zeros_like(y) # self.mean_func(X)
Kxx = self.cov_func(X)
Knx = noise(X)
cov = Kxx + Knx
return mu, cov
def marginal_likelihood(self, name, X, y, colchol, noise, matrix_shape, is_observed=True, **kwargs):
R"""
Returns the marginal likelihood distribution, given the input
locations `X` and the data `y`.
This is integral over the product of the GP prior and a normal likelihood.
.. math::
y \mid X,\theta \sim \int p(y \mid f,\, X,\, \theta) \, p(f \mid X,\, \theta) \, df
Parameters
----------
name: string
Name of the random variable
X: array-like
Function input values. If one-dimensional, must be a column
vector with shape `(n, 1)`.
y: array-like
Data that is the sum of the function with the GP prior and Gaussian
noise. Must have shape `(n, )`.
noise: scalar, Variable, or Covariance
Standard deviation of the Gaussian noise. Can also be a Covariance for
non-white noise.
is_observed: bool
Whether to set `y` as an `observed` variable in the `model`.
Default is `True`.
**kwargs
Extra keyword arguments that are passed to `MvNormal` distribution
constructor.
"""
if not isinstance(noise, Covariance):
noise = pm.gp.cov.WhiteNoise(noise)
mu, cov = self._build_marginal_likelihood(X, y, noise)
self.X = X
self.y = y
self.noise = noise
        # Warning: the shape of y is hardcoded
if is_observed:
return pm.MatrixNormal(name, mu=mu, colchol=colchol, rowcov=cov, observed=y, shape=(matrix_shape[0],matrix_shape[1]), **kwargs)
else:
shape = infer_shape(X, kwargs.pop("shape", None))
return pm.MvNormal(name, mu=mu, cov=cov, shape=shape, **kwargs)
def _get_given_vals(self, given):
if given is None:
given = {}
if 'gp' in given:
cov_total = given['gp'].cov_func
mean_total = given['gp'].mean_func
else:
cov_total = self.cov_func
mean_total = self.mean_func
if all(val in given for val in ['X', 'y', 'noise']):
X, y, noise = given['X'], given['y'], given['noise']
if not isinstance(noise, Covariance):
noise = pm.gp.cov.WhiteNoise(noise)
else:
X, y, noise = self.X, self.y, self.noise
return X, y, noise, cov_total, mean_total
def _build_conditional(self, Xnew, pred_noise, diag, X, y, noise,
cov_total, mean_total):
Kxx = cov_total(X)
Kxs = self.cov_func(X, Xnew)
Knx = noise(X)
rxx = y - mean_total(X)
L = cholesky(stabilize(Kxx) + Knx)
A = solve_lower(L, Kxs)
v = solve_lower(L, rxx)
mu = self.mean_func(Xnew) + tt.dot(tt.transpose(A), v)
if diag:
Kss = self.cov_func(Xnew, diag=True)
var = Kss - tt.sum(tt.square(A), 0)
if pred_noise:
var += noise(Xnew, diag=True)
return mu, var
else:
Kss = self.cov_func(Xnew)
cov = Kss - tt.dot(tt.transpose(A), A)
if pred_noise:
cov += noise(Xnew)
return mu, cov if pred_noise else stabilize(cov)
def conditional(self, name, Xnew, pred_noise=False, given=None, **kwargs):
R"""
Returns the conditional distribution evaluated over new input
locations `Xnew`.
Given a set of function values `f` that the GP prior was over, the
conditional distribution over a set of new points, `f_*` is:
.. math::
f_* \mid f, X, X_* \sim \mathcal{GP}\left(
K(X_*, X) [K(X, X) + K_{n}(X, X)]^{-1} f \,,
K(X_*, X_*) - K(X_*, X) [K(X, X) + K_{n}(X, X)]^{-1} K(X, X_*) \right)
Parameters
----------
name: string
Name of the random variable
Xnew: array-like
Function input values. If one-dimensional, must be a column
vector with shape `(n, 1)`.
pred_noise: bool
Whether or not observation noise is included in the conditional.
Default is `False`.
given: dict
Can optionally take as key value pairs: `X`, `y`, `noise`,
and `gp`. See the section in the documentation on additive GP
models in PyMC3 for more information.
**kwargs
Extra keyword arguments that are passed to `MvNormal` distribution
constructor.
"""
givens = self._get_given_vals(given)
mu, cov = self._build_conditional(Xnew, pred_noise, False, *givens)
shape = infer_shape(Xnew, kwargs.pop("shape", None))
return pm.MvNormal(name, mu=mu, cov=cov, shape=shape, **kwargs)
def predict(self, Xnew, point=None, diag=False, pred_noise=False, given=None):
R"""
Return the mean vector and covariance matrix of the conditional
distribution as numpy arrays, given a `point`, such as the MAP
estimate or a sample from a `trace`.
Parameters
----------
Xnew: array-like
Function input values. If one-dimensional, must be a column
vector with shape `(n, 1)`.
point: pymc3.model.Point
A specific point to condition on.
diag: bool
If `True`, return the diagonal instead of the full covariance
matrix. Default is `False`.
pred_noise: bool
Whether or not observation noise is included in the conditional.
Default is `False`.
given: dict
Same as `conditional` method.
"""
if given is None:
given = {}
mu, cov = self.predictt(Xnew, diag, pred_noise, given)
return draw_values([mu, cov], point=point)
def predictt(self, Xnew, diag=False, pred_noise=False, given=None):
R"""
Return the mean vector and covariance matrix of the conditional
distribution as symbolic variables.
Parameters
----------
Xnew: array-like
Function input values. If one-dimensional, must be a column
vector with shape `(n, 1)`.
diag: bool
If `True`, return the diagonal instead of the full covariance
matrix. Default is `False`.
pred_noise: bool
Whether or not observation noise is included in the conditional.
Default is `False`.
given: dict
Same as `conditional` method.
"""
givens = self._get_given_vals(given)
mu, cov = self._build_conditional(Xnew, pred_noise, diag, *givens)
return mu, cov
# This function is used to calculate the cwc index by pinrw and picp as input
def cwc_cal(pinrw, picp, mu=0.8, eta=50):
gamma = 0 if picp >= mu else 1
cwc = pinrw*(1 + gamma*np.exp(-eta * (picp-mu)))
return cwc
# This function is used to calculate the index PICP and pinrw and CWC and so on
# The input is prediction of model and true measured value
def index_cal(y_pred, y_true, conf_int=0.95):
# conf_int = 0.95
alpha = 1-conf_int
n = np.shape(y_true)[0]
n_samples = np.shape(y_pred)[0]
y_pred_mu = np.mean(y_pred,axis=0)
y_pred_sd = np.std(y_pred,axis=0)
# Calculate the lower bound and upper bound of 95% confidence interval
y_pred_L = y_pred_mu - scipy.stats.norm.ppf(1-alpha/2) * y_pred_sd
y_pred_U = y_pred_mu + scipy.stats.norm.ppf(1-alpha/2) * y_pred_sd
coverage = np.zeros(n)
for i in range(n):
if (y_true[i] > y_pred_L[i]) & (y_true[i] < y_pred_U[i]):
coverage[i] = 1
else:
coverage[i] = 0
# prediction interval coverage probability
picp = np.sum(coverage) / n
R = np.max(y_true) - np.min(y_true)
# mean prediction interval width
mpiw = np.sum(y_pred_U-y_pred_L) / n
# normalized mean prediction interval width
nmpiw = mpiw / R
# root-mean-square prediction interval width
rpiw = (y_pred_U-y_pred_L)*(y_pred_U-y_pred_L)
rpiw = np.sqrt(np.sum(rpiw)/n)
# normalized root-mean-square prediction interval width
pinrw = rpiw / R
# CWC
cwc = cwc_cal(pinrw, picp, mu=0.8)
return pd.DataFrame([picp, mpiw, nmpiw, rpiw, pinrw, cwc],index=['picp', 'mpiw', 'nmpiw', 'rpiw', 'pinrw', 'cwc'])
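# Illustrative sanity check of cwc_cal (the numbers are hypothetical): when picp >= mu
# (e.g. cwc_cal(0.2, 0.9) with mu=0.8) the penalty term vanishes and the result equals
# pinrw; when picp < mu the interval width is inflated by the factor
# (1 + exp(-eta*(picp-mu))).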
# This is the function used for bayesian calibration
def MultiOutput_Bayesian_Calibration(n_y,DataComp,DataField,DataPred,output_folder):
# This is data preprocessing part
n = np.shape(DataField)[0] # number of measured data
m = np.shape(DataComp)[0] # number of simulation data
p = np.shape(DataField)[1] - n_y # number of input x
q = np.shape(DataComp)[1] - p - n_y # number of calibration parameters t
xc = DataComp[:,n_y:] # simulation input x + calibration parameters t
xf = DataField[:,n_y:] # observed input
yc = DataComp[:,:n_y] # simulation output
yf = DataField[:,:n_y] # observed output
x_pred = DataPred[:,n_y:] # design points for predictions
y_true = DataPred[:,:n_y] # true measured value for design points for predictions
n_pred = np.shape(x_pred)[0] # number of predictions
N = n+m+n_pred
# Put points xc, xf, and x_pred on [0,1]
for i in range(p):
x_min = min(min(xc[:,i]),min(xf[:,i]))
x_max = max(max(xc[:,i]),max(xf[:,i]))
xc[:,i] = (xc[:,i]-x_min)/(x_max-x_min)
xf[:,i] = (xf[:,i]-x_min)/(x_max-x_min)
x_pred[:,i] = (x_pred[:,i]-x_min)/(x_max-x_min)
# Put calibration parameters t on domain [0,1]
for i in range(p,(p+q)):
t_min = min(xc[:,i])
t_max = max(xc[:,i])
xc[:,i] = (xc[:,i]-t_min)/(t_max-t_min)
# store mean and std of yc for future scale back use
yc_mean = np.zeros(n_y)
yc_sd = np.zeros(n_y)
# standardization of output yf and yc
for i in range(n_y):
yc_mean[i] = np.mean(yc[:,i])
yc_sd[i] = np.std(yc[:,i])
yc[:,i] = (yc[:,i]-yc_mean[i])/yc_sd[i]
yf[:,i] = (yf[:,i]-yc_mean[i])/yc_sd[i]
# This is modeling part
with pm.Model() as model:
# Claim prior part
eta1 = pm.HalfCauchy("eta1", beta=5) # for eta of gaussian process
lengthscale = pm.Gamma("lengthscale", alpha=2, beta=1, shape=(p+q)) # for lengthscale of gaussian process
tf = pm.Beta("tf", alpha=2, beta=2, shape=q) # for calibration parameters
sigma1 = pm.HalfCauchy('sigma1', beta=5) # for noise
y_pred = pm.Normal('y_pred', 0, 1.5, shape=(n_pred,n_y)) # for y prediction
# Setup prior of right cholesky matrix
sd_dist = pm.HalfCauchy.dist(beta=2.5, shape=n_y)
colchol_packed = pm.LKJCholeskyCov('colcholpacked', n=n_y, eta=2,sd_dist=sd_dist)
colchol = pm.expand_packed_triangular(n_y, colchol_packed)
# Concate data into a big matrix[[xf tf], [xc tc], [x_pred tf]]
xf1 = tt.concatenate([xf, tt.fill(tt.zeros([n,q]), tf)], axis = 1)
x_pred1 = tt.concatenate([x_pred, tt.fill(tt.zeros([n_pred,q]), tf)], axis = 1)
X = tt.concatenate([xf1, xc, x_pred1], axis = 0)
# Concate data into a big matrix[[yf], [yc], [y_pred]]
y = tt.concatenate([yf, yc, y_pred], axis = 0)
# Covariance funciton of gaussian process
cov_z = eta1**2 * pm.gp.cov.ExpQuad((p+q), ls=lengthscale)
# Gaussian process with covariance funciton of cov_z
gp = MultiMarginal(cov_func = cov_z)
# Bayesian inference
matrix_shape = [n+m+n_pred,n_y]
outcome = gp.marginal_likelihood("outcome", X=X, y=y, colchol=colchol, noise=sigma1, matrix_shape=matrix_shape)
trace = pm.sample(250,cores=1)
# This part is for data collection and visualization
pm.summary(trace).to_csv(output_folder + '/trace_summary.csv')
print(pm.summary(trace))
name_columns = []
n_columns = n_pred
for i in range(n_columns):
for j in range(n_y):
name_columns.append('y'+str(j+1)+'_pred'+str(i+1))
y_prediction = pd.DataFrame(np.array(trace['y_pred']).reshape(500,n_pred*n_y),columns=name_columns)
#Draw Picture of cvrmse_dist and calculate index
for i in range(n_y):
index = list(range(0+i,n_pred*n_y+i,n_y))
y_prediction1 = pd.DataFrame(y_prediction.iloc[:,index])
y_prediction1 = y_prediction1*yc_sd[i]+yc_mean[i] # Scale y_prediction back
y_prediction1.to_csv(output_folder + '/y_pred'+str(i+1)+'.csv') # Store y_prediction
# Calculate the distribution of cvrmse
cvrmse = 100*np.sqrt(np.sum(np.square(y_prediction1-y_true[:,i]),axis=1)/n_pred)/np.mean(y_true[:,i])
# Calculate the index and store it into csv
index_cal(y_prediction1,y_true[:,i]).to_csv(output_folder + '/index'+str(i+1)+'.csv')
# Draw pictrue of cvrmse distribution of each y
plt.subplot(n_y, 1, i+1)
plt.hist(cvrmse)
plt.savefig(output_folder + '/cvrmse_dist.pdf')
plt.close()
#Draw Picture of Prediction_Plot
for i in range(n_y):
index = list(range(0+i,n_pred*n_y+i,n_y))
y_prediction_mean = np.array(pm.summary(trace)['mean'][index])*yc_sd[i]+yc_mean[i]
y_prediction_975 = np.array(pm.summary(trace)['hpd_97.5'][index])*yc_sd[i]+yc_mean[i]
y_prediction_025 = np.array(pm.summary(trace)['hpd_2.5'][index])*yc_sd[i]+yc_mean[i]
plt.subplot(n_y, 1, i+1)
# estimated probability
plt.scatter(x=range(n_pred), y=y_prediction_mean)
# error bars on the estimate
plt.vlines(range(n_pred), ymin=y_prediction_025, ymax=y_prediction_975)
# actual outcomes
plt.scatter(x=range(n_pred),
y=y_true[:,i], marker='x')
plt.xlabel('predictor')
plt.ylabel('outcome')
        # Print the original cvrmse to check whether the outcome is good
if i == 0:
cvrmse = 100*np.sqrt(np.sum(np.square(y_prediction_mean-y_true[:,0]))/len(y_prediction_mean-y_true[:,0]))/np.mean(y_true[:,0])
print(cvrmse)
plt.savefig(output_folder + '/Prediction_Plot.pdf')
plt.close()
# Resource files
folder = './'
DataComp = np.asarray(pd.read_csv("./DATACOMP_Multi.csv"))
DataField = np.asarray(pd.read_csv("./DATAFIELD_Multi.csv"))[:12,:]
DataPred = np.asarray(pd.read_csv("./DATAFIELD_Multi.csv"))[12:,:]
output_folder = folder
# Indicate the number of output
n_y = 3
MultiOutput_Bayesian_Calibration(n_y,DataComp,DataField,DataPred,output_folder) |
py | b4161b2f9856fc3ce359867044254b9a5744d301 | #!/usr/bin/env python
# coding=utf-8
# Author: YAO Matrix ([email protected])
import numpy as np
def precision(predictions, labels):
"""Return the precision based on dense predictions and sparse labels."""
return (100.0 *
np.sum(np.argmax(predictions, 1) == labels) /
predictions.shape[0])
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and sparse labels."""
return 100.0 - (100.0 *
np.sum(np.argmax(predictions, 1) == labels) /
predictions.shape[0])
|
py | b4161b3bc171118ceaa11306def116bb61a4ca60 | # Meadowlark_LCVR.py qtlab driver for Meadowlark LCVR
# Device manual at http://www.zaber.com/wiki/Manuals/T-NM
#
# Reinier Heeres <[email protected]>, 2009
# Umberto Perinetti <[email protected]>, 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import ctypes
import numpy as np
import time
from instrument import Instrument
import types
import logging
import qt
# Code that identifies the LCVR controller
_guid = np.array([0x8b, 0x5b, 0x2b, 0xa2,
0x70, 0xc6, 0x98, 0x41,
0x93, 0x85, 0xaa, 0xba,
0x9d, 0xfc, 0x7d, 0x2b], dtype=np.uint8)
_flags = 0x40000000
def get_device_count():
'''Return number of devices present.'''
return _drv.USBDRVD_GetDevCount(_guid.ctypes.data)
def open_device(devid):
'''Open device and return device handle.'''
ret = _drv.USBDRVD_OpenDevice(devid, _flags, _guid.ctypes.data)
if ret == -1:
print 'Unable to open device'
return ret
def close_device(devhandle):
return _drv.USBDRVD_CloseDevice(devhandle)
def open_pipe(devid, pipeid):
'''Open a pipe and return the pipe handle.'''
ret = _drv.USBDRVD_PipeOpen(devid, pipeid, _flags, _guid.ctypes.data)
if ret == -1:
print 'Unable to open pipe'
return ret
def close_pipe(devid, pipehandle):
return _drv.USBDRVD_PipeClose(pipehandle)
def bulk_write(devhandle, cmd):
buf = ctypes.create_string_buffer(cmd)
ret = _drv.USBDRVD_BulkWrite(devhandle, 1, buf, len(cmd))
if ret == -1:
print 'Write failed'
return ret
def bulk_read(devhandle):
buf = ctypes.create_string_buffer('\000' * 256)
output = _drv.USBDRVD_BulkRead(devhandle, 0, buf, len(buf))
return buf.value
class Meadowlark_LCVR(Instrument):
def __init__(self, name, devid=1):
logging.info(__name__ + ' : Initializing Meadowlark LCVR')
Instrument.__init__(self, name, tags=['physical'])
self._devhandle = open_device(devid)
self._pipe0 = open_pipe(devid, 0)
self._pipe1 = open_pipe(devid, 1)
self._alias_info = {}
self.add_parameter('version',
flags=Instrument.FLAG_GET,
type=types.StringType)
self.add_parameter('voltage',
flags=Instrument.FLAG_GETSET,
channels=(1, 4),
type=types.FloatType)
def write(self, cmd):
ret = bulk_write(self._devhandle, cmd)
def read(self):
reply = bulk_read(self._devhandle)
reply = reply.rstrip()
return reply
def ask(self, cmd):
self.write(cmd)
time.sleep(0.02)
return self.read()
def do_get_version(self):
return self.ask('ver:?\r')
def do_get_voltage(self, channel):
reply = self.ask('ld:%d,?\r' % channel)
if reply.find(',') != -1:
val = round(1000 * int(reply.split(',')[1]) / 6553.5)
return val / 1000
return 0
def do_set_voltage(self, volts, channel):
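        # Scale factor 6553.5 maps a 0-10 V request onto 0-65535,
        # presumably the controller's 16-bit command range
        # (the inverse scaling is applied in do_get_voltage above).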
ii = round(volts * 6553.5)
return self.ask('ld:%d,%d\r' % (channel, ii))
def do_set_alias(self, name, val):
if name not in self._alias_info:
logging.warning('LCVR: No such alias')
return False
if val not in self._alias_info[name]:
logging.warning('LCVR: No such alias value')
return False
for ch, chval in self._alias_info[name][val]:
self.set('voltage%d' % ch, chval)
return True
def add_alias(self, name, channel_info):
'''
channel_info is a dictionary. For each possible setting it
contains a list of (channel, value) tuples. For example:
{
'H': ((1, 1.0), (2, 2.0)),
'V': ((2, 2.0), (1, 1.0)),
}
'''
if name in self._alias_info:
logging.warning('Alias already exists!')
return False
self._alias_info[name] = channel_info
self.add_parameter(name,
flags=Instrument.FLAG_SET|Instrument.FLAG_SOFTGET,
option_list=channel_info.keys(),
set_func=lambda val: self.do_set_alias(name, val),
type=types.StringType,
)
def detect_instruments():
count = get_device_count()
for id in range(count):
        qt.instruments.create('LCVR%d' % (id + 1),
'Meadowlark_LCVR', devid=id + 1)
# Apparently it differs whether windll or cdll is required
_drv = ctypes.cdll.usbdrvd
try:
get_device_count()
except:
_drv = ctypes.windll.usbdrvd
|
py | b4161b5b691b827fd401f30bcc346cab0322ccb1 | import pypisa
assert pypisa.__version__ == '0.1.0'
|
py | b4161b84f6cb80ec1608f3025e699e6fb59d3de7 | # -*- coding: utf-8 -*-
"""Beta Geo Beta BinomFitter."""
from __future__ import division
from __future__ import print_function
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
import numpy as np
import pandas as pd
from autograd.numpy import log, exp, logaddexp
from pandas import DataFrame
from autograd.scipy.special import gammaln, betaln, beta as betaf
from scipy.special import binom
from ..utils import _check_inputs
from . import BaseFitter
from ..generate_data import beta_geometric_beta_binom_model
class BetaGeoBetaBinomFitter(BaseFitter):
"""
Also known as the Beta-Geometric/Beta-Binomial Model [1]_.
Future purchases opportunities are treated as discrete points in time.
In the literature, the model provides a better fit than the Pareto/NBD
model for a nonprofit organization with regular giving patterns.
The model is estimated with a recency-frequency matrix with n transaction
opportunities.
Parameters
----------
penalizer_coef: float
The coefficient applied to an l2 norm on the parameters
Attributes
----------
penalizer_coef: float
The coefficient applied to an l2 norm on the parameters
params_: :obj: Series
The fitted parameters of the model
data: :obj: DataFrame
A DataFrame with the values given in the call to `fit`
variance_matrix_: :obj: DataFrame
A DataFrame with the variance matrix of the parameters.
confidence_intervals_: :obj: DataFrame
A DataFrame 95% confidence intervals of the parameters
standard_errors_: :obj: Series
A Series with the standard errors of the parameters
summary: :obj: DataFrame
A DataFrame containing information about the fitted parameters
References
----------
.. [1] Fader, Peter S., Bruce G.S. Hardie, and Jen Shang (2010),
"Customer-Base Analysis in a Discrete-Time Noncontractual Setting,"
Marketing Science, 29 (6), 1086-1108.
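
    Examples
    --------
    Illustrative sketch only; the data values below are made up for the
    example and are not part of this module::

        import pandas as pd

        summary = pd.DataFrame({'frequency': [0, 1, 2],
                                'recency': [0, 2, 5],
                                'n_periods': [6, 6, 6],
                                'weights': [100, 40, 10]})
        bgbb = BetaGeoBetaBinomFitter(penalizer_coef=0.0)
        bgbb.fit(summary['frequency'], summary['recency'],
                 summary['n_periods'], weights=summary['weights'])
        print(bgbb.params_)  # fitted alpha, beta, gamma, delta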
"""
def __init__(self, penalizer_coef=0.0):
"""Initialization, set penalizer_coef."""
self.penalizer_coef = penalizer_coef
@staticmethod
def _loglikelihood(params, x, tx, T):
        """Log likelihood for optimizer."""
        warnings.simplefilter(action="ignore", category=FutureWarning)
alpha, beta, gamma, delta = params
betaln_ab = betaln(alpha, beta)
betaln_gd = betaln(gamma, delta)
A = betaln(alpha + x, beta + T - x) - betaln_ab + betaln(gamma, delta + T) - betaln_gd
B = 1e-15 * np.ones_like(T)
recency_T = T - tx - 1
for j in np.arange(recency_T.max() + 1):
ix = recency_T >= j
B = B + ix * betaf(alpha + x, beta + tx - x + j) * betaf(gamma + 1, delta + tx + j)
B = log(B) - betaln_gd - betaln_ab
return logaddexp(A, B)
@staticmethod
def _negative_log_likelihood(log_params, frequency, recency, n_periods, weights, penalizer_coef=0):
params = exp(log_params)
penalizer_term = penalizer_coef * sum(params ** 2)
return (
-(BetaGeoBetaBinomFitter._loglikelihood(params, frequency, recency, n_periods) * weights).sum()
/ weights.sum()
+ penalizer_term
)
def fit(
self,
frequency,
recency,
n_periods,
weights=None,
initial_params=None,
verbose=False,
tol=1e-7,
index=None,
**kwargs
):
"""
Fit the BG/BB model.
Parameters
----------
frequency: array_like
Total periods with observed transactions
recency: array_like
Period of most recent transaction
n_periods: array_like
Number of transaction opportunities. Previously called `n`.
weights: None or array_like
Number of customers with given frequency/recency/T,
defaults to 1 if not specified. Fader and
Hardie condense the individual RFM matrix into all
observed combinations of frequency/recency/T. This
parameter represents the count of customers with a given
purchase pattern. Instead of calculating individual
log-likelihood, the log-likelihood is calculated for each
pattern and multiplied by the number of customers with
that pattern. Previously called `n_custs`.
verbose: boolean, optional
Set to true to print out convergence diagnostics.
tol: float, optional
Tolerance for termination of the function minimization process.
index: array_like, optional
Index for resulted DataFrame which is accessible via self.data
kwargs:
Key word arguments to pass to the scipy.optimize.minimize
function as options dict
Returns
-------
BetaGeoBetaBinomFitter
fitted and with parameters estimated
"""
frequency = np.asarray(frequency).astype(int)
recency = np.asarray(recency).astype(int)
n_periods = np.asarray(n_periods).astype(int)
if weights is None:
weights = np.ones_like(recency)
else:
weights = np.asarray(weights)
_check_inputs(frequency, recency, n_periods)
log_params_, self._negative_log_likelihood_, self._hessian_ = self._fit(
(frequency, recency, n_periods, weights, self.penalizer_coef), initial_params, 4, verbose, tol, **kwargs
)
self.params_ = pd.Series(np.exp(log_params_), index=["alpha", "beta", "gamma", "delta"])
self.data = DataFrame(
{"frequency": frequency, "recency": recency, "n_periods": n_periods, "weights": weights}, index=index
)
self.generate_new_data = lambda size=1: beta_geometric_beta_binom_model(
            # Build one long array that repeats each n_periods value n_cust times.
            np.array(sum([[n_] * n_cust for (n_, n_cust) in zip(n_periods, weights)], [])),
*self._unload_params("alpha", "beta", "gamma", "delta"),
size=size
)
self.variance_matrix_ = self._compute_variance_matrix()
self.standard_errors_ = self._compute_standard_errors()
self.confidence_intervals_ = self._compute_confidence_intervals()
return self
def conditional_expected_number_of_purchases_up_to_time(self, m_periods_in_future, frequency, recency, n_periods):
r"""
Conditional expected purchases in future time period.
The expected number of future transactions across the next m_periods_in_future
transaction opportunities by a customer with purchase history
(x, tx, n).
.. math:: E(X(n_{periods}, n_{periods}+m_{periods_in_future})| \alpha, \beta, \gamma, \delta, frequency, recency, n_{periods})
See (13) in Fader & Hardie 2010.
        Parameters
        ----------
        m_periods_in_future: array_like
            number of future transaction opportunities
        frequency: array_like
            total periods with observed transactions
        recency: array_like
            period of most recent transaction
        n_periods: array_like
            number of transaction opportunities observed so far
Returns
-------
array_like
predicted transactions
"""
x = frequency
tx = recency
n = n_periods
params = self._unload_params("alpha", "beta", "gamma", "delta")
alpha, beta, gamma, delta = params
p1 = 1 / exp(self._loglikelihood(params, x, tx, n))
p2 = exp(betaln(alpha + x + 1, beta + n - x) - betaln(alpha, beta))
p3 = delta / (gamma - 1) * exp(gammaln(gamma + delta) - gammaln(1 + delta))
p4 = exp(gammaln(1 + delta + n) - gammaln(gamma + delta + n))
p5 = exp(gammaln(1 + delta + n + m_periods_in_future) - gammaln(gamma + delta + n + m_periods_in_future))
return p1 * p2 * p3 * (p4 - p5)
def conditional_probability_alive(self, m_periods_in_future, frequency, recency, n_periods):
"""
Conditional probability alive.
Conditional probability customer is alive at transaction opportunity
n_periods + m_periods_in_future.
.. math:: P(alive at n_periods + m_periods_in_future|alpha, beta, gamma, delta, frequency, recency, n_periods)
See (A10) in Fader and Hardie 2010.
        Parameters
        ----------
        m_periods_in_future: array_like
            transaction opportunities into the future
        frequency: array_like
            total periods with observed transactions
        recency: array_like
            period of most recent transaction
        n_periods: array_like
            number of transaction opportunities observed so far
Returns
-------
array_like
alive probabilities
"""
params = self._unload_params("alpha", "beta", "gamma", "delta")
alpha, beta, gamma, delta = params
p1 = betaln(alpha + frequency, beta + n_periods - frequency) - betaln(alpha, beta)
p2 = betaln(gamma, delta + n_periods + m_periods_in_future) - betaln(gamma, delta)
p3 = self._loglikelihood(params, frequency, recency, n_periods)
return exp(p1 + p2) / exp(p3)
def expected_number_of_transactions_in_first_n_periods(self, n):
r"""
Return expected number of transactions in first n n_periods.
Expected number of transactions occurring across first n transaction
opportunities.
Used by Fader and Hardie to assess in-sample fit.
.. math:: Pr(X(n) = x| \alpha, \beta, \gamma, \delta)
See (7) in Fader & Hardie 2010.
Parameters
----------
n: float
number of transaction opportunities
Returns
-------
DataFrame:
Predicted values, indexed by x
"""
params = self._unload_params("alpha", "beta", "gamma", "delta")
alpha, beta, gamma, delta = params
x_counts = self.data.groupby("frequency")["weights"].sum()
x = np.asarray(x_counts.index)
p1 = binom(n, x) * exp(
betaln(alpha + x, beta + n - x) - betaln(alpha, beta) + betaln(gamma, delta + n) - betaln(gamma, delta)
)
I = np.arange(x.min(), n)
@np.vectorize
def p2(j, x):
i = I[int(j) :]
return np.sum(
binom(i, x)
* exp(
betaln(alpha + x, beta + i - x)
- betaln(alpha, beta)
+ betaln(gamma + 1, delta + i)
- betaln(gamma, delta)
)
)
p1 += np.fromfunction(p2, (x.shape[0],), x=x)
idx = pd.Index(x, name="frequency")
return DataFrame(p1 * x_counts.sum(), index=idx, columns=["model"])
|
py | b4161c21303afd3b1f6194d39bdb78558e29e2b9 | from order import views
from django.urls import path, include
from django.conf.urls import url
urlpatterns = [
path('', views.list, name="product_list"),
url(r'^buy_product/(?P<product_id>\d+)/$', views.buyProduct),
url(r'^finish_buy/(?P<order_id>\d+)/$', views.finishBuy),
]
|
py | b4161c45f00392f235d9dd06bfbe7f5578e7ad0e | import pandas as pd
import pickle
import streamlit as st
def prediction(input):
return model.predict(input)[0]
def main():
st.title('Employee Future Prediction')
html_temp = """
<div style ="background-color:blue;padding:13px">
<h1 style ="color:white;text-align:center;">Employee Future Prediction App </h1>
</div>
"""
st.markdown(html_temp, unsafe_allow_html=True)
education = st.selectbox('Education', ('Bachelors', 'Masters', 'PHD'))
joining_year = st.text_input(
'Joining Year', '')
city = st.selectbox('City', ('Bangalore', 'New Delhi', 'Pune'))
payment_tier = st.selectbox('Payment Tier', ('1', '2', '3'))
age = st.text_input('Age', '')
gender = st.selectbox('Gender', ('Male', 'Female'))
ever_benched = st.selectbox('Ever Benched?', ('Yes', 'No'))
experience = st.text_input('Experience in Current Domain', '')
leave = None
if st.button('Predict'):
leave = prediction(
pd.DataFrame({
'Education': education,
'JoiningYear': int(joining_year),
'City': city,
'PaymentTier': int(payment_tier),
'Age': int(age),
'Gender': gender,
'EverBenched': ever_benched,
'ExperienceInCurrentDomain': int(experience)
}, index=[0])
)
    if leave is None:
pass
elif leave == 0:
st.success('The Employee will stay in the company.')
else:
st.success('The Employee will leave in the next two years.')
if __name__ == '__main__':
with open('model.pickle', 'rb') as f:
model = pickle.load(f)
main()
|
py | b4161c52dc2c98fc468fa3377e4a4c6e40da7743 | '''Copyright (c) 2018-2019 Machine Zone, Inc. All rights reserved.'''
import os
import pytest
from cobras.client.connection import ActionException
from cobras.client.credentials import getDefaultRoleForApp, getDefaultSecretForApp
from cobras.client.health_check import getDefaultHealthCheckUrl, healthCheck
from .test_utils import makeRunner, makeUniqueString
@pytest.fixture()
def runner():
runner, appsConfigPath = makeRunner(debugMemory=False)
yield runner
runner.terminate()
os.unlink(appsConfigPath)
def test_health_check(runner):
'''Starts a server, then run a health check'''
port = runner.port
url = getDefaultHealthCheckUrl(None, port)
role = getDefaultRoleForApp('health')
secret = getDefaultSecretForApp('health')
channel = makeUniqueString()
healthCheck(url, role, secret, channel, retry=False, httpCheck=False)
@pytest.fixture()
def redisDownRunner():
redisUrls = 'redis://localhost:9999'
runner, appsConfigPath = makeRunner(
debugMemory=False,
enableStats=False,
redisUrls=redisUrls,
probeRedisOnStartup=False,
)
yield runner
runner.terminate()
os.unlink(appsConfigPath)
def test_health_check_with_no_redis(redisDownRunner):
'''Starts a server, then run a health check'''
port = redisDownRunner.port
url = getDefaultHealthCheckUrl(None, port)
role = getDefaultRoleForApp('health')
secret = getDefaultSecretForApp('health')
channel = makeUniqueString()
with pytest.raises(ActionException):
healthCheck(url, role, secret, channel)
|
py | b4161ced8e78aa60325a8c74b93bdd9776383167 | from __future__ import print_function
import cntk as C
import numpy as np
from .common import _FLOATX, _EPSILON, image_dim_ordering, image_data_format
from collections import defaultdict
from contextlib import contextmanager
import warnings
C.set_global_option('align_axis', 1)
b_any = any
dev = C.device.use_default_device()
if dev.type() == 0:
warnings.warn(
'CNTK backend warning: GPU is not detected. '
'CNTK\'s CPU version is not fully optimized,'
'please run with GPU to get better performance.')
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_LEARNING_PHASE = C.parameter(shape=(1,), dtype=np.float32)
_UID_PREFIXES = defaultdict(int)
# cntk doesn't support gradient as symbolic op, to hook up with keras model,
# we will create gradient as a constant placeholder, here use this global
# map to keep the mapping from grad placeholder to parameter
grad_parameter_dict = {}
NAME_SCOPE_STACK = []
@contextmanager
def name_scope(name):
global NAME_SCOPE_STACK
NAME_SCOPE_STACK.append(name)
yield
NAME_SCOPE_STACK.pop()
def get_uid(prefix=''):
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
def learning_phase():
# False = test, True = train
global _LEARNING_PHASE
value = _LEARNING_PHASE.value
return value[0]
def set_learning_phase(value):
global _LEARNING_PHASE
if value not in {0, 1}:
raise ValueError('CNTK Backend: Set learning phase '
'with value %s is not supported, '
'expected 0 or 1.' % value)
v = np.float32([value])
_LEARNING_PHASE.value = v
def in_train_phase(x, alt, training=None):
global _LEARNING_PHASE
if training is None:
training = learning_phase()
uses_learning_phase = True
else:
uses_learning_phase = False
    if training is 1.0 or training is True:
if callable(x) and not isinstance(x, C.cntk_py.Function):
return x()
else:
return x
elif training is 0.0 or training is False:
if callable(alt) and not isinstance(x, C.cntk_py.Function):
return alt()
else:
return alt
if learning_phase() is 1.0:
return x
elif learning_phase() is 0.0:
return alt
if callable(x) and isinstance(x, C.cntk_py.Function) is False:
x = x()
if callable(alt) and isinstance(x, C.cntk_py.Function) is False:
alt = alt()
_LEARNING_PHASE.value = np.asarray([1])
x._uses_learning_phase = uses_learning_phase
return x
def in_test_phase(x, alt):
global _LEARNING_PHASE
if learning_phase() is 1:
return alt
elif learning_phase() is 0:
return x
# else: assume learning phase is a placeholder tensor.
_LEARNING_PHASE.value = np.asarray([0])
return x
def _convert_string_dtype(dtype):
# cntk only support float32 and float64
if dtype == 'float32':
return np.float32
elif dtype == 'float64':
return np.float64
else:
# cntk only running with float,
# try to cast to float to run the model
return np.float32
def _convert_dtype_string(dtype):
if dtype == np.float32:
return 'float32'
elif dtype == np.float64:
return 'float64'
else:
raise ValueError('CNTK Backend: Unsupported dtype: %s. '
'CNTK only supports float32 and '
'float64.' % dtype)
def variable(value, dtype=_FLOATX, name=None):
if name is None:
name = ''
if isinstance(
value,
C.variables.Constant) or isinstance(
value,
C.variables.Parameter):
value = value.value
# we don't support init parameter with symbolic op, so eval it first as
# workaround
if isinstance(value, C.cntk_py.Function):
value = eval(value)
shape = value.shape if hasattr(value, 'shape') else ()
if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0:
value = value.astype(dtype)
# cntk will init type based on the value type
v = C.parameter(shape=shape,
init=value,
name=_prepare_name(name, 'variable'))
v._keras_shape = v.shape
v._uses_learning_phase = False
return v
def bias_add(x, bias, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
dims = len(x.shape)
if dims > 0 and x.shape[0] == C.InferredDimension:
dims -= 1
bias_dims = len(bias.shape)
if bias_dims != 1 and bias_dims != dims:
raise ValueError('Unexpected bias dimensions %d, '
'expected 1 or %d dimensions' % (bias_dims, dims))
if dims == 4:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1, 1, 1)
else:
shape = (bias.shape[3],) + bias.shape[:3]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, 1, 1, bias.shape[0])
else:
shape = bias.shape
elif dims == 3:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1, 1)
else:
shape = (bias.shape[2],) + bias.shape[:2]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, 1, bias.shape[0])
else:
shape = bias.shape
elif dims == 2:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1)
else:
shape = (bias.shape[1],) + bias.shape[:1]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, bias.shape[0])
else:
shape = bias.shape
else:
shape = bias.shape
return x + reshape(bias, shape)
def eval(x):
if isinstance(x, C.cntk_py.Function):
return x.eval()
elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter):
return x.value
else:
raise ValueError('CNTK Backend: `eval` method on '
'`%s` type is not supported. '
'CNTK only supports `eval` with '
'`Function`, `Constant` or '
'`Parameter`.' % type(x))
def placeholder(
shape=None,
ndim=None,
dtype=_FLOATX,
sparse=False,
name=None,
dynamic_axis_num=1):
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
cntk_shape = [C.InferredDimension if s is None else s for s in shape]
cntk_shape = tuple(cntk_shape)
if dynamic_axis_num > len(cntk_shape):
raise ValueError('CNTK backend: creating placeholder with '
'%d dimension is not supported, at least '
'%d dimensions are needed.'
                         % (len(cntk_shape), dynamic_axis_num))
if name is None:
name = ''
cntk_shape = cntk_shape[dynamic_axis_num:]
x = C.input(
shape=cntk_shape,
dtype=_convert_string_dtype(dtype),
is_sparse=sparse,
name=name)
x._keras_shape = shape
x._uses_learning_phase = False
return x
def is_keras_tensor(x):
return hasattr(x, '_keras_history')
def shape(x):
shape = list(int_shape(x))
num_dynamic = _get_dynamic_axis_num(x)
non_dyn_shape = []
for i in range(len(x.shape)):
if shape[i + num_dynamic] is None:
non_dyn_shape.append(x.shape[i])
else:
non_dyn_shape.append(shape[i + num_dynamic])
return shape[:num_dynamic] + non_dyn_shape
def is_sparse(tensor):
return tensor.is_sparse
def int_shape(x):
if hasattr(x, '_keras_shape'):
return x._keras_shape
shape = x.shape
if hasattr(x, 'dynamic_axes'):
dynamic_shape = [None for a in x.dynamic_axes]
shape = tuple(dynamic_shape) + shape
return shape
def ndim(x):
shape = int_shape(x)
return len(shape)
def _prepare_name(name, default):
prefix = '_'.join(NAME_SCOPE_STACK)
if name is None or name == '':
return prefix + '/' + default
return prefix + '/' + name
def constant(value, dtype=None, shape=None, name=None):
if dtype is None:
dtype = _FLOATX
if shape is None:
shape = ()
np_value = value * np.ones(shape)
const = C.constant(np_value,
dtype=dtype,
name=_prepare_name(name, 'constant'))
const._keras_shape = shape
const._uses_learning_phase = False
return const
def random_binomial(shape, p=0.0, dtype=None, seed=None):
# use numpy workaround now
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e7)
np.random.seed(seed)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
size = 1
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
size *= _
binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape)
return variable(value=binomial, dtype=dtype)
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
return random_uniform_variable(shape, minval, maxval, dtype, seed)
def random_uniform_variable(shape, low, high, dtype=_FLOATX,
name=None, seed=None):
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e3)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if name is None:
name = ''
scale = (high - low) / 2
p = C.parameter(
shape,
init=C.initializer.uniform(
scale,
seed=seed),
dtype=dtype,
name=name)
return variable(value=p.value + low + scale)
def random_normal_variable(
shape,
mean,
scale,
dtype=_FLOATX,
name=None,
seed=None):
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e7)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if name is None:
name = ''
return C.parameter(
shape=shape,
init=C.initializer.normal(
scale=scale,
seed=seed),
dtype=dtype,
name=name)
def random_normal(shape, mean=0.0, stddev=1.0, dtype=_FLOATX, seed=None):
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
# how to apply mean and stddev
return random_normal_variable(shape=shape, mean=mean, scale=1.0)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
if seed is None:
seed = np.random.randint(1, 10e6)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
return C.parameter(
shape, init=C.initializer.truncated_normal(
stddev, seed=seed), dtype=dtype)
def zeros_like(x, dtype=None, name=None):
return x * 0
def dtype(x):
return _convert_dtype_string(x.dtype)
def zeros(shape, dtype=_FLOATX, name=None):
ctype = _convert_string_dtype(dtype)
return variable(value=np.zeros(shape, ctype), dtype=dtype, name=name)
def ones(shape, dtype=_FLOATX, name=None):
ctype = _convert_string_dtype(dtype)
return variable(value=np.ones(shape, ctype), dtype=dtype, name=name)
def eye(size, dtype=_FLOATX, name=None):
return variable(np.eye(size), dtype, name)
def ones_like(x, name=None):
return zeros_like(x) + 1
def count_params(x):
for _ in x.shape:
if _ == C.InferredDimension or _ == C.FreeDimension:
raise ValueError('CNTK backend: `count_params` with dynamic '
'shape is not supported. Please provide '
'fixed dimension instead of `None`.')
return np.prod([x.shape[i] for i in range(len(x.shape))])
def cast(x, dtype):
    # cntk calculates everything in float, so no need to cast from bool / int
return x
def dot(x, y):
if len(x.shape) > 2 or len(y.shape) > 2:
y_shape = int_shape(y)
if len(y_shape) > 2:
permutation = [len(y_shape) - 2]
permutation += list(range(0, len(y_shape) - 2))
permutation += [len(y_shape) - 1]
y = C.transpose(y, perm=permutation)
return C.times(x, y, len(y_shape) - 1)
else:
return C.times(x, y)
def batch_dot(x, y, axes=None):
x_shape = int_shape(x)
y_shape = int_shape(y)
if isinstance(axes, int):
axes = (axes, axes)
if axes is None:
# behaves like tf.batch_matmul as default
axes = [len(x_shape) - 1, len(y_shape) - 2]
if len(x_shape) == 2 and len(y_shape) == 2:
return sum(x * y, axis=1, keepdims=True)
else:
if len(y_shape) == 2:
y = expand_dims(y)
normalized_axis = []
normalized_axis.append(_normalize_axis(axes[0], x)[0])
normalized_axis.append(_normalize_axis(axes[1], y)[0])
# transpose
i = normalized_axis[0]
while i < len(x.shape) - 1:
x = C.swapaxes(x, i, i + 1)
i += 1
i = normalized_axis[1]
while i > 0:
y = C.swapaxes(y, i, i - 1)
i -= 1
result = C.times(x, y, output_rank=(len(y.shape) - 1)
if len(y.shape) > 1 else 1)
if len(y_shape) == 2:
result = squeeze(result, -1)
return result
def transpose(x):
return C.swapaxes(x, 0, 1)
def gather(reference, indices):
return C.ops.gather(reference, indices)
def _remove_dims(x, axis, keepdims=False):
if keepdims is False and isinstance(axis, list):
# sequence axis is removed by default, so don't need reshape on it
reduce_axes = []
for a in axis:
if isinstance(a, C.Axis) is False:
reduce_axes.append(a)
return _reshape_dummy_dim(x, reduce_axes)
else:
if isinstance(axis, list):
has_seq = False
for a in axis:
if isinstance(a, C.Axis):
has_seq = True
break
if has_seq:
nones = _get_dynamic_axis_num(x)
x = expand_dims(x, nones)
return x
def max(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_max')
return _remove_dims(output, axis, keepdims)
def min(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_min')
return _remove_dims(output, axis, keepdims)
def sum(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_sum')
return _remove_dims(output, axis, keepdims)
def prod(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_prod')
return _remove_dims(output, axis, keepdims)
def logsumexp(x, axis=None, keepdims=False):
return log(sum(exp(x), axis=axis, keepdims=keepdims))
def var(x, axis=None, keepdims=False):
m = mean(x, axis, keepdims=True)
devs_squared = C.square(x - m)
return mean(devs_squared, axis=axis, keepdims=keepdims)
def std(x, axis=None, keepdims=False):
return C.sqrt(var(x, axis=axis, keepdims=keepdims))
def expand_dims(x, axis=-1):
shape = list(int_shape(x))
nones = _get_dynamic_axis_num(x)
index = axis if axis >= 0 else len(shape) + 1
shape.insert(index, 1)
new_shape = shape[nones:]
new_shape = tuple(
[C.InferredDimension if _ is None else _ for _ in new_shape])
return C.reshape(x, new_shape)
def squeeze(x, axis):
if isinstance(axis, tuple):
axis = list(axis)
if not isinstance(axis, list):
axis = [axis]
shape = list(int_shape(x))
_axis = []
for _ in axis:
if isinstance(_, int):
_axis.append(_ if _ >= 0 else _ + len(shape))
if len(_axis) == 0:
return x
nones = _get_dynamic_axis_num(x)
for _ in sorted(_axis, reverse=True):
del shape[_]
new_shape = tuple(shape[nones:])
return C.reshape(x, new_shape)
def tile(x, n):
if isinstance(n, list):
n = tuple(n)
shape = int_shape(x)
num_dynamic_axis = _get_dynamic_axis_num(x)
# Padding the axis
if len(n) < len(shape):
n = tuple([None for _ in range(len(shape) - len(n))]) + n
if len(n) != len(shape):
raise NotImplementedError
i = num_dynamic_axis
for i, rep in enumerate(n):
if i >= num_dynamic_axis and shape[i] is not None:
tmp = [x] * rep
x = C.splice(*tmp, axis=i - num_dynamic_axis)
i += 1
return x
def _normalize_axis(axis, x):
shape = int_shape(x)
ndim = len(shape)
nones = _get_dynamic_axis_num(x)
if type(axis) is tuple:
_axis = list(axis)
elif type(axis) is int:
_axis = [axis]
elif type(axis) is list:
_axis = list(axis)
else:
_axis = axis
if type(_axis) is list:
for i, a in enumerate(_axis):
if a is not None and a < 0:
_axis[i] = (a % ndim)
if _axis[i] is not None:
if _axis[i] < nones:
_axis[i] = x.dynamic_axes[_axis[i]]
else:
_axis[i] -= nones
else:
if _axis is None:
_axis = C.Axis.all_axes()
return _axis
def _reshape_dummy_dim(x, axis):
shape = list(x.shape)
_axis = [_ + len(shape) if _ < 0 else _ for _ in axis]
if shape.count(C.InferredDimension) > 1:
result = x
for index in sorted(_axis, reverse=True):
result = C.reshape(result,
shape=(),
begin_axis=index,
end_axis=index + 1)
return result
else:
for index in sorted(_axis, reverse=True):
del shape[index]
shape = tuple(shape)
return C.reshape(x, shape)
def mean(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_mean')
return _remove_dims(output, axis, keepdims)
def any(x, axis=None, keepdims=False):
reduce_result = sum(x, axis, keepdims=keepdims)
any_matrix = C.element_select(
reduce_result,
ones_like(reduce_result),
zeros_like(reduce_result))
if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
return C.reduce_sum(any_matrix)
else:
return any_matrix
def all(x, axis=None, keepdims=False):
reduce_result = prod(x, axis, keepdims=keepdims)
all_matrix = C.element_select(
reduce_result,
ones_like(reduce_result),
zeros_like(reduce_result))
if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
return C.reduce_sum(all_matrix)
else:
return all_matrix
def classification_error(output, target, axis=-1):
return C.ops.reduce_mean(
C.equal(
argmax(
output,
axis=-1),
argmax(
target,
axis=-1)),
axis=C.Axis.all_axes())
def argmax(x, axis=-1):
axis = [axis]
axis = _normalize_axis(axis, x)
output = C.ops.argmax(x, axis=axis[0])
return _reshape_dummy_dim(output, axis)
def argmin(x, axis=-1):
axis = [axis]
axis = _normalize_axis(axis, x)
output = C.ops.argmin(x, axis=axis[0])
return _reshape_dummy_dim(output, axis)
def square(x):
return C.square(x)
def abs(x):
return C.abs(x)
def sqrt(x):
return C.sqrt(x)
def exp(x):
return C.exp(x)
def log(x):
return C.log(x)
def round(x):
return C.round(x)
def sigmoid(x):
return C.sigmoid(x)
def sign(x):
return x / C.abs(x)
def pow(x, a):
return C.pow(x, a)
def clip(x, min_value, max_value):
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
if min_value is None:
min_value = -np.inf
return C.clip(x, min_value, max_value)
def binary_crossentropy(output, target, from_logits=False):
if from_logits:
output = C.sigmoid(output)
output = C.clip(output, _EPSILON, 1.0 - _EPSILON)
output = -target * C.log(output) - (1.0 - target) * C.log(1.0 - output)
return output
def get_variable_shape(x):
return x.shape
def update(x, new_x):
return C.assign(x, new_x)
def moving_average_update(variable, value, momentum):
return C.assign(variable, variable * momentum + value * (1. - momentum))
def update_add(x, increment):
result = x + increment
return C.assign(x, result)
def gradients(loss, variables):
# cntk does not support gradients as symbolic op,
# to hook up with keras model
# we will return a constant as place holder, the cntk learner will apply
# the gradient during training.
global grad_parameter_dict
if isinstance(variables, list) is False:
variables = [variables]
grads = []
for v in variables:
g = C.constant(0, shape=v.shape, name='keras_grad_placeholder')
grads.append(g)
grad_parameter_dict[g] = v
return grads
def equal(x, y):
return C.equal(x, y)
def not_equal(x, y):
return C.not_equal(x, y)
def greater(x, y):
return C.greater(x, y)
def greater_equal(x, y):
return C.greater_equal(x, y)
def less(x, y):
return C.less(x, y)
def less_equal(x, y):
return C.less_equal(x, y)
def maximum(x, y):
return C.element_max(x, y)
def minimum(x, y):
return C.element_min(x, y)
def sin(x):
return C.sin(x)
def cos(x):
return C.cos(x)
def normalize_batch_in_training(x, gamma, beta,
reduction_axes, epsilon=1e-3):
if gamma is None:
if beta is None:
gamma = ones_like(x)
else:
gamma = ones_like(beta)
if beta is None:
if gamma is None:
beta = zeros_like(x)
else:
beta = zeros_like(gamma)
mean, variant = _moments(x, _normalize_axis(reduction_axes, x))
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
normalized = batch_normalization(
x, mean, variant, beta, gamma, epsilon)
else:
# need broadcasting
target_shape = []
x_shape = int_shape(x)
# skip the batch axis
for axis in range(1, ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(x_shape[axis])
broadcast_mean = C.reshape(mean, target_shape)
broadcast_var = C.reshape(variant, target_shape)
broadcast_gamma = C.reshape(gamma, target_shape)
broadcast_beta = C.reshape(beta, target_shape)
normalized = batch_normalization(
x,
broadcast_mean,
broadcast_var,
broadcast_beta,
broadcast_gamma,
epsilon)
return normalized, mean, variant
def _moments(x, axes=None, shift=None, keep_dims=False):
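    # Shifted-data trick: Var(x) = E[(x - shift)^2] - (E[x - shift])^2,
    # where by default the shift is the (stop-gradient) mean of x over the axes.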
_axes = tuple(axes)
if shift is None:
shift = x
# Compute true mean while keeping the dims for proper broadcasting.
for axis in _axes:
shift = C.reduce_mean(shift, axis=axis)
shift = C.stop_gradient(shift)
shifted_mean = C.minus(x, shift)
for axis in _axes:
shifted_mean = C.reduce_mean(shifted_mean, axis=axis)
variance_mean = C.square(C.minus(x, shift))
for axis in _axes:
variance_mean = C.reduce_mean(variance_mean, axis=axis)
variance = C.minus(variance_mean, C.square(shifted_mean))
mean = C.plus(shifted_mean, shift)
if not keep_dims:
mean = squeeze(mean, _axes)
variance = squeeze(variance, _axes)
return mean, variance
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
if gamma is None:
gamma = ones_like(var)
if beta is None:
beta = zeros_like(mean)
return gamma * ((x - mean) / C.sqrt(var + epsilon)) + beta
def concatenate(tensors, axis=-1):
if len(tensors) == 0:
return None
axis = [axis]
axis = _normalize_axis(axis, tensors[0])
return C.splice(*tensors, axis=axis[0])
def flatten(x):
return reshape(x, (-1,))
def reshape(x, shape):
if isinstance(x, C.variables.Parameter):
return C.reshape(x, shape)
else:
num_dynamic_axis = _get_dynamic_axis_num(x)
if num_dynamic_axis == 1 and len(shape) > 0 and shape[0] == -1:
# collapse axis with batch axis
if b_any(_ == C.InferredDimension for _ in x.shape) or b_any(
_ == C.FreeDimension for _ in x.shape):
warnings.warn(
'Warning: CNTK backend does not support '
'collapse of batch axis with inferred dimension. '
'The reshape did not take place.')
return x
return C.user_function(ReshapeBatch(x, shape[1:]))
else:
            # no collapse, so first pad the shape
if num_dynamic_axis >= len(shape):
i = 0
while i < len(shape):
if shape[i] is None or shape[i] == -1:
i += 1
else:
break
shape = tuple([-1 for _ in range(num_dynamic_axis - i)]) + shape
new_shape = list(shape)
new_shape = new_shape[num_dynamic_axis:]
new_shape = [C.InferredDimension if _ is None else _ for _ in new_shape]
return C.reshape(x, new_shape)
def permute_dimensions(x, pattern):
dims = len(int_shape(x))
num_dynamic_axis = _get_dynamic_axis_num(x)
current_layout = tuple([i for i in range(dims)])
if num_dynamic_axis > 0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]:
raise ValueError('CNTK backend: the permute pattern %s '
'requested permute on dynamic axis, '
'which is not supported. Please do permute '
'on static axis.' % pattern)
axis = list(pattern)
axis = axis[num_dynamic_axis:]
axis = _normalize_axis(axis, x)
return C.transpose(x, axis)
def resize_images(X, height_factor, width_factor, data_format):
if data_format == 'channels_first':
output = repeat_elements(X, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
elif data_format == 'channels_last':
output = repeat_elements(X, height_factor, axis=1)
output = repeat_elements(output, width_factor, axis=2)
return output
else:
raise ValueError('CNTK Backend: Invalid dim_ordering:', data_format)
def repeat_elements(x, rep, axis):
axis = _normalize_axis(axis, x)
axis = axis[0]
slices = []
shape = x.shape
i = 0
while i < shape[axis]:
tmp = C.ops.slice(x, axis, i, i + 1)
for _ in range(rep):
slices.append(tmp)
i += 1
return C.splice(*slices, axis=axis)
def repeat(x, n):
# this is a workaround for recurrent layer
# if n is inferred dimension,
# we can't figure out how to repeat it in cntk now
# return the same x to take cntk broadcast feature
# to make the recurrent layer work.
# need to be fixed in GA.
if n is C.InferredDimension:
return x
index = 1 - _get_dynamic_axis_num(x)
if index < 0 or index > 1:
raise NotImplementedError
new_shape = list(x.shape)
new_shape.insert(index, 1)
new_shape = tuple(new_shape)
x = C.reshape(x, new_shape)
temp = [x] * n
return C.splice(*temp, axis=index)
def tanh(x):
return C.tanh(x)
def _static_rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
shape = int_shape(inputs)
dims = len(shape)
if dims < 3:
raise ValueError('Input should be at least 3D.')
# if the second axis is static axis, CNTK will do unroll by default
if shape[1] is None:
raise ValueError('CNTK Backend: the input of static rnn '
'has shape `%s`, the second axis '
'is not static. If you want to run '
                         'rnn with non-static axis, please try '
'dynamic rnn with sequence axis.' % shape)
if constants is None:
constants = []
if mask is not None:
mask_shape = int_shape(mask)
if len(mask_shape) == dims - 1:
mask = expand_dims(mask)
nones = _get_dynamic_axis_num(inputs)
states = tuple(initial_states)
outputs = []
time_axis = 1 - nones if nones > 0 else 1
if go_backwards:
i = shape[1] - 1
while i >= 0:
current = C.ops.slice(inputs, time_axis, i, i + 1)
# remove dummy dimension
current = squeeze(current, time_axis)
output, new_states = step_function(
current, tuple(states) + tuple(constants))
if mask is not None:
mask_slice = C.ops.slice(mask, time_axis, i, i + 1)
mask_slice = squeeze(mask_slice, time_axis)
if len(outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = outputs[-1]
output = C.ops.element_select(mask_slice, output, prev_output)
return_states = []
for s, n_s in zip(states, new_states):
return_states.append(
C.ops.element_select(
mask_slice, n_s, s))
new_states = return_states
outputs.append(output)
states = new_states
i -= 1
else:
i = 0
while i < shape[1]:
current = C.ops.slice(inputs, time_axis, i, i + 1)
# remove dummy dimension
current = squeeze(current, 1)
output, new_states = step_function(
current, tuple(states) + tuple(constants))
if mask is not None:
mask_slice = C.ops.slice(mask, time_axis, i, i + 1)
mask_slice = squeeze(mask_slice, 1)
if len(outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = outputs[-1]
output = C.ops.element_select(mask_slice, output, prev_output)
return_states = []
for s, n_s in zip(states, new_states):
return_states.append(
C.ops.element_select(
mask_slice, n_s, s))
new_states = return_states
outputs.append(output)
states = new_states[:len(states)]
i += 1
i = 1
# add the time_step axis back
final_output = expand_dims(outputs[0], 1)
last_output = outputs[0]
while i < len(outputs):
# add the time_step axis back
output_slice = expand_dims(outputs[i], 1)
final_output = C.splice(final_output, output_slice, axis=time_axis)
last_output = outputs[i]
i += 1
return last_output, final_output, states
def rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
shape = int_shape(inputs)
dims = len(shape)
if dims < 3:
raise ValueError('CNTK Backend: the input of rnn has only rank %d '
'Need at least rank 3 to run RNN.' % dims)
if _get_dynamic_axis_num(inputs) == 0 or unroll:
return _static_rnn(
step_function,
inputs,
initial_states,
go_backwards,
mask,
constants,
unroll,
input_length)
if mask is not None:
raise ValueError('RNN with mask is not support in CNTK currently.')
if constants is None:
constants = []
num_time_step = shape[1]
if num_time_step is None and not has_seq_axis(inputs):
num_time_step = inputs.shape[0]
need_convert = not has_seq_axis(inputs)
if need_convert:
inputs = C.to_sequence(inputs)
j = 0
while j < len(constants):
if isinstance(constants[j], list):
i = 0
while i < len(constants[j]):
if _get_dynamic_axis_num(constants[j][i]) == 1:
constants[j][i] = C.sequence.broadcast_as(constants[j][i], inputs)
i += 1
else:
if _get_dynamic_axis_num(constants[j]) == 1:
constants[j] = C.sequence.broadcast_as(constants[j], inputs)
j += 1
states = tuple(initial_states)
with C.default_options(axis_offset=1):
def _recurrence(x, states):
# create place holder
place_holders = [C.placeholder() for _ in states]
past_values = []
for s, p in zip(states, place_holders):
past_values.append(
C.sequence.past_value(
p, s) if go_backwards is False else C.sequence.future_value(
p, s))
new_output, new_states = step_function(
x, tuple(past_values) + tuple(constants))
n_s = []
for o, p in zip(new_states, place_holders):
n_s.append(o.replace_placeholders({p: o.output}))
if len(n_s) > 0:
new_output = n_s[0]
return new_output, n_s
final_output, final_states = _recurrence(inputs, states)
last_output = C.sequence.last(final_output)
last_states = final_states
if need_convert:
final_output = C.sequence.unpack(final_output, 0, no_mask_output=True)
last_states = [
C.sequence.unpack(
s, 0, no_mask_output=True) for s in last_states]
if num_time_step is not None and num_time_step is not C.FreeDimension:
final_output = _reshape_sequence(final_output, num_time_step)
last_states = [
_reshape_sequence(
_, num_time_step) for _ in last_states]
return last_output, final_output, last_states
def has_seq_axis(x):
return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) > 1
def l2_normalize(x, axis):
axis = [axis]
axis = _normalize_axis(axis, x)
norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0]))
return x / norm
def hard_sigmoid(x):
x = (0.2 * x) + 0.5
x = C.clip(x, 0.0, 1.0)
return x
def conv1d(x, kernel, strides=1, padding='valid',
data_format=None, dilation_rate=1):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel.shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
if data_format == 'channels_last':
x = C.swapaxes(x, 0, 1)
kernel = C.swapaxes(kernel, 0, 2)
padding = _preprocess_border_mode(padding)
strides = [strides]
x = C.convolution(
kernel,
x,
strides=tuple(strides),
auto_padding=[
False,
padding])
if data_format == 'channels_last':
x = C.swapaxes(x, 0, 1)
return x
def conv2d(x, kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding])
else:
assert dilation_rate[0] == dilation_rate[1]
assert strides == (1, 1), 'Invalid strides for dilated convolution'
x = C.convolution(
kernel,
x,
strides=dilation_rate[0],
auto_padding=[
False,
padding,
padding])
return _postprocess_conv2d_output(x, data_format)
def conv3d(x, kernel, strides=(1, 1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = strides + (strides[0],)
x = C.convolution(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding,
padding])
return _postprocess_conv3d_output(x, data_format)
def pool2d(x, pool_size, strides=(1, 1),
padding='valid', data_format=None,
pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
padding = _preprocess_border_mode(padding)
strides = strides
pool_size = pool_size
x = _preprocess_conv2d_input(x, data_format)
if pool_mode == 'max':
x = C.pooling(
x,
C.MAX_POOLING,
pool_size,
strides,
auto_padding=[padding])
elif pool_mode == 'avg':
x = C.pooling(
x,
C.AVG_POOLING,
pool_size,
strides,
auto_padding=[padding])
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
return _postprocess_conv2d_output(x, data_format)
def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid',
data_format=None, pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
padding = _preprocess_border_mode(padding)
x = _preprocess_conv3d_input(x, data_format)
if pool_mode == 'max':
x = C.pooling(
x,
C.MAX_POOLING,
pool_size,
strides,
auto_padding=[padding])
elif pool_mode == 'avg':
x = C.pooling(
x,
C.AVG_POOLING,
pool_size,
strides,
auto_padding=[padding])
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
return _postprocess_conv3d_output(x, data_format)
def relu(x, alpha=0., max_value=None):
if alpha != 0.:
negative_part = C.relu(-x)
x = C.relu(x)
if max_value is not None:
x = C.clip(x, 0.0, max_value)
if alpha != 0.:
x -= alpha * negative_part
return x
def dropout(x, level, noise_shape=None, seed=None):
if level < 0. or level >= 1:
raise ValueError('CNTK Backend: Invalid dropout level %s, '
'must be in interval [0, 1].' % level)
return C.dropout(x, level)
def batch_flatten(x):
# cntk's batch axis is not in shape,
    # so just flatten all the dims in x.shape
dim = np.prod(x.shape)
x = C.reshape(x, (-1,))
x._keras_shape = (None, dim)
return x
def softmax(x):
return C.softmax(x)
def softplus(x):
return C.softplus(x)
def softsign(x):
return x / (1 + C.abs(x))
def categorical_crossentropy(output, target, from_logits=False):
if from_logits:
result = C.cross_entropy_with_softmax(output, target)
        # cntk's result shape is (batch, 1), while keras expects (batch, )
return C.reshape(result, ())
else:
# scale preds so that the class probas of each sample sum to 1
output /= C.reduce_sum(output, axis=-1)
# avoid numerical instability with _EPSILON clipping
output = C.clip(output, _EPSILON, 1.0 - _EPSILON)
return -sum(target * C.log(output), axis=-1)
def sparse_categorical_crossentropy(output, target, from_logits=False):
target = C.one_hot(target, output.shape[-1])
target = C.reshape(target, output.shape)
return categorical_crossentropy(output, target, from_logits)
class Function(object):
def __init__(self, inputs, outputs, updates=[], **kwargs):
self.placeholders = inputs
self.trainer = None
self.unrelated_updates = None
self.updates = updates
if len(updates) > 0:
assert len(outputs) > 0
self.loss = outputs[0]
# need group update by gradient place holder
u_ops = []
unrelated_updates = []
for update in updates:
if isinstance(update, tuple):
if len(update) != 2:
raise NotImplementedError
else:
u = C.assign(update[0], update[1])
else:
u = update
if len(u.arguments) == 0:
u_ops.append(u)
else:
unrelated_updates.append(u)
update_func = C.combine([u.output for u in u_ops])
grads = update_func.find_all_with_name('keras_grad_placeholder')
u_list = []
p_list = []
for g in grads:
if g in grad_parameter_dict:
p_list.append(grad_parameter_dict[g])
u_list.append(g)
else:
raise ValueError('CNTK backend: when constructing trainer, '
'found gradient node `%s` which is not '
'related to any parameters in the model. '
'Please double check how the gradient node '
'is constructed.' % g)
if len(u_list) > 0:
learner = C.cntk_py.universal_learner(p_list, u_list, update_func)
criterion = (
outputs[0],
outputs[1]) if len(outputs) > 1 else (
outputs[0],
)
self.trainer = C.trainer.Trainer(
outputs[0], criterion, [learner])
self.trainer_output = tuple([f.output for f in criterion])
elif len(u_ops) > 0:
unrelated_updates.extend(u_ops)
if len(unrelated_updates) > 0:
self.unrelated_updates = C.combine([_.output for _ in unrelated_updates])
if self.trainer is None:
self.metrics_outputs = [f.output for f in outputs]
self.metrics_func = C.combine(self.metrics_outputs)
# cntk only could handle loss and 1 metric in trainer, for metrics more
# than 2, need manual eval
elif len(outputs) > 2:
self.metrics_outputs = [f.output for f in outputs[2:]]
self.metrics_func = C.combine(self.metrics_outputs)
else:
self.metrics_func = None
def __call__(self, inputs):
assert type(inputs) in {list, tuple}
feed_dict = {}
for tensor, value in zip(self.placeholders, inputs):
# cntk only support calculate on float, do auto cast here
if (hasattr(value, 'dtype') and
value.dtype != np.float32 and
value.dtype != np.float64):
value = value.astype(np.float32)
feed_dict[tensor] = value
updated = []
if self.trainer is not None:
input_dict = {}
for argument in self.loss.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError('CNTK backend: argument %s is not found in inputs. '
'Please double check the model and inputs in '
'`train_function`.' % argument.name)
result = self.trainer.train_minibatch(
input_dict, self.trainer_output)
assert(len(result) == 2)
outputs = result[1]
for o in self.trainer_output:
updated.append(outputs[o])
if self.metrics_func is not None:
input_dict = {}
for argument in self.metrics_func.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError('CNTK backend: metrics argument %s '
'is not found in inputs. Please double '
'check the model and inputs.' % argument.name)
output_values = self.metrics_func.eval(input_dict, as_numpy=False)
if isinstance(output_values, dict):
for o in self.metrics_outputs:
value = output_values[o]
v = value.asarray()
updated.append(v)
else:
v = output_values.asarray()
for o in self.metrics_outputs:
updated.append(v)
if self.unrelated_updates is not None:
input_dict = {}
for argument in self.unrelated_updates.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError('CNTK backend: assign ops argument %s '
'is not found in inputs. Please double '
'check the model and inputs.' % argument.name)
self.unrelated_updates.eval(input_dict, as_numpy=False)
return updated
def function(inputs, outputs, updates=[], **kwargs):
return Function(inputs, outputs, updates=updates, **kwargs)
def temporal_padding(x, padding=(1, 1)):
assert len(padding) == 2
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if num_dynamic_axis > 0:
assert len(base_shape) == 2
x = _padding(x, padding, 0)
else:
assert len(base_shape) == 3
x = _padding(x, padding, 1)
return x
def _padding(x, pattern, axis):
base_shape = x.shape
if b_any([dim < 0 for dim in base_shape]):
raise ValueError('CNTK Backend: padding input tensor with '
'shape `%s` contains non-specified dimension, '
'which is not supported. Please give fixed '
'dimension to enable padding.' % base_shape)
if pattern[0] > 0:
prefix_shape = list(base_shape)
prefix_shape[axis] = pattern[0]
prefix_shape = tuple(prefix_shape)
x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis)
base_shape = x.shape
if pattern[1] > 0:
postfix_shape = list(base_shape)
postfix_shape[axis] = pattern[1]
postfix_shape = tuple(postfix_shape)
x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis)
return x
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if data_format == 'channels_first':
if num_dynamic_axis > 0:
assert len(base_shape) == 3
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
else:
assert len(base_shape) == 4
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 3
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
else:
assert len(base_shape) == 4
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
return x
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if data_format == 'channels_first':
if num_dynamic_axis > 0:
assert len(base_shape) == 4
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
else:
assert len(base_shape) == 5
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
x = _padding(x, padding[2], 4)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 4
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
x = _padding(x, padding[2], 2)
else:
assert len(base_shape) == 5
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
return x
def one_hot(indices, nb_classes):
return C.one_hot(indices, nb_classes)
def get_value(x):
if isinstance(
x,
C.variables.Parameter) or isinstance(
x,
C.variables.Constant):
return x.value
else:
return eval(x)
def batch_get_value(xs):
result = []
for x in xs:
if (isinstance(x, C.variables.Parameter) or
isinstance(x, C.variables.Constant)):
result.append(x.value)
else:
result.append(eval(x))
return result
def set_value(x, value):
if (isinstance(x, C.variables.Parameter) or
isinstance(x, C.variables.Constant)):
if isinstance(value, float):
value = np.full(x.shape, value)
x.value = value
else:
raise NotImplementedError
def print_tensor(x, message=''):
return C.user_function(
LambdaFunc(x,
when=lambda x: True,
execute=lambda x: print(message)))
def batch_set_value(tuples):
for t in tuples:
x = t[0]
value = t[1]
if isinstance(value, np.ndarray) is False:
value = np.asarray(value)
if isinstance(x, C.variables.Parameter):
x.value = value
else:
raise NotImplementedError
def stop_gradient(variables):
return C.stop_gradient(C.combine(variables))
def switch(condition, then_expression, else_expression):
return C.element_select(condition,
then_expression,
else_expression)
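# elu below uses the built-in C.elu (with its default alpha) and, for any other
# alpha, rescales the negative branch manually via element_select.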
def elu(x, alpha=1.):
res = C.elu(x)
if alpha == 1:
return res
else:
return C.element_select(C.greater(x, 0), res, alpha * res)
def in_top_k(predictions, targets, k):
_targets = C.one_hot(targets, predictions.shape[-1])
result = C.classification_error(predictions, _targets, topN=k)
return 1 - C.reshape(result, shape=())
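# conv2d_transpose follows the usual CNTK convolution pattern: convert the input
# and kernel to channels-first layout, map the Keras padding mode to CNTK
# auto_padding flags, call C.convolution_transpose with the channels-first output
# shape, then convert the result back to the requested data_format.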
def conv2d_transpose(x, kernel, output_shape, strides=(1, 1),
padding='valid', data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = (1,) + strides
# cntk output_shape does not include batch axis
output_shape = output_shape[1:]
    # in Keras 2 the output shape arrives in a different format and must be rearranged
if data_format == 'channels_last':
shape = list(output_shape)
shape[0] = output_shape[2]
shape[1] = output_shape[0]
shape[2] = output_shape[1]
output_shape = tuple(shape)
x = C.convolution_transpose(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding],
output_shape=output_shape)
return _postprocess_conv2d_output(x, data_format)
def identity(x):
return C.alias(x, name=('%s_alias' % (x.name)))
def _preprocess_conv2d_input(x, data_format):
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
x = C.transpose(x, (2, 0, 1))
return x
def _preprocess_conv2d_kernel(kernel, data_format):
# As of Keras 2.0.0, all kernels are normalized
# on the format `(rows, cols, input_depth, depth)`,
# independently of `data_format`.
# CNTK expects `(depth, input_depth, rows, cols)`.
kernel = C.transpose(kernel, (3, 2, 0, 1))
return kernel
def _preprocess_border_mode(padding):
if padding == 'same':
padding = True
elif padding == 'valid':
padding = False
else:
raise ValueError('Invalid border mode: ' + str(padding))
return padding
def _postprocess_conv2d_output(x, data_format):
if data_format == 'channels_last':
x = C.transpose(x, (1, 2, 0))
return x
def _preprocess_conv3d_input(x, data_format):
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
# TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3,
# input_depth)
x = C.transpose(x, (3, 0, 1, 2))
return x
def _preprocess_conv3d_kernel(kernel, dim_ordering):
kernel = C.transpose(kernel, (4, 3, 0, 1, 2))
return kernel
def _postprocess_conv3d_output(x, dim_ordering):
if dim_ordering == 'channels_last':
x = C.transpose(x, (1, 2, 3, 0))
return x
def _get_dynamic_axis_num(x):
if hasattr(x, 'dynamic_axes'):
return len(x.dynamic_axes)
else:
return 0
def _contain_seqence_axis(x):
if _get_dynamic_axis_num(x) > 1:
return x.dynamic_axes[1] == C.Axis.default_dynamic_axis()
else:
return False
def get_num_dynamic_axis(x):
return _get_dynamic_axis_num(x)
def _reduce_on_axis(x, axis, reduce_fun_name):
if isinstance(axis, list):
for a in axis:
if isinstance(a, C.Axis) and a != C.Axis.default_batch_axis():
x = getattr(C.sequence, reduce_fun_name)(x, a)
else:
x = getattr(C, reduce_fun_name)(x, a)
else:
x = getattr(C, reduce_fun_name)(x, axis)
return x
def _reshape_sequence(x, time_step):
tmp_shape = list(int_shape(x))
tmp_shape[1] = time_step
return reshape(x, tmp_shape)
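# The local (unshared) convolutions below are implemented without a dedicated
# CNTK op: the input is unrolled into one slice per output position, multiplied
# against the per-position kernel via broadcasting, and then reduced over the
# input_length * kernel_size axis.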
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
stride = strides[0]
kernel_shape = int_shape(kernel)
output_length, feature_dim, filters = kernel_shape
xs = []
for i in range(output_length):
slice_length = slice(i * stride,
i * stride + kernel_size[0])
xs.append(reshape(inputs[:, slice_length, :],
(-1, 1, feature_dim)))
x_aggregate = concatenate(xs, axis=1)
# transpose kernel to output_filters first, to apply broadcast
weight = permute_dimensions(kernel, (2, 0, 1))
# Shape: (batch, filters, output_length, input_length * kernel_size)
output = x_aggregate * weight
# Shape: (batch, filters, output_length)
output = sum(output, axis=3)
# Shape: (batch, output_length, filters)
return permute_dimensions(output, (0, 2, 1))
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
stride_row, stride_col = strides
output_row, output_col = output_shape
kernel_shape = int_shape(kernel)
_, feature_dim, filters = kernel_shape
xs = []
for i in range(output_row):
for j in range(output_col):
slice_row = slice(i * stride_row,
i * stride_row + kernel_size[0])
slice_col = slice(j * stride_col,
j * stride_col + kernel_size[1])
if data_format == 'channels_first':
xs.append(reshape(inputs[:, :, slice_row, slice_col],
(-1, 1, feature_dim)))
else:
xs.append(reshape(inputs[:, slice_row, slice_col, :],
(-1, 1, feature_dim)))
x_aggregate = concatenate(xs, axis=1)
# transpose kernel to put filters first
weight = permute_dimensions(kernel, (2, 0, 1))
# shape: batch, filters, output_length, input_length * kernel_size
output = x_aggregate * weight
# shape: batch, filters, output_length
output = sum(output, axis=3)
# shape: batch, filters, row, col
output = reshape(output,
(-1, filters, output_row, output_col))
if data_format == 'channels_last':
# shape: batch, row, col, filters
output = permute_dimensions(output, (0, 2, 3, 1))
return output
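# ReshapeBatch is a CNTK UserFunction that reshapes a tensor while letting the
# batch axis absorb or release elements: forward() recomputes the batch size from
# the total element count and the target static shape, and backward() applies the
# inverse reshape to the incoming gradients.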
class ReshapeBatch(C.ops.functions.UserFunction):
def __init__(self, input, shape, name='reshape_with_batch'):
super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name)
self.from_shape = input.shape
self.target_shape = shape
def infer_outputs(self):
batch_axis = C.Axis.default_batch_axis()
return [
C.output_variable(
self.target_shape,
self.inputs[0].dtype,
[batch_axis])]
def forward(self, arguments, device=None, outputs_to_retain=None):
num_element = arguments.shape()[0] * np.prod(np.asarray(self.from_shape))
num_static_element = np.prod(np.asarray(self.target_shape))
num_batch = int(num_element / num_static_element)
result = arguments.data().as_shape((num_batch,) + self.target_shape)
return None, C.cntk_py.Value(result)
def backward(self, state, root_gradients):
grad_array_view = root_gradients.data()
num_element = root_gradients.shape()[0] * np.prod(np.asarray(self.target_shape))
num_static_element = np.prod(np.asarray(self.from_shape))
num_old_batch = int(num_element / num_static_element)
return C.cntk_py.Value(
grad_array_view.as_shape(
(num_old_batch,) + self.from_shape))
class LambdaFunc(C.ops.functions.UserFunction):
def __init__(self,
arg,
when=lambda arg: True,
execute=lambda arg: print(arg),
name=''):
self.when = when
self.execute = execute
super(LambdaFunc, self).__init__([arg], name=name)
def infer_outputs(self):
return [
C.output_variable(
self.inputs[0].shape,
self.inputs[0].dtype,
self.inputs[0].dynamic_axes)]
def forward(self, argument, device=None, outputs_to_retain=None):
if self.when(argument):
self.execute(argument)
return None, argument
def backward(self, state, root_gradients):
return root_gradients
|
py | b4161cfff596f3e067dfb2a99441f8d55d8fa42c | """
Stores jobs in a database table using SQLAlchemy.
"""
import pickle
import logging
import sqlalchemy
from apscheduler.jobstores.base import JobStore
from apscheduler.job import Job
try:
from sqlalchemy import *
except ImportError: # pragma: nocover
raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed')
logger = logging.getLogger(__name__)
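# Illustrative usage sketch (assumes the APScheduler 2.x Scheduler API):
#   from apscheduler.scheduler import Scheduler
#   sched = Scheduler()
#   sched.add_jobstore(SQLAlchemyJobStore(url='sqlite:///jobs.sqlite'), 'default')
#   sched.start()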
class SQLAlchemyJobStore(JobStore):
def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None,
pickle_protocol=pickle.HIGHEST_PROTOCOL):
self.jobs = []
self.pickle_protocol = pickle_protocol
if engine:
self.engine = engine
elif url:
self.engine = create_engine(url)
else:
raise ValueError('Need either "engine" or "url" defined')
if sqlalchemy.__version__ < '0.7':
pickle_coltype = PickleType(pickle_protocol, mutable=False)
else:
pickle_coltype = PickleType(pickle_protocol)
self.jobs_t = Table(
tablename, metadata or MetaData(),
Column('id', Integer, Sequence(tablename + '_id_seq', optional=True), primary_key=True),
Column('trigger', pickle_coltype, nullable=False),
Column('func_ref', String(1024), nullable=False),
Column('args', pickle_coltype, nullable=False),
Column('kwargs', pickle_coltype, nullable=False),
Column('name', Unicode(1024)),
Column('misfire_grace_time', Integer, nullable=False),
Column('coalesce', Boolean, nullable=False),
Column('max_runs', Integer),
Column('max_instances', Integer),
Column('next_run_time', DateTime, nullable=False),
Column('runs', BigInteger))
self.jobs_t.create(self.engine, True)
def add_job(self, job):
job_dict = job.__getstate__()
result = self.engine.execute(self.jobs_t.insert().values(**job_dict))
job.id = result.inserted_primary_key[0]
self.jobs.append(job)
def remove_job(self, job):
delete = self.jobs_t.delete().where(self.jobs_t.c.id == job.id)
self.engine.execute(delete)
self.jobs.remove(job)
def load_jobs(self):
jobs = []
for row in self.engine.execute(select([self.jobs_t])):
try:
job = Job.__new__(Job)
job_dict = dict(row.items())
job.__setstate__(job_dict)
jobs.append(job)
except Exception:
job_name = job_dict.get('name', '(unknown)')
logger.exception('Unable to restore job "%s"', job_name)
self.jobs = jobs
def update_job(self, job):
job_dict = job.__getstate__()
update = self.jobs_t.update().where(self.jobs_t.c.id == job.id).\
values(next_run_time=job_dict['next_run_time'], runs=job_dict['runs'])
self.engine.execute(update)
def close(self):
self.engine.dispose()
def __repr__(self):
return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url)
|
py | b4161e04748049d081f93109ef00907f6ad203f2 | import pytest
import stk
from ....case_data import CaseData
from ...building_blocks import get_iron_complex, get_tritopic_linker
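# The session-scoped fixture below builds an stk M4L4Tetrahedron cage from an
# iron complex (vertices 0-3) and a tritopic linker (vertices 4-7), and pairs the
# constructed molecule with the SMILES string it is expected to yield.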
@pytest.fixture(
scope='session',
params=(
lambda name: CaseData(
molecule=stk.ConstructedMolecule(
topology_graph=stk.cage.M4L4Tetrahedron(
building_blocks={
get_iron_complex(): range(4),
get_tritopic_linker(): range(4, 8),
},
),
),
smiles=(
'[H]C1=C([H])C([H])=N2->[Fe+2]3456<-N7=C([H])C([H])=C'
'([H])C([H])=C7C([H])=N->3C3=C([H])C([H])=C(C([H])=C3'
'[H])N3C7=C([H])C([H])=C(C([H])=C7[H])N7->[Fe+2]89%10'
'(<-N%11=C([H])C([H])=C([H])C([H])=C%11C([H])=N->8C8='
'C([H])C([H])=C(C([H])=C8[H])N(C8=C([H])C([H])=C(C([H'
'])=C8[H])N->4=C([H])C2=C1[H])C1=C([H])C([H])=C(C([H]'
')=C1[H])N1->[Fe+2]248(<-N%11=C([H])C([H])=C([H])C([H'
'])=C%11C=1[H])<-N1=C([H])C([H])=C([H])C([H])=C1C([H]'
')=N->2C1=C([H])C([H])=C(C([H])=C1[H])N(C1=C([H])C([H'
'])=C(C([H])=C1[H])N->5=C([H])C1=C([H])C([H])=C([H])C'
'([H])=N->61)C1=C([H])C([H])=C(C([H])=C1[H])N1->[Fe+2'
']25(<-N6=C([H])C([H])=C([H])C([H])=C6C([H])=N->2C2=C'
'([H])C([H])=C3C([H])=C2[H])(<-N2=C([H])C([H])=C([H])'
'C([H])=C2C=1[H])<-N1=C([H])C([H])=C([H])C([H])=C1C(['
'H])=N->5C1=C([H])C([H])=C(C([H])=C1[H])N(C1=C([H])C('
'[H])=C(C([H])=C1[H])N->9=C([H])C1=C([H])C([H])=C([H]'
')C([H])=N->%101)C1=C([H])C([H])=C(C([H])=C1[H])N->4='
'C([H])C1=C([H])C([H])=C([H])C([H])=N->81)<-N1=C([H])'
'C([H])=C([H])C([H])=C1C=7[H]'
),
name=name,
),
),
)
def metal_cage_m4l4_tetrahedron(request) -> CaseData:
return request.param(
f'{request.fixturename}{request.param_index}',
)
|
py | b4161fea441a71898bf2aa7e7ee6533b048cb87c | # ===============================================================================
# NAME: EventVisitor.py
#
# DESCRIPTION: A visitor responsible for the generation of Python event
# dictionary files.
#
# AUTHOR: reder
# EMAIL: [email protected]
# DATE CREATED : Feb 5, 2007
#
# Copyright 2013, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
#
# Python standard modules
#
import datetime
import logging
import os
import sys
from getpass import getuser
from fprime_ac.generators import formatters
# from fprime_ac.utils import DiffAndRename
from fprime_ac.generators.visitors import AbstractVisitor
#
# Python extension modules and custom interfaces
#
# from Cheetah import Template
# from fprime_ac.utils import version
from fprime_ac.utils import ConfigManager, DictTypeConverter
#
# Import precompiled templates here
#
try:
from fprime_ac.generators.templates.events import EventHeader
from fprime_ac.generators.templates.events import EventBody
except ImportError:
print("ERROR: must generate python templates first.")
sys.exit(-1)
#
# Universal globals used within module go here.
# (DO NOT USE MANY!)
#
# Global logger init. below.
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
#
# Module class or classes go here.
class EventVisitor(AbstractVisitor.AbstractVisitor):
"""
A visitor class responsible for generation of Event Python classes.
"""
__instance = None
__config = None
__fp = None
__form = None
__form_comment = None
__model_parser = None
def __init__(self):
"""
Constructor.
"""
super().__init__()
self.__config = ConfigManager.ConfigManager.getInstance()
self.__form = formatters.Formatters.getInstance()
self.__form_comment = formatters.CommentFormatters()
DEBUG.info("EventVisitor: Instanced.")
self.bodytext = ""
self.prototypetext = ""
def _writeTmpl(self, c, fp, visit_str):
"""
Wrapper to write tmpl to files desc.
"""
DEBUG.debug("EventVisitor:%s" % visit_str)
DEBUG.debug("===================================")
DEBUG.debug(c)
fp.writelines(c.__str__())
DEBUG.debug("===================================")
def DictStartVisit(self, obj):
"""
Defined to generate files for generated code products.
        @params obj: the instance of the event model to visit.
"""
# Build filename here...
# Make dictionary directly if it doesn't exist
output_dir = os.environ["DICT_DIR"] + "/events"
if not (os.path.isdir(output_dir)):
os.makedirs(output_dir)
self.__fp = list()
if len(obj.get_ids()) == 1:
pyfile = "{}/{}.py".format(output_dir, obj.get_name())
fd = open(pyfile, "w")
if fd is None:
raise Exception("Could not open %s file." % pyfile)
self.__fp.append(fd)
else:
inst = 0
for id in obj.get_ids():
pyfile = "%s/%s_%d.py" % (output_dir, obj.get_name(), inst)
inst += 1
DEBUG.info("Open file: %s" % pyfile)
fd = open(pyfile, "w")
if fd is None:
raise Exception("Could not open %s file." % pyfile)
DEBUG.info("Completed %s open" % pyfile)
self.__fp.append(fd)
def DictHeaderVisit(self, obj):
"""
Defined to generate header for event python class.
        @params obj: the instance of the event model to operate on.
"""
inst = 0
for id in obj.get_ids():
c = EventHeader.EventHeader()
d = datetime.datetime.now()
c.date = d.strftime("%A, %d %B %Y")
c.user = getuser()
c.source = obj.get_xml_filename()
self._writeTmpl(c, self.__fp[inst], "eventHeaderVisit")
inst += 1
def DictBodyVisit(self, obj):
"""
Defined to generate the body of the Python event class
        @params obj: the instance of the event model to operate on.
"""
inst = 0
for id in obj.get_ids():
c = EventBody.EventBody()
if len(obj.get_ids()) > 1:
c.name = obj.get_name() + "_%d" % inst
else:
c.name = obj.get_name()
c.id = id
c.severity = obj.get_severity()
c.format_string = obj.get_format_string()
c.description = obj.get_comment()
c.component = obj.get_component_name()
c.arglist = list()
c.ser_import_list = list()
arg_num = 0
for arg_obj in obj.get_args():
n = arg_obj.get_name()
t = arg_obj.get_type()
s = arg_obj.get_size()
d = arg_obj.get_comment()
# convert XML types to Python classes
(
type_string,
ser_import,
type_name,
dontcare,
) = DictTypeConverter.DictTypeConverter().convert(t, s)
if ser_import is not None:
c.ser_import_list.append(ser_import)
# convert format specifier if necessary
if type_name == "enum":
format_string = (
DictTypeConverter.DictTypeConverter().format_replace(
c.format_string, arg_num, "d", "s"
)
)
# check for an error
if format_string is None:
PRINT.info(
"Event %s in component %s had error processing format specifier"
% (c.name, c.component)
)
sys.exit(-1)
else:
c.format_string = format_string
c.arglist.append((n, d, type_string))
arg_num += 1
self._writeTmpl(c, self.__fp[inst], "eventBodyVisit")
self.__fp[inst].close()
inst += 1
|
py | b41620a8c1332a153a3fdc7a8b3c049b9d809910 | from machine import SPI, Pin
import sdcard
import os
spisd = SPI(-1, sck=Pin(14), mosi=Pin(15), miso=Pin(2))
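# Note: the SPI bus number and pin assignments above are board-specific (these
# look like a common ESP32 wiring); adjust sck/mosi/miso and the chip-select pin
# for your hardware.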
sdPresent = True
try:
sd = sdcard.SDCard(spisd, Pin(13))
except:
sdPresent = False
print("1 sd present:", sdPresent)
if sdPresent:
os.mount(sd, '/sd')
print("/ - ", os.listdir(''))
print("/sd - "os.listdir('/sd'))
|
py | b41620b61b9308c81d4a761c33b1e69fd1bbe48a | ## __iox__: Operating-System with Tradeable-Platform based on PayPal-Account and Cubical-Bank as well Rufusles-Security by AsusTech.org and Xioami.org
def __AutomatedStrategies__():
pass
def __BarsType__():
pass
def __Indicators__():
pass
def __Indexes__():
pass
def __PayPalVendoring__():
pass
def __Atom__():
pass
def __GitHubBoards__():
pass
def __RufusCybersecurityStore__():
pass
def __AliExpressConnection__():
pass
def __AliExpressFinancialStores__():
pass
def __XioamiStudio__():
pass
def __CubicalConnection__():
pass
def __CanaryConnection__():
pass
def __AsusCloud__():
pass
def __XioamiNostros__():
print('Automated or SemiAutomated')
pass
def __AsusOffices__():
pass
def __RogComputers__():
pass
def __ProArt__():
print('Exclusive to Enterprises')
pass
def __JupyterNotebook__():
print('iox terminal')
pass
def __RogCommunications__():
pass
def __XioamiPro__():
pass
def __XioamiFirewall__():
pass
|
py | b416213a666ae3743631950bbd18f3655b0847f7 | import logging
import pytest
from semantic_version import Version
from ocs_ci.framework import config
from ocs_ci.framework.testlib import tier4c, skipif_managed_service
from ocs_ci.ocs import constants
from ocs_ci.utility import prometheus
from ocs_ci.ocs.ocp import OCP
log = logging.getLogger(__name__)
@tier4c
@pytest.mark.polarion_id("OCS-2323")
@pytest.mark.bugzilla("1953615")
@skipif_managed_service
def test_rgw_unavailable(measure_stop_rgw):
"""
Test that there is appropriate alert when RGW is unavailable and that
this alert is cleared when the RGW interface is back online.
"""
api = prometheus.PrometheusAPI()
# get alerts from time when manager deployment was scaled down
alerts = measure_stop_rgw.get("prometheus_alerts")
target_label = constants.ALERT_CLUSTEROBJECTSTORESTATE
# The alert message is changed since OCS 4.7
ocs_version = config.ENV_DATA["ocs_version"]
if Version.coerce(ocs_version) < Version.coerce("4.7"):
target_msg = (
"Cluster Object Store is in unhealthy state for more than 15s. "
"Please check Ceph cluster health or RGW connection."
)
else:
target_msg = "Cluster Object Store is in unhealthy state. Please check Ceph cluster health."
states = ["pending", "firing"]
prometheus.check_alert_list(
label=target_label,
msg=target_msg,
alerts=alerts,
states=states,
severity="error",
)
api.check_alert_cleared(
label=target_label, measure_end_time=measure_stop_rgw.get("stop"), time_min=300
)
def teardown_module():
ocs_obj = OCP()
ocs_obj.login_as_sa()
|