filename | text |
---|---|
the-stack_0_11382 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
import unittest
import os
from tern.classes.image_layer import ImageLayer
from tern.classes.package import Package
from tern.classes.file_data import FileData
from tern.utils import rootfs
from test_fixtures import TestTemplate1
from test_fixtures import TestTemplate2
class TestClassImageLayer(unittest.TestCase):
def setUp(self):
self.layer = ImageLayer('123abc', 'path/to/tar')
rootfs.set_working_dir()
def tearDown(self):
del self.layer
def testInstance(self):
self.assertEqual(self.layer.diff_id, '123abc')
self.assertEqual(self.layer.tar_file, 'path/to/tar')
self.assertFalse(self.layer.packages)
self.assertFalse(self.layer.created_by)
self.assertRaises(AttributeError, setattr, self.layer,
'diff_id', '456def')
self.assertRaises(AttributeError, setattr, self.layer, 'tar_file',
'some/other/path')
self.layer.created_by = 'some string'
self.assertEqual(self.layer.created_by, 'some string')
self.layer.pkg_format = 'rpm'
self.assertEqual(self.layer.pkg_format, 'rpm')
self.layer.os_guess = 'operating system'
self.assertEqual(self.layer.os_guess, 'operating system')
self.assertFalse(self.layer.files_analyzed)
self.layer.files_analyzed = True
self.assertTrue(self.layer.files_analyzed)
self.assertRaises(ValueError, setattr, self.layer,
'files_analyzed', 'some string')
self.assertEqual("", self.layer.analyzed_output)
self.layer.analyzed_output = 'some string'
self.assertEqual(self.layer.analyzed_output, 'some string')
self.assertRaises(ValueError, setattr, self.layer,
'analyzed_output', 123)
def testAddPackage(self):
err = "Object type String, should be Package"
p1 = Package('x')
self.layer.add_package(p1)
self.assertEqual(len(self.layer.packages), 1)
with self.assertRaises(TypeError, msg=err):
self.layer.add_package("not_a_package")
def testRemovePackage(self):
p1 = Package('x')
p2 = Package('y')
self.layer.add_package(p1)
self.layer.add_package(p2)
self.assertTrue(self.layer.remove_package('y'))
self.assertFalse(self.layer.remove_package('y'))
def testAddFile(self):
err = "Object type String, should be FileData"
file1 = FileData('file1', 'path/to/file1')
self.layer.add_file(file1)
self.assertEqual(len(self.layer.files), 1)
with self.assertRaises(TypeError, msg=err):
self.layer.add_file("afile")
def testRemoveFile(self):
file1 = FileData('file1', 'path/to/file1')
self.layer.add_file(file1)
self.assertFalse(self.layer.remove_file('file1'))
self.assertTrue(self.layer.remove_file('path/to/file1'))
self.assertFalse(self.layer.remove_file('path/to/file1'))
def testToDict(self):
p1 = Package('x')
f1 = FileData('file1', 'path/to/file1')
self.layer.add_package(p1)
self.layer.add_file(f1)
a_dict = self.layer.to_dict()
self.assertEqual(a_dict['diff_id'], '123abc')
self.assertEqual(len(a_dict['packages']), 1)
self.assertEqual(a_dict['packages'][0]['name'], 'x')
self.assertEqual(len(a_dict['files']), 1)
self.assertEqual(a_dict['files'][0]['name'], 'file1')
self.assertEqual(a_dict['files'][0]['path'], 'path/to/file1')
self.assertEqual(a_dict['tar_file'], 'path/to/tar')
def testToDictTemplate(self):
template1 = TestTemplate1()
template2 = TestTemplate2()
p1 = Package('x')
self.layer.add_package(p1)
f1 = FileData('file1', 'path/to/file1')
self.layer.add_file(f1)
dict1 = self.layer.to_dict(template1)
dict2 = self.layer.to_dict(template2)
self.assertEqual(len(dict1.keys()), 4)
self.assertEqual(dict1['layer.diff'], '123abc')
self.assertEqual(dict1['layer.tarfile'], 'path/to/tar')
self.assertEqual(len(dict1['layer.packages']), 1)
self.assertEqual(len(dict1['layer.files']), 1)
self.assertEqual(len(dict2.keys()), 5)
self.assertFalse(dict2['notes'])
self.assertFalse(dict2['layer.packages'][0]['notes'])
self.assertFalse(dict2['layer.files'][0]['notes'])
def testGetPackageNames(self):
p1 = Package('x')
self.layer.add_package(p1)
pkgs = self.layer.get_package_names()
self.assertEqual(pkgs[0], 'x')
def testGetFilePaths(self):
f1 = FileData('file1', 'path/to/file1')
f2 = FileData('file2', 'path/to/file2')
self.layer.add_file(f1)
self.layer.add_file(f2)
file_paths = self.layer.get_file_paths()
self.assertEqual(file_paths, ['path/to/file1', 'path/to/file2'])
def testSetChecksum(self):
self.layer.set_checksum('sha256', '12345abcde')
self.assertEqual(self.layer.checksum_type, 'sha256')
self.assertEqual(self.layer.checksum, '12345abcde')
def testAddChecksums(self):
self.layer.add_checksums({'SHA1': '12345abcde',
'MD5': '1ff38cc592c4c5d0c8e3ca38be8f1eb1'})
self.assertEqual(self.layer.checksums,
{'sha1': '12345abcde',
'md5': '1ff38cc592c4c5d0c8e3ca38be8f1eb1'})
def testSetExtensionInfo(self):
self.layer.extension_info = {"header": set({"Test Header"})}
self.assertIsInstance(self.layer.extension_info, dict)
self.assertIsNotNone(
self.layer.extension_info.get("header", None), None)
self.assertIsInstance(
self.layer.extension_info.get("header", None), set)
header = self.layer.extension_info.get("header").pop()
self.assertEqual(header, "Test Header")
def testGetUntarDir(self):
self.layer.image_layout = "oci"
self.assertEqual(self.layer.image_layout, "oci")
self.layer.image_layout = "docker"
self.assertEqual(self.layer.image_layout, "docker")
self.layer.image_layout = ""
self.assertEqual(self.layer.image_layout, "oci")
self.layer.layer_index = 1
self.assertEqual(self.layer.layer_index, "1")
expected_path = os.path.join(rootfs.get_working_dir(),
'1/contents')
self.assertEqual(self.layer.get_untar_dir(), expected_path)
self.layer.image_layout = "docker"
expected_path = os.path.join(rootfs.get_working_dir(),
'path/to/contents')
self.assertEqual(self.layer.get_untar_dir(), expected_path)
# Kaniko image format test
self.layer = ImageLayer('123abc', 'some_layer_tar_file.tar.gz')
self.layer.image_layout = "docker"
expected_path = os.path.join(rootfs.get_working_dir(),
'some_layer_tar_file/contents')
self.assertEqual(self.layer.get_untar_dir(), expected_path)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_11383 |
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
import sys, platform
sys.path.append('python')
extra_compile_args = ['-DHAVE_KALLOC']
include_dirs = ["."]
if platform.machine() in ["aarch64", "arm64"]:
include_dirs.append("sse2neon/")
extra_compile_args.extend(['-ftree-vectorize', '-DKSW_SSE2_ONLY', '-D__SSE2__'])
else:
extra_compile_args.append('-msse4.1') # WARNING: ancient x86_64 CPUs don't have SSE4
def readme():
with open('python/README.rst') as f:
return f.read()
setup(
name = 'mappy',
version = '2.21',
url = 'https://github.com/lh3/minimap2',
description = 'Minimap2 python binding',
long_description = readme(),
author = 'Heng Li',
author_email = '[email protected]',
license = 'MIT',
keywords = 'sequence-alignment',
scripts = ['python/minimap2.py'],
ext_modules = [Extension('mappy',
sources = ['python/mappy.pyx', 'align.c', 'bseq.c', 'lchain.c', 'seed.c', 'format.c', 'hit.c', 'index.c', 'pe.c', 'options.c',
'ksw2_extd2_sse.c', 'ksw2_exts2_sse.c', 'ksw2_extz2_sse.c', 'ksw2_ll_sse.c',
'kalloc.c', 'kthread.c', 'map.c', 'misc.c', 'sdust.c', 'sketch.c', 'esterr.c', 'splitidx.c'],
depends = ['minimap.h', 'bseq.h', 'kalloc.h', 'kdq.h', 'khash.h', 'kseq.h', 'ksort.h',
'ksw2.h', 'kthread.h', 'kvec.h', 'mmpriv.h', 'sdust.h',
'python/cmappy.h', 'python/cmappy.pxd'],
extra_compile_args = extra_compile_args,
include_dirs = include_dirs,
libraries = ['z', 'm', 'pthread'])],
classifiers = [
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: C',
'Programming Language :: Cython',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics'],
setup_requires=["cython"])
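# Hedged usage sketch (not part of the original setup.py; kept commented out so
# nothing runs at install time). Once the extension is built and installed
# (e.g. "pip install ." from the repository root), the mappy module exposes
# minimap2's aligner; "ref.fa" and the query string below are placeholders.
#
#   import mappy as mp
#   aligner = mp.Aligner("ref.fa", preset="map-ont")   # build/load the reference index
#   for hit in aligner.map("ACGTACGTACGTACGT"):        # align a single query sequence
#       print(hit.ctg, hit.r_st, hit.r_en, hit.mapq)   # contig, start, end, mapping quality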
|
the-stack_0_11384 |
from fontTools.designspaceLib import DesignSpaceDocument
from fontTools.pens.pointPen import PointToSegmentPen
from fontTools.varLib.models import VariationModel, allEqual, normalizeLocation
from ufoLib2 import Font as UFont
from .objects import Component, Glyph, MathDict
from .utils import makeTransformVarCo, tuplifyLocation
class VarCoGlyph(Glyph):
@classmethod
def loadFromUFOs(cls, ufos, locations, glyphName, axes):
uglyph = ufos[0][glyphName]
self = cls.loadFromGlyphObject(uglyph)
self.axes = axes
self._postParse(ufos, locations)
return self
def _postParse(self, ufos, locations):
# Filter out and collect component info from the outline
self.outline, components = self.outline.splitComponents()
# Build Component objects
vcComponentData = self.lib.get("varco.components", [])
if vcComponentData:
assert len(components) == len(vcComponentData), (
self.name,
len(components),
len(vcComponentData),
components,
)
else:
vcComponentData = [None] * len(components)
assert len(self.components) == 0
for (baseGlyph, affine), vcCompo in zip(components, vcComponentData):
if vcCompo is None:
xx, xy, yx, yy, dx, dy = affine
assert xy == 0, "rotation and skew are not implemented"
assert yx == 0, "rotation and skew are not implemented"
coord = {}
transform = MathDict(
x=dx,
y=dy,
rotation=0,
scalex=xx,
scaley=yy,
skewx=0,
skewy=0,
tcenterx=0,
tcentery=0,
)
else:
assert affine[:4] == (1, 0, 0, 1)
x, y = affine[4:]
coord = vcCompo["coord"]
transformDict = vcCompo["transform"]
transform = MathDict(
x=affine[4],
y=affine[5],
rotation=transformDict.get("rotation", 0),
scalex=transformDict.get("scalex", 1),
scaley=transformDict.get("scaley", 1),
skewx=transformDict.get("skewx", 0),
skewy=transformDict.get("skewy", 0),
tcenterx=transformDict.get("tcenterx", 0),
tcentery=transformDict.get("tcentery", 0),
)
self.components.append(Component(baseGlyph, MathDict(coord), transform))
assert len(self.variations) == 0
if ufos:
assert len(ufos) == len(locations)
for ufo, location in zip(ufos[1:], locations[1:]):
if self.name not in ufo:
continue
for axisName, axisValue in location.items():
assert -1 <= axisValue <= 1, (axisName, axisValue)
varGlyph = self.__class__.loadFromGlyphObject(ufo[self.name])
varGlyph._postParse([], [])
varGlyph.location = location
self.variations.append(varGlyph)
if self.variations:
locations = [{}] + [variation.location for variation in self.variations]
self.model = VariationModel(locations)
class VarCoFont:
def __init__(self, designSpacePath):
doc = DesignSpaceDocument.fromfile(designSpacePath)
self.axes, self.ufos, self.locations = unpackDesignSpace(doc)
self.varcoGlyphs = {}
def drawGlyph(self, pen, glyphName, location):
self.drawPointsGlyph(PointToSegmentPen(pen), glyphName, location)
def drawPointsGlyph(self, pen, glyphName, location, transform=None):
varGlyph = self[glyphName]
instanceGlyph = varGlyph.instantiate(location)
outline = instanceGlyph.outline
if transform is not None:
outline = outline.transform(transform)
outline.drawPoints(pen)
for component in instanceGlyph.components:
t = makeTransformVarCo(**component.transform)
if transform is not None:
t = transform.transform(t)
self.drawPointsGlyph(pen, component.name, component.coord, t)
def keys(self):
return self.ufos[0].keys()
def __contains__(self, glyphName):
return glyphName in self.ufos[0]
def __len__(self):
return len(self.ufos[0])
def __iter__(self):
return iter(self.ufos[0].keys())
def __getitem__(self, glyphName):
varcoGlyph = self.varcoGlyphs.get(glyphName)
if varcoGlyph is None:
varcoGlyph = VarCoGlyph.loadFromUFOs(
self.ufos, self.locations, glyphName, self.axes
)
self.varcoGlyphs[glyphName] = varcoGlyph
return varcoGlyph
def get(self, glyphName, default=None):
try:
glyph = self[glyphName]
except KeyError:
glyph = default
return glyph
def extractVarCoData(self, globalAxisNames, neutralOnly=False):
allLocations = set()
vcData = {}
neutralGlyphNames = []
for glyphName in sorted(self.keys()):
glyph = self[glyphName]
axisTags = {axisTag for v in glyph.variations for axisTag in v.location}
if neutralOnly and not axisTags - globalAxisNames:
masters = [glyph]
neutralGlyphNames.append(glyphName)
else:
masters = [glyph] + glyph.variations
if not glyph.outline.isEmpty() and glyph.components:
assert not any(
c.coord for c in glyph.components
), "can't mix outlines and variable components"
# ensure only the offset may vary across masters
for attr in [
"rotation",
"scalex",
"scaley",
"skewx",
"skewy",
"tcenterx",
"tcentery",
]:
values = {c.transform[attr] for m in masters for c in m.components}
assert len(values) == 1, f"classic component varies {attr}"
# This glyph mixes outlines and classic components, it will be
# flattened upon TTF compilation, so should not be part of the VarC table
continue
locations = [m.location for m in masters]
allLocations.update(tuplifyLocation(loc) for loc in locations)
components = []
for i in range(len(glyph.components)):
assert allEqual([m.components[i].name for m in masters])
coords = [m.components[i].coord for m in masters]
fillMissingFromNeutral(coords)
transforms = [
# Filter out x and y, as they'll be in glyf and gvar
{
_transformFieldMapping[k]: v
for k, v in m.components[i].transform.items()
if k not in {"x", "y"}
}
for m in masters
]
components.append(list(zip(coords, transforms)))
if components:
vcData[glyphName] = components, locations
allLocations = [dict(items) for items in sorted(allLocations)]
return vcData, allLocations, neutralGlyphNames
def fillMissingFromNeutral(coords):
# This ensures that all variation coord dicts contain all the
# keys from the neutral coord dict. If missing, the value from
# the neutral coord is used. This is crucial for the variation
# building mechanism.
firstCoord = coords[0]
for coord in coords[1:]:
for k, v in firstCoord.items():
coord.setdefault(k, v)
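# Illustrative example (axis names are made up, not from the original source):
# given coords = [{"wght": 0.5, "wdth": 0.2}, {"wght": 1.0}], calling
# fillMissingFromNeutral(coords) copies the neutral "wdth" value into the second
# dict, leaving coords as [{"wght": 0.5, "wdth": 0.2}, {"wght": 1.0, "wdth": 0.2}].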
def unpackDesignSpace(doc):
axisTagMapping = {axis.name: axis.tag for axis in doc.axes}
axes = {axis.tag: (axis.minimum, axis.default, axis.maximum) for axis in doc.axes}
# We want the default source to be the first in the list; the rest of
# the order is not important
sources = sorted(doc.sources, key=lambda src: src != doc.default)
ufos = []
locations = []
_loaded = {}
for src in sources:
loc = src.location
loc = {
axisTagMapping[axisName]: axisValue for axisName, axisValue in loc.items()
}
loc = normalizeLocation(loc, axes)
loc = {
axisName: axisValue for axisName, axisValue in loc.items() if axisValue != 0
}
locations.append(loc)
ufo = _loaded.get(src.path)
if ufo is None:
ufo = UFont(src.path)
_loaded[src.path] = ufo
if src.layerName is None:
ufo.layers.defaultLayer
else:
ufo = ufo.layers[src.layerName]
ufos.append(ufo)
userAxes = {
axis.tag: (axis.minimum, axis.default, axis.maximum)
for axis in doc.axes
if not axis.hidden
}
return userAxes, ufos, locations
_transformFieldMapping = {
"rotation": "Rotation",
"scalex": "ScaleX",
"scaley": "ScaleY",
"skewx": "SkewX",
"skewy": "SkewY",
"tcenterx": "TCenterX",
"tcentery": "TCenterY",
}
if __name__ == "__main__":
import sys
ufoPath = sys.argv[1]
vcFont = VarCoFont(ufoPath)
g = vcFont["DC_5927_03"]
print(g.components)
print(g.axes)
x = g + 0.5 * (g.variations[0] - g)
print(g.components[-1].transform)
print(x.components[-1].transform)
print(g.variations[0].components[-1].transform)
print(list(vcFont.keys())[:100])
print("AE_PieZhe" in vcFont)
# for x in vcFont:
# print(x)
|
the-stack_0_11387 |
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.batch_jobs.validation_jobs."""
from __future__ import absolute_import
from __future__ import unicode_literals
from core.platform import models
import feconf
from jobs import job_test_utils
from jobs.batch_jobs import validation_jobs
from jobs.transforms.validation import base_validation
from jobs.types import base_validation_errors
from jobs.types import model_property
(auth_models, base_models, user_models) = models.Registry.import_models(
[models.NAMES.auth, models.NAMES.base_model, models.NAMES.user])
class AuditAllStorageModelsJobTests(job_test_utils.JobTestBase):
JOB_CLASS = validation_jobs.AuditAllStorageModelsJob
VALID_USER_ID = 'uid_%s' % ('a' * feconf.USER_ID_RANDOM_PART_LENGTH)
def test_empty_storage(self):
self.assert_job_output_is_empty()
def test_base_validation(self):
base_model_with_invalid_id = self.create_model(
base_models.BaseModel, id='123@?!*', deleted=False)
base_model_with_invalid_timestamps = self.create_model(
base_models.BaseModel, id='124', deleted=False,
created_on=self.NOW, last_updated=self.YEAR_LATER)
base_model_with_inconsistent_timestamps = self.create_model(
base_models.BaseModel, id='125', deleted=False,
created_on=self.YEAR_LATER, last_updated=self.YEAR_AGO)
expired_base_model = self.create_model(
base_models.BaseModel, id='126', deleted=True)
valid_base_model = self.create_model(
base_models.BaseModel, id='127', deleted=False)
self.put_multi([
base_model_with_invalid_id,
base_model_with_invalid_timestamps,
base_model_with_inconsistent_timestamps,
expired_base_model,
valid_base_model,
])
self.assert_job_output_is([
base_validation_errors.ModelIdRegexError(
base_model_with_invalid_id,
base_validation.BASE_MODEL_ID_PATTERN),
base_validation_errors.ModelMutatedDuringJobError(
base_model_with_invalid_timestamps),
base_validation_errors.InconsistentTimestampsError(
base_model_with_inconsistent_timestamps),
base_validation_errors.ModelExpiredError(expired_base_model),
])
def test_user_audits(self):
user_settings_model_with_invalid_id = self.create_model(
user_models.UserSettingsModel,
id='128', email='[email protected]')
user_settings_model_with_valid_id = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID, email='[email protected]')
self.put_multi([
user_settings_model_with_invalid_id,
user_settings_model_with_valid_id,
])
self.assert_job_output_is([
base_validation_errors.ModelIdRegexError(
user_settings_model_with_invalid_id, feconf.USER_ID_REGEX),
])
def test_reports_error_when_id_property_target_does_not_exist(self):
self.put_multi([
# UserEmailPreferencesModel.id -> UserSettingsModel.id.
self.create_model(
user_models.UserEmailPreferencesModel, id=self.VALID_USER_ID),
# UserSettingsModel missing.
])
self.assert_job_output_is([
base_validation_errors.ModelRelationshipError(
model_property.ModelProperty(
user_models.UserEmailPreferencesModel,
user_models.UserEmailPreferencesModel.id),
self.VALID_USER_ID, 'UserSettingsModel', self.VALID_USER_ID),
])
def test_empty_when_id_property_target_exists(self):
self.put_multi([
self.create_model(
user_models.UserEmailPreferencesModel, id=self.VALID_USER_ID),
self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID, email='[email protected]'),
])
self.assert_job_output_is_empty()
def test_empty_when_web_of_id_property_targets_exist(self):
self.put_multi([
self.create_model(
auth_models.UserAuthDetailsModel,
id=self.VALID_USER_ID, firebase_auth_id='abc', gae_id='123'),
self.create_model(
auth_models.UserIdByFirebaseAuthIdModel,
id='abc', user_id=self.VALID_USER_ID),
self.create_model(
auth_models.UserIdentifiersModel,
id='123', user_id=self.VALID_USER_ID),
])
self.assert_job_output_is_empty()
def test_reports_missing_id_property_target_even_if_sibling_property_is_valid(self): # pylint: disable=line-too-long
self.put_multi([
self.create_model(
auth_models.UserAuthDetailsModel, id=self.VALID_USER_ID,
# Value is not None, so UserIdentifiersModel must exist.
gae_id='abc',
# Value is None, so missing UserIdByFirebaseAuthIdModel is OK.
firebase_auth_id=None),
self.create_model(
auth_models.UserIdentifiersModel, user_id=self.VALID_USER_ID,
# Should be gae_id='abc', so error will occur.
id='123'),
])
self.assert_job_output_is([
base_validation_errors.ModelRelationshipError(
model_property.ModelProperty(
auth_models.UserAuthDetailsModel,
auth_models.UserAuthDetailsModel.gae_id),
self.VALID_USER_ID, 'UserIdentifiersModel', 'abc'),
])
|
the-stack_0_11389 |
import argparse
import logging
import os
import platform
from yaml import safe_load
configdata = None
if platform.system() == "Windows":
APPDATA = os.environ["APPDATA"]
CONFIGFILE = os.path.join(APPDATA, "svtplay-dl", "svtplay-dl.yaml")
else:
CONFIGFILE = os.path.expanduser("~/.svtplay-dl.yaml")
class Options:
"""
Options used when invoking the script from another Python script.
Simple container class used when calling get_media() from another Python
script. The variables correspond to the command line parameters parsed
in main() when the script is called directly.
When called from a script there are a few more things to consider:
* Logging is done to 'log'. main() calls setup_log() which sets the
logging to either stdout or stderr depending on the silent level.
A user calling get_media() directly can either also use setup_log()
or configure the log manually.
* Progress information is printed to 'progress_stream' which defaults to
sys.stderr but can be changed to any stream.
* Many errors result in calls to sys.exit(), so catch 'SystemExit'
exceptions to prevent the entire application from exiting if that happens.
"""
def __init__(self):
self.default = {}
def set(self, key, value):
self.default[key] = value
def get(self, key):
if key in self.default:
return self.default[key]
def get_variable(self):
return self.default
def set_variable(self, value):
self.default = value
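# Hedged usage sketch (not part of the original module; shown as a comment only):
# a script embedding svtplay-dl can populate an Options container instead of
# parsing sys.argv, using the same keys that setup_defaults() below initializes.
#
#   config = setup_defaults()
#   config.set("output", "/tmp/downloads")
#   config.set("subtitle", True)
#   config.get("subtitle")      # -> True
#   config.get_variable()       # -> the full dict of current settings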
def gen_parser(version="unknown"):
parser = argparse.ArgumentParser(prog="svtplay-dl")
general = parser.add_argument_group()
general.add_argument("--version", action="version", version=f"%(prog)s {version}")
general.add_argument("-o", "--output", metavar="output", default=None, help="outputs to the given filename or folder")
general.add_argument(
"--subfolder",
action="store_true",
default=False,
help="Create a subfolder titled as the show, non-series gets in folder movies",
)
general.add_argument("--config", dest="configfile", metavar="configfile", default=CONFIGFILE, help="Specify configuration file")
general.add_argument("-f", "--force", action="store_true", dest="force", default=False, help="overwrite if file exists already")
general.add_argument("-r", "--resume", action="store_true", dest="resume", default=False, help="resume a download (RTMP obsolete)")
general.add_argument("-l", "--live", action="store_true", dest="live", default=False, help="enable for live streams (RTMP based ones)")
general.add_argument("-c", "--capture_time", default=-1, type=int, metavar="capture_time", help="define capture time in minutes of a live stream")
general.add_argument("-s", "--silent", action="store_true", dest="silent", default=False, help="be less verbose")
general.add_argument(
"--silent-semi",
action="store_true",
dest="silent_semi",
default=False,
help="only show a message when the file is downloaded",
)
general.add_argument("-u", "--username", default=None, help="username")
general.add_argument("-p", "--password", default=None, help="password")
general.add_argument(
"-t",
"--thumbnail",
action="store_true",
dest="thumbnail",
default=False,
help="download thumbnail from the site if available",
)
general.add_argument(
"-g",
"--get-url",
action="store_true",
dest="get_url",
default=False,
help="do not download any video, but instead print the URL.",
)
general.add_argument(
"--get-only-episode-url",
action="store_true",
dest="get_only_episode_url",
default=False,
help="do not get video URLs, only print the episode URL.",
)
general.add_argument(
"--dont-verify-ssl-cert",
action="store_false",
dest="ssl_verify",
default=True,
help="Don't attempt to verify SSL certificates.",
)
general.add_argument(
"--http-header",
dest="http_headers",
default=None,
metavar="header1=value;header2=value2",
help="A header to add to each HTTP request.",
)
general.add_argument(
"--cookies",
dest="cookies",
default=None,
metavar="cookie1=value;cookie2=value2",
help="A cookies to add to each HTTP request.",
)
general.add_argument("--remux", dest="remux", default=False, action="store_true", help="Remux from one container to mp4 using ffmpeg or avconv")
general.add_argument(
"--exclude",
dest="exclude",
default=None,
metavar="WORD1,WORD2,...",
help="exclude videos with the WORD(s) in the filename. comma separated.",
)
general.add_argument("--after-date", dest="after_date", default=None, metavar="yyyy-MM-dd", help="only videos published on or after this date")
general.add_argument(
"--proxy",
dest="proxy",
default=None,
metavar="proxy",
help="Use the specified HTTP/HTTPS/SOCKS proxy. To enable experimental "
"SOCKS proxy, specify a proper scheme. For example "
"socks5://127.0.0.1:1080/.",
)
general.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False, help="explain what is going on")
general.add_argument("--nfo", action="store_true", dest="nfo", default=False, help="create a NFO file")
general.add_argument("--force-nfo", action="store_true", dest="force_nfo", default=False, help="download only NFO if used with --nfo")
general.add_argument(
"--only-audio",
action="store_true",
dest="only_audio",
default=False,
help="only download audio if audio and video is seperated",
)
general.add_argument(
"--only-video",
action="store_true",
dest="only_video",
default=False,
help="only download video if audio and video is seperated",
)
quality = parser.add_argument_group("Quality")
quality.add_argument(
"-q",
"--quality",
default=0,
metavar="quality",
help="choose what format to download based on bitrate / video resolution." "it will download the best format by default",
)
quality.add_argument(
"-Q",
"--flexible-quality",
default=0,
metavar="amount",
dest="flexibleq",
help="allow given quality (as above) to differ by an amount",
)
quality.add_argument("-P", "--preferred", default=None, metavar="preferred", help="preferred download method (dash, hls, hds, or http)")
quality.add_argument("--list-quality", dest="list_quality", action="store_true", default=False, help="list the quality for a video")
quality.add_argument(
"--stream-priority",
dest="stream_prio",
default=None,
metavar="dash,hls,hds,http",
help="If two streams have the same quality, choose the one you prefer",
)
quality.add_argument(
"--format-preferred",
dest="format_preferred",
default=None,
metavar="h264,h264-51",
help="Choose the format you prefer, --list-quality to show which one to choose from",
)
subtitle = parser.add_argument_group("Subtitle")
subtitle.add_argument(
"-S",
"--subtitle",
action="store_true",
dest="subtitle",
default=False,
help="download subtitle from the site if available",
)
subtitle.add_argument(
"-M",
"--merge-subtitle",
action="store_true",
dest="merge_subtitle",
default=False,
help="merge subtitle with video/audio file with corresponding ISO639-3 language code." "this invokes --remux automatically.",
)
subtitle.add_argument(
"--force-subtitle",
dest="force_subtitle",
default=False,
action="store_true",
help="download only subtitle if its used with -S",
)
subtitle.add_argument(
"--require-subtitle",
dest="require_subtitle",
default=False,
action="store_true",
help="download only if a subtitle is available",
)
subtitle.add_argument(
"--all-subtitles",
dest="get_all_subtitles",
default=False,
action="store_true",
help="Download all available subtitles for the video",
)
subtitle.add_argument(
"--raw-subtitles",
dest="get_raw_subtitles",
default=False,
action="store_true",
help="also download the subtitles in their native format",
)
subtitle.add_argument(
"--convert-subtitle-colors",
dest="convert_subtitle_colors",
default=False,
action="store_true",
help='converts the color information in subtitles, to <font color=""> tags',
)
alleps = parser.add_argument_group("All")
alleps.add_argument("-A", "--all-episodes", action="store_true", dest="all_episodes", default=False, help="try to download all episodes")
alleps.add_argument("--all-last", dest="all_last", default=-1, type=int, metavar="NN", help="get last NN episodes instead of all episodes")
alleps.add_argument("--include-clips", dest="include_clips", default=False, action="store_true", help="include clips from websites when using -A")
cmorep = parser.add_argument_group("C More")
cmorep.add_argument("--cmore-operatorlist", dest="cmoreoperatorlist", default=False, action="store_true", help="show operatorlist for cmore")
cmorep.add_argument("--cmore-operator", dest="cmoreoperator", default=None, metavar="operator")
parser.add_argument("urls", nargs="*")
return parser
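# Hedged example invocation (the URL is a placeholder, not from the original
# source): the parser built above takes option flags plus one or more URLs, e.g.
#   svtplay-dl --subtitle -o ~/Videos https://www.svtplay.se/video/<id>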
def parser(version):
parser = gen_parser(version)
options = parser.parse_args()
return parser, options
def setup_defaults():
options = Options()
options.set("output", None)
options.set("subfolder", False)
options.set("configfile", CONFIGFILE)
options.set("resume", False)
options.set("live", False)
options.set("capture_time", -1)
options.set("silent", False)
options.set("force", False)
options.set("quality", 0)
options.set("flexibleq", 0)
options.set("list_quality", False)
options.set("other", None)
options.set("subtitle", False)
options.set("username", None)
options.set("password", None)
options.set("thumbnail", False)
options.set("all_episodes", False)
options.set("all_last", -1)
options.set("merge_subtitle", False)
options.set("force_subtitle", False)
options.set("require_subtitle", False)
options.set("get_all_subtitles", False)
options.set("get_raw_subtitles", False)
options.set("convert_subtitle_colors", False)
options.set("preferred", None)
options.set("verbose", False)
options.set("nfo", False)
options.set("force_nfo", False)
options.set("output_auto", False)
options.set("service", None)
options.set("cookies", None)
options.set("exclude", None)
options.set("after_date", None)
options.set("get_url", False)
options.set("get_only_episode_url", False)
options.set("ssl_verify", True)
options.set("http_headers", None)
options.set("format_preferred", None)
options.set("stream_prio", None)
options.set("remux", False)
options.set("silent_semi", False)
options.set("proxy", None)
options.set("include_clips", False)
options.set("cmoreoperatorlist", False)
options.set("filename", "{title}.s{season}e{episode}.{episodename}-{id}-{service}.{ext}")
options.set("only_audio", False)
options.set("only_video", False)
return _special_settings(options)
def parsertoconfig(config, parser):
config.set("output", parser.output)
config.set("subfolder", parser.subfolder)
config.set("configfile", parser.configfile)
config.set("resume", parser.resume)
config.set("live", parser.live)
config.set("capture_time", parser.capture_time)
config.set("silent", parser.silent)
config.set("force", parser.force)
config.set("quality", parser.quality)
config.set("flexibleq", parser.flexibleq)
config.set("list_quality", parser.list_quality)
config.set("subtitle", parser.subtitle)
config.set("merge_subtitle", parser.merge_subtitle)
config.set("silent_semi", parser.silent_semi)
config.set("username", parser.username)
config.set("password", parser.password)
config.set("thumbnail", parser.thumbnail)
config.set("all_episodes", parser.all_episodes)
config.set("all_last", parser.all_last)
config.set("force_subtitle", parser.force_subtitle)
config.set("require_subtitle", parser.require_subtitle)
config.set("preferred", parser.preferred)
config.set("verbose", parser.verbose)
config.set("nfo", parser.nfo)
config.set("force_nfo", parser.force_nfo)
config.set("exclude", parser.exclude)
config.set("after_date", parser.after_date)
config.set("get_url", parser.get_url)
config.set("get_only_episode_url", parser.get_only_episode_url)
config.set("ssl_verify", parser.ssl_verify)
config.set("http_headers", parser.http_headers)
config.set("cookies", parser.cookies)
config.set("format_preferred", parser.format_preferred)
config.set("stream_prio", parser.stream_prio)
config.set("remux", parser.remux)
config.set("get_all_subtitles", parser.get_all_subtitles)
config.set("get_raw_subtitles", parser.get_raw_subtitles)
config.set("convert_subtitle_colors", parser.convert_subtitle_colors)
config.set("include_clips", parser.include_clips)
config.set("cmoreoperatorlist", parser.cmoreoperatorlist)
config.set("cmoreoperator", parser.cmoreoperator)
config.set("proxy", parser.proxy)
config.set("only_audio", parser.only_audio)
config.set("only_video", parser.only_video)
return _special_settings(config)
def _special_settings(config):
if config.get("require_subtitle"):
if config.get("merge_subtitle"):
config.set("merge_subtitle", True)
else:
config.set("subtitle", True)
if config.get("merge_subtitle"):
config.set("remux", True)
config.set("subtitle", True)
if config.get("silent_semi"):
config.set("silent", True)
if config.get("proxy"):
config.set("proxy", config.get("proxy").replace("socks5", "socks5h", 1))
config.set("proxy", dict(http=config.get("proxy"), https=config.get("proxy")))
if config.get("get_only_episode_url"):
config.set("get_url", True)
return config
def merge(old, new):
if isinstance(new, list):
new = {list(i.keys())[0]: i[list(i.keys())[0]] for i in new}
config = setup_defaults()
if new:
for item in new:
if item in new:
if new[item] != config.get(item): # Check if new value is not a default one.
old[item] = new[item]
else:
old[item] = new[item]
options = Options()
options.set_variable(old)
return options
def readconfig(config, configfile, service=None, preset=None):
global configdata
if configfile and configdata is None:
try:
with open(configfile) as fd:
data = fd.read()
configdata = safe_load(data)
except PermissionError:
logging.error(f"Permission denied while reading config: {configfile}")
if configdata is None:
return config
if "default" in configdata:
config = merge(config.get_variable(), configdata["default"])
if service and "service" in configdata and service in configdata["service"]:
config = merge(config.get_variable(), configdata["service"][service])
if preset and "presets" in configdata and preset in configdata["presets"]:
config = merge(config.get_variable(), configdata["presets"][preset])
return config
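# Hedged illustration (not part of the original module): readconfig() layers up
# to three optional sections from the YAML config file on top of the built-in
# defaults, in this order: "default", then "service" (matched by service name),
# then "presets" (matched by preset name). A minimal ~/.svtplay-dl.yaml could
# look like the sketch below; the option names mirror setup_defaults().
#
#   default:
#     subtitle: true
#     output: ~/Videos
#   service:
#     svtplay:
#       quality: 2500
#   presets:
#     mobile:
#       quality: 500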
|
the-stack_0_11391 |
from django.conf.urls import include, url
from resources import views
from resources.api import ResourceResource, ResourceSubmissionResource
resource_resource = ResourceResource()
resource_submission_resource = ResourceSubmissionResource()
urlpatterns = [
url(r'^api/', include(resource_resource.urls)),
url(r'^api/', include(resource_submission_resource.urls)),
url(r'^$', views.index, name='resources.index'),
url(r'^create/$', views.create_resource, name='resources.views.create_resource'),
url(r'^(?P<resource_id>[^/]+)/comment/$', views.comment_on_resource,
name='resources.views.comment_on_resource'),
url(r'^(?P<resource_id>[^/]+)/$', views.detail, name='detail'),
]
|
the-stack_0_11395 |
#!/usr/bin/env python3
"""
Setup script that reads in the users.yml and courses.yml files in the ../data directory and then
creates the users and courses for the system. This is primarily used by Vagrant and Travis to
figure the environments easily, but it could be run pretty much anywhere, unless the courses
already exist as else the system will probably fail.
Usage: ./setup_sample_courses.py
./setup_sample_courses.py [course [course]]
./setup_sample_courses.py --help
The first will create all couress in courses.yml while the second will only create the courses
specified (which is useful for something like Travis where we don't need the "demo classes", and
just the ones used for testing.
"""
from __future__ import print_function, division
import argparse
from collections import OrderedDict
from datetime import datetime, timedelta
from shutil import copyfile
import glob
import grp
import hashlib
import json
import os
import pwd
import random
import shutil
import subprocess
import uuid
import os.path
import string
import pdb
import docker
from tempfile import TemporaryDirectory
from submitty_utils import dateutils
from ruamel.yaml import YAML
from sqlalchemy import create_engine, Table, MetaData, bindparam, select, join, func
yaml = YAML(typ='safe')
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
SETUP_DATA_PATH = os.path.join(CURRENT_PATH, "..", "data")
SUBMITTY_INSTALL_DIR = "/usr/local/submitty"
SUBMITTY_DATA_DIR = "/var/local/submitty"
SUBMITTY_REPOSITORY = os.path.join(SUBMITTY_INSTALL_DIR, "GIT_CHECKOUT/Submitty")
MORE_EXAMPLES_DIR = os.path.join(SUBMITTY_INSTALL_DIR, "more_autograding_examples")
TUTORIAL_DIR = os.path.join(SUBMITTY_INSTALL_DIR, "GIT_CHECKOUT/Tutorial", "examples")
DB_HOST = "localhost"
with open(os.path.join(SUBMITTY_INSTALL_DIR, "config", "database.json")) as database_config:
database_config_json = json.load(database_config)
DB_USER = database_config_json["database_user"]
DB_PASS = database_config_json["database_password"]
DB_ONLY = False
NO_SUBMISSIONS = False
NO_GRADING = False
NOW = dateutils.get_current_time()
def main():
"""
Main program execution. This gets us our command line arguments, reads in the data files,
and then sets us up to run the create methods for the users and courses.
"""
global DB_ONLY, NO_SUBMISSIONS, NO_GRADING
args = parse_args()
DB_ONLY = args.db_only
NO_SUBMISSIONS = args.no_submissions
NO_GRADING = args.no_grading
if not os.path.isdir(SUBMITTY_DATA_DIR):
raise SystemError("The following directory does not exist: " + SUBMITTY_DATA_DIR)
for directory in ["courses"]:
if not os.path.isdir(os.path.join(SUBMITTY_DATA_DIR, directory)):
raise SystemError("The following directory does not exist: " + os.path.join(
SUBMITTY_DATA_DIR, directory))
use_courses = args.course
# We have to stop all running daemon grading and jobs handling
# processes, as otherwise a process may grab the homework files we are
# inserting before we're ready (causing permission errors), which ends
# up producing a ton of build failures. Better to wait on grading any
# homeworks until we've done all steps of setting up a course.
print("pausing the autograding and jobs hander daemons")
os.system("systemctl stop submitty_autograding_shipper")
os.system("systemctl stop submitty_autograding_worker")
os.system("systemctl stop submitty_daemon_jobs_handler")
os.system("systemctl stop submitty_websocket_server")
courses = {} # dict[str, Course]
users = {} # dict[str, User]
for course_file in sorted(glob.iglob(os.path.join(args.courses_path, '*.yml'))):
# only create the plagiarism course if we have a local LichenTestData repo
if os.path.basename(course_file) == "plagiarism.yml" and not os.path.isdir(os.path.join(SUBMITTY_INSTALL_DIR, "GIT_CHECKOUT", "LichenTestData")):
continue
course_json = load_data_yaml(course_file)
if len(use_courses) == 0 or course_json['code'] in use_courses:
course = Course(course_json)
courses[course.code] = course
create_group("submitty_course_builders")
for user_file in sorted(glob.iglob(os.path.join(args.users_path, '*.yml'))):
user = User(load_data_yaml(user_file))
if user.id in ['submitty_php', 'submitty_daemon', 'submitty_cgi', 'submitty_dbuser', 'vagrant', 'postgres'] or \
user.id.startswith("untrusted"):
continue
user.create()
users[user.id] = user
if user.courses is not None:
for course in user.courses:
if course in courses:
courses[course].users.append(user)
else:
for key in courses.keys():
courses[key].users.append(user)
# To make Rainbow Grades testing possible, need to seed random to have the same users each time
random.seed(10090542)
# we get the max number of extra students, and then create a list that holds all of them,
# which we then randomly choose from to add to a course
extra_students = 0
for course_id in sorted(courses.keys()):
course = courses[course_id]
tmp = course.registered_students + course.unregistered_students + \
course.no_rotating_students + \
course.no_registration_students
extra_students = max(tmp, extra_students)
extra_students = generate_random_users(extra_students, users)
submitty_engine = create_engine("postgresql://{}:{}@{}/submitty".format(DB_USER, DB_PASS, DB_HOST))
submitty_conn = submitty_engine.connect()
submitty_metadata = MetaData(bind=submitty_engine)
user_table = Table('users', submitty_metadata, autoload=True)
for user_id in sorted(users.keys()):
user = users[user_id]
submitty_conn.execute(user_table.insert(),
user_id=user.id,
user_numeric_id = user.numeric_id,
user_password=get_php_db_password(user.password),
user_firstname=user.firstname,
user_preferred_firstname=user.preferred_firstname,
user_lastname=user.lastname,
user_preferred_lastname=user.preferred_lastname,
user_email=user.email,
user_access_level=user.access_level,
last_updated=NOW.strftime("%Y-%m-%d %H:%M:%S%z"))
for user in extra_students:
submitty_conn.execute(user_table.insert(),
user_id=user.id,
user_numeric_id=user.numeric_id,
user_password=get_php_db_password(user.password),
user_firstname=user.firstname,
user_preferred_firstname=user.preferred_firstname,
user_lastname=user.lastname,
user_preferred_lastname=user.preferred_lastname,
user_email=user.email,
last_updated=NOW.strftime("%Y-%m-%d %H:%M:%S%z"))
# INSERT term into terms table, based on today's date.
today = datetime.today()
year = str(today.year)
if today.month < 7:
term_id = "s" + year[-2:]
term_name = "Spring " + year
term_start = "01/02/" + year
term_end = "06/30/" + year
else:
term_id = "f" + year[-2:]
term_name = "Fall " + year
term_start = "07/01/" + year
term_end = "12/23/" + year
terms_table = Table("terms", submitty_metadata, autoload=True)
submitty_conn.execute(terms_table.insert(),
term_id = term_id,
name = term_name,
start_date = term_start,
end_date = term_end)
submitty_conn.close()
for course_id in sorted(courses.keys()):
course = courses[course_id]
total_students = course.registered_students + course.no_registration_students + \
course.no_rotating_students + course.unregistered_students
students = extra_students[:total_students]
key = 0
for i in range(course.registered_students):
reg_section = (i % course.registration_sections) + 1
rot_section = (i % course.rotating_sections) + 1
students[key].courses[course.code] = {"registration_section": reg_section, "rotating_section": rot_section}
course.users.append(students[key])
key += 1
for i in range(course.no_rotating_students):
reg_section = (i % course.registration_sections) + 1
students[key].courses[course.code] = {"registration_section": reg_section, "rotating_section": None}
course.users.append(students[key])
key += 1
for i in range(course.no_registration_students):
rot_section = (i % course.rotating_sections) + 1
students[key].courses[course.code] = {"registration_section": None, "rotating_section": rot_section}
course.users.append(students[key])
key += 1
for i in range(course.unregistered_students):
students[key].courses[course.code] = {"registration_section": None, "rotating_section": None}
course.users.append(students[key])
key += 1
course.users.sort(key=lambda x: x.id)
for course in sorted(courses.keys()):
courses[course].instructor = users[courses[course].instructor]
courses[course].check_rotating(users)
courses[course].create()
if courses[course].make_customization:
courses[course].make_course_json()
# restart the autograding daemon
print("restarting the autograding and jobs handler daemons")
os.system("systemctl restart submitty_autograding_shipper")
os.system("systemctl restart submitty_autograding_worker")
os.system("systemctl restart submitty_daemon_jobs_handler")
os.system("systemctl restart submitty_websocket_server")
if not NO_GRADING:
# queue up all of the newly created submissions to grade!
os.system("/usr/local/submitty/bin/regrade.py --no_input /var/local/submitty/courses/")
def get_random_text_from_file(filename):
line = ""
with open(os.path.join(SETUP_DATA_PATH, 'random', filename)) as comment:
line = next(comment)
for num, aline in enumerate(comment):
if random.randrange(num + 2):
continue
line = aline
return line.strip()
def generate_random_user_id(length=15):
return ''.join(random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for _ in range(length))
def generate_random_ta_comment():
return get_random_text_from_file('TAComment.txt')
def generate_random_ta_note():
return get_random_text_from_file('TANote.txt')
def generate_random_student_note():
return get_random_text_from_file('StudentNote.txt')
def generate_random_marks(default_value, max_value):
with open(os.path.join(SETUP_DATA_PATH, 'random', 'marks.yml')) as f:
marks_yml = yaml.load(f)
if default_value == max_value and default_value > 0:
key = 'count_down'
else:
key = 'count_up'
marks = []
mark_list = random.choice(marks_yml[key])
for i in range(len(mark_list)):
marks.append(Mark(mark_list[i], i))
return marks
def generate_versions_to_submit(num=3, original_value=3):
if num == 1:
return original_value
if random.random() < 0.3:
return generate_versions_to_submit(num-1, original_value)
else:
return original_value-(num-1)
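# Descriptive note (derived from the recursion above, not in the original
# source): each level recurses with probability 0.3 and otherwise returns
# original_value - (num - 1). With the defaults num=3, original_value=3 the
# returned version is therefore 1 with probability 0.70, 2 with probability
# 0.21 (0.3 * 0.7), and 3 with probability 0.09 (0.3 * 0.3).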
def generate_probability_space(probability_dict, default=0):
"""
This function takes in a dictionary whose key is the probability (decimal less than 1),
and the value is the outcome (whatever the outcome is).
"""
probability_counter = 0
target_random = random.random()
prev_random_counter = 0
for key in sorted(probability_dict.keys()):
value = probability_dict[key]
probability_counter += key
if probability_counter >= target_random and target_random > prev_random_counter:
return value
prev_random_counter = probability_counter
return default
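# Hedged usage sketch (outcome labels are made up, not from the original source):
# each key is the probability mass of its outcome; the keys are walked in sorted
# order and accumulated until the running total reaches a uniform random draw.
#
#   outcome = generate_probability_space({0.7: "none", 0.2: "one", 0.1: "two"})
#   # ~70% of calls return "none", ~20% "one", ~10% "two"; the default argument
#   # is returned only if the keys sum to less than 1 and the draw falls beyond it.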
def generate_random_users(total, real_users):
"""
:param total:
:param real_users:
:return:
:rtype: list[User]
"""
with open(os.path.join(SETUP_DATA_PATH, 'random', 'lastNames.txt')) as last_file, \
open(os.path.join(SETUP_DATA_PATH, 'random', 'maleFirstNames.txt')) as male_file, \
open(os.path.join(SETUP_DATA_PATH, 'random', 'womenFirstNames.txt')) as woman_file:
last_names = last_file.read().strip().split()
male_names = male_file.read().strip().split()
women_names = woman_file.read().strip().split()
users = []
user_ids = []
anon_ids = []
with open(os.path.join(SETUP_DATA_PATH, "random_users.txt"), "w") as random_users_file:
for i in range(total):
if random.random() < 0.5:
first_name = random.choice(male_names)
else:
first_name = random.choice(women_names)
last_name = random.choice(last_names)
user_id = last_name.replace("'", "")[:5] + first_name[0]
user_id = user_id.lower()
anon_id = generate_random_user_id(15)
# create a binary string for the numeric ID
numeric_id = '{0:09b}'.format(i)
while user_id in user_ids or user_id in real_users:
if user_id[-1].isdigit():
user_id = user_id[:-1] + str(int(user_id[-1]) + 1)
else:
user_id = user_id + "1"
if anon_id in anon_ids:
anon_id = generate_random_user_id()
new_user = User({"user_id": user_id,
"user_numeric_id": numeric_id,
"anon_id": anon_id,
"user_firstname": first_name,
"user_lastname": last_name,
"user_group": 4,
"courses": dict()})
new_user.create()
user_ids.append(user_id)
users.append(new_user)
anon_ids.append(anon_id)
random_users_file.write(user_id + "\n")
return users
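# Descriptive note (example name is made up, not in the original source): user
# ids are built from the first five letters of the last name (apostrophes
# stripped) plus the first initial, lowercased, e.g. "Smith"/"Alice" -> "smitha";
# on a collision a trailing digit is appended or incremented ("smitha1", "smitha2", ...).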
def load_data_json(file_name):
"""
Loads json file from the .setup/data directory returning the parsed structure
:param file_name: name of file to load
:return: parsed JSON structure from loaded file
"""
file_path = os.path.join(SETUP_DATA_PATH, file_name)
if not os.path.isfile(file_path):
raise IOError("Missing the json file .setup/data/{}".format(file_name))
with open(file_path) as open_file:
json_file = json.load(open_file)
return json_file
def load_data_yaml(file_path):
"""
Loads yaml file from the .setup/data directory returning the parsed structure
:param file_path: name of file to load
:return: parsed YAML structure from loaded file
"""
if not os.path.isfile(file_path):
raise IOError("Missing the yaml file {}".format(file_path))
with open(file_path) as open_file:
yaml_file = yaml.load(open_file)
return yaml_file
def user_exists(user):
"""
Checks to see if the user exists on the linux file system. We can use this to delete a user
so that we can recreate them which avoids users having left over data from a previous run of
setting up the sample courses.
:param user: string to check if user exists
:return: boolean on if user exists or not
"""
try:
pwd.getpwnam(user)
return True
except KeyError:
return False
def group_exists(group):
"""
Checks to see if the group exists on the linux file system so that we don't try to create
groups that already exist.
:param group: string to check if group exists
:return: boolean on if group exists or not
"""
try:
grp.getgrnam(group)
return True
except KeyError:
return False
def create_group(group):
"""
Creates the group on the system, adding some base users to the group as well that are necessary
for the system to function and are not defined within the users.yml file.
:param group: name of the group to create
"""
if not group_exists(group):
os.system("groupadd {}".format(group))
if group == "sudo":
return
def add_to_group(group, user_id):
"""
Adds the user to the specified group, creating the group if it does not exist.
:param group:
:param user_id:
"""
create_group(group)
os.system("usermod -a -G {} {}".format(group, user_id))
def get_php_db_password(password):
"""
Generates a password hash to be used within the site for database authentication. The password_hash
function (http://php.net/manual/en/function.password-hash.php) generates a nice secure
hash for us and takes care of things like salting.
:param password:
:return: password hash to be inserted into the DB for a user
"""
proc = subprocess.Popen(
["php", "-r", "print(password_hash('{}', PASSWORD_DEFAULT));".format(password)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
return out.decode('utf-8')
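# Hedged note (not part of the original source): shelling out to PHP keeps the
# stored hash compatible with what the PHP site produces and verifies.
#
#   hashed = get_php_db_password("instructor")
#   # yields something of the form "$2y$10$..." (bcrypt via PASSWORD_DEFAULT),
#   # which the site can later check with PHP's password_verify().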
def get_current_semester():
"""
Given today's date, generates a three character code that represents the semester to use for
courses such that the first half of the year is considered "Spring" and the last half is
considered "Fall". The "Spring" semester gets an S as the first letter while "Fall" gets an
F. The next two characters are the last two digits in the current year.
:return:
"""
today = datetime.today()
semester = "f" + str(today.year)[-2:]
if today.month < 7:
semester = "s" + str(today.year)[-2:]
return semester
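# Hedged worked example (dates are made up): if today were 2024-03-15 this
# returns "s24" (first half of the year, i.e. Spring); on 2024-09-01 it
# returns "f24" (Fall).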
def parse_args():
"""
Parses out the arguments that might be passed to this script as it's run as a commandline
application.
:return: parsed args from the argparse module
"""
parser = argparse.ArgumentParser(
description="Sets up the sample courses as well as creating the necessary users for the "
"course as needed. It reads in the courses.json and users.json files from the "
".setup/data directory to determine what courses/users are allowed and then "
"either adds all or just a few depending on what gets passed to this script")
parser.add_argument("--db_only", action='store_true', default=False)
parser.add_argument("--no_submissions", action='store_true', default=False)
parser.add_argument("--no_grading", action='store_true', default=False)
parser.add_argument("--users_path", default=os.path.join(SETUP_DATA_PATH, "users"),
help="Path to folder that contains .yml files to use for user creation. Defaults to "
"../data/users")
parser.add_argument("--submission_url", type=str, default="",help="top level url for the website")
parser.add_argument("--courses_path", default=os.path.join(SETUP_DATA_PATH, "courses"),
help="Path to the folder that contains .yml files to use for course creation. Defaults to "
"../data/courses")
parser.add_argument("course", nargs="*",
help="course code to build. If no courses are passed in, then it'll use "
"all courses in courses.json")
return parser.parse_args()
def create_user(user_id):
if not user_exists(user_id):
print("Creating user {}...".format(user_id))
os.system("useradd --home /tmp -c \'AUTH ONLY account\' "
"-M --shell /bin/false {}".format(user_id))
print("Setting password for user {}...".format(user_id))
os.system("echo {}:{} | chpasswd".format(user_id, user_id))
def create_gradeable_submission(src, dst):
"""
Given a source and a destination, copy the files from the source to the destination. First, before
copying, we check if the source is a directory; if it is, we zip its contents to a temporary
zip file (stored in /tmp) and use the path of this newly created zip as our new source.
Then (for all uploads) we check whether our source is a zip (by just checking if the file extension
is .zip): if so, we extract the contents of the source (using shutil) into the destination, else we
just do a simple copy of the source file to the destination location.
Finally, if we created a zip file as part of that first step, we remove it from the /tmp directory.
:param src: path of the file or directory we want to use for this submission
:type src: str
:param dst: path to the folder where we should copy the submission to
:type dst: str
"""
zip_dst = None
if os.path.isdir(src):
zip_dst = os.path.join("/tmp", str(uuid.uuid4()))
zip_dst = shutil.make_archive(zip_dst, 'zip', src)
src = zip_dst
if src[-3:] == "zip":
shutil.unpack_archive(src, dst)
else:
shutil.copy(src, dst)
if zip_dst is not None and isinstance(zip_dst, str):
os.remove(zip_dst)
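# Hedged usage sketch (paths are placeholders, not from the original source):
#
#   create_gradeable_submission(
#       "path/to/solution_dir",
#       "/var/local/submitty/courses/<semester>/<course>/submissions/<gradeable>/<user>/1")
#
# A directory source is zipped to /tmp and unpacked into the destination; a
# plain file is simply copied; any temporary zip is removed afterwards.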
class User(object):
"""
A basic object to contain the objects loaded from the users.json file. We use this to link
against the courses.
Attributes:
id
numeric_id
anon_id
password
firstname
lastname
email
group
preferred_firstname
preferred_lastname
access_level
registration_section
rotating_section
unix_groups
courses
"""
def __init__(self, user):
self.id = user['user_id']
self.numeric_id = user['user_numeric_id']
self.anon_id = user['anon_id']
self.password = self.id
self.firstname = user['user_firstname']
self.lastname = user['user_lastname']
self.email = self.id + "@example.com"
self.group = 4
self.preferred_firstname = None
self.preferred_lastname = None
self.access_level = 3
self.registration_section = None
self.rotating_section = None
self.grading_registration_section = None
self.unix_groups = None
self.courses = None
self.manual = False
self.sudo = False
if 'user_preferred_firstname' in user:
self.preferred_firstname = user['user_preferred_firstname']
if 'user_preferred_lastname' in user:
self.preferred_lastname = user['user_preferred_lastname']
if 'user_email' in user:
self.email = user['user_email']
if 'user_group' in user:
self.group = user['user_group']
if self.group < 1 or 4 < self.group:
raise SystemExit("ASSERT: user {}, user_group is not between 1 - 4. Check YML file.".format(self.id))
if 'user_access_level' in user:
self.access_level = user['user_access_level']
if self.access_level < 1 or 3 < self.access_level:
raise SystemExit("ASSERT: user {}, user_access_level is not between 1 - 3. Check YML file.".format(self.id))
if 'registration_section' in user:
self.registration_section = int(user['registration_section'])
if 'rotating_section' in user:
self.rotating_section = int(user['rotating_section'])
if 'grading_registration_section' in user:
self.grading_registration_section = user['grading_registration_section']
if 'unix_groups' in user:
self.unix_groups = user['unix_groups']
if 'manual_registration' in user:
self.manual = user['manual_registration'] is True
if 'courses' in user:
self.courses = {}
if isinstance(user['courses'], list):
for course in user['courses']:
self.courses[course] = {"user_group": self.group}
elif isinstance(user['courses'], dict):
self.courses = user['courses']
for course in self.courses:
if 'user_group' not in self.courses[course]:
self.courses[course]['user_group'] = self.group
else:
raise ValueError("Invalid type for courses key, it should either be list or dict")
if 'sudo' in user:
self.sudo = user['sudo'] is True
if 'user_password' in user:
self.password = user['user_password']
def create(self, force_ssh=False):
if not DB_ONLY:
if self.group > 2 and not force_ssh:
self._create_non_ssh()
else:
self._create_ssh()
if self.group <= 1:
add_to_group("submitty_course_builders", self.id)
if self.sudo:
add_to_group("sudo", self.id)
def _create_ssh(self):
if not user_exists(self.id):
print("Creating user {}...".format(self.id))
os.system("useradd -m -c 'First Last,RoomNumber,WorkPhone,HomePhone' {}".format(self.id))
self.set_password()
def _create_non_ssh(self):
if not DB_ONLY and not user_exists(self.id):
print("Creating user {}...".format(self.id))
os.system("useradd --home /tmp -c \'AUTH ONLY account\' "
"-M --shell /bin/false {}".format(self.id))
self.set_password()
def set_password(self):
print("Setting password for user {}...".format(self.id))
os.system("echo {}:{} | chpasswd".format(self.id, self.password))
def get_detail(self, course, detail):
if self.courses is not None and course in self.courses:
user_detail = "user_" + detail
if user_detail in self.courses[course]:
return self.courses[course][user_detail]
elif detail in self.courses[course]:
return self.courses[course][detail]
if detail in self.__dict__:
return self.__dict__[detail]
else:
return None
class Course(object):
"""
    Object representing a course loaded from the courses.json file, along with the list of
    User objects that are needed for this particular course.
Attributes:
code
semester
instructor
gradeables
users
        registration_sections
        rotating_sections
        registered_students
        no_registration_students
        no_rotating_students
        unregistered_students
        make_customization
"""
def __init__(self, course):
self.semester = get_current_semester()
self.code = course['code']
self.instructor = course['instructor']
self.gradeables = []
self.make_customization = False
ids = []
if 'gradeables' in course:
for gradeable in course['gradeables']:
self.gradeables.append(Gradeable(gradeable))
assert self.gradeables[-1].id not in ids
ids.append(self.gradeables[-1].id)
self.users = []
self.registration_sections = 10
self.rotating_sections = 5
self.registered_students = 50
self.no_registration_students = 10
self.no_rotating_students = 10
self.unregistered_students = 10
if 'registration_sections' in course:
self.registration_sections = course['registration_sections']
if 'rotating_sections' in course:
self.rotating_sections = course['rotating_sections']
if 'registered_students' in course:
self.registered_students = course['registered_students']
if 'no_registration_students' in course:
self.no_registration_students = course['no_registration_students']
if 'no_rotating_students' in course:
self.no_rotating_students = course['no_rotating_students']
if 'unregistered_students' in course:
self.unregistered_students = course['unregistered_students']
if 'make_customization' in course:
self.make_customization = course['make_customization']
def create(self):
# Sort users and gradeables in the name of determinism
self.users.sort(key=lambda x: x.get_detail(self.code, "id"))
self.gradeables.sort(key=lambda x: x.id)
self.course_path = os.path.join(SUBMITTY_DATA_DIR, "courses", self.semester, self.code)
# To make Rainbow Grades testing possible, need to seed random
m = hashlib.md5()
m.update(bytes(self.code, 'utf-8'))
random.seed(int(m.hexdigest(), 16))
course_group = self.code + "_tas_www"
archive_group = self.code + "_archive"
create_group(self.code)
create_group(course_group)
create_group(archive_group)
add_to_group(self.code, self.instructor.id)
add_to_group(course_group, self.instructor.id)
add_to_group(archive_group, self.instructor.id)
add_to_group("submitty_course_builders", self.instructor.id)
add_to_group(course_group, "submitty_php")
add_to_group(course_group, "submitty_daemon")
add_to_group(course_group, "submitty_cgi")
os.system("{}/sbin/create_course.sh {} {} {} {}"
.format(SUBMITTY_INSTALL_DIR, self.semester, self.code, self.instructor.id,
course_group))
os.environ['PGPASSWORD'] = DB_PASS
database = "submitty_" + self.semester + "_" + self.code
print("Database created, now populating ", end="")
submitty_engine = create_engine("postgresql://{}:{}@{}/submitty".format(DB_USER, DB_PASS, DB_HOST))
submitty_conn = submitty_engine.connect()
submitty_metadata = MetaData(bind=submitty_engine)
print("(Master DB connection made, metadata bound)...")
engine = create_engine("postgresql://{}:{}@{}/{}".format(DB_USER, DB_PASS, DB_HOST, database))
self.conn = engine.connect()
self.metadata = MetaData(bind=engine)
print("(Course DB connection made, metadata bound)...")
print("Creating registration sections ", end="")
table = Table("courses_registration_sections", submitty_metadata, autoload=True)
print("(tables loaded)...")
for section in range(1, self.registration_sections+1):
print("Create section {}".format(section))
submitty_conn.execute(table.insert(), semester=self.semester, course=self.code, registration_section_id=str(section))
print("Creating rotating sections ", end="")
table = Table("sections_rotating", self.metadata, autoload=True)
print("(tables loaded)...")
for section in range(1, self.rotating_sections+1):
print("Create section {}".format(section))
self.conn.execute(table.insert(), sections_rotating_id=section)
print("Create users ", end="")
submitty_users = Table("courses_users", submitty_metadata, autoload=True)
users_table = Table("users", self.metadata, autoload=True)
reg_table = Table("grading_registration", self.metadata, autoload=True)
print("(tables loaded)...")
for user in self.users:
print("Creating user {} {} ({})...".format(user.get_detail(self.code, "firstname"),
user.get_detail(self.code, "lastname"),
user.get_detail(self.code, "id")))
reg_section = user.get_detail(self.code, "registration_section")
if reg_section is not None and reg_section > self.registration_sections:
reg_section = None
rot_section = user.get_detail(self.code, "rotating_section")
if rot_section is not None and rot_section > self.rotating_sections:
rot_section = None
if reg_section is not None:
reg_section=str(reg_section)
            # We already have a row in submitty.users for this user,
            # just need to add a row in courses_users, which will put a
            # row in the course-specific DB, and off we go.
submitty_conn.execute(submitty_users.insert(),
semester=self.semester,
course=self.code,
user_id=user.get_detail(self.code, "id"),
user_group=user.get_detail(self.code, "group"),
registration_section=reg_section,
manual_registration=user.get_detail(self.code, "manual"))
update = users_table.update(values={
users_table.c.rotating_section: bindparam('rotating_section'),
users_table.c.anon_id: bindparam('anon_id')
}).where(users_table.c.user_id == bindparam('b_user_id'))
self.conn.execute(update, rotating_section=rot_section, anon_id=user.anon_id, b_user_id=user.id)
if user.get_detail(self.code, "grading_registration_section") is not None:
try:
grading_registration_sections = str(user.get_detail(self.code,"grading_registration_section"))
grading_registration_sections = [int(x) for x in grading_registration_sections.split(",")]
except ValueError:
grading_registration_sections = []
for grading_registration_section in grading_registration_sections:
self.conn.execute(reg_table.insert(),
user_id=user.get_detail(self.code, "id"),
sections_registration_id=str(grading_registration_section))
if user.unix_groups is None:
if user.get_detail(self.code, "group") <= 1:
add_to_group(self.code, user.id)
add_to_group(self.code + "_archive", user.id)
if user.get_detail(self.code, "group") <= 2:
add_to_group(self.code + "_tas_www", user.id)
gradeable_table = Table("gradeable", self.metadata, autoload=True)
electronic_table = Table("electronic_gradeable", self.metadata, autoload=True)
peer_assign = Table("peer_assign", self.metadata, autoload=True)
reg_table = Table("grading_rotating", self.metadata, autoload=True)
component_table = Table('gradeable_component', self.metadata, autoload=True)
mark_table = Table('gradeable_component_mark', self.metadata, autoload=True)
gradeable_data = Table("gradeable_data", self.metadata, autoload=True)
gradeable_component_data = Table("gradeable_component_data", self.metadata, autoload=True)
gradeable_component_mark_data = Table('gradeable_component_mark_data', self.metadata, autoload=True)
gradeable_data_overall_comment = Table('gradeable_data_overall_comment', self.metadata, autoload=True)
electronic_gradeable_data = Table("electronic_gradeable_data", self.metadata, autoload=True)
electronic_gradeable_version = Table("electronic_gradeable_version", self.metadata, autoload=True)
for gradeable in self.gradeables:
gradeable.create(self.conn, gradeable_table, electronic_table, peer_assign, reg_table, component_table, mark_table)
form = os.path.join(self.course_path, "config", "form", "form_{}.json".format(gradeable.id))
with open(form, "w") as open_file:
json.dump(gradeable.create_form(), open_file, indent=2)
os.system("chown -f submitty_php:{}_tas_www {}".format(self.code, os.path.join(self.course_path, "config", "form", "*")))
if not os.path.isfile(os.path.join(self.course_path, "ASSIGNMENTS.txt")):
os.system("touch {}".format(os.path.join(self.course_path, "ASSIGNMENTS.txt")))
os.system("chown {}:{}_tas_www {}".format(self.instructor.id, self.code,
os.path.join(self.course_path, "ASSIGNMENTS.txt")))
os.system("chmod -R g+w {}".format(self.course_path))
os.system("su {} -c '{}'".format("submitty_daemon", os.path.join(self.course_path,
"BUILD_{}.sh".format(self.code))))
#os.system("su {} -c '{}'".format(self.instructor.id, os.path.join(self.course_path,
# "BUILD_{}.sh".format(self.code))))
os.system("chown -R {}:{}_tas_www {}".format(self.instructor.id, self.code, os.path.join(self.course_path, "build")))
os.system("chown -R {}:{}_tas_www {}".format(self.instructor.id, self.code,
os.path.join(self.course_path, "test_*")))
        os.makedirs(os.path.join(self.course_path, "submissions"), exist_ok=True)
os.system('chown submitty_php:{}_tas_www {}'.format(self.code, os.path.join(self.course_path, 'submissions')))
for gradeable in self.gradeables:
# create_teams
if gradeable.team_assignment is True:
json_team_history = self.make_sample_teams(gradeable)
if gradeable.type == 0 and \
(len(gradeable.submissions) == 0 or
gradeable.sample_path is None or
gradeable.config_path is None):
# Make sure the electronic gradeable is valid
continue
# creating the folder containing all the submissions
gradeable_path = os.path.join(self.course_path, "submissions", gradeable.id)
submission_count = 0
max_submissions = gradeable.max_random_submissions
max_individual_submissions = gradeable.max_individual_submissions
            # pick one section to leave ungraded when the gradeable is not electronic (used further below)
ungraded_section = random.randint(1, max(1, self.registration_sections if gradeable.grade_by_registration else self.rotating_sections))
            # This for loop adds submissions for users and teams (if applicable)
if not NO_SUBMISSIONS:
only_submit_plagiarized_users = gradeable.lichen_sample_path is not None and len(gradeable.plagiarized_user) > 0
for user in self.users:
if only_submit_plagiarized_users and user.id not in gradeable.plagiarized_user:
continue
submitted = False
team_id = None
if gradeable.team_assignment is True:
# If gradeable is team assignment, then make sure to make a team_id and don't over submit
res = self.conn.execute("SELECT teams.team_id FROM teams INNER JOIN gradeable_teams\
ON teams.team_id = gradeable_teams.team_id where user_id='{}' and g_id='{}'".format(user.id, gradeable.id))
temp = res.fetchall()
if len(temp) != 0:
team_id = temp[0][0]
previous_submission = select([electronic_gradeable_version]).where(
electronic_gradeable_version.c['team_id'] == team_id)
res = self.conn.execute(previous_submission)
if res.rowcount > 0:
continue
submission_path = os.path.join(gradeable_path, team_id)
else:
continue
res.close()
else:
submission_path = os.path.join(gradeable_path, user.id)
if gradeable.type == 0 and gradeable.submission_open_date < NOW:
if user.id in gradeable.plagiarized_user:
                            # If the user is a plagiarized_user (a dishonest student), the version to submit is
                            # the number of submissions listed for them in users.yml from the lichen_submissions folder.
versions_to_submit = len(gradeable.plagiarized_user[user.id])
elif gradeable.lichen_sample_path is not None:
# if we have set a plagiarism configuration but no manually-specified submissions, submit the default number
versions_to_submit = gradeable.plagiarism_versions_per_user
else:
versions_to_submit = generate_versions_to_submit(max_individual_submissions, max_individual_submissions)
if ((gradeable.gradeable_config is not None
and (gradeable.has_due_date is True and (gradeable.submission_due_date < NOW or random.random() < 0.5))
and (random.random() < 0.9) and (max_submissions is None or submission_count < max_submissions))
or (gradeable.gradeable_config is not None and user.id in gradeable.plagiarized_user)):
# only create these directories if we're actually going to put something in them
if not os.path.exists(gradeable_path):
os.makedirs(gradeable_path)
os.system("chown -R submitty_php:{}_tas_www {}".format(self.code, gradeable_path))
if not os.path.exists(submission_path):
os.makedirs(submission_path)
# Reduce the probability to get a cancelled submission (active_version = 0)
                            # This is done by making the other possibilities three times more likely
version_population = []
for version in range(1, versions_to_submit+1):
version_population.append((version, 3))
# disallow cancelled submission if this is a manually-specified user
if user.id not in gradeable.plagiarized_user:
version_population = [(0, 1)] + version_population
version_population = [ver for ver, freq in version_population for i in range(freq)]
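                            # e.g. with versions_to_submit = 2 and cancellation allowed, the expanded weighted
                            # population is [0, 1, 1, 1, 2, 2, 2], so a cancelled submission is picked 1 time in 7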
active_version = random.choice(version_population)
if team_id is not None:
json_history = {"active_version": active_version, "history": [], "team_history": []}
else:
json_history = {"active_version": active_version, "history": []}
random_days = 1
if random.random() < 0.3:
random_days = random.choice(range(-3, 2))
for version in range(1, versions_to_submit+1):
os.system("mkdir -p " + os.path.join(submission_path, str(version)))
submitted = True
submission_count += 1
current_time_string = dateutils.write_submitty_date(gradeable.submission_due_date - timedelta(days=random_days+version/versions_to_submit))
if team_id is not None:
self.conn.execute(electronic_gradeable_data.insert(), g_id=gradeable.id, user_id=None,
team_id=team_id, g_version=version, submission_time=current_time_string)
if version == versions_to_submit:
self.conn.execute(electronic_gradeable_version.insert(), g_id=gradeable.id, user_id=None,
team_id=team_id, active_version=active_version)
json_history["team_history"] = json_team_history[team_id]
else:
self.conn.execute(electronic_gradeable_data.insert(), g_id=gradeable.id, user_id=user.id,
g_version=version, submission_time=current_time_string)
if version == versions_to_submit:
self.conn.execute(electronic_gradeable_version.insert(), g_id=gradeable.id, user_id=user.id,
active_version=active_version)
json_history["history"].append({"version": version, "time": current_time_string, "who": user.id, "type": "upload"})
with open(os.path.join(submission_path, str(version), ".submit.timestamp"), "w") as open_file:
open_file.write(current_time_string + "\n")
if user.id in gradeable.plagiarized_user:
                                    # If the user has entries in the plagiarized folder, then only add those submissions
src = os.path.join(gradeable.lichen_sample_path, gradeable.plagiarized_user[user.id][version-1])
dst = os.path.join(submission_path, str(version))
# pdb.set_trace()
create_gradeable_submission(src, dst)
elif gradeable.lichen_sample_path is not None:
if len(gradeable.plagiarism_submissions) > 0: # check to make sure we haven't run out of data
                                        # if no plagiarized users were specified but we have plagiarism submissions, grab a random submission
src = os.path.join(gradeable.lichen_sample_path, gradeable.plagiarism_submissions.pop())
dst = os.path.join(submission_path, str(version))
create_gradeable_submission(src, dst)
else:
if isinstance(gradeable.submissions, dict):
for key in sorted(gradeable.submissions.keys()):
os.system("mkdir -p " + os.path.join(submission_path, str(version), key))
submission = random.choice(gradeable.submissions[key])
src = os.path.join(gradeable.sample_path, submission)
dst = os.path.join(submission_path, str(version), key)
create_gradeable_submission(src, dst)
else:
submission = random.choice(gradeable.submissions)
if isinstance(submission, list):
submissions = submission
else:
submissions = [submission]
for submission in submissions:
src = os.path.join(gradeable.sample_path, submission)
dst = os.path.join(submission_path, str(version))
create_gradeable_submission(src, dst)
random_days -= 0.5
with open(os.path.join(submission_path, "user_assignment_settings.json"), "w") as open_file:
json.dump(json_history, open_file)
if gradeable.grade_start_date < NOW and os.path.exists(os.path.join(submission_path, str(versions_to_submit))):
if (gradeable.has_release_date is True and gradeable.grade_released_date < NOW) or (random.random() < 0.5 and (submitted or gradeable.type !=0)):
status = 1 if gradeable.type != 0 or submitted else 0
print("Inserting {} for {}...".format(gradeable.id, user.id))
# gd_overall_comment no longer does anything, and will be removed in a future update.
values = {'g_id': gradeable.id, 'gd_overall_comment' : ''}
overall_comment_values = {'g_id' : gradeable.id, 'goc_overall_comment': 'lorem ipsum lodar', 'goc_grader_id' : self.instructor.id}
if gradeable.team_assignment is True:
values['gd_team_id'] = team_id
overall_comment_values['goc_team_id'] = team_id
else:
values['gd_user_id'] = user.id
overall_comment_values['goc_user_id'] = user.id
if gradeable.grade_released_date < NOW and random.random() < 0.5:
values['gd_user_viewed_date'] = NOW.strftime('%Y-%m-%d %H:%M:%S%z')
ins = gradeable_data.insert().values(**values)
res = self.conn.execute(ins)
gd_id = res.inserted_primary_key[0]
if gradeable.type != 0 or gradeable.use_ta_grading:
skip_grading = random.random()
if skip_grading > 0.3 and random.random() > 0.01:
ins = gradeable_data_overall_comment.insert().values(**overall_comment_values)
res = self.conn.execute(ins)
for component in gradeable.components:
if random.random() < 0.01 and skip_grading < 0.3:
# This is used to simulate unfinished grading.
break
if status == 0 or random.random() < 0.4:
score = 0
else:
max_value_score = random.randint(component.lower_clamp * 2, component.max_value * 2) / 2
uppser_clamp_score = random.randint(component.lower_clamp * 2, component.upper_clamp * 2) / 2
score = generate_probability_space({0.7: max_value_score, 0.2: uppser_clamp_score, 0.08: -max_value_score, 0.02: -99999})
grade_time = gradeable.grade_start_date.strftime("%Y-%m-%d %H:%M:%S%z")
self.conn.execute(gradeable_component_data.insert(), gc_id=component.key, gd_id=gd_id,
gcd_score=score, gcd_component_comment=generate_random_ta_comment(),
gcd_grader_id=self.instructor.id, gcd_grade_time=grade_time, gcd_graded_version=versions_to_submit)
first = True
first_set = False
for mark in component.marks:
if (random.random() < 0.5 and first_set == False and first == False) or random.random() < 0.2:
self.conn.execute(gradeable_component_mark_data.insert(), gc_id=component.key, gd_id=gd_id, gcm_id=mark.key, gcd_grader_id=self.instructor.id)
if(first):
first_set = True
first = False
if gradeable.type == 0 and os.path.isdir(submission_path):
os.system("chown -R submitty_php:{}_tas_www {}".format(self.code, submission_path))
if (gradeable.type != 0 and gradeable.grade_start_date < NOW and ((gradeable.has_release_date is True and gradeable.grade_released_date < NOW) or random.random() < 0.5) and
random.random() < 0.9 and (ungraded_section != (user.get_detail(self.code, 'registration_section') if gradeable.grade_by_registration else user.get_detail(self.code, 'rotating_section')))):
res = self.conn.execute(gradeable_data.insert(), g_id=gradeable.id, gd_user_id=user.id, gd_overall_comment='')
gd_id = res.inserted_primary_key[0]
skip_grading = random.random()
for component in gradeable.components:
if random.random() < 0.01 and skip_grading < 0.3:
break
if random.random() < 0.1:
continue
elif gradeable.type == 1:
score = generate_probability_space({0.2: 0, 0.1: 0.5}, 1)
else:
score = random.randint(component.lower_clamp * 2, component.upper_clamp * 2) / 2
grade_time = gradeable.grade_start_date.strftime("%Y-%m-%d %H:%M:%S%z")
self.conn.execute(gradeable_component_data.insert(), gc_id=component.key, gd_id=gd_id,
gcd_score=score, gcd_component_comment="", gcd_grader_id=self.instructor.id, gcd_grade_time=grade_time, gcd_graded_version=-1)
# This segment adds the sample data for features in the sample course only
if self.code == "sample":
self.add_sample_forum_data()
print('Added forum data to sample course.')
self.add_sample_polls_data()
print('Added polls data to sample course.')
self.add_sample_queue_data()
print('Added office hours queue data to sample course.')
self.conn.close()
submitty_conn.close()
os.environ['PGPASSWORD'] = ""
if self.code == 'sample':
student_image_folder = os.path.join(SUBMITTY_DATA_DIR, 'courses', self.semester, self.code, 'uploads', 'student_images')
zip_path = os.path.join(SUBMITTY_REPOSITORY, 'sample_files', 'user_photos', 'CSCI-1300-01.zip')
with TemporaryDirectory() as tmpdir:
shutil.unpack_archive(zip_path, tmpdir)
inner_folder = os.path.join(tmpdir, 'CSCI-1300-01')
for f in os.listdir(inner_folder):
shutil.move(os.path.join(inner_folder, f), os.path.join(student_image_folder, f))
if self.code == 'tutorial':
client = docker.from_env()
client.images.pull('submitty/tutorial:tutorial_18')
client.images.pull('submitty/tutorial:database_client')
def check_rotating(self, users):
for gradeable in self.gradeables:
for grading_rotating in gradeable.grading_rotating:
string = "Invalid user_id {} for rotating section for gradeable {}".format(
grading_rotating['user_id'], gradeable.id)
if grading_rotating['user_id'] not in users:
raise ValueError(string)
def getForumDataFromFile(self, filename):
forum_path = os.path.join(SETUP_DATA_PATH, "forum")
forum_data = []
        for line in open(os.path.join(forum_path, filename)):
            fields = [x.replace("\\n", "\n").strip() for x in line.split("|")]
            if len(line) > 1:
                forum_data.append(fields)
return forum_data
def make_sample_teams(self, gradeable):
"""
        arg: any team gradeable
        Adds sample teams for the given team gradeable to the database.
        return: a dict of team history information, keyed by team id
"""
assert gradeable.team_assignment
json_team_history = {}
gradeable_teams_table = Table("gradeable_teams", self.metadata, autoload=True)
teams_table = Table("teams", self.metadata, autoload=True)
ucounter = self.conn.execute(select([func.count()]).select_from(gradeable_teams_table)).scalar()
for user in self.users:
            # The unique team id is made up of 5 digits, an underscore, and the team creator's user id.
            # example: 00001_aphacker
unique_team_id=str(ucounter).zfill(5)+"_"+user.get_detail(self.code, "id")
reg_section = user.get_detail(self.code, "registration_section")
if reg_section is None:
continue
            # The teams are created based on the order of the users. As soon as the number of teammates
            # exceeds the max team size, a new team is created within the same registration section.
print("Adding team for " + unique_team_id + " in gradeable " + gradeable.id)
# adding json data for team history
teams_registration = select([gradeable_teams_table]).where(
(gradeable_teams_table.c['registration_section'] == str(reg_section)) &
(gradeable_teams_table.c['g_id'] == gradeable.id))
res = self.conn.execute(teams_registration)
added = False
if res.rowcount != 0:
# If the registration has a team already, join it
for team_in_section in res:
members_in_team = select([teams_table]).where(
teams_table.c['team_id'] == team_in_section['team_id'])
res = self.conn.execute(members_in_team)
if res.rowcount < gradeable.max_team_size:
self.conn.execute(teams_table.insert(),
team_id=team_in_section['team_id'],
user_id=user.get_detail(self.code, "id"),
state=1)
json_team_history[team_in_section['team_id']].append({"action": "admin_create",
"time": dateutils.write_submitty_date(gradeable.submission_open_date),
"admin_user": "instructor",
"added_user": user.get_detail(self.code, "id")})
added = True
if not added:
                # if no existing team in this registration section has room (or none exists), make a new team
self.conn.execute(gradeable_teams_table.insert(),
team_id=unique_team_id,
g_id=gradeable.id,
registration_section=str(reg_section),
rotating_section=str(random.randint(1, self.rotating_sections)))
self.conn.execute(teams_table.insert(),
team_id=unique_team_id,
user_id=user.get_detail(self.code, "id"),
state=1)
json_team_history[unique_team_id] = [{"action": "admin_create",
"time": dateutils.write_submitty_date(gradeable.submission_open_date),
"admin_user": "instructor",
"first_user": user.get_detail(self.code, "id")}]
ucounter += 1
res.close()
return json_team_history
def add_sample_forum_data(self):
# set sample course to have forum enabled by default
course_json_file = os.path.join(self.course_path, 'config', 'config.json')
with open(course_json_file, 'r+') as open_file:
course_json = json.load(open_file)
course_json['course_details']['forum_enabled'] = True
open_file.seek(0)
open_file.truncate()
json.dump(course_json, open_file, indent=2)
f_data = (self.getForumDataFromFile('posts.txt'), self.getForumDataFromFile('threads.txt'), self.getForumDataFromFile('categories.txt'))
forum_threads = Table("threads", self.metadata, autoload=True)
forum_posts = Table("posts", self.metadata, autoload=True)
forum_cat_list = Table("categories_list", self.metadata, autoload=True)
forum_thread_cat = Table("thread_categories", self.metadata, autoload=True)
for catData in f_data[2]:
self.conn.execute(forum_cat_list.insert(), category_desc=catData[0], rank=catData[1], color=catData[2])
for thread_id, threadData in enumerate(f_data[1], start = 1):
self.conn.execute(forum_threads.insert(),
title=threadData[0],
created_by=threadData[1],
pinned=True if threadData[2] == "t" else False,
deleted=True if threadData[3] == "t" else False,
merged_thread_id=threadData[4],
merged_post_id=threadData[5],
is_visible=True if threadData[6] == "t" else False)
self.conn.execute(forum_thread_cat.insert(), thread_id=thread_id, category_id=threadData[7])
counter = 1
for postData in f_data[0]:
if(postData[10] != "f" and postData[10] != ""):
                # In posts.txt, if column index 10 is "f" or empty, no attachment is added; any other value is treated as the attachment file name.
attachment_path = os.path.join(self.course_path, "forum_attachments", str(postData[0]), str(counter))
os.makedirs(attachment_path)
os.system("chown -R submitty_php:sample_tas_www {}".format(os.path.join(self.course_path, "forum_attachments", str(postData[0]))))
copyfile(os.path.join(SETUP_DATA_PATH, "forum", "attachments", postData[10]), os.path.join(attachment_path, postData[10]))
counter += 1
self.conn.execute(forum_posts.insert(),
thread_id=postData[0],
parent_id=postData[1],
author_user_id=postData[2],
content=postData[3],
timestamp=postData[4],
anonymous=True if postData[5] == "t" else False,
deleted=True if postData[6] == "t" else False,
endorsed_by=postData[7],
resolved = True if postData[8] == "t" else False,
type=postData[9],
has_attachment=True if postData[10] != "f" else False)
def add_sample_polls_data(self):
# set sample course to have polls enabled by default
course_json_file = os.path.join(self.course_path, 'config', 'config.json')
with open(course_json_file, 'r+') as open_file:
course_json = json.load(open_file)
course_json['course_details']['polls_enabled'] = True
open_file.seek(0)
open_file.truncate()
json.dump(course_json, open_file, indent=2)
# load the sample polls from input file
polls_data_path = os.path.join(SETUP_DATA_PATH, "polls", "polls_data.json")
with open(polls_data_path, 'r') as polls_file:
polls_data = json.load(polls_file)
# set some values that depend on current time
polls_data[0]["image_path"] = self.course_path + polls_data[0]["image_path"]
polls_data[2]["release_date"] = f"{datetime.today().date()}"
# add attached image
image_dir = os.path.dirname(polls_data[0]["image_path"])
if os.path.isdir(image_dir):
shutil.rmtree(image_dir)
os.makedirs(image_dir)
os.system(f"chown -R submitty_php:sample_tas_www {image_dir}")
copyfile(os.path.join(SETUP_DATA_PATH, "polls", "sea_animals.png"), polls_data[0]["image_path"])
# add polls to DB
polls_table = Table("polls", self.metadata, autoload=True)
poll_options_table = Table("poll_options", self.metadata, autoload=True)
poll_responses_table = Table("poll_responses", self.metadata, autoload=True)
for poll in polls_data:
self.conn.execute(polls_table.insert(),
name=poll["name"],
question=poll["question"],
status=poll["status"],
release_date=poll["release_date"],
image_path=poll["image_path"],
question_type=poll["question_type"],
release_histogram=poll["release_histogram"])
for i in range(len(poll["responses"])):
self.conn.execute(poll_options_table.insert(),
option_id=i,
order_id=i,
poll_id=poll["id"],
response=poll["responses"][i],
correct=(i in poll["correct_responses"]))
# generate responses to the polls
poll_responses_data = []
        # poll1: each user submits a random number of responses, anywhere from none up to all of the available options
poll1_response_ids = list(range(len(polls_data[0]['responses'])))
for user in self.users:
random_responses = random.sample(poll1_response_ids, random.randint(0, len(polls_data[0]['responses'])))
for response_id in random_responses:
poll_responses_data.append({
"poll_id": polls_data[0]["id"],
"student_id": user.id,
"option_id": response_id
})
# poll2: take a large portion of self.users and make each submit one random response
for user in self.users:
if random.random() < 0.8:
poll_responses_data.append({
"poll_id": polls_data[1]["id"],
"student_id": user.id,
"option_id": random.randint(0, len(polls_data[1]['responses']) - 1)
})
# add responses to DB
for response in poll_responses_data:
self.conn.execute(poll_responses_table.insert(),
poll_id=response["poll_id"],
student_id=response["student_id"],
option_id=response["option_id"])
def add_sample_queue_data(self):
        # load the sample office hours queue data from the input file
queue_data_path = os.path.join(SETUP_DATA_PATH, "queue", "queue_data.json")
with open(queue_data_path, 'r') as queue_file:
queue_data = json.load(queue_file)
# set sample course to have office hours queue enabled by default
course_json_file = os.path.join(self.course_path, 'config', 'config.json')
with open(course_json_file, 'r+') as open_file:
course_json = json.load(open_file)
course_json['course_details']['queue_enabled'] = True
course_json['course_details']['queue_message'] = queue_data["queue_message"]
course_json['course_details']['queue_announcement_message'] = queue_data["queue_announcement_message"]
open_file.seek(0)
open_file.truncate()
json.dump(course_json, open_file, indent=2)
# generate values that depend on current date and time
# helped for the first time today, done --- LAB queue
queue_data["queue_entries"][0]["time_in"] = datetime.now() - timedelta(minutes=25)
queue_data["queue_entries"][0]["time_out"] = datetime.now() - timedelta(minutes=19)
queue_data["queue_entries"][0]["time_help_start"] = datetime.now() - timedelta(minutes=24)
# helped, done --- LAB queue
queue_data["queue_entries"][1]["time_in"] = datetime.now() - timedelta(minutes=24)
queue_data["queue_entries"][1]["time_out"] = datetime.now() - timedelta(minutes=15)
queue_data["queue_entries"][1]["time_help_start"] = datetime.now() - timedelta(minutes=23)
# removed by self --- LAB queue
queue_data["queue_entries"][2]["time_in"] = datetime.now() - timedelta(minutes=22)
queue_data["queue_entries"][2]["time_out"] = datetime.now() - timedelta(minutes=21)
# being helped --- HW queue
queue_data["queue_entries"][3]["time_in"] = datetime.now() - timedelta(minutes=23)
queue_data["queue_entries"][3]["time_help_start"] = datetime.now() - timedelta(minutes=14)
# waiting for help for second time today --- LAB queue
queue_data["queue_entries"][4]["time_in"] = datetime.now() - timedelta(minutes=21)
queue_data["queue_entries"][4]["last_time_in_queue"] = queue_data["queue_entries"][0]["time_in"]
# paused --- HW queue
queue_data["queue_entries"][5]["time_in"] = datetime.now() - timedelta(minutes=20)
queue_data["queue_entries"][5]["time_paused_start"] = datetime.now() - timedelta(minutes=18)
        # waiting for help for the first time --- HW queue
queue_data["queue_entries"][6]["time_in"] = datetime.now() - timedelta(minutes=15)
# waiting for help for second time this week --- LAB queue
queue_data["queue_entries"][7]["time_in"] = datetime.now() - timedelta(minutes=10)
queue_data["queue_entries"][7]["last_time_in_queue"] = datetime.now() - timedelta(days=1, minutes=30)
queues_table = Table("queue_settings", self.metadata, autoload=True)
queue_entries_table = Table("queue", self.metadata, autoload=True)
# make two sample queues
self.conn.execute(queues_table.insert(),
open=True,
code="Lab Help",
token="lab")
self.conn.execute(queues_table.insert(),
open=True,
code="Homework Debugging",
token="hw_debug")
# add, help, remove, pause, etc. students in the queue
for queue_entry in queue_data["queue_entries"]:
self.conn.execute(queue_entries_table.insert(),
current_state=queue_entry["current_state"],
removal_type=queue_entry["removal_type"],
queue_code=queue_entry["queue_code"],
user_id=queue_entry["user_id"],
name=queue_entry["name"],
time_in=queue_entry["time_in"],
time_out=queue_entry["time_out"],
added_by=queue_entry["added_by"],
help_started_by=queue_entry["help_started_by"],
removed_by=queue_entry["removed_by"],
contact_info=queue_entry["contact_info"],
last_time_in_queue=queue_entry["last_time_in_queue"],
time_help_start=queue_entry["time_help_start"],
paused=queue_entry["paused"],
time_paused=queue_entry["time_paused"],
time_paused_start=queue_entry["time_paused_start"])
def make_course_json(self):
"""
        Generates customization_{course}.json in case it has changed from the version provided in the test suite
        within the Submitty repository. Ideally this function will be pulled out and made independent; better yet, once
        the web interface can build this file, that will become the preferred route and this function can be retired.

        Keeping this function around once the web interface exists would mean one more place that has to be updated
        whenever the expected format of customization.json changes.

        Right now the code uses the Gradeable and Component classes, so to avoid code duplication the function lives
        inside setup_sample_courses.py.
:return:
"""
course_id = self.code
# Reseed to minimize the situations under which customization.json changes
m = hashlib.md5()
m.update(bytes(course_id, "utf-8"))
random.seed(int(m.hexdigest(), 16))
# Would be great if we could install directly to test_suite, but
# currently the test uses "clean" which will blow away test_suite
customization_path = os.path.join(SUBMITTY_INSTALL_DIR, ".setup")
print("Generating customization_{}.json".format(course_id))
gradeables = {}
gradeables_json_output = {}
# Create gradeables by syllabus bucket
for gradeable in self.gradeables:
if gradeable.syllabus_bucket not in gradeables:
gradeables[gradeable.syllabus_bucket] = []
gradeables[gradeable.syllabus_bucket].append(gradeable)
# Randomly generate the impact of each bucket on the overall grade
gradeables_percentages = []
gradeable_percentage_left = 100 - len(gradeables)
for i in range(len(gradeables)):
gradeables_percentages.append(random.randint(1, max(1, gradeable_percentage_left)) + 1)
gradeable_percentage_left -= (gradeables_percentages[-1] - 1)
if gradeable_percentage_left > 0:
gradeables_percentages[-1] += gradeable_percentage_left
# Compute totals and write out each syllabus bucket in the "gradeables" field of customization.json
bucket_no = 0
# for bucket,g_list in gradeables.items():
for bucket in sorted(gradeables.keys()):
g_list = gradeables[bucket]
bucket_json = {"type": bucket, "count": len(g_list), "percent": 0.01*gradeables_percentages[bucket_no],
"ids" : []}
g_list.sort(key=lambda x: x.id)
# Manually total up the non-penalty non-extra-credit max scores, and decide which gradeables are 'released'
for gradeable in g_list:
use_ta_grading = gradeable.use_ta_grading
g_type = gradeable.type
components = gradeable.components
g_id = gradeable.id
max_auto = 0
max_ta = 0
print_grades = True if g_type != 0 or (gradeable.submission_open_date < NOW) else False
release_grades = (gradeable.has_release_date is True) and (gradeable.grade_released_date < NOW)
gradeable_config_dir = os.path.join(SUBMITTY_DATA_DIR, "courses", get_current_semester(), "sample",
"config", "complete_config")
# For electronic gradeables there is a config file - read through to get the total
if os.path.isdir(gradeable_config_dir):
gradeable_config = os.path.join(gradeable_config_dir, "complete_config_" + g_id + ".json")
if os.path.isfile(gradeable_config):
try:
with open(gradeable_config, 'r') as gradeable_config_file:
gradeable_json = json.load(gradeable_config_file)
# Not every config has AUTO_POINTS, so have to parse through test cases
# Add points to max if not extra credit, and points>0 (not penalty)
if "testcases" in gradeable_json:
for test_case in gradeable_json["testcases"]:
if "extra_credit" in test_case:
continue
if "points" in test_case and test_case["points"] > 0:
max_auto += test_case["points"]
except EnvironmentError:
print("Failed to load JSON")
# For non-electronic gradeables, or electronic gradeables with TA grading, read through components
if use_ta_grading or g_type != 0:
for component in components:
if component.max_value >0:
max_ta += component.max_value
                # Add this gradeable's entry to the bucket's "ids" list in the customization output
max_points = max_auto + max_ta
if print_grades:
bucket_json["ids"].append({"id": g_id, "max": max_points})
if not release_grades:
bucket_json["ids"][-1]["released"] = False
            # Append this bucket's entry to the "gradeables" list in the customization output
if "gradeables" not in gradeables_json_output:
gradeables_json_output["gradeables"] = []
gradeables_json_output["gradeables"].append(bucket_json)
bucket_no += 1
# Generate the section labels
section_ta_mapping = {}
for section in range(1, self.registration_sections + 1):
section_ta_mapping[section] = []
for user in self.users:
if user.get_detail(course_id, "grading_registration_section") is not None:
grading_registration_sections = str(user.get_detail(course_id, "grading_registration_section"))
grading_registration_sections = [int(x) for x in grading_registration_sections.split(",")]
for section in grading_registration_sections:
section_ta_mapping[section].append(user.id)
for section in section_ta_mapping:
if len(section_ta_mapping[section]) == 0:
section_ta_mapping[section] = "TBA"
else:
section_ta_mapping[section] = ", ".join(section_ta_mapping[section])
# Construct the rest of the JSON dictionary
benchmarks = ["a-", "b-", "c-", "d"]
gradeables_json_output["display"] = ["instructor_notes", "grade_summary", "grade_details"]
gradeables_json_output["display_benchmark"] = ["average", "stddev", "perfect"]
gradeables_json_output["benchmark_percent"] = {}
for i in range(len(benchmarks)):
gradeables_json_output["display_benchmark"].append("lowest_" + benchmarks[i])
gradeables_json_output["benchmark_percent"]["lowest_" + benchmarks[i]] = 0.9 - (0.1 * i)
gradeables_json_output["section"] = section_ta_mapping
messages = ["<b>{} Course</b>".format(course_id),
"Note: Please be patient with data entry/grade corrections for the most recent "
"lab, homework, and test.",
"Please contact your graduate lab TA if a grade remains missing or incorrect for more than a week."]
gradeables_json_output["messages"] = messages
# Attempt to write the customization.json file
try:
with open(os.path.join(customization_path, "customization_" + course_id + ".json"), 'w') as customization_file:
customization_file.write("/*\n"
"This JSON is based on the automatically generated customization for\n"
"the development course \"{}\" as of {}.\n"
"It is intended as a simple example, with additional documentation online.\n"
"*/\n".format(course_id,NOW.strftime("%Y-%m-%d %H:%M:%S%z")))
json.dump(gradeables_json_output,
open(os.path.join(customization_path, "customization_" + course_id + ".json"), 'a'),indent=2)
except EnvironmentError as e:
print("Failed to write to customization file: {}".format(e))
print("Wrote customization_{}.json".format(course_id))
class Gradeable(object):
"""
Attributes:
config_path
id
type
"""
def __init__(self, gradeable):
self.id = ""
self.gradeable_config = None
self.config_path = None
self.sample_path = None
self.lichen_sample_path = None
self.plagiarized_user = {}
self.title = ""
self.instructions_url = ""
self.overall_ta_instructions = ""
self.peer_grading = False
self.grade_by_registration = True
self.grader_assignment_method = 1
self.is_repository = False
self.subdirectory = ""
self.use_ta_grading = True
self.late_days = 2
self.precision = 0.5
self.syllabus_bucket = "none (for practice only)"
self.min_grading_group = 3
self.grading_rotating = []
self.submissions = []
self.max_random_submissions = None
self.max_individual_submissions = 3
self.team_assignment = False
self.max_team_size = 1
self.has_due_date = True
self.has_release_date = True
self.allow_custom_marks = True
self.plagiarism_submissions = []
self.plagiarism_versions_per_user = 1
if 'gradeable_config' in gradeable:
self.gradeable_config = gradeable['gradeable_config']
self.type = 0
if 'g_id' in gradeable:
self.id = gradeable['g_id']
else:
self.id = gradeable['gradeable_config']
if 'eg_max_random_submissions' in gradeable:
self.max_random_submissions = int(gradeable['eg_max_random_submissions'])
if 'eg_max_individual_submissions' in gradeable:
self.max_individual_submissions = int(gradeable['eg_max_individual_submissions'])
if 'config_path' in gradeable:
self.config_path = gradeable['config_path']
else:
examples_path = os.path.join(MORE_EXAMPLES_DIR, self.gradeable_config, "config")
tutorial_path = os.path.join(TUTORIAL_DIR, self.gradeable_config, "config")
if os.path.isdir(examples_path):
self.config_path = examples_path
elif os.path.isdir(tutorial_path):
self.config_path = tutorial_path
else:
self.config_path = None
examples_path = os.path.join(MORE_EXAMPLES_DIR, self.gradeable_config, "submissions")
tutorial_path = os.path.join(TUTORIAL_DIR, self.gradeable_config, "submissions")
if 'eg_lichen_sample_path' in gradeable:
# pdb.set_trace()
self.lichen_sample_path = gradeable['eg_lichen_sample_path']
if 'eg_plagiarized_users' in gradeable:
for user in gradeable['eg_plagiarized_users']:
temp = user.split(" - ")
self.plagiarized_user[temp[0]] = temp[1:]
else: # if we weren't given a list of plagiarized users, make one
self.plagiarism_submissions = os.listdir(self.lichen_sample_path)
random.shuffle(self.plagiarism_submissions)
if 'eg_plagiarism_versions_per_user' in gradeable:
                    self.plagiarism_versions_per_user = gradeable['eg_plagiarism_versions_per_user']
if 'sample_path' in gradeable:
self.sample_path = gradeable['sample_path']
else:
if os.path.isdir(examples_path):
self.sample_path = examples_path
elif os.path.isdir(tutorial_path):
self.sample_path = tutorial_path
else:
self.sample_path = None
else:
self.id = gradeable['g_id']
self.type = int(gradeable['g_type'])
self.config_path = None
self.sample_path = None
# To make Rainbow Grades testing possible, need to seed random
m = hashlib.md5()
m.update(bytes(self.id, 'utf-8'))
random.seed(int(m.hexdigest(), 16))
if 'g_bucket' in gradeable:
self.syllabus_bucket = gradeable['g_bucket']
assert 0 <= self.type <= 2
if 'g_title' in gradeable:
self.title = gradeable['g_title']
else:
self.title = self.id.replace("_", " ").title()
if 'g_grader_assignment_method' in gradeable:
self.grade_by_registration = gradeable['g_grader_assignment_method'] == 1
self.grader_assignment_method = gradeable['g_grader_assignment_method']
if 'grading_rotating' in gradeable:
self.grading_rotating = gradeable['grading_rotating']
self.ta_view_date = dateutils.parse_datetime(gradeable['g_ta_view_start_date'])
self.grade_start_date = dateutils.parse_datetime(gradeable['g_grade_start_date'])
self.grade_due_date = dateutils.parse_datetime(gradeable['g_grade_due_date'])
self.grade_released_date = dateutils.parse_datetime(gradeable['g_grade_released_date'])
if self.type == 0:
self.submission_open_date = dateutils.parse_datetime(gradeable['eg_submission_open_date'])
self.submission_due_date = dateutils.parse_datetime(gradeable['eg_submission_due_date'])
self.team_lock_date = dateutils.parse_datetime(gradeable['eg_submission_due_date'])
self.grade_inquiry_start_date = dateutils.parse_datetime(gradeable['eg_grade_inquiry_start_date'])
self.grade_inquiry_due_date = dateutils.parse_datetime(gradeable['eg_grade_inquiry_due_date'])
self.student_view = True
self.student_submit = True
if 'eg_is_repository' in gradeable:
self.is_repository = gradeable['eg_is_repository'] is True
if self.is_repository and 'eg_subdirectory' in gradeable:
self.subdirectory = gradeable['eg_subdirectory']
if 'eg_peer_grading' in gradeable:
self.peer_grading = gradeable['eg_peer_grading']
if 'eg_use_ta_grading' in gradeable:
self.use_ta_grading = gradeable['eg_use_ta_grading'] is True
if 'eg_student_view' in gradeable:
self.student_view = gradeable['eg_student_view'] is True
if 'eg_student_submit' in gradeable:
self.student_submit = gradeable['eg_student_submit'] is True
if 'eg_late_days' in gradeable:
self.late_days = max(0, int(gradeable['eg_late_days']))
else:
self.late_days = random.choice(range(0, 3))
if 'eg_precision' in gradeable:
self.precision = float(gradeable['eg_precision'])
if 'eg_team_assignment' in gradeable:
self.team_assignment = gradeable['eg_team_assignment'] is True
if 'eg_max_team_size' in gradeable:
self.max_team_size = gradeable['eg_max_team_size']
if 'eg_team_lock_date' in gradeable:
                self.team_lock_date = dateutils.parse_datetime(gradeable['eg_team_lock_date'])
self.has_due_date = gradeable['eg_has_due_date'] if 'eg_has_due_date' in gradeable else True
self.has_release_date = gradeable['eg_has_release_date'] if 'eg_has_release_date' in gradeable else True
if self.config_path is None:
examples_path = os.path.join(MORE_EXAMPLES_DIR, self.id, "config")
tutorial_path = os.path.join(TUTORIAL_DIR, self.id, "config")
if os.path.isdir(examples_path):
self.config_path = examples_path
elif os.path.isdir(tutorial_path):
self.config_path = tutorial_path
else:
self.config_path = None
assert self.ta_view_date < self.submission_open_date
assert self.has_due_date is False or self.submission_open_date < self.submission_due_date
assert self.has_due_date is False or self.submission_due_date < self.grade_start_date
assert self.has_release_date is False or self.grade_released_date <= self.grade_inquiry_start_date
assert self.grade_inquiry_start_date < self.grade_inquiry_due_date
if self.gradeable_config is not None:
if self.sample_path is not None:
if os.path.isfile(os.path.join(self.sample_path, "submissions.yml")):
self.submissions = load_data_yaml(os.path.join(self.sample_path, "submissions.yml"))
else:
self.submissions = os.listdir(self.sample_path)
self.submissions = list(filter(lambda x: not x.startswith("."), self.submissions))
# Ensure we're not sensitive to directory traversal order
self.submissions.sort()
if isinstance(self.submissions, list):
for elem in self.submissions:
if isinstance(elem, dict):
raise TypeError("Cannot have dictionary inside of list for submissions "
"for {}".format(self.sample_path))
assert self.ta_view_date < self.grade_start_date
assert self.grade_start_date < self.grade_due_date
assert self.has_release_date is False or self.grade_due_date <= self.grade_released_date
self.components = []
for i in range(len(gradeable['components'])):
component = gradeable['components'][i]
if self.type >= 0:
component['gc_ta_comment'] = generate_random_ta_note()
component['gc_student_comment'] = generate_random_student_note()
component['gc_page'] = 0
if self.type == 1:
component['gc_lower_clamp'] = 0
component['gc_default'] = 0
component['gc_max_value'] = 1
component['gc_upper_clamp'] = 1
if self.type != 2:
component['gc_is_text'] = False
i -= 1
self.components.append(Component(component, i+1))
def create(self, conn, gradeable_table, electronic_table, peer_assign, reg_table, component_table, mark_table):
conn.execute(gradeable_table.insert(), g_id=self.id, g_title=self.title,
g_instructions_url=self.instructions_url,
g_overall_ta_instructions=self.overall_ta_instructions,
g_gradeable_type=self.type,
g_grader_assignment_method=self.grader_assignment_method,
g_ta_view_start_date=self.ta_view_date,
g_grade_start_date=self.grade_start_date,
g_grade_due_date=self.grade_due_date,
g_grade_released_date=self.grade_released_date,
g_syllabus_bucket=self.syllabus_bucket,
g_allow_custom_marks=self.allow_custom_marks,
g_min_grading_group=self.min_grading_group,
g_closed_date=None)
for rotate in self.grading_rotating:
conn.execute(reg_table.insert(), g_id=self.id, user_id=rotate['user_id'],
sections_rotating=rotate['section_rotating_id'])
if self.peer_grading is True:
with open(os.path.join(SETUP_DATA_PATH, 'random', 'graders.txt')) as graders, \
open(os.path.join(SETUP_DATA_PATH, 'random', 'students.txt')) as students:
graders = graders.read().strip().split()
students = students.read().strip().split()
length = len(graders)
for i in range(length):
conn.execute(peer_assign.insert(), g_id=self.id, grader_id=graders[i], user_id=students[i])
if self.type == 0:
conn.execute(electronic_table.insert(), g_id=self.id,
eg_submission_open_date=self.submission_open_date,
eg_submission_due_date=self.submission_due_date,
eg_is_repository=self.is_repository, eg_subdirectory=self.subdirectory,
eg_team_assignment=self.team_assignment,
eg_max_team_size=self.max_team_size,
eg_team_lock_date=self.team_lock_date,
eg_use_ta_grading=self.use_ta_grading,
eg_student_view=self.student_view,
eg_student_submit=self.student_submit,
eg_config_path=self.config_path,
eg_late_days=self.late_days, eg_precision=self.precision, eg_peer_grading=self.peer_grading,
eg_grade_inquiry_start_date=self.grade_inquiry_start_date,
eg_grade_inquiry_due_date=self.grade_inquiry_due_date)
for component in self.components:
component.create(self.id, conn, component_table, mark_table)
def create_form(self):
form_json = OrderedDict()
form_json['gradeable_id'] = self.id
if self.type == 0:
form_json['config_path'] = self.config_path
form_json['gradeable_title'] = self.title
form_json['gradeable_type'] = self.get_gradeable_type_text()
form_json['instructions_url'] = self.instructions_url
form_json['ta_view_date'] = dateutils.write_submitty_date(self.ta_view_date)
if self.type == 0:
form_json['date_submit'] = dateutils.write_submitty_date(self.submission_open_date)
form_json['date_due'] = dateutils.write_submitty_date(self.submission_due_date)
form_json['grade_inquiry_start_date'] = dateutils.write_submitty_date(self.grade_inquiry_start_date)
form_json['grade_inquiry_due_date'] = dateutils.write_submitty_date(self.grade_inquiry_due_date)
form_json['date_grade'] = dateutils.write_submitty_date(self.grade_start_date)
form_json['date_grade_due'] = dateutils.write_submitty_date(self.grade_due_date)
form_json['date_released'] = dateutils.write_submitty_date(self.grade_released_date)
if self.type == 0:
form_json['section_type'] = self.get_submission_type()
form_json['eg_late_days'] = self.late_days
form_json['upload_type'] = self.get_upload_type()
form_json['upload_repo'] = self.subdirectory
form_json['comment_title'] = []
form_json['points'] = []
form_json['eg_extra'] = []
form_json['ta_comment'] = []
form_json['student_comment'] = []
for i in range(len(self.components)):
component = self.components[i]
form_json['comment_title'].append(component.title)
# form_json['lower_clamp'].append(component.lower_clamp)
# form_json['default'].append(component.default)
form_json['points'].append(component.max_value)
# form_json['upper_clamp'].append(component.upper_clamp)
form_json['ta_comment'].append(component.ta_comment)
form_json['student_comment'].append(component.student_comment)
elif self.type == 1:
form_json['checkpoint_label'] = []
form_json['checkpoint_extra'] = []
for i in range(len(self.components)):
component = self.components[i]
form_json['checkpoint_label'].append(component.title)
else:
form_json['num_numeric_items'] = 0
form_json['numeric_labels'] = []
form_json['lower_clamp'] = []
form_json['default'] = []
form_json['max_score'] = []
form_json['upper_clamp'] = []
form_json['numeric_extra'] = []
form_json['num_text_items'] = 0
form_json['text_label'] = []
for i in range(len(self.components)):
component = self.components[i]
if component.is_text:
form_json['num_text_items'] += 1
form_json['text_label'].append(component.title)
else:
form_json['num_numeric_items'] += 1
form_json['numeric_labels'].append(component.title)
form_json['lower_clamp'].append(component.lower_clamp)
form_json['default'].append(component.default)
form_json['max_score'].append(component.max_value)
form_json['upper_clamp'].append(component.upper_clamp)
form_json['minimum_grading_group'] = self.min_grading_group
form_json['gradeable_buckets'] = self.syllabus_bucket
return form_json
def get_gradeable_type_text(self):
if self.type == 0:
return "Electronic File"
elif self.type == 1:
return "Checkpoints"
else:
return "Numeric"
def get_submission_type(self):
if self.grade_by_registration:
return "reg_section"
else:
return "rotating-section"
def get_upload_type(self):
if self.is_repository:
return "Repository"
else:
return "Upload File"
class Component(object):
def __init__(self, component, order):
self.title = component['gc_title']
self.ta_comment = ""
self.student_comment = ""
self.is_text = False
self.is_peer_component = False
self.page = 0
self.order = order
self.marks = []
if 'gc_ta_comment' in component:
self.ta_comment = component['gc_ta_comment']
if 'gc_is_peer' in component:
self.is_peer_component = component['gc_is_peer']
if 'gc_student_comment' in component:
self.student_comment = component['gc_student_comment']
if 'gc_is_text' in component:
self.is_text = component['gc_is_text'] is True
if 'gc_page' in component:
self.page = int(component['gc_page'])
if self.is_text:
self.lower_clamp = 0
self.default = 0
self.max_value = 0
self.upper_clamp = 0
else:
self.lower_clamp = float(component['gc_lower_clamp'])
self.default = float(component['gc_default'])
self.max_value = float(component['gc_max_value'])
self.upper_clamp = float(component['gc_upper_clamp'])
if 'marks' in component:
for i in range(len(component['marks'])):
mark = component['marks'][i]
self.marks.append(Mark(mark, i))
else:
self.marks = generate_random_marks(self.default, self.max_value)
self.key = None
def create(self, g_id, conn, table, mark_table):
ins = table.insert().values(g_id=g_id, gc_title=self.title, gc_ta_comment=self.ta_comment,
gc_student_comment=self.student_comment,
gc_lower_clamp=self.lower_clamp, gc_default=self.default, gc_max_value=self.max_value,
gc_upper_clamp=self.upper_clamp, gc_is_text=self.is_text,
gc_is_peer=self.is_peer_component, gc_order=self.order, gc_page=self.page)
res = conn.execute(ins)
self.key = res.inserted_primary_key[0]
for mark in self.marks:
mark.create(self.key, conn, mark_table)
class Mark(object):
def __init__(self, mark, order):
self.note = mark['gcm_note']
self.points = mark['gcm_points']
self.order = order
self.grader = 'instructor'
self.key = None
def create(self, gc_id, conn, table):
ins = table.insert().values(gc_id=gc_id, gcm_points=self.points, gcm_note=self.note,
gcm_order=self.order)
res = conn.execute(ins)
self.key = res.inserted_primary_key[0]
if __name__ == "__main__":
main()
|
the-stack_0_11396 | """
A binary search tree (BST) is a sorted binary tree that allows for logarithmic-time* searching. Unlike binary search on an
array, inserting and deleting nodes also takes only logarithmic time*.

*Assuming the tree is relatively balanced, such as an AVL tree or a red-black tree. As an extreme example, the binary tree
below is essentially a linked list:

1
 \
  2
   \
    3
     \
      4
       \
        5

BSTs are binary trees where all nodes in node X's left subtree have a value less than X and all nodes in X's right
subtree have a value greater than X. By default they don't allow duplicates (though we could add a count attribute to each
node, for example). An inorder traversal processes the nodes from lowest to highest; see the helper sketched after the
imports below.
"""
import random
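# Not part of the original file: a minimal inorder-traversal helper, included to illustrate the
# "lowest to highest" claim in the docstring above. It assumes the Node class defined below
# (attributes: val, left, right). Example: inorder(bst.root) returns a sorted list of the stored values.
def inorder(node, out=None):
    """Collect the values of the subtree rooted at `node` in ascending order."""
    if out is None:
        out = []
    if node:
        inorder(node.left, out)   # everything in the left subtree is smaller
        out.append(node.val)
        inorder(node.right, out)  # everything in the right subtree is larger
    return out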
class Node:
def __init__(self, val=None):
self.val = val
self.left = None
self.right = None
class BST:
def __init__(self):
self.root = None
def insert(self, val):
if not self.root:
self.root = Node(val)
return True
curr = self.root
while True:
if val < curr.val:
if not curr.left:
curr.left = Node(val)
return True
curr = curr.left
elif val > curr.val:
if not curr.right:
curr.right = Node(val)
return True
curr = curr.right
else:
return False # No duplicates
def delete(self, val):
def newNode(node): # Node that will take the deleted node's spot
# At least one child missing: Replace the node with the other child
if not node.left:
return node.right
if not node.right:
return node.left
            # Two children: replace this node's value with its inorder successor
            curr_node = node.right
            curr_parent = node
            while curr_node.left:
                curr_parent = curr_node
                curr_node = curr_node.left
            node.val = curr_node.val
            # Splice out the successor; if it is node's direct right child, relink on the right side
            if curr_parent is node:
                curr_parent.right = curr_node.right
            else:
                curr_parent.left = curr_node.right
            return node
if not self.root:
return False
parent = None
curr = self.root
if curr.val == val: # Handles the root case
self.root = newNode(curr)
return True
while curr: # Find the node
if curr.val > val:
parent = curr
curr = curr.left
elif curr.val < val:
parent = curr
curr = curr.right
else: # Replace it with the new node
if parent.left == curr:
parent.left = newNode(curr)
else:
parent.right = newNode(curr)
return True
return False
def search(self, val):
curr = self.root
while curr:
            if curr.val < val:
                curr = curr.right
            elif curr.val > val:
                curr = curr.left
else:
return True
return False
def minValue(self):
if self.root is None:
return None
curr = self.root
while True:
if not curr.left:
return curr.val
curr = curr.left
def maxValue(self):
if self.root is None:
return None
curr = self.root
while True:
if not curr.right:
return curr.val
curr = curr.right
def print2D(self):
"""
Wrapper method for __print2D
Prints this binary tree in a 2-D representation
"""
self.__print2D(self.root)
def __print2D(self, root, space=0):
if not root:
return
space += 4
# Process right child first
self.__print2D(root.right, space)
# Print current node after a newline
print()
for spaces in range(4, space):
print(end=" ")
print(root.val)
# Process left child
self.__print2D(root.left, space)
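    def inorder(self):
        """
        Added sketch (not part of the original class): collects values in sorted
        order, illustrating the module docstring's note that an inorder traversal
        processes nodes from lowest to highest.
        """
        result = []

        def visit(node):
            if not node:
                return
            visit(node.left)         # left subtree first (smaller values)
            result.append(node.val)  # then the node itself
            visit(node.right)        # then the right subtree (larger values)

        visit(self.root)
        return result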
if __name__ == "__main__":
bst = BST()
bst.insert(50)
nodes = [50]
for i in range(50):
value = random.randint(1, 100)
nodes.append(value)
bst.insert(value)
bst.print2D()
maximum = bst.maxValue()
assert maximum == max(nodes)
print(f"\nMaximum value: {maximum}")
minimum = bst.minValue()
assert minimum == min(nodes)
print(f"Minimum value: {minimum}")
for i in nodes:
bst.delete(i)
bst.print2D()
assert bst.root is None
|
the-stack_0_11397 | import warnings
warnings.filterwarnings('ignore')
# data processing
import pandas as pd
import numpy as np
# image processing
from PIL import Image
# tf and keras
import tensorflow as tf
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose, Dropout, BatchNormalization
from keras.layers.core import Dense, Activation, Flatten
from keras.optimizers import SGD, Adam
from keras.utils import plot_model
from keras.callbacks import ModelCheckpoint
from keras import backend as K
# dataset processing, ml models and metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
import scipy
import glob
from itertools import product
# Object Detection Metrics
import xml.etree.ElementTree as ET
__all__ = [
'SIMILARS_MAP',
'GREEKS',
'read_csv',
'remove_transparency',
'preprocess_img',
'populate_images',
'convert_to_one_hot_encode',
'one_hot_encode_to_char',
'one_hot_encode_to_char_list',
'convert_pred_list_ohe_to_labels',
'get_df_split',
'gen_x_y_train_test_stratified_1df',
'process_x_y_train_test_stratified_2df',
'process_x_y_train_test_stratified_ann',
'get_label_count_df',
'get_label_count_train_test_dfs',
'dir_',
'model_dir',
'data_dir',
'processed_data_dir',
'get_symbols',
'generate_eqns',
'read_annotation',
'get_iou',
'calculate_map',
]
dir_ = 'HASYv2/'
model_dir = 'trained_models/'
data_dir = 'data/'
processed_data_dir = 'processed_data/'
SIMILARS_MAP = [
(r'\times', 'x', 'X'),
('y', 'Y'),
('c', r'\subset', r'\subseteq'),
('g', '9'),
('o', 'O', '0'),
('s', '5'),
('z', 'Z', '2'),
]
GREEKS = [r'\sigma', r'\Sigma', r'\gamma', r'\delta', r'\Delta',
r'\eta', r'\theta', r'\epsilon', r'\lambda', r'\mu',
r'\Pi', r'\rho', r'\phi', r'\omega', r'\ohm']
def read_csv(path):
return pd.read_csv(path)
# Image Preprocessing
def remove_transparency(im, bg_colour=(255, 255, 255)):
# Only process if image has transparency
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
# Need to convert to RGBA if LA format due to a bug in PIL
alpha = im.convert('RGBA').split()[-1]
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
return bg
else:
return im
def preprocess_img(path):
# Open Image
im = Image.open(dir_ + path)
# Resize image to 32 by 32
if im.size != (32, 32):
im = im.resize((32, 32))
# Convert image to a single greyscale channel
im = remove_transparency(im).convert('L')
# Convert image to numpy array
I = np.asarray(im)
# Close image
im.close()
return I
def populate_images(dataset):
temp = []
for i in range(len(dataset)):
path = dataset.iloc[i]['path']
pathsplit = path.split('/')
if len(pathsplit) > 2:
path = '/'.join([pathsplit[-2], pathsplit[-1]])
img = preprocess_img(path)
temp.append(img)
dataset['img'] = [i for i in temp]
return dataset
def convert_to_one_hot_encode(data, no_categories):
data = np.array(data).reshape(-1)
print('len of dataset', len(data))
return np.eye(no_categories)[data]
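# Example of the np.eye indexing trick used above:
#   convert_to_one_hot_encode([0, 2, 1], 3) returns the rows
#   [1, 0, 0], [0, 0, 1], [0, 1, 0] -- row i of the identity matrix
#   selected by label i.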
# to process a one-hot / probability vector back to its label
# returns the highest-scoring category, or with get_max=False all categories at or above the threshold (default 0.5)
def one_hot_encode_to_char(arr, threshold=0.5, get_max=True):
result = []
val = 0
for i in range(len(arr)):
if arr[i] >= threshold:
result.append((val, arr[i]))
val += 1
_max = []
high = 0
if get_max:
for i in result:
if i[1] > high:
_max = [i[0]]
high = i[1]
return _max
else:
return [i[0] for i in result]
def one_hot_encode_to_char_list(arr, threshold=0.5, get_max=True):
result = []
for i in range(len(arr)):
if arr[i] >= threshold:
result.append((i, arr[i]))
_max = []
result = sorted(result, key=lambda x: x[1], reverse=True)
if get_max:
return result[0]
return result
def convert_pred_list_ohe_to_labels(pred_data, threshold=0.5, get_max=True):
result = []
for i in range(len(pred_data)):
val = one_hot_encode_to_char(pred_data[i], threshold=threshold, get_max=get_max)
if len(val) > 0:
if get_max:
result.append(val[0])
else:
result.append(val)
else:
result.append(None)
print(":( :( :(")
return result
# Dataset Splitting
# Stratified Train Test Split (new function)
def get_df_split(ds, stratify_col, test_size=0.2):
_train, _test = train_test_split(ds, test_size=test_size, stratify=ds[stratify_col])
return _train, _test
# function to split whole dataset at once (old function)
def gen_x_y_train_test_stratified_1df(dataset, input_shape, test_size=0.2):
x = np.array(list(dataset['img']))
y = np.array(list(dataset['symbol_id_ohe']))
x = x.reshape((x.shape[0], 1, input_shape[1], input_shape[2]))
# Normalize data to 0-1
x = x.astype("float32") / 255.0
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=test_size, stratify=y)
return X_train, X_test, y_train, y_test
# function to process already split data
def process_x_y_train_test_stratified_2df(_tr, _ts, input_shape):
# train df
X_train = np.array(list(_tr['img']))
y_train = np.array(list(_tr['symbol_id_ohe']))
X_train = X_train.reshape((X_train.shape[0], 1, input_shape[1], input_shape[2]))
# Normalize data to 0-1
X_train = X_train.astype("float32") / 255.0
# test df
X_test = np.array(list(_ts['img']))
y_test = np.array(list(_ts['symbol_id_ohe']))
X_test = X_test.reshape((X_test.shape[0], 1, input_shape[1], input_shape[2]))
# Normalize data to 0-1
X_test = X_test.astype("float32") / 255.0
return X_train, X_test, y_train, y_test
def process_x_y_train_test_stratified_ann(_tr, _ts, input_shape):
X_train = np.array(list(_tr['img']))
y_train = np.array(list(_tr['symbol_id_ohe']))
X_train = X_train.reshape((X_train.shape[0], input_shape[0]))
# Normalize data to 0-1
X_train = X_train.astype("float32") / 255.0
# test df
X_test = np.array(list(_ts['img']))
y_test = np.array(list(_ts['symbol_id_ohe']))
X_test = X_test.reshape((X_test.shape[0], input_shape[0]))
# Normalize data to 0-1
X_test = X_test.astype("float32") / 255.0
return X_train, X_test, y_train, y_test
# Dataset metrics
# generate label counts for dataframe and list
def get_label_count_df(df_train, df_test, sym_list):
train_labels_count = {}
test_labels_count = {}
perc_labels_count = {}
for i in sym_list:
train_labels_count[i] = 0
test_labels_count[i] = 0
for i in range(len(df_train)):
train_labels_count[df_train.loc[i, 'symbol_id']] += 1
for i in range(len(df_test)):
test_labels_count[df_test.loc[i, 'symbol_id']] += 1
for i in sym_list:
perc = (train_labels_count[i] / (train_labels_count[i] + test_labels_count[i])) * 100
perc_labels_count[i] = (train_labels_count[i], test_labels_count[i], round(perc, 2))
return perc_labels_count
def get_label_count_train_test_dfs(df_train, df_test):
train_labels_count = {}
test_labels_count = {}
perc_labels_count = {}
train_syms = df_train['symbol_id'].unique()
test_syms = df_test['symbol_id'].unique()
sym_list = np.unique(np.concatenate([train_syms, test_syms], axis=0))
for i in sym_list:
train_labels_count[i] = 0
test_labels_count[i] = 0
for i in range(len(df_train)):
train_labels_count[df_train.loc[i, 'symbol_id']] += 1
for i in range(len(df_test)):
test_labels_count[df_test.loc[i, 'symbol_id']] += 1
for i in sym_list:
perc = (train_labels_count[i] / (train_labels_count[i] + test_labels_count[i])) * 100
perc_labels_count[i] = (train_labels_count[i], test_labels_count[i], round(perc, 2))
return perc_labels_count
def get_label_count_list(lst_data, sym_list):
labels_count = {}
for i in sym_list:
labels_count[i] = 0
for i in range(len(lst_data)):
j = one_hot_encode_to_char(lst_data[i])[0]
labels_count[j] += 1
return labels_count
# Error Handling before Syntactical Analysis
def get_symbols(syms_):
result_syms = []
for i in syms_:
sym_maps = None
for j in i:
#if sym_maps is not None:
# break
# ignore greeks for now since greeks are not included in lexer
if j[0] in GREEKS:
continue
for k in SIMILARS_MAP:
if j[0] in k:
sym_maps = k
break
break
if sym_maps is not None:
result_syms.append(sym_maps)
else:
for j in i:
if j[0] not in GREEKS:
result_syms.append((j[0],))
break
return result_syms
def generate_eqns(err_handled_symbols):
return [i for i in product(*err_handled_symbols)]
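# Example with hypothetical recognizer output: if error handling yields
# err_handled_symbols = [('x', 'X'), ('2',)], then
# generate_eqns([('x', 'X'), ('2',)]) -> [('x', '2'), ('X', '2')],
# i.e. one candidate equation per combination of the ambiguous symbols.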
# ************************************************************
# Object Detection metrics
# read xml file
def read_annotation(xml_file):
tree = ET.parse(xml_file)
root = tree.getroot()
all_boxes = []
for i in root.iter('object'):
ymin, xmin, ymax, xmax = None, None, None, None
for j in i.findall("bndbox"):
ymin = int(j.find("ymin").text)
xmin = int(j.find("xmin").text)
ymax = int(j.find("ymax").text)
xmax = int(j.find("xmax").text)
# bbox = [xmin, xmax, ymin, ymax]
bbox = {
'x1': xmin,
'x2': xmax,
'y1': ymin,
'y2': ymax
}
all_boxes.append(bbox)
return all_boxes
# calculate iou
def get_iou(bb1, bb2):
"""
Calculate the Intersection over Union (IoU) of two bounding boxes.
Parameters
----------
bb1 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
bb2 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x, y) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
Returns
-------
float
in [0, 1]
"""
assert bb1['x1'] < bb1['x2']
assert bb1['y1'] < bb1['y2']
assert bb2['x1'] < bb2['x2']
assert bb2['y1'] < bb2['y2']
# determine the coordinates of the intersection rectangle
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
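# Example with two hypothetical boxes: a 10x10 ground-truth box and a prediction
# shifted 5 px to the right overlap in a 5x10 region, so
#   get_iou({'x1': 0, 'y1': 0, 'x2': 10, 'y2': 10},
#           {'x1': 5, 'y1': 0, 'x2': 15, 'y2': 10})
# returns 50 / (100 + 100 - 50) = 1/3.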
def calculate_map(map_data):
"""
map_data: a list of tuples with each tuple containing (precision, recall)
"""
md = sorted(map_data, key=lambda x: x[1])
md = [(i, round(j, 1)) for i, j in md]
ap_11_precs = {str(round(k*0.1, 1)): None for k in range(11)}
for p_, r_ in md:
if not ap_11_precs[str(r_)] or p_ > ap_11_precs[str(r_)]:
ap_11_precs[str(r_)] = p_
ap_11_precs_list = list(ap_11_precs.values())
ap_11_precs_list = [z if z is not None else 0 for z in ap_11_precs_list]
mean_ap = np.mean(ap_11_precs_list)
return mean_ap
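# Example: calculate_map([(1.0, 0.1), (0.8, 0.4), (0.6, 0.7)]) keeps the best
# precision seen at each of the 11 rounded recall levels 0.0, 0.1, ..., 1.0
# (levels with no detections count as 0) and returns their mean,
# here (1.0 + 0.8 + 0.6) / 11 ~= 0.218.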
# ********************************************************
|
the-stack_0_11398 | import pymssql
from scrape_microcenter import ScrapeMicrocenter
from time import time
class MSSQL_Database:
def __init__(self, server, user, password, database, autocommit=True):
self._server = server
self._user = user
self._password = password
self._database = database
self._autocommit = autocommit
self._conn = pymssql.connect(server=server, port=1433, user=user,
password=password, database=database,
autocommit=autocommit)
self._tablecol = {}
def mssql_dec(func):
def execute(self, *args, **kwargs):
with self._conn.cursor() as cursor:
command = func(self, *args, **kwargs)
cursor.execute(command)
print(command)
return execute
def time_dec(func):
def wrapper(self, *args, **kwargs):
init_t = time()
ret = func(self, *args, **kwargs)
final_t = time()
print(func, args, kwargs, final_t-init_t)
return ret
return wrapper
@time_dec
@mssql_dec
def create_db(self, db_name):
command = """
IF NOT EXISTS(SELECT * FROM master.dbo.sysdatabases WHERE NAME = '{db_name}')
BEGIN
CREATE DATABASE [{db_name}]
END;""".format(db_name=db_name)
return command
@time_dec
@mssql_dec
def create_table(self, table_name, **kwargs):
self._tablecol = kwargs
command ="""
IF OBJECT_ID('{name}', 'U') IS NULL
CREATE TABLE {name} (
ID int IDENTITY(1,1) PRIMARY KEY,\n""".format(name=table_name)
if kwargs is not None:
for col, col_type in kwargs.items():
if col_type.upper() == 'VARCHAR':
command += "\t\t{col} {col_type}(255),\n".format(
col=col, col_type=col_type)
else:
command += "\t\t{col} {col_type},\n".format(
col=col, col_type=col_type)
command += "\t\t);"
return command
@time_dec
@mssql_dec
def insert_table(self, table_name, **kwargs):
        assert kwargs, "Product not passed. Check to see argument is passed"
command = "INSERT INTO {table_name} (".format(table_name=table_name)
for col_name in kwargs:
command += "{}, ".format(col_name)
command = command[0:-2]
command += ")\nVALUES ("
for col_name in kwargs:
kwargs[col_name] = kwargs[col_name].replace("\'", "\'\'")
if self._tablecol[col_name] in ('varchar', 'datetime'):
command += "'{}', ".format(kwargs[col_name])
else:
command += "{}, ".format(kwargs[col_name])
command = command[0:-2]
command += ");"
print(command)
return command
@time_dec
def get_tablecol(self):
return self._tablecol
@time_dec
def commit_command(self):
self._conn.commit()
@time_dec
def close_conn(self):
self._conn.close()
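# Hedged usage sketch; the server, credentials and table layout below are
# illustrative assumptions, not values from the original project.
if __name__ == '__main__':
    db = MSSQL_Database(server='localhost', user='sa', password='secret',
                        database='scrape_db')
    # Column types are recorded in _tablecol so insert_table can quote values.
    db.create_table('products', name='varchar', price='float')
    db.insert_table('products', name='GeForce RTX', price='499.99')
    db.close_conn()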
|
the-stack_0_11399 | import numpy as np
from src.strategize.game import Game
def prisoners_dilemma():
player_set = np.array(['Alice', 'Bob'])
action_set = np.array([['Cooperate', 'Defect'], ['Cooperate', 'Defect']])
utility_set = np.array([[[2, 2], [0, 3]], [[3, 0], [1, 1]]])
pd = Game(player_set, action_set, utility_set)
return pd
def common_payoff():
player_set = np.array(['Alice', 'Bob'])
action_set = np.array([['Left', 'Right'], ['Left', 'Right']])
utility_set = np.array([[[1, 1], [0, 0]], [[0, 0], [1, 1]]])
cp = Game(player_set, action_set, utility_set)
return cp
def zero_sum():
player_set = np.array(['Alice', 'Bob'])
action_set = np.array([['Heads', 'Tails'], ['Left', 'Right']])
utility_set = np.array([[[1, -1], [-1, 1]], [[-1, 1], [1, -1]]])
zs = Game(player_set, action_set, utility_set)
return zs
game = prisoners_dilemma()
print(game)
game.plot() |
the-stack_0_11402 | #!/usr/bin/python
'''
Copyright 2011 Daniel Arndt
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
@author: Daniel Arndt <[email protected]>
dependencies:
pip install netifaces
apt-get install python-libpcap
'''
import sys, traceback
import argparse
import logging
import time
import binascii as ba
import socket
import struct
import string
import pcap
from flow import Flow
#Set up default logging system.
log = logging.getLogger()
log.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s;%(levelname)s:: "
"%(message)s :: %(filename)s:%(lineno)s",
"%H:%M:%S")
ch.setFormatter(formatter)
log.addHandler(ch)
def sort_by_IP(t):
'''
Re-arrange a flow tuple to have lowest IP first, for lookup
'''
return (t[2], t[3], t[0], t[1], t[4]) if t[2] < t[0] else t
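# e.g. both directions of the same flow,
#   ('10.0.0.2', 80, '10.0.0.1', 34567, 6) and
#   ('10.0.0.1', 34567, '10.0.0.2', 80, 6),
# map to ('10.0.0.1', 34567, '10.0.0.2', 80, 6), so they share one key in
# active_flows.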
def dumphex(s):
bytes = map(lambda x: '%.2x' % x, map(ord, s))
for i in xrange(0,len(bytes)/16):
log.error(' %s' % string.join(bytes[i*16:(i+1)*16],' '))
log.error(' %s' % string.join(bytes[(i+1)*16:],' '))
class Flowtbag:
'''
classdocs
'''
    def __init__(self, capture_file):
        try:
            self.count = 0
            self.flow_count = 0
            self.active_flows = {}
            # Run the callback over every packet in the capture file via the
            # pylibpcap loop, as described in the callback docstring below.
            p = pcap.pcapObject()
            p.open_offline(capture_file)
            p.loop(-1, self.callback)
        except KeyboardInterrupt:
            exit(0)
def __repr__(self):
raise NotImplementedError()
def __str__(self):
return "I am a Flowtbag of size %s" % (len(self.active_flows))
def exportAll(self):
for flow in self.active_flows.values():
flow.export()
def create_flow(self, pkt, flow_tuple):
self.flow_count += 1
flow = Flow(pkt, self.flow_count)
self.active_flows[flow_tuple] = flow
def cleanup_active(self, time):
count = 0
for flow_tuple in self.active_flows.keys():
flow = self.active_flows[flow_tuple]
if flow.checkidle(time):
#flow.export()
del self.active_flows[flow_tuple]
count += 1
log.info("Cleaned up %d idle flows" % count)
def decode_IP_layer(self, data, pkt):
pkt['version'] = (ord(data[0]) & 0xf0) >> 4
pkt['iphlen'] = (ord(data[0]) & 0x0f) * 4
pkt['dscp'] = ord(data[1]) >> 2
pkt['len'] = socket.ntohs(struct.unpack('H',data[2:4])[0])
pkt['proto'] = ord(data[9])
pkt['srcip'] = pcap.ntoa(struct.unpack('i',data[12:16])[0])
pkt['dstip'] = pcap.ntoa(struct.unpack('i',data[16:20])[0])
pkt['data'] = data[pkt['iphlen']:]
def decode_TCP_layer(self, data, pkt):
pkt['srcport'] = socket.ntohs(struct.unpack('H', data[0:2])[0])
pkt['dstport'] = socket.ntohs(struct.unpack('H', data[2:4])[0])
pkt['prhlen'] = ((ord(data[12]) & 0xf0) >> 4) * 4
pkt['flags'] = ord(data[13]) & 0x3f
def decode_UDP_layer(self, data, pkt):
pkt['srcport'] = socket.ntohs(struct.unpack('H', data[0:2])[0])
pkt['dstport'] = socket.ntohs(struct.unpack('H', data[2:4])[0])
pkt['prhlen'] = socket.ntohs(struct.unpack('H', data[4:6])[0])
def callback(self, pktlen, data, ts):
'''
The callback function to be used to process each packet
This function is applied to each individual packet in the capture via a
loop function in the construction of the Flowtbag.
Args:
pktlen -- The length of the packet
data -- The packet payload
ts -- The timestamp of the packet
'''
self.count += 1
if not data:
# I don't know when this happens, so I wanna know.
raise Exception
pkt={}
# Check if the packet is an IP packet
if not data[12:14] == '\x08\x00':
#log.debug('Ignoring non-IP packet')
return
pkt['num'] = self.count
if len(data) < 34:
#Hmm, IP header seems to be too short
raise Exception
self.decode_IP_layer(data[14:], pkt)
if pkt['version'] != 4:
#Ignore non-IPv4
return
if pkt['proto'] == 6:
if len(pkt['data']) < 20:
log.info("Ignoring malformed TCP header on packet %d" %
(pkt['num']))
return
try:
self.decode_TCP_layer(pkt['data'], pkt)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
log.error("Error reading TCP header on packet %d" %
(pkt['num']))
log.error("Size: %d iphlen: %d" %
(len(data), pkt['iphlen']))
log.error("TCP header size: %d" % len(pkt['data']))
dumphex(data)
log.error(repr(traceback.format_exception(exc_type,
exc_value,
exc_traceback)))
raise e
elif pkt['proto'] == 17:
if len(pkt['data']) < 8:
log.info("Ignoring malformed UDP header on packet %d" %
(pkt['num']))
return
try:
self.decode_UDP_layer(pkt['data'], pkt)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
log.error("Error reading UDP header on packet %d" %
(pkt['num']))
dumphex(data)
log.error(repr(traceback.format_exception(exc_type,
exc_value,
exc_traceback)))
raise e
else:
#log.debug('Ignoring non-TCP/UDP packet')
return
# We're really going ahead with this packet! Let's get 'er done.
pkt['time'] = int(ts * 1000000)
flow_tuple = (pkt['srcip'],
pkt['srcport'],
pkt['dstip'],
pkt['dstport'],
pkt['proto'])
flow_tuple = sort_by_IP(flow_tuple)
# Find if a flow already exists for this tuple
if flow_tuple not in self.active_flows:
# A flow of this tuple does not exists yet, create it.
self.create_flow(pkt, flow_tuple)
else:
# A flow of this tuple already exists, add to it.
flow = self.active_flows[flow_tuple]
return_val = flow.add(pkt)
if return_val == 0:
return
elif return_val == 1:
#This packet ended the TCP connection. Export it.
#flow.export()
del self.active_flows[flow_tuple]
elif return_val == 2:
# This packet has been added to the wrong flow. This means the
# previous flow has ended. We export the old flow, remove it,
# and create a new flow.
#flow.export()
del self.active_flows[flow_tuple]
self.create_flow(pkt, flow_tuple)
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(description='Converts a network capture '\
'file into a comma seperated value list of integers representing ' \
'a list of flow statistics.')
arg_parser.add_argument('capture_file',
help='The capture file to be converted')
arg_parser.add_argument('--debug',
dest='debug',
action='store_true',
default=False,
help='display debugging information')
arg_parser.add_argument('-r',
dest='report',
type=int,
default=5000000,
help='interval (num pkts) which stats be reported')
args = arg_parser.parse_args()
if args.report:
REPORT_INTERVAL = args.report
if args.debug:
log.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
log.debug("Flowtbag begin")
Flowtbag(args.capture_file)
log.debug("Flowtbag end")
|
the-stack_0_11404 | import cv2
import numpy as np
def showImage():
filename="Images/lena.jpg"
img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
cv2.imshow('image', img)
equ_img = cv2.equalizeHist(img)
cv2.imshow('equalized image', equ_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
showImage() |
the-stack_0_11406 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=0.25, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha, (float, int)): self.alpha = torch.Tensor([alpha, 1 - alpha])
if isinstance(alpha, list): self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim() > 2:
input = input.view(input.size(0), input.size(1), -1) # N,C,H,W => N,C,H*W
input = input.transpose(1, 2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1, 1)
        logpt = F.log_softmax(input, dim=1)
logpt = logpt.gather(1, target)
logpt = logpt.view(-1)
pt = Variable(logpt.data.exp())
if self.alpha is not None:
if self.alpha.type() != input.data.type():
self.alpha = self.alpha.type_as(input.data)
at = self.alpha.gather(0, target.data.view(-1))
logpt = logpt * Variable(at)
loss = -1 * (1 - pt) ** self.gamma * logpt
if self.size_average:
return loss.mean()
else:
return loss.sum()
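# Hedged usage sketch (binary case, shapes are assumptions): raw class scores of
# shape (N, C) and integer class targets of shape (N,), as expected by forward().
# A scalar alpha implies the two-class weighting [alpha, 1 - alpha].
if __name__ == "__main__":
    criterion = FocalLoss(gamma=2, alpha=0.25)
    logits = torch.randn(8, 2)            # batch of 8, 2 classes
    targets = torch.randint(0, 2, (8,))   # integer class labels
    loss = criterion(logits, targets)
    print(loss.item())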
|
the-stack_0_11407 | from django.conf import settings
from django.http import Http404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from rest_framework.views import APIView
from netbox.api.authentication import TokenAuthentication
from sidekick import utils
from sidekick.models import (
NetworkServiceGroup,
)
from tenancy.models import Tenant
class NetworkUsageListGroupsView(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
    renderer_classes = [JSONRenderer]
def get(self, request):
result = []
for g in NetworkServiceGroup.objects.all():
result.append({
"id": g.id,
"name": g.name,
})
return Response(result)
class NetworkUsageListMembersView(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
    renderer_classes = [JSONRenderer]
def get(self, request):
result = []
for t in Tenant.objects.filter(group__name='Members'):
result.append({
"id": t.id,
"name": t.name,
})
return Response(result)
class NetworkUsageGroupView(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
    renderer_classes = [JSONRenderer]
def get(self, request, group_id=None):
graphite_render_host = settings.PLUGINS_CONFIG['sidekick'].get('graphite_render_host', None)
if graphite_render_host is None:
return Response({})
group_id = self.kwargs.get('group_id', None)
if group_id is None:
raise Http404
try:
service_group = NetworkServiceGroup.objects.get(id=group_id)
except NetworkServiceGroup.DoesNotExist:
raise Http404
period = utils.get_period(request)
services_by_member = {}
accounting_by_member = {}
for network_service in service_group.network_services.all():
member = network_service.member
if member.name not in services_by_member.keys():
services_by_member[member.name] = []
if member.name not in accounting_by_member.keys():
accounting_by_member[member.name] = []
services_by_member[member.name].append(network_service)
accounting_by_member[member.name] = utils.get_accounting_sources(member)
services_in = []
services_out = []
accounting_in = []
accounting_out = []
remaining_in = []
remaining_out = []
for member_name in services_by_member.keys():
services = services_by_member[member_name]
accounting = accounting_by_member[member_name]
(_in, _out) = utils.format_graphite_service_query(services)
services_in.append(_in)
services_out.append(_out)
(_in, _out) = utils.format_graphite_accounting_query(accounting)
accounting_in.append(_in)
accounting_out.append(_out)
(_in, _out) = utils.format_graphite_remaining_query(services, accounting)
remaining_in.append(_in)
remaining_out.append(_out)
service_data = utils.get_graphite_data(graphite_render_host, services_in, services_out, period)
accounting_data = utils.get_graphite_data(graphite_render_host, accounting_in, accounting_out, period)
remaining_data = utils.get_graphite_data(graphite_render_host, remaining_in, remaining_out, period)
graph_data = {
'service_data': service_data['data'],
'remaining_data': [service_data['data'][0], [0], [0]],
'accounting_data': [service_data['data'][0], [0], [0]],
}
queries = {
'service_data': service_data['query'],
'remaining_data': remaining_data['query'],
'accounting_data': accounting_data['query'],
}
if accounting_data is not None and 'data' in accounting_data:
graph_data['accounting_data'] = accounting_data['data']
if remaining_data is not None and 'data' in remaining_data:
graph_data['remaining_data'] = remaining_data['data']
return Response({
'graph_data': graph_data,
'queries': queries,
})
class NetworkUsageMemberView(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
    renderer_classes = [JSONRenderer]
def get(self, request, member_id=None):
graphite_render_host = settings.PLUGINS_CONFIG['sidekick'].get('graphite_render_host', None)
if graphite_render_host is None:
return Response({})
member_id = self.kwargs.get('member_id', None)
if member_id is None:
raise Http404
try:
member = Tenant.objects.get(id=member_id)
except Tenant.DoesNotExist:
raise Http404
services = utils.get_services(member)
period = utils.get_period(request)
(services_in, services_out) = utils.format_graphite_service_query(services)
service_data = utils.get_graphite_data(graphite_render_host, [services_in], [services_out], period)
accounting_data = None
accounting_sources = utils.get_accounting_sources(member)
if len(accounting_sources) > 0:
(accounting_in, accounting_out) = utils.format_graphite_accounting_query(accounting_sources)
accounting_data = utils.get_graphite_data(graphite_render_host, [accounting_in], [accounting_out], period)
(remaining_in, remaining_out) = utils.format_graphite_remaining_query(services, accounting_sources)
remaining_data = utils.get_graphite_data(graphite_render_host, [remaining_in], [remaining_out], period)
graph_data = {
'service_data': service_data['data'],
'remaining_data': [service_data['data'][0], [0], [0]],
'accounting_data': [service_data['data'][0], [0], [0]],
}
queries = {
'service_data': service_data['query'],
}
if accounting_data is not None and 'data' in accounting_data:
graph_data['accounting_data'] = accounting_data['data']
queries['accounting_data'] = accounting_data['query']
if remaining_data is not None and 'data' in remaining_data:
graph_data['remaining_data'] = remaining_data['data']
queries['remaining_data'] = remaining_data['query']
return Response({
'graph_data': graph_data,
'queries': queries,
})
|
the-stack_0_11409 | import numpy as np
import pandas
output_data = []
train_csv = pandas.read_csv('data/train.csv', index_col=0)
test_csv = pandas.read_csv('data/test_2.csv', index_col=0)
train_X = train_csv.drop(train_csv.columns[range(146, 210)], axis=1).values
for i in range(62): # t=121 to 180, and D+1, D+2
if i == 60:
name_of_column = 'Ret_PlusOne'
name_of_weight = 'Weight_Daily'
elif i == 61:
name_of_column = 'Ret_PlusTwo'
name_of_weight = 'Weight_Daily'
else:
name_of_column = 'Ret_' + str(i + 120)
name_of_weight = 'Weight_Intraday'
train_y = train_csv[name_of_column].values
train_weights = train_csv[name_of_weight].values
test_X = test_csv.values
# training and predict logics
# model.train()
# pred = model.predict()
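    # Hedged sketch: the original leaves the model unspecified, so a simple Ridge
    # regression (an assumption, not the competition model) is used here purely so
    # that `pred` is defined for the output loop below.
    from sklearn.linear_model import Ridge
    model = Ridge(alpha=1.0)
    model.fit(np.nan_to_num(train_X), np.nan_to_num(train_y),
              sample_weight=train_weights)
    pred = model.predict(np.nan_to_num(test_X))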
for stock_id, val in enumerate(pred):
output_data.append(
{'Id': str(stock_id + 1) + '_' + str(i), 'Predicted': val})
output = pandas.DataFrame(data=output_data)
output.sort_values(by='Id', inplace=True)
# print(output.head())
output.to_csv(path_or_buf='data/output.csv', index=False)
|
the-stack_0_11410 | # -*- coding: utf-8 -*-
"""
numcolorpy.py
Created Saturday April 22 2017
@author: del
[email protected]
[email protected]
import numcolorpy as ncp
"""
import time
import numpy as np
from PIL import Image as IP
from PIL import ImageColor as IC
import colorsys
def range_norm(Z, lo=0.0, hi=1.0):
""" normaize input matrix Z within a lo - hi range
"""
I = graphic_norm(Z)
hi = max(min(hi, 1.0), 0.0)
lo = min(max(lo, 0.0), 1.0)
low_fence = min(hi, lo)
hi_fence = max(hi, lo)
if low_fence == hi_fence:
return I
v_span = hi_fence - low_fence
I = I * v_span + low_fence
return I
def etg_norm(Z0, Z, ET):
""" Zd, Zr, ETn = etg_norm(Z0, Z, ET); Graphically usable matrices from escape time algorithm result
"""
ETn = mat2graphic(ET)
Zv = Z - Z0
Zd = mat2graphic(Zv)
Zr = mat2graphic(np.arctan2(np.imag(Zv), np.real(Zv)))
return Zd, Zr, ETn
def mat2graphic(Z):
""" M, nClrs = mat2graphic(Z)
Use all the transformation tricks to prepare input matrix Z
for conversion to a viewable image.
Args:
Z: real or complex (rows x xcols x 1) matrix
Returns:
M: real (rows x xcols x 1) matrix (0 <= M <= 1)
"""
M, nClrs = flat_index(np.abs(Z))
return graphic_norm(M)
def graphic_norm(Z):
""" rescale matrix z to distance (float) s.t.
0 <= z <= 1 (will include 0,1 if it has more than 1 value)
Args:
Z: is a real or complex two dimensional matrix
Returns:
Z: same size, real valued matrix with smallest member = 0, largest = 1
"""
EPSILON = 1e-15
I = np.abs(Z)
I = I - I.min()
return I / max(EPSILON, I.max())
def flat_index(float_mat):
""" convert the input matrix to integers from 0 to number of unique values.
Args:
float_mat: two dimensional matrix.
Return:
float_mat: re-enumerated so that the matrix values are all sequential ints.
n_colors: number of unique values in the input / output matrix
"""
rows = float_mat.shape[0]
cols = float_mat.shape[1]
float_mat = np.reshape(float_mat, (1, float_mat.size))
ixA = np.argsort(float_mat)[0]
current_value = float_mat[0, ixA[0]]
enumeration_value = 0
for ix in ixA:
if float_mat[0,ix] != current_value:
current_value = float_mat[0,ix]
enumeration_value += 1
float_mat[0,ix] = enumeration_value
float_mat = np.array(np.reshape(float_mat, (rows, cols)))
float_mat = np.int_(float_mat)
n_colors = enumeration_value + 1
return float_mat, n_colors
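# Example: flat_index(np.array([[0.2, 0.5], [0.5, 0.9]])) returns
# (array([[0, 1], [1, 2]]), 3): each distinct value is replaced by its rank in
# sorted order, and 3 distinct values were found.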
def gray_mat(V):
n_rows = V.shape[0]
n_cols = V.shape[1]
V = V * 255
I = IP.new('RGB', (n_cols, n_rows))
for row in range(0, I.height):
for col in range(0, I.width):
P = tuple(np.int_([V[row, col], V[row, col], V[row, col]]))
I.putpixel((col, row), P)
return I
def rgb_2_hsv_mat(H, S, V):
n_rows = H.shape[0]
n_cols = H.shape[1]
I = IP.new('RGB', (n_cols, n_rows))
for row in range(0, I.height):
for col in range(0, I.width):
red, green, blue = colorsys.hsv_to_rgb(H[row, col], S[row, col], V[row, col])
red = int(np.round( red * 255 ))
green = int(np.round( green * 255 ))
blue = int(np.round( blue * 255 ))
P = (red, green, blue)
I.putpixel((col, row), P)
return I
def mat_to_gray(V, max_v=255, min_v=0):
R_max = max(min(max_v, 255), 0)
R_floor = min(max(min_v, 0), R_max)
G_max = max(min(max_v, 255), 0)
G_floor = min(max(min_v, 0), G_max)
B_max = max(min(max_v, 255), 0)
B_floor = min(max(min_v, 0), B_max)
return mat_to_Shade(V, R_max, G_max, B_max, R_floor, G_floor, B_floor)
def mat_to_red(V):
R_max = 255
R_floor = 180
G_max = 250
G_floor = 30
B_max = 250
B_floor = 30
return mat_to_Shade(V, R_max, G_max, B_max, R_floor, G_floor, B_floor)
def mat_to_green(V):
R_max = 250
R_floor = 30
G_max = 255
G_floor = 130
B_max = 250
B_floor = 30
return mat_to_Shade(V, R_max, G_max, B_max, R_floor, G_floor, B_floor)
def mat_to_blue(V):
R_max = 250
R_floor = 30
G_max = 250
G_floor = 30
B_max = 255
B_floor = 130
return mat_to_Shade(V, R_max, G_max, B_max, R_floor, G_floor, B_floor)
def mat_to_Shade(V, R_max, G_max, B_max, R_floor=0, G_floor=0, B_floor=0):
""" I = mat_to_gray(V)
matrix of values V, converted to a gray scale image
Args:
V: rows x cols x 1 numerical matrix
Returns:
I: rows x cols x 3 grayscale image
"""
R = R_max - R_floor
G = G_max - G_floor
B = B_max - B_floor
V = graphic_norm(V)
n_rows = V.shape[0]
n_cols = V.shape[1]
I = IP.new('RGB', (n_cols, n_rows))
for row in range(0, I.height):
for col in range(0, I.width):
P = tuple(np.int_(
[R_floor + V[row, col] * R, G_floor + V[row, col] * G, B_floor + V[row, col] * B]))
I.putpixel((col, row), P)
return I
def resize_color_map(mp0, n_colors):
""" givin a RGB colormap input return the same color order with n_colors number of colors
"""
mp = np.zeros((n_colors,3))
n_colors0 = mp0.shape[0]
if n_colors0 != n_colors:
tc = n_colors0 * n_colors
x = np.linspace(1,tc, n_colors0)
xq = np.linspace(1,tc, n_colors)
mp[:,0] = np.interp(xq, x, mp0[:,0])
mp[:,1] = np.interp(xq, x, mp0[:,1])
mp[:,2] = np.interp(xq, x, mp0[:,2])
return mp
def normat_hsv_intrgb(H, S, V, H_max=1.0, H_min=0.0, S_max=1.0, S_min=0.0, V_max=1.0, V_min=0.0):
""" I = normat_hsv_intrgb(H, S, V, H_max=1.0, H_min=0.0, S_max=1.0, S_min=0.0, V_max=1.0, V_min=0.0)
        Three normalized matrices treated as an HSV image and converted to RGB
'normalized' means 0 <= M <= 1 where M is H, S, or V
Args:
H: rows x cols x 1 normalized matrix
S: rows x cols x 1 normalized matrix
V: rows x cols x 1 normalized matrix
Returns:
I: rows x cols x 3 hue-saturation-values image
"""
H_mul = H_max - H_min
S_mul = S_max - S_min
V_mul = V_max - V_min
n_rows = H.shape[0]
n_cols = H.shape[1]
I = IP.new('RGB', (n_cols, n_rows))
for row in range(0, I.height):
for col in range(0, I.width):
red, green, blue = colorsys.hsv_to_rgb(
H_min + H_mul * H[row, col],
S_min + S_mul * S[row, col],
V_min + V_mul * V[row, col])
red = int(np.round( red * 255 ))
green = int(np.round( green * 255 ))
blue = int(np.round( blue * 255 ))
P = (red, green, blue)
I.putpixel((col, row), P)
return I
def mat_to_mapped(A, mp):
n_rows = A.shape[0]
n_cols = A.shape[1]
A, nClrs = flat_index(A)
mp = resize_color_map(mp, nClrs)*255
I = IP.new('RGB', (n_cols, n_rows))
for r in range(0, n_rows):
for c in range(0, n_cols):
I.putpixel((c,r), tuple(np.uint8(mp[A[r,c], :])))
return I
|
the-stack_0_11411 | #!/usr/bin/env python2
# Copyright (c) 2014 The oaccoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework import oaccoinTestFramework
from util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class WalletBackupTest(oaccoinTestFramework):
def setup_chain(self):
logging.info("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
# This mirrors how the network was setup in the bash test
def setup_network(self, split=False):
# nodes 1, 2,3 are spenders, let's give them a keypool=100
extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
self.nodes = start_nodes(4, self.options.tmpdir, extra_args)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.is_network_split=False
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].setgenerate(True, 1)
# As above, this mirrors the original bash test.
def start_three(self):
self.nodes[0] = start_node(0, self.options.tmpdir)
self.nodes[1] = start_node(1, self.options.tmpdir)
self.nodes[2] = start_node(2, self.options.tmpdir)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 1)
stop_node(self.nodes[2], 2)
def erase_three(self):
os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")
def run_test(self):
logging.info("Generating initial blockchain")
self.nodes[0].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[1].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[2].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[3].setgenerate(True, 100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
logging.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
logging.info("Backing up")
tmpdir = self.options.tmpdir
self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
logging.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].setgenerate(True, 101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
logging.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
# Restore wallets from backup
shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
logging.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
logging.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
WalletBackupTest().main()
|
the-stack_0_11412 | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
"130.211.129.106", "178.63.107.226",
"83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
"54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
"54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
"54.94.195.96", "54.94.200.247"
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Satoshi:0.13.(0|1|2|99)/|/PubgcoinCore:0.13.(0|1|2|99)/|/PubgcoinCore:0.14.(0|1|2|99)/|/PubgcoinCore:0.15.(0|1|2|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if int(sline[1]) == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
    '''Filter out hosts with more than one node per IP (multiple ports on the same address)'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
# Skip entries with valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
|
the-stack_0_11413 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: alexnet-dorefa.py
# Author: Yuxin Wu, Yuheng Zou ({wyx,zyh}@megvii.com)
import cv2
import tensorflow as tf
import argparse
import numpy as np
import os
import sys
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import prediction_incorrect
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
from tensorpack.tfutils.varreplace import remap_variables
from tensorpack.dataflow import dataset
from tensorpack.utils.gpu import get_nr_gpu
from imagenet_utils import get_imagenet_dataflow, fbresnet_augmentor
from dorefa import get_dorefa, ternarize
"""
This is a tensorpack script for the ImageNet results in paper:
DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients
http://arxiv.org/abs/1606.06160
The original experiements are performed on a proprietary framework.
This is our attempt to reproduce it on tensorpack & TensorFlow.
Accuracy:
Trained with 4 GPUs and (W,A,G)=(1,2,6), it can reach top-1 single-crop validation error of 47.6%,
after 70 epochs. This number is better than what's in the paper
due to more sophisticated augmentations.
With (W,A,G)=(32,32,32) -- full precision baseline, 41.4% error.
With (W,A,G)=(t,32,32) -- TTQ, 42.3% error
With (W,A,G)=(1,32,32) -- BWN, 44.3% error
With (W,A,G)=(1,1,32) -- BNN, 53.4% error
With (W,A,G)=(1,2,6), 47.6% error
With (W,A,G)=(1,2,4), 58.4% error
Training with 2 or 8 GPUs is supported but the result may get slightly
different, due to limited per-GPU batch size.
You may want to adjust total batch size and learning rate accordingly.
Speed:
About 11 iteration/s on 4 P100s. (Each epoch is set to 10000 iterations)
Note that this code was written early without using NCHW format. You
should expect a speed up if the code is ported to NCHW format.
To Train, for example:
./alexnet-dorefa.py --dorefa 1,2,6 --data PATH --gpu 0,1
PATH should look like:
PATH/
train/
n02134418/
n02134418_198.JPEG
...
...
val/
ILSVRC2012_val_00000001.JPEG
...
And you'll need the following to be able to fetch data efficiently
Fast disk random access (Not necessarily SSD. I used a RAID of HDD, but not sure if plain HDD is enough)
More than 20 CPU cores (for data processing)
More than 10G of free memory
To run pretrained model:
./alexnet-dorefa.py --load alexnet-126.npz --run a.jpg --dorefa 1,2,6
"""
BITW = 1
BITA = 2
BITG = 6
TOTAL_BATCH_SIZE = 128
BATCH_SIZE = None
class Model(ModelDesc):
def inputs(self):
return [tf.placeholder(tf.float32, [None, 224, 224, 3], 'input'),
tf.placeholder(tf.int32, [None], 'label')]
def build_graph(self, image, label):
image = image / 255.0
if BITW == 't':
fw, fa, fg = get_dorefa(32, 32, 32)
fw = ternarize
else:
fw, fa, fg = get_dorefa(BITW, BITA, BITG)
# monkey-patch tf.get_variable to apply fw
def new_get_variable(v):
name = v.op.name
# don't binarize first and last layer
if not name.endswith('W') or 'conv0' in name or 'fct' in name:
return v
else:
logger.info("Quantizing weight {}".format(v.op.name))
return fw(v)
def nonlin(x):
if BITA == 32:
return tf.nn.relu(x) # still use relu for 32bit cases
return tf.clip_by_value(x, 0.0, 1.0)
def activate(x):
return fa(nonlin(x))
with remap_variables(new_get_variable), \
argscope(BatchNorm, momentum=0.9, epsilon=1e-4), \
argscope(Conv2D, use_bias=False):
logits = (LinearWrap(image)
.Conv2D('conv0', 96, 12, strides=4, padding='VALID')
.apply(activate)
.Conv2D('conv1', 256, 5, padding='SAME', split=2)
.apply(fg)
.BatchNorm('bn1')
.MaxPooling('pool1', 3, 2, padding='SAME')
.apply(activate)
.Conv2D('conv2', 384, 3)
.apply(fg)
.BatchNorm('bn2')
.MaxPooling('pool2', 3, 2, padding='SAME')
.apply(activate)
.Conv2D('conv3', 384, 3, split=2)
.apply(fg)
.BatchNorm('bn3')
.apply(activate)
.Conv2D('conv4', 256, 3, split=2)
.apply(fg)
.BatchNorm('bn4')
.MaxPooling('pool4', 3, 2, padding='VALID')
.apply(activate)
.FullyConnected('fc0', 4096)
.apply(fg)
.BatchNorm('bnfc0')
.apply(activate)
.FullyConnected('fc1', 4096, use_bias=False)
.apply(fg)
.BatchNorm('bnfc1')
.apply(nonlin)
.FullyConnected('fct', 1000, use_bias=True)())
tf.nn.softmax(logits, name='output')
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))
wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
# weight decay on all W of fc layers
wd_cost = regularize_cost('fc.*/W', l2_regularizer(5e-6), name='regularize_cost')
add_param_summary(('.*/W', ['histogram', 'rms']))
total_cost = tf.add_n([cost, wd_cost], name='cost')
add_moving_summary(cost, wd_cost, total_cost)
return total_cost
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
return tf.train.AdamOptimizer(lr, epsilon=1e-5)
def get_data(dataset_name):
isTrain = dataset_name == 'train'
augmentors = fbresnet_augmentor(isTrain)
return get_imagenet_dataflow(
args.data, dataset_name, BATCH_SIZE, augmentors)
def get_config():
data_train = get_data('train')
data_test = get_data('val')
return TrainConfig(
dataflow=data_train,
callbacks=[
ModelSaver(),
# HumanHyperParamSetter('learning_rate'),
ScheduledHyperParamSetter(
'learning_rate', [(56, 2e-5), (64, 4e-6)]),
InferenceRunner(data_test,
[ScalarStats('cost'),
ClassificationError('wrong-top1', 'val-error-top1'),
ClassificationError('wrong-top5', 'val-error-top5')])
],
model=Model(),
steps_per_epoch=10000,
max_epoch=100,
)
def run_image(model, sess_init, inputs):
pred_config = PredictConfig(
model=model,
session_init=sess_init,
input_names=['input'],
output_names=['output']
)
predictor = OfflinePredictor(pred_config)
meta = dataset.ILSVRCMeta()
pp_mean = meta.get_per_pixel_mean()
pp_mean_224 = pp_mean[16:-16, 16:-16, :]
words = meta.get_synset_words_1000()
def resize_func(im):
h, w = im.shape[:2]
scale = 256.0 / min(h, w)
desSize = map(int, (max(224, min(w, scale * w)),
max(224, min(h, scale * h))))
im = cv2.resize(im, tuple(desSize), interpolation=cv2.INTER_CUBIC)
return im
transformers = imgaug.AugmentorList([
imgaug.MapImage(resize_func),
imgaug.CenterCrop((224, 224)),
imgaug.MapImage(lambda x: x - pp_mean_224),
])
for f in inputs:
assert os.path.isfile(f)
img = cv2.imread(f).astype('float32')
assert img is not None
img = transformers.augment(img)[np.newaxis, :, :, :]
outputs = predictor(img)[0]
prob = outputs[0]
ret = prob.argsort()[-10:][::-1]
names = [words[i] for i in ret]
print(f + ":")
print(list(zip(names, prob[ret])))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='the physical ids of GPUs to use')
parser.add_argument('--load', help='load a checkpoint, or a npz (given as the pretrained model)')
parser.add_argument('--data', help='ILSVRC dataset dir')
parser.add_argument('--dorefa', required=True,
help='number of bits for W,A,G, separated by comma. W="t" means TTQ')
parser.add_argument('--run', help='run on a list of images with the pretrained model', nargs='*')
args = parser.parse_args()
dorefa = args.dorefa.split(',')
if dorefa[0] == 't':
assert dorefa[1] == '32' and dorefa[2] == '32'
BITW, BITA, BITG = 't', 32, 32
else:
BITW, BITA, BITG = map(int, dorefa)
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.run:
assert args.load.endswith('.npz')
run_image(Model(), DictRestore(dict(np.load(args.load))), args.run)
sys.exit()
nr_tower = max(get_nr_gpu(), 1)
BATCH_SIZE = TOTAL_BATCH_SIZE // nr_tower
logger.set_logger_dir(os.path.join(
'train_log', 'alexnet-dorefa-{}'.format(args.dorefa)))
logger.info("Batch per tower: {}".format(BATCH_SIZE))
config = get_config()
if args.load:
config.session_init = SaverRestore(args.load)
launch_train_with_config(config, SyncMultiGPUTrainer(nr_tower))
|
the-stack_0_11414 | import csv
import os
import re
import subprocess
from threading import Thread
from enum import Enum
JAVACLASSES = {}
DEPENDENCIES = []
MATCHES = {}
## Support for multithreading with return value
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs)
self._return = None
def run(self):
if self._target is not None:
self._return = self._target(*self._args,
**self._kwargs)
def join(self):
Thread.join(self)
return self._return
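# Usage sketch (hypothetical worker): the thread's target return value comes back
# from join() instead of being discarded, which Setup relies on below.
#   t = ThreadWithReturnValue(target=lambda x: x * 2, args=(21,))
#   t.start()
#   t.join()  # -> 42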
## Analyses the source code files and imports the HUSACCT dependencies
class Setup(object):
def __init__(self, filepath_repository, filepath_dependencies, namespace, matchfilter):
        self.regex_p1 = re.compile(r'\.[a-z].*\.[A-Z].*$|^[a-z]')
        self.regex_p2 = re.compile(r'\.[A-Z]')
self.ignore = [namespace + ".R"]
self.getJavaClasses(filepath_repository)
self.getDependencies(filepath_dependencies)
self.getMatches(matchfilter)
# Create a dictionary of all Java classes
# This is used for getting a code block using a HUSACCT dependency
def getJavaClasses(self, filepath):
global JAVACLASSES
for root, dirs, files in os.walk(filepath):
for file in files:
if file.endswith(".java"):
JAVACLASSES[file[:-5]] = os.path.join(root, file)
# Detect innerclasses in HUSACCT dependencies and adds this relation to JAVACLASSES
# This only works if it is actually an inner class. Two classes on the same level won't work
def detectInnerclass(self, s, namespace):
if namespace in s and not(s in self.ignore):
s = s[12:]
# Format string to [(Object.)*Class]
x1 = self.regex_p1.search(s)
if x1 != None:
s = s[x1.span()[0]+1:]
x2 = self.regex_p2.search(s)
if x2 != None:
s = s[x2.span()[0]+1:]
# Split string
values = s.split('.')
path = JAVACLASSES[values[0]]
for i in reversed(range(1, len(values))):
JAVACLASSES[values[i]] = path
# Parse the XML file of HUSACCT dependencies
def getDependencies(self, filepath):
with open(filepath, newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
                self.detectInnerclass(row[0], self.namespace)
                self.detectInnerclass(row[1], self.namespace)
DEPENDENCIES.append(row)
# Generate an array of matches based on file and linenumber
def getMatches(self, dependency):
for i in range(0, len(DEPENDENCIES)):
row = DEPENDENCIES[i]
if (dependency in row[1] and row[2] != "Import"):
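                # scan neighbouring rows of the same file in both directions to collect dependencies that share this match's line number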
t1 = ThreadWithReturnValue(target=self.searchDependencies, args=("Search up ", row[0], i, -1))
t2 = ThreadWithReturnValue(target=self.searchDependencies, args=("Search down", row[0], i, 1))
t1.start()
t2.start()
results = t1.join() + t2.join()
if len(results) == 0:
continue
try:
old = MATCHES[row[0]]
new = [row[4], row[2], results]
old.append(new)
MATCHES[row[0]] = old
                except KeyError:
MATCHES[row[0]] = [[row[4], row[2], results]]
# Search for matching dependencies on file and linenumber
def searchDependencies(self, threadName, file, startlinenr, searchDirection):
i = startlinenr + searchDirection
results = []
while i >= 0 and i < len(DEPENDENCIES) - 1 and DEPENDENCIES[i][0] == file:
if (int(DEPENDENCIES[i][4]) == int(DEPENDENCIES[startlinenr][4])):
results.append(i)
i += searchDirection
return results
## Static class that prints blocks of code
class Lines():
# Get lines using filename
def getLines(file, linenr, offset):
try:
filepath = JAVACLASSES[file]
        except KeyError:
file = Tools.convertNot(file)
filepath = JAVACLASSES[file]
cmd = f'cat {filepath} | head -{linenr + offset} | tail -{1 + 2 * offset}'
return subprocess.check_output(cmd, shell=True).decode('UTF-8')
# Get lines by filename and linenumber
def getLinesByRange(file, start_linenr, end_linenr, offset):
try:
filepath = JAVACLASSES[file]
        except KeyError:
file = Tools.convertNot(file)
filepath = JAVACLASSES[file]
cmd = f'cat {filepath} | head -{end_linenr + offset} | tail -{end_linenr - start_linenr + 2*offset + 1}'
return subprocess.check_output(cmd, shell=True).decode('UTF-8')
# Get lines using dependency
def printDependencyWithCodeLines(dependency, offset):
print(Lines.getLines(dependency[0], int(dependency[4]), offset))
# Get lines of code where matches start in target
def getCodeLinesStartingFromTarget(component, offset):
keys = list(MATCHES.keys())
for key in keys:
if component != None and Tools.convertNot(key) != component:
continue
# Preprocessing of matches
matches = MATCHES[key]
# Group by line number distance of max 3
grouped_matches = Lines.groupLinesByLinenumber(matches, 3)
for group in grouped_matches:
# [match1, match2] but sorted by linenr
linenr_start = group[0][0]
linenr_end = group[len(group) - 1][0]
for match in group:
print(f"{Tools.convertNot(key)}:{match[0]}| {match[1]} \
--> {[Tools.convertNot(DEPENDENCIES[x][1]) for x in match[2]]}")
print(Lines.getLinesByRange(key, int(linenr_start), int(linenr_end), offset))
# Get lines of code where matches end in target
def getCodeLinesEndingAtTarget(component, offset):
keys = list(MATCHES.keys())
for key in keys:
# Preprocessing of matches
matches = MATCHES[key]
# Group by line number distance of max 3
grouped_matches = Lines.groupLinesByLinenumber(matches, 3)
for group in grouped_matches:
# [match1, match2] but sorted by linenr
linenr_start = group[0][0]
linenr_end = group[len(group) - 1][0]
shouldPrint = False
for match in group:
if component in [Tools.convertNot(DEPENDENCIES[x][1]) for x in match[2]]:
shouldPrint = True
if not(shouldPrint):
continue
for match in group:
print(f"{Tools.convertNot(key)}:{match[0]}| {match[1]} \
--> {[Tools.convertNot(DEPENDENCIES[x][1]) for x in match[2]]}")
print(Lines.getLinesByRange(key, int(linenr_start), int(linenr_end), offset))
# Groups matches by distance of line numbers --> [[match1, match2], [match3]]
def groupLinesByLinenumber(matches, separation):
matches.sort(key=lambda x: int(x[0]))
linenrs = [int(item[0]) for item in matches]
grouped = []
current_group = []
prev_linenr = -1
for i in range(0, len(linenrs)):
# New matches --> Fill first group
if (prev_linenr == -1):
current_group.append(matches[i])
prev_linenr = linenrs[i]
continue
# Already worked with matches
current_linenr = linenrs[i]
if (current_linenr - prev_linenr < separation):
current_group.append(matches[i])
elif len(current_group) > 0:
grouped.append(current_group)
current_group = []
prev_linenr = linenrs[i]
# Still have groups left at the end
if len(current_group) > 0:
grouped.append(current_group)
current_group = []
return grouped
# Print lines of code of components that occur in the matches
def getCodeLines(componentfilter, offset, bothDirections = True):
for target in componentfilter:
Lines.getCodeLinesStartingFromTarget(target, offset)
print("_______________\n")
# If target is None, all matches are shown. This should not be displayed twice
if bothDirections and target is not None:
Lines.getCodeLinesEndingAtTarget(target, offset)
print("_______________\n")
print("_______________\n")
class Tools(object):
    # Convert notation (HUSACCT: x.x.x.y -> y)
def convertNot(s):
        regex_p1 = re.compile(r'\.[a-z].*\.[A-Z].*$|^[a-z]')
        regex_p2 = re.compile(r'\.[A-Z]')
x1 = regex_p1.search(s)
if x1 != None:
s = s[x1.span()[0]+1:]
x2 = regex_p2.search(s)
if x2 != None:
s = s[x2.span()[0]+1:]
if "." in s:
return s.split(".")[0]
return s
# Search the dependencies using criteria
def searchDependencies(component, relationship):
result = []
for dep in DEPENDENCIES:
if (component in dep[1] and dep[2] == relationship):
result.append(dep)
return result
# Find a list of context-declared broadcast receivers
def findContextDeclaredBroadcastReceivers():
result = Tools.searchDependencies("xLibraries.android.content.BroadcastReceiver", "Inheritance")
if len(result) == 0:
print("No BroadcastReceivers found")
return
for r in result:
print(f"{Tools.convertNot(r[0])} \t\t <-- {r[0]}")
result += Tools.searchDependencies(r[0], "Inheritance")
# Find list of third party dependencies
def findingThirdPartyDependencies(namespaces):
namespaces += ["android.support", "butterknife", "xLibraries.android", "xLibraries.com.bumptech",
"xLibraries.com.google", "xLibraries.com.squareup", "xLibraries.java", "xLibraries.org.apache",
"xLibraries.org.json", "xLibraries.org.jsoup", "xLibraries.org.junit", "xLibraries.org.powermock",
"xLibraries.rx", "xLibraries.timber"]
thirdParties = set()
for dep in DEPENDENCIES:
dep_0 = False
dep_1 = False
for namespace in namespaces:
if namespace in dep[0]:
dep_0 = True
if namespace in dep[1]:
dep_1 = True
if dep_0 == False:
thirdParties.add(dep[0])
if dep_1 == False:
thirdParties.add(dep[1])
thirdParties = list(thirdParties)
thirdParties.sort()
for thirdParty in thirdParties:
print(thirdParty)
class MatchFilters(Enum):
## Match filters ##
INTENT_URI = "xLibraries.android.content.Intent"
PENDING_INTENT_URI = "xLibraries.android.app.PendingIntent"
# Content resolver does not work if context.getContentResolver() is used (and context is too broad)
CONTENT_RESOLVER_URI = "Loader"
# Detecting binders
BINDER_URI = "xLibraries.android.os.Binder"
# Used for finding all references from/to a component
NONE_URI = ""
# Detecting context-declared broadcast receivers
LOCAL_RECEIVERS = "BroadcastReceiver"
CUSTOM_COMPONENT = "Any component"
filepath_repository = "/home/yorick/Repositories/Omni-Notes"
filepath_dependencies = "/home/yorick/Repositories/OZP/dependencies_OmniNotes.csv"
namespace = "it.feio.android.omninotes"
matchFilter = MatchFilters.INTENT_URI
# Stupid exceptions when class files contain two separate classes :<
JAVACLASSES["ImageAndTextViewHolder"] = "/home/yorick/Repositories/Omni-Notes/omniNotes/src/main/java/it/feio/android/omninotes/models/adapters/ImageAndTextAdapter.java"
JAVACLASSES["NoteDrawerAdapterViewHolder"] = "/home/yorick/Repositories/Omni-Notes/omniNotes/src/main/java/it/feio/android/omninotes/models/adapters/NavDrawerAdapter.java"
JAVACLASSES["NoteDrawerCategoryAdapterViewHolder"] = "/home/yorick/Repositories/Omni-Notes/omniNotes/src/main/java/it/feio/android/omninotes/models/adapters/NavDrawerCategoryAdapter.java"
# Run setup; find java files and matches
Setup(filepath_repository, filepath_dependencies, namespace, matchFilter.value)
## Component filters ##
componentFilter = [None] # Show all matches
componentFilter = ["CategoriesUpdatedEvent","DynamicNavigationReadyEvent","NavigationUpdatedEvent",
"NavigationUpdatedNavDrawerClosedEvent", "NotesLoadedEvent",
"NotesMergeEvent", "NotesUpdatedEvent", "NotificationRemovedEvent", "PasswordRemovedEvent",
"PushbulletReplyEvent", "SwitchFragmentEvent"]
Lines.getCodeLines(componentFilter, 1, True)
# Tools.findContextDeclaredBroadcastReceivers()
# Tools.findingThirdPartyDependencies([namespace]) |
the-stack_0_11417 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Flops Analyser."""
import json
import os
from mindinsight.profiler.analyser.base_analyser import BaseAnalyser
from mindinsight.profiler.common.exceptions.exceptions import ProfilerIOException
from mindinsight.profiler.common.log import logger
from mindinsight.profiler.common.validator.validate_path import validate_and_normalize_path
class FlopsAnalyser(BaseAnalyser):
"""
Analyse flops data from file.
"""
_flops_summary_filename = 'flops_summary_{}.json'
_flops_scope_filename = 'flops_scope_{}.json'
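    # the {} placeholder in both filenames is filled with the device id when the files are looked up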
def _load(self):
"""Load data according to the parsed profiling files."""
def _filter(self, filter_condition):
"""
Filter the profiling data according to the filter condition.
Args:
filter_condition (dict): The filter condition.
"""
def get_flops_summary(self):
"""
Get flops summary information for UI display.
Returns:
json, the content of flops summary information.
"""
summary_filename = self._flops_summary_filename.format(self._device_id)
file_path = os.path.join(self._profiling_dir, summary_filename)
file_path = validate_and_normalize_path(
file_path, raise_key='Invalid flops summary path.'
)
flops_summary = {}
if os.path.exists(file_path):
try:
with open(file_path, 'r') as f_obj:
flops_summary = json.load(f_obj)
except (IOError, OSError, json.JSONDecodeError) as err:
logger.error('Error occurred when read flops summary file: %s', err)
raise ProfilerIOException()
else:
logger.warning('No flops summary file. Please check the output path.')
return flops_summary
def get_flops_scope(self):
"""
Get flops information of each scope for UI display.
Returns:
json, the content of flops summary information.
"""
flops_scope_filename = self._flops_scope_filename.format(self._device_id)
file_path = os.path.join(self._profiling_dir, flops_scope_filename)
file_path = validate_and_normalize_path(
file_path, raise_key='Invalid flops scope path.'
)
flops_scope = {}
if os.path.exists(file_path):
try:
with open(file_path, 'r') as f_obj:
flops_scope = json.load(f_obj)
except (IOError, OSError, json.JSONDecodeError) as err:
logger.error('Error occurred when read flops scope file: %s', err)
raise ProfilerIOException()
else:
logger.warning('No flops scope file. Please check the output path.')
return flops_scope
|
the-stack_0_11419 | # patchbomb.py - sending Mercurial changesets as patch emails
#
# Copyright 2005-2009 Matt Mackall <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to send changesets as (a series of) patch emails
The series is started off with a "[PATCH 0 of N]" introduction, which
describes the series as a whole.
Each patch email has a Subject line of "[PATCH M of N] ...", using the
first line of the changeset description as the subject text. The
message contains two or three body parts:
- The changeset description.
- [Optional] The result of running diffstat on the patch.
- The patch itself, as generated by :hg:`export`.
Each message refers to the first in the series using the In-Reply-To
and References headers, so they will show up as a sequence in threaded
mail and news readers, and in mail archives.
To configure other defaults, add a section like this to your
configuration file::
[email]
from = My Name <my@email>
to = recipient1, recipient2, ...
cc = cc1, cc2, ...
bcc = bcc1, bcc2, ...
reply-to = address1, address2, ...
Use ``[patchbomb]`` as configuration section name if you need to
override global ``[email]`` address settings.
Then you can use the :hg:`email` command to mail a series of
changesets as a patchbomb.
You can also either configure the method option in the email section
to be a sendmail compatible mailer or fill out the [smtp] section so
that the patchbomb extension can automatically send patchbombs
directly from the commandline. See the [email] and [smtp] sections in
hgrc(5) for details.
By default, :hg:`email` will prompt for a ``To`` or ``CC`` header if
you do not supply one via configuration or the command line. You can
override this to never prompt by configuring an empty value::
[email]
cc =
You can control the default inclusion of an introduction message with the
``patchbomb.intro`` configuration option. The configuration is always
overwritten by command line flags like --intro and --desc::
[patchbomb]
intro=auto # include introduction message if more than 1 patch (default)
intro=never # never include an introduction message
intro=always # always include an introduction message
You can specify a template for flags to be added in subject prefixes. Flags
specified by --flag option are exported as ``{flags}`` keyword::
[patchbomb]
flagtemplate = "{separate(' ',
ifeq(branch, 'default', '', branch|upper),
flags)}"
You can set patchbomb to always ask for confirmation by setting
``patchbomb.confirm`` to true.
'''
import email as emailmod
import errno
import os
import socket
import tempfile
from mercurial.i18n import _
from mercurial import (
cmdutil,
commands,
error,
formatter,
hg,
mail,
node as nodemod,
patch,
registrar,
repair,
scmutil,
templater,
util,
)
stringio = util.stringio
cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
def _addpullheader(seq, ctx):
"""Add a header pointing to a public URL where the changeset is available
"""
repo = ctx.repo()
# experimental config: patchbomb.publicurl
# waiting for some logic that check that the changeset are available on the
# destination before patchbombing anything.
publicurl = repo.ui.config('patchbomb', 'publicurl')
if publicurl:
return ('Available At %s\n'
'# hg pull %s -r %s' % (publicurl, publicurl, ctx))
return None
def uisetup(ui):
cmdutil.extraexport.append('pullurl')
cmdutil.extraexportmap['pullurl'] = _addpullheader
def reposetup(ui, repo):
if not repo.local():
return
repo._wlockfreeprefix.add('last-email.txt')
def prompt(ui, prompt, default=None, rest=':'):
if default:
prompt += ' [%s]' % default
return ui.prompt(prompt + rest, default)
def introwanted(ui, opts, number):
'''is an introductory message apparently wanted?'''
introconfig = ui.config('patchbomb', 'intro', 'auto')
if opts.get('intro') or opts.get('desc'):
intro = True
elif introconfig == 'always':
intro = True
elif introconfig == 'never':
intro = False
elif introconfig == 'auto':
intro = 1 < number
else:
ui.write_err(_('warning: invalid patchbomb.intro value "%s"\n')
% introconfig)
ui.write_err(_('(should be one of always, never, auto)\n'))
intro = 1 < number
return intro
def _formatflags(ui, repo, rev, flags):
"""build flag string optionally by template"""
tmpl = ui.config('patchbomb', 'flagtemplate')
if not tmpl:
return ' '.join(flags)
out = util.stringio()
opts = {'template': templater.unquotestring(tmpl)}
with formatter.templateformatter(ui, out, 'patchbombflag', opts) as fm:
fm.startitem()
fm.context(ctx=repo[rev])
fm.write('flags', '%s', fm.formatlist(flags, name='flag'))
return out.getvalue()
def _formatprefix(ui, repo, rev, flags, idx, total, numbered):
"""build prefix to patch subject"""
flag = _formatflags(ui, repo, rev, flags)
if flag:
flag = ' ' + flag
if not numbered:
return '[PATCH%s]' % flag
else:
tlen = len(str(total))
return '[PATCH %0*d of %d%s]' % (tlen, idx, total, flag)
def makepatch(ui, repo, rev, patchlines, opts, _charsets, idx, total, numbered,
patchname=None):
desc = []
node = None
body = ''
for line in patchlines:
if line.startswith('#'):
if line.startswith('# Node ID'):
node = line.split()[-1]
continue
if line.startswith('diff -r') or line.startswith('diff --git'):
break
desc.append(line)
if not patchname and not node:
raise ValueError
if opts.get('attach') and not opts.get('body'):
body = ('\n'.join(desc[1:]).strip() or
'Patch subject is complete summary.')
body += '\n\n\n'
if opts.get('plain'):
while patchlines and patchlines[0].startswith('# '):
patchlines.pop(0)
if patchlines:
patchlines.pop(0)
while patchlines and not patchlines[0].strip():
patchlines.pop(0)
ds = patch.diffstat(patchlines)
if opts.get('diffstat'):
body += ds + '\n\n'
addattachment = opts.get('attach') or opts.get('inline')
if not addattachment or opts.get('body'):
body += '\n'.join(patchlines)
if addattachment:
msg = emailmod.MIMEMultipart.MIMEMultipart()
if body:
msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
p = mail.mimetextpatch('\n'.join(patchlines), 'x-patch',
opts.get('test'))
binnode = nodemod.bin(node)
# if node is mq patch, it will have the patch file's name as a tag
if not patchname:
patchtags = [t for t in repo.nodetags(binnode)
if t.endswith('.patch') or t.endswith('.diff')]
if patchtags:
patchname = patchtags[0]
elif total > 1:
patchname = cmdutil.makefilename(repo, '%b-%n.patch',
binnode, seqno=idx,
total=total)
else:
patchname = cmdutil.makefilename(repo, '%b.patch', binnode)
disposition = 'inline'
if opts.get('attach'):
disposition = 'attachment'
p['Content-Disposition'] = disposition + '; filename=' + patchname
msg.attach(p)
else:
msg = mail.mimetextpatch(body, display=opts.get('test'))
prefix = _formatprefix(ui, repo, rev, opts.get('flag'), idx, total,
numbered)
subj = desc[0].strip().rstrip('. ')
if not numbered:
subj = ' '.join([prefix, opts.get('subject') or subj])
else:
subj = ' '.join([prefix, subj])
msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
msg['X-Mercurial-Node'] = node
msg['X-Mercurial-Series-Index'] = '%i' % idx
msg['X-Mercurial-Series-Total'] = '%i' % total
return msg, subj, ds
def _getpatches(repo, revs, **opts):
"""return a list of patches for a list of revisions
Each patch in the list is itself a list of lines.
"""
ui = repo.ui
prev = repo['.'].rev()
for r in revs:
if r == prev and (repo[None].files() or repo[None].deleted()):
ui.warn(_('warning: working directory has '
'uncommitted changes\n'))
output = stringio()
cmdutil.export(repo, [r], fp=output,
opts=patch.difffeatureopts(ui, opts, git=True))
yield output.getvalue().split('\n')
def _getbundle(repo, dest, **opts):
"""return a bundle containing changesets missing in "dest"
The `opts` keyword-arguments are the same as the one accepted by the
`bundle` command.
The bundle is a returned as a single in-memory binary blob.
"""
ui = repo.ui
tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
tmpfn = os.path.join(tmpdir, 'bundle')
btype = ui.config('patchbomb', 'bundletype')
if btype:
opts['type'] = btype
try:
commands.bundle(ui, repo, tmpfn, dest, **opts)
return util.readfile(tmpfn)
finally:
try:
os.unlink(tmpfn)
except OSError:
pass
os.rmdir(tmpdir)
def _getdescription(repo, defaultbody, sender, **opts):
"""obtain the body of the introduction message and return it
This is also used for the body of email with an attached bundle.
The body can be obtained either from the command line option or entered by
the user through the editor.
"""
ui = repo.ui
if opts.get('desc'):
body = open(opts.get('desc')).read()
else:
ui.write(_('\nWrite the introductory message for the '
'patch series.\n\n'))
body = ui.edit(defaultbody, sender, repopath=repo.path)
# Save series description in case sendmail fails
msgfile = repo.vfs('last-email.txt', 'wb')
msgfile.write(body)
msgfile.close()
return body
def _getbundlemsgs(repo, sender, bundle, **opts):
"""Get the full email for sending a given bundle
This function returns a list of "email" tuples (subject, content, None).
The list is always one message long in that case.
"""
ui = repo.ui
_charsets = mail._charsets(ui)
subj = (opts.get('subject')
or prompt(ui, 'Subject:', 'A bundle for your repository'))
body = _getdescription(repo, '', sender, **opts)
msg = emailmod.MIMEMultipart.MIMEMultipart()
if body:
msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
datapart = emailmod.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
datapart.set_payload(bundle)
bundlename = '%s.hg' % opts.get('bundlename', 'bundle')
datapart.add_header('Content-Disposition', 'attachment',
filename=bundlename)
emailmod.Encoders.encode_base64(datapart)
msg.attach(datapart)
msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
return [(msg, subj, None)]
def _makeintro(repo, sender, revs, patches, **opts):
"""make an introduction email, asking the user for content if needed
email is returned as (subject, body, cumulative-diffstat)"""
ui = repo.ui
_charsets = mail._charsets(ui)
# use the last revision which is likely to be a bookmarked head
prefix = _formatprefix(ui, repo, revs.last(), opts.get('flag'),
0, len(patches), numbered=True)
subj = (opts.get('subject') or
prompt(ui, '(optional) Subject: ', rest=prefix, default=''))
if not subj:
return None # skip intro if the user doesn't bother
subj = prefix + ' ' + subj
body = ''
if opts.get('diffstat'):
# generate a cumulative diffstat of the whole patch series
diffstat = patch.diffstat(sum(patches, []))
body = '\n' + diffstat
else:
diffstat = None
body = _getdescription(repo, body, sender, **opts)
msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
msg['Subject'] = mail.headencode(ui, subj, _charsets,
opts.get('test'))
return (msg, subj, diffstat)
def _getpatchmsgs(repo, sender, revs, patchnames=None, **opts):
"""return a list of emails from a list of patches
This involves introduction message creation if necessary.
This function returns a list of "email" tuples (subject, content, None).
"""
ui = repo.ui
_charsets = mail._charsets(ui)
patches = list(_getpatches(repo, revs, **opts))
msgs = []
ui.write(_('this patch series consists of %d patches.\n\n')
% len(patches))
# build the intro message, or skip it if the user declines
if introwanted(ui, opts, len(patches)):
msg = _makeintro(repo, sender, revs, patches, **opts)
if msg:
msgs.append(msg)
# are we going to send more than one message?
numbered = len(msgs) + len(patches) > 1
# now generate the actual patch messages
name = None
assert len(revs) == len(patches)
for i, (r, p) in enumerate(zip(revs, patches)):
if patchnames:
name = patchnames[i]
msg = makepatch(ui, repo, r, p, opts, _charsets, i + 1,
len(patches), numbered, name)
msgs.append(msg)
return msgs
def _getoutgoing(repo, dest, revs):
'''Return the revisions present locally but not in dest'''
ui = repo.ui
url = ui.expandpath(dest or 'default-push', dest or 'default')
url = hg.parseurl(url)[0]
ui.status(_('comparing with %s\n') % util.hidepassword(url))
revs = [r for r in revs if r >= 0]
if not revs:
revs = [len(repo) - 1]
revs = repo.revs('outgoing(%s) and ::%ld', dest or '', revs)
if not revs:
ui.status(_("no changes found\n"))
return revs
emailopts = [
('', 'body', None, _('send patches as inline message text (default)')),
('a', 'attach', None, _('send patches as attachments')),
('i', 'inline', None, _('send patches as inline attachments')),
('', 'bcc', [], _('email addresses of blind carbon copy recipients')),
('c', 'cc', [], _('email addresses of copy recipients')),
('', 'confirm', None, _('ask for confirmation before sending')),
('d', 'diffstat', None, _('add diffstat output to messages')),
('', 'date', '', _('use the given date as the sending date')),
('', 'desc', '', _('use the given file as the series description')),
('f', 'from', '', _('email address of sender')),
('n', 'test', None, _('print messages that would be sent')),
('m', 'mbox', '', _('write messages to mbox file instead of sending them')),
('', 'reply-to', [], _('email addresses replies should be sent to')),
('s', 'subject', '', _('subject of first message (intro or single patch)')),
('', 'in-reply-to', '', _('message identifier to reply to')),
('', 'flag', [], _('flags to add in subject prefixes')),
('t', 'to', [], _('email addresses of recipients'))]
@command('email',
[('g', 'git', None, _('use git extended diff format')),
('', 'plain', None, _('omit hg patch header')),
('o', 'outgoing', None,
_('send changes not found in the target repository')),
('b', 'bundle', None, _('send changes not in target as a binary bundle')),
('B', 'bookmark', '', _('send changes only reachable by given bookmark')),
('', 'bundlename', 'bundle',
_('name of the bundle attachment file'), _('NAME')),
('r', 'rev', [], _('a revision to send'), _('REV')),
('', 'force', None, _('run even when remote repository is unrelated '
'(with -b/--bundle)')),
('', 'base', [], _('a base changeset to specify instead of a destination '
'(with -b/--bundle)'), _('REV')),
('', 'intro', None, _('send an introduction email for a single patch')),
] + emailopts + cmdutil.remoteopts,
_('hg email [OPTION]... [DEST]...'))
def email(ui, repo, *revs, **opts):
'''send changesets by email
By default, diffs are sent in the format generated by
:hg:`export`, one per message. The series starts with a "[PATCH 0
of N]" introduction, which describes the series as a whole.
Each patch email has a Subject line of "[PATCH M of N] ...", using
the first line of the changeset description as the subject text.
The message contains two or three parts. First, the changeset
description.
With the -d/--diffstat option, if the diffstat program is
installed, the result of running diffstat on the patch is inserted.
Finally, the patch itself, as generated by :hg:`export`.
With the -d/--diffstat or --confirm options, you will be presented
with a final summary of all messages and asked for confirmation before
the messages are sent.
By default the patch is included as text in the email body for
easy reviewing. Using the -a/--attach option will instead create
an attachment for the patch. With -i/--inline an inline attachment
will be created. You can include a patch both as text in the email
body and as a regular or an inline attachment by combining the
-a/--attach or -i/--inline with the --body option.
With -B/--bookmark changesets reachable by the given bookmark are
selected.
With -o/--outgoing, emails will be generated for patches not found
in the destination repository (or only those which are ancestors
of the specified revisions if any are provided)
With -b/--bundle, changesets are selected as for --outgoing, but a
single email containing a binary Mercurial bundle as an attachment
will be sent. Use the ``patchbomb.bundletype`` config option to
control the bundle type as with :hg:`bundle --type`.
With -m/--mbox, instead of previewing each patchbomb message in a
pager or sending the messages directly, it will create a UNIX
mailbox file with the patch emails. This mailbox file can be
previewed with any mail user agent which supports UNIX mbox
files.
With -n/--test, all steps will run, but mail will not be sent.
You will be prompted for an email recipient address, a subject and
an introductory message describing the patches of your patchbomb.
Then when all is done, patchbomb messages are displayed.
In case email sending fails, you will find a backup of your series
introductory message in ``.hg/last-email.txt``.
The default behavior of this command can be customized through
configuration. (See :hg:`help patchbomb` for details)
Examples::
hg email -r 3000 # send patch 3000 only
hg email -r 3000 -r 3001 # send patches 3000 and 3001
hg email -r 3000:3005 # send patches 3000 through 3005
hg email 3000 # send patch 3000 (deprecated)
hg email -o # send all patches not in default
hg email -o DEST # send all patches not in DEST
hg email -o -r 3000 # send all ancestors of 3000 not in default
hg email -o -r 3000 DEST # send all ancestors of 3000 not in DEST
hg email -B feature # send all ancestors of feature bookmark
hg email -b # send bundle of all patches not in default
hg email -b DEST # send bundle of all patches not in DEST
hg email -b -r 3000 # bundle of all ancestors of 3000 not in default
hg email -b -r 3000 DEST # bundle of all ancestors of 3000 not in DEST
hg email -o -m mbox && # generate an mbox file...
mutt -R -f mbox # ... and view it with mutt
hg email -o -m mbox && # generate an mbox file ...
formail -s sendmail \\ # ... and use formail to send from the mbox
-bm -t < mbox # ... using sendmail
Before using this command, you will need to enable email in your
hgrc. See the [email] section in hgrc(5) for details.
'''
_charsets = mail._charsets(ui)
bundle = opts.get('bundle')
date = opts.get('date')
mbox = opts.get('mbox')
outgoing = opts.get('outgoing')
rev = opts.get('rev')
bookmark = opts.get('bookmark')
if not (opts.get('test') or mbox):
# really sending
mail.validateconfig(ui)
if not (revs or rev or outgoing or bundle or bookmark):
raise error.Abort(_('specify at least one changeset with -B, -r or -o'))
if outgoing and bundle:
raise error.Abort(_("--outgoing mode always on with --bundle;"
" do not re-specify --outgoing"))
if rev and bookmark:
raise error.Abort(_("-r and -B are mutually exclusive"))
if outgoing or bundle:
if len(revs) > 1:
raise error.Abort(_("too many destinations"))
if revs:
dest = revs[0]
else:
dest = None
revs = []
if rev:
if revs:
raise error.Abort(_('use only one form to specify the revision'))
revs = rev
elif bookmark:
if bookmark not in repo._bookmarks:
raise error.Abort(_("bookmark '%s' not found") % bookmark)
revs = repair.stripbmrevset(repo, bookmark)
revs = scmutil.revrange(repo, revs)
if outgoing:
revs = _getoutgoing(repo, dest, revs)
if bundle:
opts['revs'] = [str(r) for r in revs]
# check if revision exist on the public destination
publicurl = repo.ui.config('patchbomb', 'publicurl')
if publicurl:
repo.ui.debug('checking that revision exist in the public repo')
try:
publicpeer = hg.peer(repo, {}, publicurl)
except error.RepoError:
repo.ui.write_err(_('unable to access public repo: %s\n')
% publicurl)
raise
if not publicpeer.capable('known'):
repo.ui.debug('skipping existence checks: public repo too old')
else:
out = [repo[r] for r in revs]
known = publicpeer.known(h.node() for h in out)
missing = []
for idx, h in enumerate(out):
if not known[idx]:
missing.append(h)
if missing:
if 1 < len(missing):
msg = _('public "%s" is missing %s and %i others')
msg %= (publicurl, missing[0], len(missing) - 1)
else:
msg = _('public url %s is missing %s')
msg %= (publicurl, missing[0])
revhint = ' '.join('-r %s' % h
for h in repo.set('heads(%ld)', missing))
hint = _("use 'hg push %s %s'") % (publicurl, revhint)
raise error.Abort(msg, hint=hint)
# start
if date:
start_time = util.parsedate(date)
else:
start_time = util.makedate()
def genmsgid(id):
return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn())
# deprecated config: patchbomb.from
sender = (opts.get('from') or ui.config('email', 'from') or
ui.config('patchbomb', 'from') or
prompt(ui, 'From', ui.username()))
if bundle:
bundledata = _getbundle(repo, dest, **opts)
bundleopts = opts.copy()
bundleopts.pop('bundle', None) # already processed
msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts)
else:
msgs = _getpatchmsgs(repo, sender, revs, **opts)
showaddrs = []
def getaddrs(header, ask=False, default=None):
configkey = header.lower()
opt = header.replace('-', '_').lower()
addrs = opts.get(opt)
if addrs:
showaddrs.append('%s: %s' % (header, ', '.join(addrs)))
return mail.addrlistencode(ui, addrs, _charsets, opts.get('test'))
# not on the command line: fallback to config and then maybe ask
addr = (ui.config('email', configkey) or
ui.config('patchbomb', configkey))
if not addr:
specified = (ui.hasconfig('email', configkey) or
ui.hasconfig('patchbomb', configkey))
if not specified and ask:
addr = prompt(ui, header, default=default)
if addr:
showaddrs.append('%s: %s' % (header, addr))
return mail.addrlistencode(ui, [addr], _charsets, opts.get('test'))
elif default:
return mail.addrlistencode(
ui, [default], _charsets, opts.get('test'))
return []
to = getaddrs('To', ask=True)
if not to:
# we can get here in non-interactive mode
raise error.Abort(_('no recipient addresses provided'))
cc = getaddrs('Cc', ask=True, default='')
bcc = getaddrs('Bcc')
replyto = getaddrs('Reply-To')
confirm = ui.configbool('patchbomb', 'confirm')
confirm |= bool(opts.get('diffstat') or opts.get('confirm'))
if confirm:
ui.write(_('\nFinal summary:\n\n'), label='patchbomb.finalsummary')
ui.write(('From: %s\n' % sender), label='patchbomb.from')
for addr in showaddrs:
ui.write('%s\n' % addr, label='patchbomb.to')
for m, subj, ds in msgs:
ui.write(('Subject: %s\n' % subj), label='patchbomb.subject')
if ds:
ui.write(ds, label='patchbomb.diffstats')
ui.write('\n')
if ui.promptchoice(_('are you sure you want to send (yn)?'
'$$ &Yes $$ &No')):
raise error.Abort(_('patchbomb canceled'))
ui.write('\n')
parent = opts.get('in_reply_to') or None
# angle brackets may be omitted, they're not semantically part of the msg-id
if parent is not None:
if not parent.startswith('<'):
parent = '<' + parent
if not parent.endswith('>'):
parent += '>'
sender_addr = emailmod.Utils.parseaddr(sender)[1]
sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
sendmail = None
firstpatch = None
for i, (m, subj, ds) in enumerate(msgs):
try:
m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
if not firstpatch:
firstpatch = m['Message-Id']
m['X-Mercurial-Series-Id'] = firstpatch
except TypeError:
m['Message-Id'] = genmsgid('patchbomb')
if parent:
m['In-Reply-To'] = parent
m['References'] = parent
if not parent or 'X-Mercurial-Node' not in m:
parent = m['Message-Id']
m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version()
m['Date'] = emailmod.Utils.formatdate(start_time[0], localtime=True)
start_time = (start_time[0] + 1, start_time[1])
m['From'] = sender
m['To'] = ', '.join(to)
if cc:
m['Cc'] = ', '.join(cc)
if bcc:
m['Bcc'] = ', '.join(bcc)
if replyto:
m['Reply-To'] = ', '.join(replyto)
if opts.get('test'):
ui.status(_('displaying '), subj, ' ...\n')
ui.pager('email')
generator = emailmod.Generator.Generator(ui, mangle_from_=False)
try:
generator.flatten(m, 0)
ui.write('\n')
except IOError as inst:
if inst.errno != errno.EPIPE:
raise
else:
if not sendmail:
sendmail = mail.connect(ui, mbox=mbox)
ui.status(_('sending '), subj, ' ...\n')
ui.progress(_('sending'), i, item=subj, total=len(msgs),
unit=_('emails'))
if not mbox:
# Exim does not remove the Bcc field
del m['Bcc']
fp = stringio()
generator = emailmod.Generator.Generator(fp, mangle_from_=False)
generator.flatten(m, 0)
sendmail(sender_addr, to + bcc + cc, fp.getvalue())
ui.progress(_('writing'), None)
ui.progress(_('sending'), None)
|
the-stack_0_11421 | import bpy
import math
import bmesh
bonesCount = 0
def write(filepath,
applyMods=False
):
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.transform_apply(location = True, scale = True, rotation = True)
bpy.ops.object.select_all(action='DESELECT')
scene = bpy.context.scene
meshData = MeshData()
animsData = []
bones = []
for obj in bpy.context.visible_objects:
if obj.pose is not None:
for bone in obj.pose.bones:
bones.append(bone)
global bonesCount
bonesCount = len(bones)
for obj in bpy.context.visible_objects:
if applyMods or obj.type != "MESH":
try:
me = obj.to_mesh(scene, True, "PREVIEW")
except:
me = None
is_tmp_mesh = True
else:
try:
me = obj.to_mesh(scene, False, "PREVIEW")
except:
me = None
is_tmp_mesh = True
if obj.animation_data is not None:
for track in obj.animation_data.nla_tracks:
for strip in track.strips:
action = strip.action
obj.animation_data.action = action
animData = AnimData(action.name)
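                    # sample every frame of the action and record each bone's position, rotation and scale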
for i in range(int(action.frame_range[0]), int(action.frame_range[1])):
bpy.context.scene.frame_set(i)
bpy.context.scene.update()
for bone in bones:
animData.addB_Position(bone.head)
bone.rotation_mode = 'XYZ'
animData.addB_Rotation(bone.rotation_euler)
animData.addB_Scale(bone.scale)
animsData.append(animData)
if me is not None:
bm = bmesh.new()
bm.from_mesh(me)
#bmesh.ops.subdivide_edges(bm, edges=bm.edges, use_grid_fill=True, cuts=1)
bmesh.ops.triangulate(bm, faces=bm.faces)
bm.to_mesh(me)
bm.free()
del bm
for vertex in me.vertices:
found = 0
for group in vertex.groups:
i = 0
for bone in bones:
if obj.vertex_groups[group.group].name == bone.name:
found = i
i+=1
meshData.addV_Position(vertex.co)
meshData.addV_Normal(vertex.normal)
meshData.addV_Bone(found)
# empty value as place for material index later
meshData.addV_Material(0)
for polygon in me.polygons:
meshData.addF_Vertex(polygon)
meshData.addF_Material(polygon)
meshData.addF_UVs(polygon, me)
meshData.setF_Count(meshData.fCount + len(me.polygons))
meshData.setV_Count(meshData.vCount + len(me.vertices))
meshData.setM_Count(meshData.mCount + len(obj.material_slots))
for mat_slot in obj.material_slots:
meshData.addM_Color(mat_slot)
if is_tmp_mesh:
bpy.data.meshes.remove(me)
writeToFile(filepath, meshData, animsData)
def writeToFile(filepath, meshData, animsData):
# open target file
file = open(filepath, "w")
i = 0
# write the commons to the file
commons = "".join("SOM (SceneObjectMesh) file created by Blender SOM exporter"
+ "\n" + "project page: https://github.com/JohnsProject/JPGE" + "\n")
file.write(commons)
# write the vertex data to the file
i = 0
file.write("vCount < " + str(meshData.vCount) + " > vCount" + "\n")
file.write("vPosition < ")
for value in meshData.vPosition:
i += 1
if (i < len(meshData.vPosition)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > vPosition" + "\n")
i = 0
file.write("vNormal < ")
for value in meshData.vNormal:
i += 1
if (i < len(meshData.vNormal)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > vNormal" + "\n")
i = 0
file.write("vBone < ")
for value in meshData.vBone:
i += 1
if (i < len(meshData.vBone)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > vBone" + "\n")
i = 0
file.write("vMaterial < ")
for value in meshData.vMaterial:
i += 1
if (i < len(meshData.vMaterial)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > vMaterial" + "\n")
file.write("\n")
# write the face data to the file
i = 0
file.write("fCount < " + str(meshData.fCount) + " > fCount" + "\n")
file.write("fVertex1 < ")
for value in meshData.fVertex1:
i += 1
if (i < len(meshData.fVertex1)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fVertex1" + "\n")
i = 0
file.write("fVertex2 < ")
for value in meshData.fVertex2:
i += 1
if (i < len(meshData.fVertex2)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fVertex2" + "\n")
i = 0
file.write("fVertex3 < ")
for value in meshData.fVertex3:
i += 1
if (i < len(meshData.fVertex3)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fVertex3" + "\n")
i = 0
file.write("fMaterial < ")
for value in meshData.fMaterial:
i += 1
if (i < len(meshData.fMaterial)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fMaterial" + "\n")
i = 0
file.write("fUV1 < ")
for value in meshData.fUV1:
i += 1
if (i < len(meshData.fUV1)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fUV1" + "\n")
i = 0
file.write("fUV2 < ")
for value in meshData.fUV2:
i += 1
if (i < len(meshData.fUV2)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fUV2" + "\n")
i = 0
file.write("fUV3 < ")
for value in meshData.fUV3:
i += 1
if (i < len(meshData.fUV3)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fUV3" + "\n")
file.write("\n")
# write the material data to the file
i = 0
file.write("mCount < " + str(meshData.mCount) + " > mCount" + "\n")
file.write("mColor < ")
for value in meshData.mColor:
i += 1
if (i < len(meshData.mColor)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > mColor" + "\n")
file.write("\n")
# write the animations to the file
file.write("Animations < " + "\n")
global bonesCount
file.write((" BonesCount <%i" % bonesCount) + "> BonesCount \n")
for animData in animsData:
file.write(" Animation < " + "\n")
file.write(" Name < " + animData.name + "> Name \n")
i = 0
file.write(" bPosition < ")
for value in animData.bPosition:
i += 1
if (i < len(animData.bPosition)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > bPosition" + "\n")
i = 0
file.write(" bRotation < ")
for value in animData.bRotation:
i += 1
if (i < len(animData.bRotation)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > bRotation" + "\n")
i = 0
file.write(" bScale < ")
for value in animData.bScale:
i += 1
if (i < len(animData.bScale)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > bScale" + "\n")
file.write(" > Animation " + "\n")
file.write("> Animations" + "\n")
# close file
file.close()
class MeshData:
def __init__(self):
# v = vertex
self.vCount = 0
self.vPosition = []
self.vNormal = []
self.vBone = []
self.vMaterial = []
# f = face
self.fCount = 0
self.fVertex1 = []
self.fVertex2 = []
self.fVertex3 = []
self.fMaterial = []
self.fUV1 = []
self.fUV2 = []
self.fUV3 = []
# m = material
self.mCount = 0
self.mColor = []
def setV_Count(self, value):
self.vCount = value
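    # vertex data is exported as fixed-point integers: positions and normals are scaled by 1000, UVs by 128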
def addV_Position(self, value):
self.vPosition.append(value[0]*1000)
self.vPosition.append(value[1]*1000)
self.vPosition.append(value[2]*1000)
def addV_Normal(self, value):
self.vNormal.append(value[0]*1000)
self.vNormal.append(value[1]*1000)
self.vNormal.append(value[2]*1000)
def addV_Bone(self, value):
self.vBone.append(value)
def addV_Material(self, value):
self.vMaterial.append(value)
def setF_Count(self, value):
self.fCount = value
def addF_Vertex(self, value):
self.fVertex1.append(self.vCount + value.vertices[0])
self.fVertex2.append(self.vCount + value.vertices[1])
self.fVertex3.append(self.vCount + value.vertices[2])
def addF_Material(self, value):
self.fMaterial.append(self.mCount + value.material_index)
self.vMaterial[value.vertices[0]] = self.mCount + value.material_index
self.vMaterial[value.vertices[1]] = self.mCount + value.material_index
self.vMaterial[value.vertices[2]] = self.mCount + value.material_index
def addF_UVs(self, value, me):
if me.uv_layers.active is not None:
self.fUV1.append(me.uv_layers.active.data[value.loop_indices[0]].uv[0]*128)
self.fUV1.append(me.uv_layers.active.data[value.loop_indices[0]].uv[1]*128)
self.fUV2.append(me.uv_layers.active.data[value.loop_indices[1]].uv[0]*128)
self.fUV2.append(me.uv_layers.active.data[value.loop_indices[1]].uv[1]*128)
self.fUV3.append(me.uv_layers.active.data[value.loop_indices[2]].uv[0]*128)
self.fUV3.append(me.uv_layers.active.data[value.loop_indices[2]].uv[1]*128)
else:
self.fUV1.append(0)
self.fUV1.append(0)
self.fUV2.append(0)
self.fUV2.append(0)
self.fUV3.append(0)
self.fUV3.append(0)
def setM_Count(self, value):
self.mCount = value
def addM_Color(self, value):
if value.material is not None:
self.mColor.append(value.material.diffuse_color[0] * 255)
self.mColor.append(value.material.diffuse_color[1] * 255)
self.mColor.append(value.material.diffuse_color[2] * 255)
self.mColor.append(value.material.alpha * 255)
else:
self.mColor.append(0)
self.mColor.append(0)
self.mColor.append(0)
self.mColor.append(100)
class AnimData:
def __init__(self, name):
self.name = name
self.bPosition = []
self.bRotation = []
self.bScale = []
def addB_Position(self, value):
self.bPosition.append(value[0]*100)
self.bPosition.append(value[1]*100)
self.bPosition.append(value[2]*100)
def addB_Rotation(self, value):
self.bRotation.append(math.degrees(value[0]))
self.bRotation.append(math.degrees(value[1]))
self.bRotation.append(math.degrees(value[2]))
def addB_Scale(self, value):
self.bScale.append(value[0])
self.bScale.append(value[1])
self.bScale.append(value[2])
|
the-stack_0_11422 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="InstaPython",
version="1.1.1",
author="Micha Birklbauer",
author_email="[email protected]",
description="A set of classes and functions to access Instagram.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/michabirklbauer/instapython",
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
|
the-stack_0_11423 | import pytest
from dvc.cli import parse_args
from dvc.command.plot import CmdPlotDiff, CmdPlotShow
def test_metrics_diff(mocker):
cli_args = parse_args(
[
"plot",
"diff",
"--file",
"result.extension",
"-t",
"template",
"-d",
"datafile",
"--select",
"column1,column2",
"--no-html",
"--stdout",
"-x",
"x_field",
"-y",
"y_field",
"--title",
"my_title",
"--xlab",
"x_title",
"--ylab",
"y_title",
"HEAD",
"tag1",
"tag2",
]
)
assert cli_args.func == CmdPlotDiff
cmd = cli_args.func(cli_args)
m = mocker.patch.object(cmd.repo, "plot", autospec=True)
mocker.patch("builtins.open")
mocker.patch("os.path.join")
assert cmd.run() == 0
m.assert_called_once_with(
datafile="datafile",
template="template",
revisions=["HEAD", "tag1", "tag2"],
fields={"column1", "column2"},
path=None,
embed=False,
x_field="x_field",
y_field="y_field",
csv_header=True,
title="my_title",
x_title="x_title",
y_title="y_title",
)
def test_metrics_show(mocker):
cli_args = parse_args(
[
"plot",
"show",
"-f",
"result.extension",
"-t",
"template",
"-s",
"$.data",
"--no-html",
"--stdout",
"--no-csv-header",
"datafile",
]
)
assert cli_args.func == CmdPlotShow
cmd = cli_args.func(cli_args)
m = mocker.patch.object(cmd.repo, "plot", autospec=True)
mocker.patch("builtins.open")
mocker.patch("os.path.join")
assert cmd.run() == 0
m.assert_called_once_with(
datafile="datafile",
template="template",
revisions=None,
fields=None,
path="$.data",
embed=False,
x_field=None,
y_field=None,
csv_header=False,
title=None,
x_title=None,
y_title=None,
)
@pytest.mark.parametrize(
"arg_revisions,is_dirty,expected_revisions",
[
([], False, ["workspace"]),
([], True, ["HEAD", "workspace"]),
(["v1", "v2", "workspace"], False, ["v1", "v2", "workspace"]),
(["v1", "v2", "workspace"], True, ["v1", "v2", "workspace"]),
],
)
def test_revisions(mocker, arg_revisions, is_dirty, expected_revisions):
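    # a dirty workspace means the plot compares HEAD in addition to the working tree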
args = mocker.MagicMock()
cmd = CmdPlotDiff(args)
mocker.patch.object(args, "revisions", arg_revisions)
mocker.patch.object(cmd.repo.scm, "is_dirty", return_value=is_dirty)
assert cmd._revisions() == expected_revisions
|
the-stack_0_11424 | import sqlalchemy as sa
from alembic import op
revision = "dddddddddddd"
down_revision = "cccccccccccc"
branch_labels = None
depends_on = None
def upgrade():
op.add_column("foo", sa.Column("bar_id", sa.Integer(), server_default="9"))
def downgrade():
op.drop_column("foo", "bar_id")
|
the-stack_0_11427 | #!/usr/bin/env python
# Copyright 2017-present WonderLabs, Inc. <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pexpect
import sys
import time
from bluepy.btle import Scanner, DefaultDelegate
import binascii
import copy
import datetime
class ScanDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
class DevScanner(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
# print('Scanner inited')
def dongle_start(self):
self.con = pexpect.spawn('hciconfig hci0 up')
time.sleep(1)
def dongle_restart(self):
print('restart bluetooth dongle')
self.con = pexpect.spawn('hciconfig hci0 down')
time.sleep(3)
self.con = pexpect.spawn('hciconfig hci0 up')
time.sleep(3)
def scan_loop(self):
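        # returns a flat list of [mac, device type, description/action] entries for every SwitchBot found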
service_uuid = 'cba20d00-224d-11e6-9fb8-0002a5d5c51b'
company_id = '6909' # actually 0x0969
dev_list = []
bot_list = []
meter_list = []
curtain_list = []
contact_list = []
motion_list = []
param_list = []
pir_tip = ['No movement detected', 'Movement detected']
hall_tip = ['Door closed', 'Door opened', 'Timeout no closed']
light_tip = ['Dark', 'Bright']
self.con = pexpect.spawn('hciconfig')
pnum = self.con.expect(['hci0', pexpect.EOF, pexpect.TIMEOUT])
if pnum == 0:
self.con = pexpect.spawn('hcitool lescan')
# self.con.expect('LE Scan ...', timeout=5)
scanner = Scanner().withDelegate(DevScanner())
devices = scanner.scan(10.0)
print('Scanning...')
else:
            raise RuntimeError('no bluetooth error')
for dev in devices:
mac = 0
param_list[:] = []
for (adtype, desc, value) in dev.getScanData():
# print(adtype, desc, value)
if desc == '16b Service Data':
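                    # byte 2 of the SwitchBot service data identifies the model: H = Bot, T = Meter, d = Contact, s = Motion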
dev_type = binascii.a2b_hex(value[4:6])
if dev_type == 'H':
param_list.append(binascii.a2b_hex(value[6:8]))
elif dev_type == 'T':
# celsius
tempFra = int(value[11:12].encode('utf-8'), 16) / 10.0
tempInt = int(value[12:14].encode('utf-8'), 16)
if tempInt < 128:
tempInt *= -1
tempFra *= -1
else:
tempInt -= 128
param_list.append(tempInt + tempFra)
param_list.append(
int(value[14:16].encode('utf-8'), 16) % 128)
# print('meter:', param1, param2)
elif dev_type == 'd':
# print(adtype, desc, value)
pirSta = (
int(value[6:7].encode('utf-8'), 16) >> 2) & 0x01
diffSec = (
(int(value[10:11].encode('utf-8'), 16) & 0x04) << 14) \
+ int(value[16:20].encode('utf-8'), 16)
hallSta = (
int(value[11:12].encode('utf-8'), 16) >> 1) & 0x03
lightSta = int(value[11:12].encode('utf-8'), 16) & 0x01
param_list.extend([hallSta, pirSta, lightSta, diffSec])
# print(pirSta, diffSec, hallSta, lightSta, diffSec)
elif dev_type == 's':
# print(adtype, desc, value)
pirSta = (
int(value[6:7].encode('utf-8'), 16) >> 2) & 0x01
lightSta = (int(value[15:16].encode('utf-8'), 16) & 0x03) - 1
# TODO:
diffSec = 0
param_list.extend([pirSta, lightSta, diffSec])
else:
param_list[:] = []
elif desc == 'Local name':
if value == 'WoHand':
mac = dev.addr
dev_type = 'H'
elif value == 'WoMeter':
mac = dev.addr
dev_type = 'T'
elif value == 'WoCurtain':
mac = dev.addr
dev_type = 'c'
elif value == 'WoContact':
mac = dev.addr
dev_type = 'd'
elif value == 'WoMotion':
mac = dev.addr
dev_type = 's'
elif desc == 'Complete 128b Services' and value == service_uuid:
mac = dev.addr
elif desc == 'Manufacturer' and value[0:4] == company_id:
mac = dev.addr
if mac != 0:
dev_list.append([mac, dev_type, copy.deepcopy(param_list)])
# print(dev_list)
for (mac, dev_type, params) in dev_list:
if dev_type == 'H':
if int(binascii.b2a_hex(params[0]), 16) > 127:
bot_list.append([mac, 'Bot', 'Turn On'])
bot_list.append([mac, 'Bot', 'Turn Off'])
bot_list.append([mac, 'Bot', 'Up'])
bot_list.append([mac, 'Bot', 'Down'])
else:
bot_list.append([mac, 'Bot', 'Press'])
elif dev_type == 'T':
meter_list.append([mac, 'Meter', "%.1f'C %d%%" %
(params[0], params[1])])
elif dev_type == 'c':
curtain_list.append([mac, 'Curtain', 'Open'])
curtain_list.append([mac, 'Curtain', 'Close'])
curtain_list.append([mac, 'Curtain', 'Pause'])
elif dev_type == 'd':
timeTrigger = datetime.datetime.now() - datetime.timedelta(0, params[3])
contact_list.append([mac, 'Contact', "%s, %s, %s, Last trigger: %s" %
(hall_tip[params[0]], pir_tip[params[1]], light_tip[params[2]], timeTrigger.strftime("%Y-%m-%d %H:%M"))])
elif dev_type == 's':
motion_list.append([mac, 'Motion', "%s, %s" %
(pir_tip[params[0]], light_tip[params[1]])])
print('Scan timeout.')
return bot_list + meter_list + curtain_list + contact_list + motion_list
pass
def register_cb(self, fn):
self.cb = fn
return
def close(self):
# self.con.sendcontrol('c')
self.con.close(force=True)
def trigger_device(device):
[mac, dev_type, act] = device
# print 'Start to control'
con = pexpect.spawn('gatttool -b ' + mac + ' -t random -I')
con.expect('\[LE\]>')
print('Preparing to connect.')
retry = 3
index = 0
while retry > 0 and 0 == index:
con.sendline('connect')
# To compatible with different Bluez versions
index = con.expect(
['Error', '\[CON\]', 'Connection successful.*\[LE\]>'])
retry -= 1
if 0 == index:
print('Connection error.')
return
print('Connection successful.')
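    # look up the handle of the SwitchBot command characteristic (cba20002-...) so commands can be written to it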
con.sendline('char-desc')
con.expect(['\[CON\]', 'cba20002-224d-11e6-9fb8-0002a5d5c51b'])
cmd_handle = con.before.split('\n')[-1].split()[2].strip(',')
if dev_type == 'Bot':
if act == 'Turn On':
con.sendline('char-write-cmd ' + cmd_handle + ' 570101')
elif act == 'Turn Off':
con.sendline('char-write-cmd ' + cmd_handle + ' 570102')
elif act == 'Press':
con.sendline('char-write-cmd ' + cmd_handle + ' 570100')
elif act == 'Down':
con.sendline('char-write-cmd ' + cmd_handle + ' 570103')
elif act == 'Up':
con.sendline('char-write-cmd ' + cmd_handle + ' 570104')
elif dev_type == 'Meter':
con.sendline('char-write-cmd ' + cmd_handle + ' 570F31')
con.expect('\[LE\]>')
con.sendline('char-read-uuid cba20003-224d-11e6-9fb8-0002a5d5c51b')
index = con.expect(['value:[0-9a-fA-F ]+', 'Error'])
if index == 0:
data = con.after.split(':')[1].replace(' ', '')
tempFra = int(data[3], 16) / 10.0
tempInt = int(data[4:6], 16)
if tempInt < 128:
tempInt *= -1
tempFra *= -1
else:
tempInt -= 128
meterTemp = tempInt + tempFra
meterHumi = int(data[6:8], 16) % 128
print("Meter[%s] %.1f'C %d%%" % (mac, meterTemp, meterHumi))
else:
print('Error!')
elif dev_type == 'Curtain':
if act == 'Open':
con.sendline('char-write-cmd ' + cmd_handle + ' 570F450105FF00')
elif act == 'Close':
con.sendline('char-write-cmd ' + cmd_handle + ' 570F450105FF64')
elif act == 'Pause':
con.sendline('char-write-cmd ' + cmd_handle + ' 570F450100FF')
else:
print('Unsupported operations')
con.expect('\[LE\]>')
con.sendline('quit')
print('Complete')
def main():
# Check bluetooth dongle
print(
'Usage: "sudo python switchbot.py [mac dev_type cmd]" or "sudo python switchbot.py"')
connect = pexpect.spawn('hciconfig')
pnum = connect.expect(["hci0", pexpect.EOF, pexpect.TIMEOUT])
if pnum != 0:
print('No bluetooth hardware, exit now')
sys.exit()
connect = pexpect.spawn('hciconfig hci0 up')
# print(sys.argv, len(sys.argv))
if len(sys.argv) == 4 or len(sys.argv) == 5:
dev = sys.argv[1]
dev_type = sys.argv[2]
act = sys.argv[3] if len(sys.argv) < 5 else ('Turn ' + sys.argv[4])
trigger_device([dev, dev_type, act])
elif len(sys.argv) == 1:
# Start scanning...
scan = DevScanner()
dev_list = scan.scan_loop()
# dev_number = None
if not dev_list:
print("No SwitchBot nearby, exit")
sys.exit()
for idx, val in enumerate(dev_list):
print('%2d' % idx, val)
dev_number = int(input("Input the device number to control:"))
if dev_number >= len(dev_list):
print("Input error, exit")
else:
ble_dev = dev_list[dev_number]
print(ble_dev)
# Trigger the device to work
# If the SwitchBot address is known you can run this command directly without scanning
trigger_device(ble_dev)
else:
print('Wrong cmd!')
print(
'Usage: "sudo python switchbot.py [mac dev_type cmd]" or "sudo python switchbot.py"')
connect = pexpect.spawn('hciconfig')
sys.exit()
if __name__ == "__main__":
main()
|
the-stack_0_11428 | from __future__ import (absolute_import, division, print_function)
# make plot of ozone concentration data on
# lambert conformal conic map projection, drawing coastlines, state and
# country boundaries, and parallels/meridians.
# the data is interpolated to the native projection grid.
from mpl_toolkits.basemap import Basemap, shiftgrid
import numpy as np
import matplotlib.pyplot as plt
import netCDF4
plt.rcParams['text.usetex'] = False
# read in netCDF4 file. Results from CAMx v6
# test case, converted to netcdf by PseudoNetCDF
# pseudonetcdf.googlecode.com
camx = netCDF4.Dataset('camx.sample.nc')
#alternatively read directly from CAMx uamiv file
#if available
#
# from PseudoNetCDF.camxfiles.Memmaps import uamiv
# camx = uamiv('camx.bin')
# Get Ozone Variable
o3 = camx.variables['O3']
# Get projection space
llcrnrx = camx.XORIG
llcrnry = camx.YORIG
urcrnrx = llcrnrx + (o3[:].shape[-1] + 1) * camx.XCELL
urcrnry = llcrnry + (o3[:].shape[-2] + 1) * camx.XCELL
# Get edge values for pcolor
xedge = np.linspace(0, urcrnrx - llcrnrx, camx.NCOLS + 1)
yedge = np.linspace(0, urcrnry - llcrnry, camx.NROWS + 1)
X, Y = np.meshgrid(xedge, yedge)
# setup of basemap ('lcc' = lambert conformal conic).
# projection parameters from CAMx file
m = Basemap(projection = 'lcc',
lon_0=camx.P_GAM, lat_0 = 40.,
lat_1 = camx.P_ALP, lat_2 = camx.P_BET,
llcrnrx = llcrnrx, llcrnry = llcrnry,
urcrnry = urcrnry, urcrnrx = urcrnrx)
# create the figure.
fig=plt.figure(figsize=(8,8))
# add an axes.
ax = fig.add_axes([0.1,0.1,0.8,0.8])
ax.set_facecolor('lightgrey')
# associate this axes with the Basemap instance.
m.ax = ax
# plot tile plot with pcolor
# Use first time and first layer (i.e., o3[0, 0] (time, layer, row, col))
# Edge cells have precisely 0 value, and are masked
# to avoid an unnecessary color range.
# Each color bin contains precisely 10% of values
# which makes for a pretty plot.
from matplotlib.colors import ListedColormap
WhGrYlBu = ListedColormap(['#ffffff', '#b7f6ff', '#70edff', '#29e4ff', '#00e1fb', '#0fffc6', '#3bffa4', '#68ff82', '#94ff60', '#c0ff3e', '#edff1c', '#fff400', '#ffc700', '#ff9b00', '#ff6e00', '#ff4200', '#ff1500', '#e80000', '#bb0000', '#8f0000'])
#.from_list('WhGrYlBu', ['white', 'white', 'cyan', 'lightblue', 'lightgreen', 'green', 'yellow', 'orange', 'red', 'red'])
toplot = np.ma.masked_values(o3[0, 0], 0.) * 1000.
bounds = np.percentile(toplot.compressed().ravel(), np.linspace(5, 95, 9).tolist())
ptch = m.pcolor(X, Y, toplot, cmap = WhGrYlBu, norm = plt.matplotlib.colors.BoundaryNorm(bounds, 20), vmin = bounds[0], vmax = bounds[-1])
# Add a colorbar using proportional spacing, but
# colors based on 10 distinct bins
cb = m.colorbar(ptch, location='right',pad='10%', boundaries = bounds, spacing = 'proportional', format = '%.3f', extend = 'both') # draw colorbar
# Add units to the colorbar
cb.ax.set_xlabel('%s*1000.' % o3.units.strip())
# plot blue dot on Houston, Baton Rouge, and Atlanta
def add_dot(lon, lat, label):
xpt,ypt = m(lon,lat)
m.plot([xpt],[ypt],'bo')
ax.annotate(label, xy=(xpt, ypt), xytext=(xpt+1e5, ypt+1e5),
bbox=dict(boxstyle="round4", fc="w"),
arrowprops=dict(facecolor='black'),
)
add_dot(-95.361328,29.754505, 'Houston')
add_dot(-91.140320, 30.458283, 'Baton Rouge')
add_dot(-84.387982, 33.748995, 'Atlanta')
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawcountries()
m.drawstates()
# draw parallels and meridians.
# label on left, right and bottom of map.
parallels = np.arange(20.,60,10.)
m.drawparallels(parallels,labels=[1,1,0,1])
meridians = np.arange(-120., 70.,10.)
m.drawmeridians(meridians,labels=[1,1,0,1])
# set title.
ax.set_title('O$_3$ as predicted by the CAMx v6 Test-Case\neach color division has 10% of cells 5-95% and 5% in triangles')
import textwrap
histstr = 'Processing: %s' % '\n'.join(textwrap.wrap(camx.history.strip(), 140))
fig.text(0.01, 0.01, histstr, horizontalalignment = 'left', verticalalignment = 'bottom', size = 8)
plt.draw()
plt.show()
|
the-stack_0_11431 | from django.urls import re_path
from .views import (
PostListView,
PostDetailView,
PostCreateView,
PostDeleteView,
PostUpdateView,
)
APP_NAME = 'posts'
urlpatterns = [
re_path(r'^list/$', PostListView.as_view(), name='list'),
re_path(r'^create/$', PostCreateView.as_view(), name='create'),
re_path(r'^(?P<id>\d+)/edit/$', PostUpdateView.as_view(), name='edit'),
re_path(r'^(?P<id>\d+)/delete/$', PostDeleteView.as_view(), name='delete'),
re_path(r'^(?P<id>\d+)/$', PostDetailView.as_view(), name='detail'),
] |
the-stack_0_11437 | from typing import List, Dict
import spacy
from rb.core.lang import Lang
from rb.core.text_element import TextElement
from rb.core.text_element_type import TextElementType
from rb.core.word import Word
class Span(TextElement):
def __init__(self, lang: Lang, text: str, words: List[Word], index_in_container: int,
depth: int = TextElementType.SPAN.value):
super().__init__(lang, text, index_in_container, depth, container=words[0].container)
self.components = words
def get_root(self) -> Word:
return [word for word in self.components
if word.head == word or
word.head.index_in_doc < self.components[0].index_in_doc or
word.head.index_in_doc > self.components[-1].index_in_doc
][0]
@classmethod
    def from_spacy_span(cls, lang: Lang, spacy_span: spacy.tokens.Span, words: Dict[int, Word]) -> "Span":
text = spacy_span.text
our_words = [words[i] for i in range(spacy_span.start, spacy_span.end)]
return Span(lang=lang, text=text, words=our_words, index_in_container=our_words[0].index_in_container)
|
the-stack_0_11438 | # *** Create a Channel Type Role with full permissions for Chat ***
# Code based on https://www.twilio.com/docs/chat/rest/roles
# Download Python 3 from https://www.python.org/downloads/
# Download the Twilio helper library from https://www.twilio.com/docs/python/install
import os
from twilio.rest import Client
#from datetime import datetime | not required for this examples
import logging
#write requests & responses from Twilio to log file, useful for debugging:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
filename='/usr/local/twilio/python/python3-twilio-sdkv6-examples/chat/logs/twilio_chat.log',
filemode='a')
# Your Account Sid and Auth Token from twilio.com/console & stored in Mac OS ~/.bash_profile in this example
account_sid = os.environ.get('TWILIO_ACCOUNT_SID')
auth_token = os.environ.get('TWILIO_AUTH_TOKEN')
client = Client(account_sid, auth_token)
# A list of chat roles parameters & their permissable values
role = client.chat.services('ISxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx') \
.roles \
.create(
friendly_name='Channel - Full Permissions',
type='channel',
permission=['addMember', 'deleteAnyMessage', 'deleteOwnMessage', 'destroyChannel', 'editAnyMessage', 'editAnyMessageAttributes', 'editOwnMessage', 'editOwnMessageAttributes', 'editChannelName', 'editChannelAttributes', 'inviteMember', 'leaveChannel', 'removeMember', 'sendMessage', 'sendMediaMessage']
#['editAnyUserInfo', 'editOwnUserInfo'] not supported ????
)
#print list of all chat roles properties to console, useful for learning info available you can work with?
print(role.account_sid)
print(role.date_created)
print(role.date_updated)
print(role.friendly_name)
print(role.permissions)
print(role.service_sid)
print(role.sid)
print(role.type)
print(role.url)
#create variable for this record
cdr = (role.sid)
#open *.log file with cdr var as filename...
f = open("/usr/local/twilio/python/python3-twilio-sdkv6-examples/chat/logs/" + str( cdr ) + ".log", "a")
#write list of all chat roles properties to above file...
f.write("Account SID : " + str(role.account_sid) + "\n")
f.write("Date Created : " + str(role.date_created) + "\n")
f.write("Date Updated : " + str(role.date_updated) + "\n")
f.write("Friendly Name : " + str(role.friendly_name) + "\n")
f.write("Permissions : " + str(role.permissions) + "\n")
f.write("Service SID : " + str(role.service_sid) + "\n")
f.write("SID : " + str(role.sid) + "\n")
f.write("Type : " + str(role.type) + "\n")
f.write("URL : " + str(role.url) + "\n")
f.close() |
the-stack_0_11439 | """
Control-structure exercises:
1. Selection: triangle area and perimeter
2. Loops: primality test, greatest common divisor and least common multiple
"""
import math
class Triangle:
def __init__(self, a, b, c):
if a + b > c and a + c > b and b + c > a:
self.a = a
self.b = b
self.c = c
else:
            print('The given sides cannot form a triangle')
def perimeter(self):
p = (self.a+self.b+self.c)/2
return p
def area(self):
p = self.perimeter()
# p = (self.a + self.b + self.c) / 2
area = math.sqrt(p * (p - self.a) * (p - self.b) * (p - self.c))
return area
def draw_tri(self):
row = int(self.perimeter())
for i in range(row):
for _ in range(i + 1):
print('*', end='')
print()
for i in range(row):
for j in range(row):
if j < row - i - 1:
print(' ', end='')
else:
print('*', end='')
print()
for i in range(row):
for _ in range(row - i - 1):
print(' ', end='')
for _ in range(2 * i + 1):
print('*', end='')
print()
class IsNum:
def __init__(self):
pass
@staticmethod
def is_prime(a):
end = int(math.sqrt(a))
is_prime = True
        for x in range(2, end + 1):
            if a % x == 0:
                is_prime = False
                break
        if is_prime and a != 1:
            print('{0} is a prime number'.format(a))
        else:
            print('{0} is not a prime number'.format(a))
@staticmethod
def mm_judge(a, b):
x = int(a)
y = int(b)
if x > y:
x, y = y, x
for factor in range(x, 0, -1):
if x % factor == 0 and y % factor == 0:
                print('The greatest common divisor of %d and %d is %d' % (x, y, factor))
                print('The least common multiple of %d and %d is %d' % (x, y, x * y // factor))
break
if __name__ == "__main__":
tri1 = Triangle(3, 5, 7)
print(tri1.perimeter(), tri1.area())
tri2 = Triangle(3, 4, 10)
tri1.draw_tri()
IsNum.is_prime(11)
IsNum.is_prime(12)
IsNum.mm_judge(3, 5)
|
the-stack_0_11443 | import logging
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from dojo.filters import ProductTypeFilter
from dojo.forms import Product_TypeForm, Product_TypeProductForm, Delete_Product_TypeForm
from dojo.models import Product_Type
from dojo.utils import get_page_items, add_breadcrumb
from dojo.notifications.helper import create_notification
from django.db.models import Count, Q
from django.db.models.query import QuerySet
logger = logging.getLogger(__name__)
"""
Jay
Status: in prod
Product Type views
"""
def product_type(request):
# query for names outside of query with prefetch to avoid the complex prefetch query from executing twice
name_words = Product_Type.objects.all().values_list('name', flat=True)
prod_types = Product_Type.objects.all()
ptl = ProductTypeFilter(request.GET, queryset=prod_types)
pts = get_page_items(request, ptl.qs, 25)
pts.object_list = prefetch_for_product_type(pts.object_list)
add_breadcrumb(title="Product Type List", top_level=True, request=request)
return render(request, 'dojo/product_type.html', {
'name': 'Product Type List',
'metric': False,
'user': request.user,
'pts': pts,
'ptl': ptl,
'name_words': name_words})
def prefetch_for_product_type(prod_types):
prefetch_prod_types = prod_types
if isinstance(prefetch_prod_types, QuerySet): # old code can arrive here with prods being a list because the query was already executed
active_findings_query = Q(prod_type__engagement__test__finding__active=True,
prod_type__engagement__test__finding__mitigated__isnull=True,
prod_type__engagement__test__finding__verified=True,
prod_type__engagement__test__finding__false_p=False,
prod_type__engagement__test__finding__duplicate=False,
prod_type__engagement__test__finding__out_of_scope=False)
prefetch_prod_types = prefetch_prod_types.prefetch_related('authorized_users')
prefetch_prod_types = prefetch_prod_types.annotate(findings_count=Count('prod_type__engagement__test__finding__id', filter=active_findings_query))
prefetch_prod_types = prefetch_prod_types.annotate(prod_count=Count('prod_type', distinct=True))
prefetch_prod_types = prefetch_prod_types.annotate(user_count=Count('authorized_users', distinct=True))
else:
logger.debug('unable to prefetch because query was already executed')
return prefetch_prod_types
@user_passes_test(lambda u: u.is_staff)
def add_product_type(request):
form = Product_TypeForm()
if request.method == 'POST':
form = Product_TypeForm(request.POST)
if form.is_valid():
form.save()
messages.add_message(request,
messages.SUCCESS,
'Product type added successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('product_type'))
add_breadcrumb(title="Add Product Type", top_level=False, request=request)
return render(request, 'dojo/new_product_type.html', {
'name': 'Add Product Type',
'metric': False,
'user': request.user,
'form': form,
})
@user_passes_test(lambda u: u.is_staff)
def edit_product_type(request, ptid):
pt = get_object_or_404(Product_Type, pk=ptid)
pt_form = Product_TypeForm(instance=pt)
delete_pt_form = Delete_Product_TypeForm(instance=pt)
if request.method == "POST" and request.POST.get('edit_product_type'):
pt_form = Product_TypeForm(request.POST, instance=pt)
if pt_form.is_valid():
pt = pt_form.save()
messages.add_message(
request,
messages.SUCCESS,
'Product type updated successfully.',
extra_tags="alert-success",
)
return HttpResponseRedirect(reverse("product_type"))
if request.method == "POST" and request.POST.get("delete_product_type"):
form2 = Delete_Product_TypeForm(request.POST, instance=pt)
if form2.is_valid():
pt.delete()
messages.add_message(
request,
messages.SUCCESS,
"Product type Deleted successfully.",
extra_tags="alert-success",
)
create_notification(event='other',
title='Deletion of %s' % pt.name,
description='The product type "%s" was deleted by %s' % (pt.name, request.user),
url=request.build_absolute_uri(reverse('product_type')),
icon="exclamation-triangle")
return HttpResponseRedirect(reverse("product_type"))
add_breadcrumb(title="Edit Product Type", top_level=False, request=request)
return render(request, 'dojo/edit_product_type.html', {
'name': 'Edit Product Type',
'metric': False,
'user': request.user,
'pt_form': pt_form,
'pt': pt})
@user_passes_test(lambda u: u.is_staff)
def add_product_to_product_type(request, ptid):
pt = get_object_or_404(Product_Type, pk=ptid)
form = Product_TypeProductForm(initial={'prod_type': pt})
add_breadcrumb(title="New %s Product" % pt.name, top_level=False, request=request)
return render(request, 'dojo/new_product.html',
{'form': form,
})
|
the-stack_0_11444 | from typing import Any, Dict, List, Optional
import aiohttp
from chia.cmds.units import units
from chia.consensus.block_record import BlockRecord
from chia.rpc.farmer_rpc_client import FarmerRpcClient
from chia.rpc.full_node_rpc_client import FullNodeRpcClient
from chia.rpc.wallet_rpc_client import WalletRpcClient
from chia.util.config import load_config
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.ints import uint16
from chia.util.misc import format_bytes
from chia.util.misc import format_minutes
from chia.util.network import is_localhost
SECONDS_PER_BLOCK = (24 * 3600) / 4608
async def get_harvesters(farmer_rpc_port: int) -> Optional[Dict[str, Any]]:
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
plots = await farmer_client.get_harvesters()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'harvester' {e}")
return None
farmer_client.close()
await farmer_client.await_closed()
return plots
async def get_blockchain_state(rpc_port: int) -> Optional[Dict[str, Any]]:
blockchain_state = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
client.close()
await client.await_closed()
return blockchain_state
async def get_average_block_time(rpc_port: int) -> float:
try:
blocks_to_compare = 500
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
curr: Optional[BlockRecord] = blockchain_state["peak"]
if curr is None or curr.height < (blocks_to_compare + 100):
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
while curr is not None and curr.height > 0 and not curr.is_transaction_block:
curr = await client.get_block_record(curr.prev_hash)
if curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
past_curr = await client.get_block_record_by_height(curr.height - blocks_to_compare)
while past_curr is not None and past_curr.height > 0 and not past_curr.is_transaction_block:
past_curr = await client.get_block_record(past_curr.prev_hash)
if past_curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
client.close()
await client.await_closed()
return (curr.timestamp - past_curr.timestamp) / (curr.height - past_curr.height)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
async def get_wallets_stats(wallet_rpc_port: int) -> Optional[Dict[str, Any]]:
amounts = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if wallet_rpc_port is None:
wallet_rpc_port = config["wallet"]["rpc_port"]
wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
amounts = await wallet_client.get_farmed_amount()
#
# Don't catch any exceptions, the caller will handle it
#
finally:
wallet_client.close()
await wallet_client.await_closed()
return amounts
async def is_farmer_running(farmer_rpc_port: int) -> bool:
is_running = False
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
await farmer_client.get_connections()
is_running = True
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
farmer_client.close()
await farmer_client.await_closed()
return is_running
async def get_challenges(farmer_rpc_port: int) -> Optional[List[Dict[str, Any]]]:
signage_points = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
signage_points = await farmer_client.get_signage_points()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
farmer_client.close()
await farmer_client.await_closed()
return signage_points
async def challenges(farmer_rpc_port: int, limit: int) -> None:
signage_points = await get_challenges(farmer_rpc_port)
if signage_points is None:
return None
signage_points.reverse()
if limit != 0:
signage_points = signage_points[:limit]
for signage_point in signage_points:
print(
(
f"Hash: {signage_point['signage_point']['challenge_hash']} "
f"Index: {signage_point['signage_point']['signage_point_index']}"
)
)
async def summary(rpc_port: int, wallet_rpc_port: int, harvester_rpc_port: int, farmer_rpc_port: int) -> None:
all_harvesters = await get_harvesters(farmer_rpc_port)
blockchain_state = await get_blockchain_state(rpc_port)
farmer_running = await is_farmer_running(farmer_rpc_port)
wallet_not_ready: bool = False
wallet_not_running: bool = False
amounts = None
try:
amounts = await get_wallets_stats(wallet_rpc_port)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
wallet_not_running = True
else:
wallet_not_ready = True
print("Farming status: ", end="")
if blockchain_state is None:
print("Not available")
elif blockchain_state["sync"]["sync_mode"]:
print("Syncing")
elif not blockchain_state["sync"]["synced"]:
print("Not synced or not connected to peers")
elif not farmer_running:
print("Not running")
else:
print("Farming")
if amounts is not None:
print(f"Total chia farmed: {amounts['farmed_amount'] / units['chia']}")
print(f"User transaction fees: {amounts['fee_amount'] / units['chia']}")
print(f"Block rewards: {(amounts['farmer_reward_amount'] + amounts['pool_reward_amount']) / units['chia']}")
print(f"Last height farmed: {amounts['last_height_farmed']}")
class PlotStats:
total_plot_size = 0
total_plots = 0
if all_harvesters is not None:
harvesters_local: dict = {}
harvesters_remote: dict = {}
for harvester in all_harvesters["harvesters"]:
ip = harvester["connection"]["host"]
if is_localhost(ip):
harvesters_local[harvester["connection"]["node_id"]] = harvester
else:
if ip not in harvesters_remote:
harvesters_remote[ip] = {}
harvesters_remote[ip][harvester["connection"]["node_id"]] = harvester
def process_harvesters(harvester_peers_in: dict):
for harvester_peer_id, plots in harvester_peers_in.items():
total_plot_size_harvester = sum(map(lambda x: x["file_size"], plots["plots"]))
PlotStats.total_plot_size += total_plot_size_harvester
PlotStats.total_plots += len(plots["plots"])
print(f" {len(plots['plots'])} plots of size: {format_bytes(total_plot_size_harvester)}")
if len(harvesters_local) > 0:
print(f"Local Harvester{'s' if len(harvesters_local) > 1 else ''}")
process_harvesters(harvesters_local)
for harvester_ip, harvester_peers in harvesters_remote.items():
print(f"Remote Harvester{'s' if len(harvester_peers) > 1 else ''} for IP: {harvester_ip}")
process_harvesters(harvester_peers)
print(f"Plot count for all harvesters: {PlotStats.total_plots}")
print("Total size of plots: ", end="")
print(format_bytes(PlotStats.total_plot_size))
else:
print("Plot count: Unknown")
print("Total size of plots: Unknown")
if blockchain_state is not None:
print("Estimated network space: ", end="")
print(format_bytes(blockchain_state["space"]))
else:
print("Estimated network space: Unknown")
minutes = -1
if blockchain_state is not None and all_harvesters is not None:
proportion = PlotStats.total_plot_size / blockchain_state["space"] if blockchain_state["space"] else -1
minutes = int((await get_average_block_time(rpc_port) / 60) / proportion) if proportion else -1
if all_harvesters is not None and PlotStats.total_plots == 0:
print("Expected time to win: Never (no plots)")
else:
print("Expected time to win: " + format_minutes(minutes))
if amounts is None:
if wallet_not_running:
print("For details on farmed rewards and fees you should run 'chia start wallet' and 'chia wallet show'")
elif wallet_not_ready:
print("For details on farmed rewards and fees you should run 'chia wallet show'")
else:
print("Note: log into your key using 'chia wallet show' to see rewards for each key")
|
the-stack_0_11445 |
# Emma's change
# another change
# Fluffy Happiness: Test code to grab pictures of cute animals from
# the Internet
# Usage: >> python get_fluffy.py [options]
# V.A. Moss ([email protected])
__author__ = "V.A. Moss"
__date__ = "$22-oct-2018 22:00:00$"
__version__ = "0.2"
# Imports
import os
import sys
import urllib.request
import urllib.error
import urllib.parse
import ssl
from random import randint
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from argparse import ArgumentParser, RawTextHelpFormatter
parser = ArgumentParser(formatter_class=RawTextHelpFormatter)
parser.add_argument('-k', '--keywords',
default='cute fluffy animal',
type=str,
help='Specify which kind of search to do(default: %(default)s)')
# Parse the arguments above
args = parser.parse_args()
# Path format
path = 'https://imgur.com/search/score?q=%s' % ('+'.join(args.keywords.split()))
# ONLY GET PUPPIES!!!!!
path = 'https://imgur.com/search/score?q=%s' % ('+puppy')
# Get data from website
request = urllib.request.Request(path)
response = urllib.request.urlopen(request, context=ssl._create_unverified_context())
read_response = response.readlines()
# Possible cuteness
possible = []
for line in read_response:
line = line.decode('utf-8')
if '<img alt="" src="' in line:
image_url = line.split('src="//')[1].split('"')[0]
possible.append('http://'+image_url)
# Now select a random image to show
rand_int = randint(0,len(possible)-1)
print("I've selected image #%i: %s" % (rand_int,possible[rand_int]))
print ("Prepare to cuddle ......")
# Download the image and display it
# note: imgur adds a b to names for some reason.
img_name = (possible[rand_int].split('b.jpg')[0]+'.jpg').split('/')[-1]
image_path = 'https://i.imgur.com/' + img_name
urllib.request.urlretrieve('%s' % image_path,'%s' % img_name)
# Show the image in matplotlib
img = mpimg.imread(img_name)
imgplot = plt.imshow(img)
plt.show()
# Bla bla bla
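
# --- Hedged usage note (not part of the original script) ---
# Example invocation (the keyword below is only an illustration); note that the
# script currently overrides the -k option with a fixed '+puppy' query above:
#   python get_fluffy.py -k "cute fluffy puppy"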
|
the-stack_0_11446 | import torch
import torch.nn as nn
import torch.nn.functional as F
from autoencoder import Encoder, Decoder
class BasicBlock(torch.nn.Module):
def __init__(self, filters=64):
'residual basic block'
super().__init__()
self.residual = torch.nn.Sequential(
nn.Conv2d(filters, filters, 3, 1, padding=1, bias=False),
nn.BatchNorm2d(filters),
nn.ReLU(),
nn.Conv2d(filters, filters, 3, 1, padding=1, bias=False),
nn.BatchNorm2d(filters)
)
def forward(self, x):
return x + self.residual(x)
class ELU_BatchNorm2d(torch.nn.Module):
def __init__(self, filters=64):
super().__init__()
self.actnorm = torch.nn.Sequential(
torch.nn.ELU(),
torch.nn.BatchNorm2d(filters),
)
def forward(self, x):
return self.actnorm(x)
class Res_Encoder(Encoder):
def __init__(self, filters=[4, 8, 16, 32], bottleneck=10):
super().__init__()
self.activate = nn.ELU()
self.main = nn.Sequential(
nn.Conv2d(1, filters[0], 3, 1, padding=1),
self.activate,
BasicBlock(filters[0]),
ELU_BatchNorm2d(filters[0]),
nn.Conv2d(filters[0], filters[1], 5, 2),
self.activate,
BasicBlock(filters[1]),
ELU_BatchNorm2d(filters[1]),
nn.Conv2d(filters[1], filters[2], 5, 2),
self.activate,
BasicBlock(filters[2]),
ELU_BatchNorm2d(filters[2]),
nn.Conv2d(filters[2], filters[3], 3, 2),
self.activate
)
self.mean = nn.Conv2d(filters[3], bottleneck, 1, 1)
self.logvar = nn.Conv2d(filters[3], bottleneck, 1, 1)
class Res_Decoder(Decoder):
def __init__(self, filters=[4, 8, 16, 32], bottleneck=10):
super().__init__()
self.activate = nn.ELU()
self.main = nn.Sequential(
nn.Conv2d(bottleneck, filters[-1], 1, 1, bias=False),
self.activate,
BasicBlock(filters[-1]),
ELU_BatchNorm2d(filters[-1]),
nn.ConvTranspose2d(filters[-1], filters[-2], 3, 2, output_padding=1),
self.activate,
BasicBlock(filters[-2]),
ELU_BatchNorm2d(filters[-2]),
nn.ConvTranspose2d(filters[-2], filters[-3], 5, 2, output_padding=1),
self.activate,
BasicBlock(filters[-3]),
ELU_BatchNorm2d(filters[-3]),
nn.ConvTranspose2d(filters[-3], filters[-4], 5, 2, output_padding=1),
self.activate,
BasicBlock(filters[-4]),
ELU_BatchNorm2d(filters[-4]),
nn.Conv2d(filters[-4], 1, 3, 1, padding=1),
nn.Sigmoid()
)
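
# --- Hedged shape-check sketch (not part of the original module) ---
# Illustrates the tensor shapes produced by the layers defined above. The
# 28x28 input size is an assumption (the strides/kernels reduce 28x28 to a
# 1x1 bottleneck); only .main, .mean and .logvar are exercised here because
# the Encoder/Decoder base-class forward() lives in the autoencoder module.
if __name__ == "__main__":
    enc = Res_Encoder(filters=[4, 8, 16, 32], bottleneck=10)
    dec = Res_Decoder(filters=[4, 8, 16, 32], bottleneck=10)
    x = torch.randn(2, 1, 28, 28)            # batch of 2 single-channel images
    h = enc.main(x)                           # -> (2, 32, 1, 1)
    mu, logvar = enc.mean(h), enc.logvar(h)   # -> (2, 10, 1, 1) each
    recon = dec.main(mu)                      # -> (2, 1, 28, 28)
    print(h.shape, mu.shape, logvar.shape, recon.shape)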
|
the-stack_0_11448 | import datetime as dt
import cx_Oracle
from typing import List
from src.typeDefs.metricsDataRecord import IMetricsDataRecord
def getIexRtmBlockWiseData(appDbConnStr: str, col_attributes: str, startDt: dt.datetime, endDt: dt.datetime) -> List[IMetricsDataRecord]:
targetColumns = ['TRUNC(TIME_STAMP)', 'COL_ATTRIBUTES', 'DATA_VALUE']
metricsFetchSql = """
select {0} from
mo_warehouse.iex_rtm where time_stamp >= :1
and time_stamp < :2
and col_attributes = :3
""".format(','.join(targetColumns), col_attributes)
# initialise codes to be returned
dataRecords: List[IMetricsDataRecord] = []
colNames = []
dbRows = []
dbConn = None
dbCur = None
try:
# get connection with raw data table
dbConn = cx_Oracle.connect(appDbConnStr)
# get cursor and execute fetch sql
dbCur = dbConn.cursor()
dbCur.execute(metricsFetchSql, (startDt, endDt, col_attributes))
colNames = [row[0] for row in dbCur.description]
# fetch all rows
dbRows = dbCur.fetchall()
except Exception as err:
dbRows = []
print('Error while fetching iex rtm data between dates')
print(err)
finally:
# closing database cursor and connection
if dbCur is not None:
dbCur.close()
if dbConn is not None:
dbConn.close()
if (False in [(col in targetColumns) for col in colNames]):
# all desired columns not fetched, hence return empty
return []
# iterate through each row to populate result outage rows
for row in dbRows:
timeStamp: IMetricsDataRecord["time_stamp"] = row[colNames.index(
'TRUNC(TIME_STAMP)')]
metric: IMetricsDataRecord["col_attributes"] = row[colNames.index(
'COL_ATTRIBUTES')]
val: IMetricsDataRecord["data_value"] = row[colNames.index(
'DATA_VALUE')]
sampl: IMetricsDataRecord = {
"time_stamp": timeStamp,
"metric_name": metric,
"data_value": val
}
dataRecords.append(sampl)
return dataRecords
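
# --- Hedged usage sketch (not part of the original module) ---
# Example call; the Oracle DSN below is a placeholder and 'MCV_WR' is an
# assumed COL_ATTRIBUTES value used only for illustration.
if __name__ == "__main__":
    records = getIexRtmBlockWiseData(
        appDbConnStr="user/password@host:1521/service",  # placeholder DSN
        col_attributes="MCV_WR",                         # assumed attribute name
        startDt=dt.datetime(2021, 1, 1),
        endDt=dt.datetime(2021, 1, 8),
    )
    for rec in records:
        print(rec["time_stamp"], rec["metric_name"], rec["data_value"])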
|
the-stack_0_11449 | # -*- coding: utf-8 -*-
"""KPI views for creating and viewing the kpis."""
import logging
from typing import Any, Dict, List, Optional, Tuple
from flask.blueprints import Blueprint
from flask.globals import request
from flask.json import jsonify
from flask_sqlalchemy import Pagination
from sqlalchemy.orm.attributes import flag_modified
from chaos_genius.controllers.dashboard_controller import (
create_dashboard_kpi_mapper,
disable_mapper_for_kpi_ids,
edit_kpi_dashboards,
enable_mapper_for_kpi_ids,
get_dashboard_list_by_ids,
get_mapper_obj_by_kpi_ids,
kpi_dashboard_mapper_dict,
)
from chaos_genius.controllers.kpi_controller import (
delete_anomaly_output_for_kpi,
delete_rca_output_for_kpi,
get_anomaly_count,
get_kpi_data_from_id,
)
from chaos_genius.core.rca.constants import TIME_RANGES_BY_KEY
from chaos_genius.core.rca.rca_utils.api_utils import kpi_aggregation, kpi_line_data
from chaos_genius.core.utils.kpi_validation import validate_kpi
from chaos_genius.databases.db_utils import chech_editable_field
from chaos_genius.databases.models.dashboard_kpi_mapper_model import DashboardKpiMapper
from chaos_genius.databases.models.data_source_model import DataSource
from chaos_genius.databases.models.kpi_model import Kpi
from chaos_genius.extensions import db
from chaos_genius.settings import DEEPDRILLS_ENABLED_TIME_RANGES
from chaos_genius.utils.pagination import pagination_args, pagination_info
from chaos_genius.utils.search import SEARCH_PARAM_NAME, make_search_filter
blueprint = Blueprint("api_kpi", __name__)
logger = logging.getLogger(__name__)
@blueprint.route("/", methods=["GET", "POST"]) # TODO: Remove this
@blueprint.route("", methods=["GET", "POST"])
def kpi():
"""List KPIs."""
# Handle logging in
if request.method == "POST":
data = request.get_json()
if data is None:
return (
jsonify(
{
"error": "The request payload is not in JSON format",
"status": "failure",
}
),
400,
)
data["dimensions"] = [] if data["dimensions"] is None else data["dimensions"]
if data.get("kpi_query", "").strip():
data["kpi_query"] = data["kpi_query"].strip()
# remove trailing semicolon
if data["kpi_query"][-1] == ";":
data["kpi_query"] = data["kpi_query"][:-1]
new_kpi = Kpi(
name=data.get("name"),
is_certified=data.get("is_certified"),
data_source=data.get("data_source"),
kpi_type=data.get("dataset_type"),
kpi_query=data.get("kpi_query"),
schema_name=data.get("schema_name"),
table_name=data.get("table_name"),
metric=data.get("metric"),
aggregation=data.get("aggregation"),
datetime_column=data.get("datetime_column"),
filters=data.get("filters"),
dimensions=data.get("dimensions"),
)
# Perform KPI Validation
status, message, tz_aware = validate_kpi(new_kpi.as_dict, check_tz_aware=True)
if status is not True:
return jsonify(
{"error": message, "status": "failure", "is_critical": "true"}
)
new_kpi.timezone_aware = tz_aware
new_kpi.save(commit=True)
# Add the dashboard id 0 to the kpi
dashboard_list = data.get("dashboards", []) + [0]
dashboard_list = list(set(dashboard_list))
create_dashboard_kpi_mapper(dashboard_list, [new_kpi.id])
# TODO: Fix circular import error
from chaos_genius.jobs.anomaly_tasks import ready_rca_task
# run rca as soon as new KPI is added
rca_task = ready_rca_task(new_kpi.id)
if rca_task is None:
logger.warn(
"Could not run RCA task since newly added KPI was not found: "
+ f"{new_kpi.id}"
)
else:
rca_task.apply_async()
return jsonify(
{
"data": {"kpi_id": new_kpi.id},
"message": f"KPI {new_kpi.name} has been created successfully.",
"status": "success",
}
)
elif request.method == "GET":
# TODO: abstract this filter params extraction logic
dashboard_ids_list = request.args.getlist("dashboard_id")
datasource_types_list = request.args.getlist("datasource_type")
paginate = request.args.get("paginate") != "false"
page, per_page = pagination_args(request)
search_query, search_filter = make_search_filter(request, Kpi.name)
filters = [Kpi.active == True] # noqa: E712
if search_filter is not None:
filters.append(search_filter)
if datasource_types_list and datasource_types_list != [""]:
filters.append(
DataSource.connection_type.in_(
[
datasource_type
for datasource_types in datasource_types_list
for datasource_type in datasource_types.split(",")
]
)
)
kpis: List[Tuple[Kpi, DataSource]]
kpis_paginated: Optional[Pagination] = None
if dashboard_ids_list and dashboard_ids_list != [""]:
dashboard_ids = [
int(dashboard_id)
for dashboard_ids in dashboard_ids_list
for dashboard_id in dashboard_ids.split(",")
]
kpis_query = (
db.session.query(Kpi, DataSource)
.join(DataSource, Kpi.data_source == DataSource.id)
.join(DashboardKpiMapper, Kpi.id == DashboardKpiMapper.kpi)
.filter(
*filters,
DashboardKpiMapper.active == True, # noqa: E712
DashboardKpiMapper.dashboard.in_(dashboard_ids),
)
)
# TODO: refactor this to reduce code duplication
if paginate:
kpis_paginated_ = kpis_query.order_by(Kpi.created_at.desc()).paginate(
page=page, per_page=per_page
)
kpis = kpis_paginated_.items
kpis_paginated = kpis_paginated_
else:
kpis = kpis_query.all()
else:
kpis_query = (
db.session.query(Kpi, DataSource)
.join(DataSource, Kpi.data_source == DataSource.id)
.filter(*filters) # noqa: E712
)
if paginate:
kpis_paginated_ = kpis_query.order_by(Kpi.created_at.desc()).paginate(
page=page, per_page=per_page
)
kpis = kpis_paginated_.items
kpis_paginated = kpis_paginated_
else:
kpis = kpis_query.all()
kpi_dashboard_mapper = kpi_dashboard_mapper_dict(
[kpi.id for kpi, _ in kpis], as_dict=True
)
kpi_infos: List[Dict[str, Any]] = []
for row in kpis:
kpi_info = row[0].safe_dict
data_source_info = row[1].safe_dict
kpi_info["data_source"] = data_source_info
dashboards = kpi_dashboard_mapper[kpi_info["id"]]
kpi_info["dashboards"] = dashboards
kpi_infos.append(kpi_info)
return jsonify(
{
"count": len(kpi_infos),
"data": kpi_infos,
"pagination": (
pagination_info(kpis_paginated)
if kpis_paginated is not None
else None
),
SEARCH_PARAM_NAME: search_query,
}
)
@blueprint.route("/get-dashboard-list", methods=["GET"])
def get_all_kpis():
"""List KPIs for a particular dashboard."""
status, message = "success", ""
timeline = request.args.get("timeline", "last_7_days")
dashboard_ids_list = request.args.getlist("dashboard_id")
page, per_page = pagination_args(request)
search_query, search_filter = make_search_filter(request, Kpi.name)
filters = [Kpi.active == True] # noqa: E712
if search_filter is not None:
filters.append(search_filter)
kpis_paginated: Optional[Pagination] = None
ret = []
try:
if dashboard_ids_list and dashboard_ids_list != [""]:
dashboard_ids = [
int(dashboard_id)
for dashboard_ids in dashboard_ids_list
for dashboard_id in dashboard_ids.split(",")
]
kpis_paginated_: Pagination = (
Kpi.query.join(DashboardKpiMapper, DashboardKpiMapper.kpi == Kpi.id)
.filter(
*filters,
DashboardKpiMapper.active == True, # noqa: E712
DashboardKpiMapper.dashboard.in_(dashboard_ids),
)
.order_by(Kpi.created_at.desc())
.paginate(page=page, per_page=per_page)
)
else:
kpis_paginated_: Pagination = (
Kpi.query.filter(*filters)
.order_by(Kpi.created_at.desc())
.paginate(page=page, per_page=per_page)
)
# this is only required to let the type checker know that this is not None here
kpis_paginated = kpis_paginated_
metrics = ["name", "metric", "id"]
for kpi in kpis_paginated.items:
info = {key: getattr(kpi, key) for key in metrics}
_, _, aggregate_data = kpi_aggregation(kpi.id, timeline)
info["prev"] = aggregate_data["aggregation"][0]["value"]
info["current"] = aggregate_data["aggregation"][1]["value"]
info["change"] = aggregate_data["aggregation"][2]["value"]
info["percentage_change"] = aggregate_data["aggregation"][3]["value"]
info["display_value_prev"] = TIME_RANGES_BY_KEY[timeline][
"last_period_name"
]
info["display_value_current"] = TIME_RANGES_BY_KEY[timeline][
"current_period_name"
]
info["anomaly_count"] = get_anomaly_count(kpi.id, timeline)
_, _, info["graph_data"] = kpi_line_data(kpi.id)
ret.append(info)
except Exception as e: # noqa: E722
status = "failure"
message = str(e)
logger.error(message, exc_info=True)
return jsonify(
{
"data": ret,
"message": message,
"status": status,
"pagination": pagination_info(kpis_paginated)
if kpis_paginated is not None
else None,
SEARCH_PARAM_NAME: search_query,
}
)
@blueprint.route("/get-timecuts-list", methods=["GET"])
def get_timecuts_list():
"""Returns all active timecuts."""
status, message = "success", ""
ret = {}
try:
enabled_cuts = [
{**{k: v for k, v in value.items() if k != "function"}, "id": key}
for key, value in TIME_RANGES_BY_KEY.items()
if key in DEEPDRILLS_ENABLED_TIME_RANGES
]
ret = enabled_cuts
message = "All timecuts fetched succesfully."
except Exception as e: # noqa: B902
status = "failure"
message = str(e)
logger.error(message)
return jsonify({"data": ret, "message": message, "status": status})
@blueprint.route("/<int:kpi_id>/disable", methods=["GET"])
def disable_kpi(kpi_id):
"""Disable a KPI."""
status, message = "", ""
try:
kpi_obj = Kpi.get_by_id(kpi_id)
if kpi_obj:
kpi_obj.active = False
kpi_obj.save(commit=True)
disable_mapper_for_kpi_ids([kpi_id])
status = "success"
else:
message = "KPI not found"
status = "failure"
except Exception as err: # noqa: B902
status = "failure"
logger.info(f"Error in disabling the KPI: {err}")
return jsonify({"message": message, "status": status})
@blueprint.route("/<int:kpi_id>/enable", methods=["GET"])
def enable_kpi(kpi_id):
"""Enable a KPI."""
status, message = "", ""
try:
kpi_obj = Kpi.get_by_id(kpi_id)
if kpi_obj:
kpi_obj.active = True
kpi_obj.save(commit=True)
enable_mapper_for_kpi_ids([kpi_id])
status = "success"
else:
message = "KPI not found"
status = "failure"
except Exception as err: # noqa: B902
status = "failure"
logger.info(f"Error in enabling the KPI: {err}")
return jsonify({"message": message, "status": status})
@blueprint.route("/<int:kpi_id>/get-dimensions", methods=["GET"])
def kpi_get_dimensions(kpi_id):
"""Retrieve list of dimensions of a KPI."""
dimensions = []
try:
kpi_info = get_kpi_data_from_id(kpi_id)
dimensions = kpi_info["dimensions"]
except Exception as err: # noqa: B902
logger.info(f"Error Found: {err}")
return jsonify({"dimensions": dimensions, "msg": ""})
@blueprint.route("/meta-info", methods=["GET"])
def kpi_meta_info():
"""Meta info of fields of KPI."""
logger.info("kpi meta info")
return jsonify({"data": Kpi.meta_info()})
@blueprint.route("/<int:kpi_id>/update", methods=["PUT"])
def edit_kpi(kpi_id):
"""Edit a KPI."""
status, message = "", ""
do_not_run_analytics_list = ["name", "dashboards"]
run_analytics = False
try:
kpi_obj = Kpi.get_by_id(kpi_id)
data = request.get_json()
if data is None:
raise Exception("Request body is not a JSON.")
meta_info = Kpi.meta_info()
if kpi_obj and kpi_obj.active is True:
dashboard_id_list = data.pop("dashboards", []) + [0]
dashboard_id_list = list(set(dashboard_id_list))
for key, value in data.items():
if key not in do_not_run_analytics_list:
run_analytics = True
if chech_editable_field(meta_info, key):
setattr(kpi_obj, key, value)
# check if dimensions are editted
if "dimensions" in data.keys():
# if empty, do not run anomaly on subdim
if len(data["dimensions"]) < 1:
run_optional = {
"data_quality": True,
"overall": True,
"subdim": False,
}
else:
run_optional = {
"data_quality": True,
"overall": True,
"subdim": True,
}
if "run_optional" not in kpi_obj.anomaly_params or (
kpi_obj.anomaly_params["run_optional"]["subdim"]
!= run_optional["subdim"]
):
kpi_obj.anomaly_params["run_optional"] = run_optional
flag_modified(kpi_obj, "anomaly_params")
if run_analytics:
logger.info(
"Deleting analytics output and re-running tasks since KPI was "
+ f"edited for KPI ID: {kpi_id}"
)
from chaos_genius.jobs.anomaly_tasks import ready_rca_task
rca_task = ready_rca_task(kpi_id)
if rca_task is not None:
delete_rca_output_for_kpi(kpi_id)
rca_task.apply_async()
logger.info(f"RCA started for KPI ID after editing: {kpi_id}")
else:
logger.info(
"RCA failed for KPI ID since KPI does not exist after editing:"
+ f" {kpi_id}"
)
from chaos_genius.jobs.anomaly_tasks import ready_anomaly_task
anomaly_task = ready_anomaly_task(kpi_id)
if anomaly_task is not None:
delete_anomaly_output_for_kpi(kpi_id)
anomaly_task.apply_async()
logger.info(f"Anomaly started for KPI ID after editing: {kpi_id}")
else:
logger.info(
"Anomaly failed for KPI ID since KPI does not exist after "
+ f"editing: {kpi_id}"
)
edit_kpi_dashboards(kpi_id, dashboard_id_list)
kpi_obj.save(commit=True)
status = "success"
else:
message = "KPI not found or disabled"
status = "failure"
except Exception as err: # noqa: B902
status = "failure"
logger.info(f"Error in updating the KPI: {err}")
message = str(err)
return jsonify({"message": message, "status": status})
@blueprint.route("/<int:kpi_id>", methods=["GET"])
def get_kpi_info(kpi_id):
"""Retrieve details of a KPI."""
status, message = "", ""
data = None
try:
kpi_obj = get_kpi_data_from_id(kpi_id)
data = kpi_obj
mapper_obj_list = get_mapper_obj_by_kpi_ids([kpi_id])
dashboard_id_list = [mapper.dashboard for mapper in mapper_obj_list]
dashboard_list = get_dashboard_list_by_ids(dashboard_id_list)
dashboard_list = [dashboard.as_dict for dashboard in dashboard_list]
data["dashboards"] = dashboard_list
status = "success"
except Exception as err: # noqa: B902
status = "failure"
message = str(err)
logger.info(f"Error in fetching the KPI: {err}")
return jsonify({"message": message, "status": status, "data": data})
@blueprint.route("/<int:kpi_id>/trigger-analytics", methods=["GET"])
def trigger_analytics(kpi_id):
"""Trigger analytics tasks for a KPI."""
# TODO: Fix circular import error
from chaos_genius.jobs.anomaly_tasks import ready_anomaly_task, ready_rca_task
rca_task = ready_rca_task(kpi_id)
anomaly_task = ready_anomaly_task(kpi_id)
if rca_task is not None and anomaly_task is not None:
rca_task.apply_async()
anomaly_task.apply_async()
else:
logger.warn(f"Could not analytics since KPI was not found: {kpi_id}")
return jsonify({"message": "RCA and Anomaly triggered successfully"})
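
# --- Hedged usage sketch (not part of the original module) ---
# How this blueprint is typically attached to a Flask app; the URL prefix is an
# assumption for illustration and may differ from the real application factory.
if __name__ == "__main__":
    from flask import Flask

    _app = Flask(__name__)
    _app.register_blueprint(blueprint, url_prefix="/api/kpi")  # assumed prefix
    print(_app.url_map)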
|
the-stack_0_11451 | class Solution:
def findLeaves(self, root: TreeNode) -> List[List[int]]:
d = collections.defaultdict(list)
self.dfs(d, root)
res = []
for v in d.values():
res.append(v)
return res
def dfs(self, d, node):
if not node:
return 0
left = self.dfs(d, node.left)
right = self.dfs(d, node.right)
depth = max(left, right) + 1
d[depth].append(node.val)
return depth |
the-stack_0_11452 | from math import atan2
from ..Qt import QtGui, QtCore
from ..Point import Point
from .. import functions as fn
from .GraphicsObject import GraphicsObject
from .UIGraphicsItem import UIGraphicsItem
from .TextItem import TextItem
from .ScatterPlotItem import Symbols, makeCrosshair
from .ViewBox import ViewBox
import string
import warnings
class TargetItem(UIGraphicsItem):
"""Draws a draggable target symbol (circle plus crosshair).
The size of TargetItem will remain fixed on screen even as the view is zoomed.
Includes an optional text label.
"""
sigPositionChanged = QtCore.Signal(object)
sigPositionChangeFinished = QtCore.Signal(object)
def __init__(
self,
pos=None,
size=10,
radii=None,
symbol="crosshair",
pen=None,
hoverPen=None,
brush=None,
hoverBrush=None,
movable=True,
label=None,
labelOpts=None,
):
r"""
Parameters
----------
pos : list, tuple, QPointF, QPoint, Optional
Initial position of the symbol. Default is (0, 0)
size : int
Size of the symbol in pixels. Default is 10.
radii : tuple of int
Deprecated. Gives size of crosshair in screen pixels.
pen : QPen, tuple, list or str
Pen to use when drawing line. Can be any arguments that are valid
for :func:`~pyqtgraph.mkPen`. Default pen is transparent yellow.
brush : QBrush, tuple, list, or str
Defines the brush that fill the symbol. Can be any arguments that
is valid for :func:`~pyqtgraph.mkBrush`. Default is transparent
blue.
movable : bool
If True, the symbol can be dragged to a new position by the user.
hoverPen : QPen, tuple, list, or str
Pen to use when drawing symbol when hovering over it. Can be any
arguments that are valid for :func:`~pyqtgraph.mkPen`. Default pen
is red.
hoverBrush : QBrush, tuple, list or str
Brush to use to fill the symbol when hovering over it. Can be any
arguments that is valid for :func:`~pyqtgraph.mkBrush`. Default is
transparent blue.
symbol : QPainterPath or str
QPainterPath to use for drawing the target, should be centered at
``(0, 0)`` with ``max(width, height) == 1.0``. Alternatively a string
which can be any symbol accepted by
:func:`~pyqtgraph.ScatterPlotItem.setData`
label : bool, str or callable, optional
Text to be displayed in a label attached to the symbol, or None to
show no label (default is None). May optionally include formatting
strings to display the symbol value, or a callable that accepts x
and y as inputs. If True, the label is ``x = {: >.3n}\ny = {: >.3n}``
False or None will result in no text being displayed
labelOpts : dict
A dict of keyword arguments to use when constructing the text
label. See :class:`TargetLabel` and :class:`~pyqtgraph.TextItem`
"""
super().__init__(self)
self.movable = movable
self.moving = False
self._label = None
self.mouseHovering = False
if radii is not None:
warnings.warn(
"'radii' is now deprecated, and will be removed in 0.13.0. Use 'size' "
"parameter instead",
DeprecationWarning,
stacklevel=2,
)
symbol = makeCrosshair(*radii)
size = 1
if pen is None:
pen = (255, 255, 0)
self.setPen(pen)
if hoverPen is None:
hoverPen = (255, 0, 255)
self.setHoverPen(hoverPen)
if brush is None:
brush = (0, 0, 255, 50)
self.setBrush(brush)
if hoverBrush is None:
hoverBrush = (0, 255, 255, 100)
self.setHoverBrush(hoverBrush)
self.currentPen = self.pen
self.currentBrush = self.brush
self._shape = None
self._pos = Point(0, 0)
if pos is None:
pos = Point(0, 0)
self.setPos(pos)
if isinstance(symbol, str):
try:
self._path = Symbols[symbol]
except KeyError:
raise KeyError("symbol name found in available Symbols")
elif isinstance(symbol, QtGui.QPainterPath):
self._path = symbol
else:
raise TypeError("Unknown type provided as symbol")
self.scale = size
self.setPath(self._path)
self.setLabel(label, labelOpts)
@property
def sigDragged(self):
warnings.warn(
"'sigDragged' has been deprecated and will be removed in 0.13.0. Use "
"`sigPositionChanged` instead",
DeprecationWarning,
stacklevel=2,
)
return self.sigPositionChangeFinished
def setPos(self, pos):
"""Method to set the position to ``(x, y)`` within the plot view
Parameters
----------
pos : tuple, list, QPointF, QPoint, or pg.Point
Container that consists of ``(x, y)`` representation of where the
TargetItem should be placed
Raises
------
TypeError
If the type of ``pos`` does not match the known types to extract
coordinate info from, a TypeError is raised
"""
if isinstance(pos, Point):
newPos = pos
elif isinstance(pos, (tuple, list)):
newPos = Point(pos)
elif isinstance(pos, (QtCore.QPointF, QtCore.QPoint)):
newPos = Point(pos.x(), pos.y())
else:
raise TypeError
if self._pos != newPos:
self._pos = newPos
super().setPos(self._pos)
self.sigPositionChanged.emit(self)
def setBrush(self, *args, **kwargs):
"""Set the brush that fills the symbol. Allowable arguments are any that
are valid for :func:`~pyqtgraph.mkBrush`.
"""
self.brush = fn.mkBrush(*args, **kwargs)
if not self.mouseHovering:
self.currentBrush = self.brush
self.update()
def setHoverBrush(self, *args, **kwargs):
"""Set the brush that fills the symbol when hovering over it. Allowable
arguments are any that are valid for :func:`~pyqtgraph.mkBrush`.
"""
self.hoverBrush = fn.mkBrush(*args, **kwargs)
if self.mouseHovering:
self.currentBrush = self.hoverBrush
self.update()
def setPen(self, *args, **kwargs):
"""Set the pen for drawing the symbol. Allowable arguments are any that
are valid for :func:`~pyqtgraph.mkPen`."""
self.pen = fn.mkPen(*args, **kwargs)
if not self.mouseHovering:
self.currentPen = self.pen
self.update()
def setHoverPen(self, *args, **kwargs):
"""Set the pen for drawing the symbol when hovering over it. Allowable
arguments are any that are valid for
:func:`~pyqtgraph.mkPen`."""
self.hoverPen = fn.mkPen(*args, **kwargs)
if self.mouseHovering:
self.currentPen = self.hoverPen
self.update()
def boundingRect(self):
return self.shape().boundingRect()
def paint(self, p, *_):
p.setPen(self.currentPen)
p.setBrush(self.currentBrush)
p.drawPath(self.shape())
def setPath(self, path):
if path != self._path:
self._path = path
self._shape = None
return None
def shape(self):
if self._shape is None:
s = self.generateShape()
if s is None:
return self._path
self._shape = s
# beware--this can cause the view to adjust
# which would immediately invalidate the shape.
self.prepareGeometryChange()
return self._shape
def generateShape(self):
dt = self.deviceTransform()
if dt is None:
self._shape = self._path
return None
v = dt.map(QtCore.QPointF(1, 0)) - dt.map(QtCore.QPointF(0, 0))
dti = fn.invertQTransform(dt)
devPos = dt.map(QtCore.QPointF(0, 0))
tr = QtGui.QTransform()
tr.translate(devPos.x(), devPos.y())
va = atan2(v.y(), v.x())
tr.rotateRadians(va)
tr.scale(self.scale, self.scale)
return dti.map(tr.map(self._path))
def mouseDragEvent(self, ev):
if not self.movable or int(ev.button() & QtCore.Qt.LeftButton) == 0:
return
ev.accept()
if ev.isStart():
self.symbolOffset = self.pos() - self.mapToView(ev.buttonDownPos())
self.moving = True
if not self.moving:
return
self.setPos(self.symbolOffset + self.mapToView(ev.pos()))
if ev.isFinish():
self.moving = False
self.sigPositionChangeFinished.emit(self)
def mouseClickEvent(self, ev):
if self.moving and ev.button() == QtCore.Qt.RightButton:
ev.accept()
self.moving = False
self.sigPositionChanged.emit(self)
self.sigPositionChangeFinished.emit(self)
def setMouseHover(self, hover):
# Inform the item that the mouse is(not) hovering over it
if self.mouseHovering is hover:
return
self.mouseHovering = hover
if hover:
self.currentBrush = self.hoverBrush
self.currentPen = self.hoverPen
else:
self.currentBrush = self.brush
self.currentPen = self.pen
self.update()
def hoverEvent(self, ev):
if self.movable and (not ev.isExit()) and ev.acceptDrags(QtCore.Qt.LeftButton):
self.setMouseHover(True)
else:
self.setMouseHover(False)
def viewTransformChanged(self):
GraphicsObject.viewTransformChanged(self)
self._shape = None # invalidate shape, recompute later if requested.
self.update()
def pos(self):
"""Provides the current position of the TargetItem
Returns
-------
Point
pg.Point of the current position of the TargetItem
"""
return self._pos
def label(self):
"""Provides the TargetLabel if it exists
Returns
-------
TargetLabel or None
If a TargetLabel exists for this TargetItem, return that, otherwise
return None
"""
return self._label
def setLabel(self, text=None, labelOpts=None):
"""Method to call to enable or disable the TargetLabel for displaying text
Parameters
----------
text : Callable or str, optional
Details how to format the text, by default None
If None, do not show any text next to the TargetItem
If Callable, then the label will display the result of ``text(x, y)``
            If a formatted string, then the output of ``text.format(x, y)`` will be
displayed
If a non-formatted string, then the text label will display ``text``, by
default None
labelOpts : dictionary, optional
These arguments are passed on to :class:`~pyqtgraph.TextItem`
"""
if not text:
if self._label is not None and self._label.scene() is not None:
# remove the label if it's already added
self._label.scene().removeItem(self._label)
self._label = None
else:
# provide default text if text is True
if text is True:
# convert to default value or empty string
text = "x = {: .3n}\ny = {: .3n}"
labelOpts = {} if labelOpts is None else labelOpts
if self._label is not None:
self._label.scene().removeItem(self._label)
self._label = TargetLabel(self, text=text, **labelOpts)
def setLabelAngle(self, angle):
warnings.warn(
"TargetItem.setLabelAngle is deprecated and will be removed in 0.13.0."
"Use TargetItem.label().setAngle() instead",
DeprecationWarning,
stacklevel=2,
)
if self.label() is not None and angle != self.label().angle:
self.label().setAngle(angle)
return None
class TargetLabel(TextItem):
"""A TextItem that attaches itself to a TargetItem.
This class extends TextItem with the following features :
* Automatically positions adjacent to the symbol at a fixed position.
* Automatically reformats text when the symbol location has changed.
Parameters
----------
target : TargetItem
The TargetItem to which this label will be attached to.
text : str or callable, Optional
        Governs the text displayed; can be a fixed string or a format string
        that accepts the x and y position of the target item, or a callable
        that accepts x and y and returns the string to be displayed.
        If None, an empty string is used. Default is None.
offset : tuple or list or QPointF or QPoint
Position to set the anchor of the TargetLabel away from the center of
the target in pixels, by default it is (20, 0).
anchor : tuple, list, QPointF or QPoint
        Position to rotate the TargetLabel about, and position to set the
        offset value from; see :class:`~pyqtgraph.TextItem` for more information.
kwargs : dict of arguments that are passed on to
:class:`~pyqtgraph.TextItem` constructor, excluding text parameter
"""
def __init__(
self,
target,
text="",
offset=(20, 0),
anchor=(0, 0.5),
**kwargs,
):
if isinstance(offset, Point):
self.offset = offset
elif isinstance(offset, (tuple, list)):
self.offset = Point(*offset)
elif isinstance(offset, (QtCore.QPoint, QtCore.QPointF)):
self.offset = Point(offset.x(), offset.y())
else:
raise TypeError("Offset parameter is the wrong data type")
super().__init__(anchor=anchor, **kwargs)
self.setParentItem(target)
self.target = target
self.setFormat(text)
self.target.sigPositionChanged.connect(self.valueChanged)
self.valueChanged()
def format(self):
return self._format
def setFormat(self, text):
"""Method to set how the TargetLabel should display the text. This
method should be called from TargetItem.setLabel directly.
Parameters
----------
text : Callable or str
Details how to format the text.
If Callable, then the label will display the result of ``text(x, y)``
            If a formatted string, then the output of ``text.format(x, y)`` will be
displayed
If a non-formatted string, then the text label will display ``text``
"""
if not callable(text):
parsed = list(string.Formatter().parse(text))
if parsed and parsed[0][1] is not None:
self.setProperty("formattableText", True)
else:
self.setText(text)
self.setProperty("formattableText", False)
else:
self.setProperty("formattableText", False)
self._format = text
self.valueChanged()
def valueChanged(self):
x, y = self.target.pos()
if self.property("formattableText"):
self.setText(self._format.format(float(x), float(y)))
elif callable(self._format):
self.setText(self._format(x, y))
def viewTransformChanged(self):
viewbox = self.getViewBox()
if isinstance(viewbox, ViewBox):
viewPixelSize = viewbox.viewPixelSize()
scaledOffset = QtCore.QPointF(
self.offset.x() * viewPixelSize[0], self.offset.y() * viewPixelSize[1]
)
self.setPos(scaledOffset)
return super().viewTransformChanged()
def mouseClickEvent(self, ev):
return self.parentItem().mouseClickEvent(ev)
def mouseDragEvent(self, ev):
targetItem = self.parentItem()
if not targetItem.movable or int(ev.button() & QtCore.Qt.LeftButton) == 0:
return
ev.accept()
if ev.isStart():
targetItem.symbolOffset = targetItem.pos() - self.mapToView(
ev.buttonDownPos()
)
targetItem.moving = True
if not targetItem.moving:
return
targetItem.setPos(targetItem.symbolOffset + self.mapToView(ev.pos()))
if ev.isFinish():
targetItem.moving = False
targetItem.sigPositionChangeFinished.emit(self)
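
# --- Hedged usage sketch (not part of the original module) ---
# A draggable target with a formatted label on a simple plot. This assumes the
# classes above are importable (e.g. via pyqtgraph's public API) rather than by
# running this file directly, and app.exec_() assumes a Qt5 binding.
if __name__ == "__main__":
    import pyqtgraph as pg

    app = pg.mkQApp()
    plot = pg.plot(title="TargetItem demo")
    target = TargetItem(pos=(1.0, 2.0), size=12,
                        label="x = {:0.2f}\ny = {:0.2f}",
                        labelOpts={"offset": (25, 0)})
    plot.addItem(target)
    target.sigPositionChangeFinished.connect(lambda t: print("moved to", t.pos()))
    app.exec_()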
|
the-stack_0_11453 | from callsmusic.callsmusic import client as USER
from pyrogram import Client, filters
from pyrogram.types import Message, InlineKeyboardButton, InlineKeyboardMarkup
import config
from config import BOT_USERNAME
from pyrogram.errors import UserAlreadyParticipant
from helpers.decorators import errors, authorized_users_only
@Client.on_message(filters.group & filters.command(["userbotjoin"]))
@authorized_users_only
@errors
async def addchannel(client, message):
chid = message.chat.id
try:
invitelink = await client.export_chat_invite_link(chid)
except:
await message.reply_text(
"<b>Aggiungimi come admin</b>",
)
return
try:
user = await USER.get_me()
except:
user.first_name = "Music"
try:
await USER.join_chat(invitelink)
        await USER.send_message(message.chat.id, "I have joined!")
except UserAlreadyParticipant:
await message.reply_text(
"<b>Userbot già in chat</b>",
)
pass
except Exception as e:
print(e)
await message.reply_text(
f"<b>🛑 Flood Wait Error 🛑</b>",
)
return
await message.reply_text(
"<b>L'userbot è entrato</b>",
)
@USER.on_message(filters.group & filters.command(["userbotleave"]))
async def rem(USER, message):
try:
await USER.leave_chat(message.chat.id)
    except Exception:
        await message.reply_text(
            "<b>Error!</b>",
)
return
|
the-stack_0_11455 | from sqlalchemy import engine_from_config
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import configure_mappers
import zope.sqlalchemy
# import or define all models here to ensure they are attached to the
# Base.metadata prior to any initialization routines
from .horse import Horse
from .race import Race
# run configure_mappers after defining all of the models to ensure
# all relationships can be setup
configure_mappers()
def get_engine(settings, prefix='sqlalchemy.'):
return engine_from_config(settings, prefix)
def get_session_factory(engine):
factory = sessionmaker()
factory.configure(bind=engine)
return factory
def get_tm_session(session_factory, transaction_manager):
"""
Get a ``sqlalchemy.orm.Session`` instance backed by a transaction.
This function will hook the session to the transaction manager which
will take care of committing any changes.
- When using pyramid_tm it will automatically be committed or aborted
depending on whether an exception is raised.
- When using scripts you should wrap the session in a manager yourself.
For example::
import transaction
engine = get_engine(settings)
session_factory = get_session_factory(engine)
with transaction.manager:
dbsession = get_tm_session(session_factory, transaction.manager)
"""
dbsession = session_factory()
zope.sqlalchemy.register(
dbsession, transaction_manager=transaction_manager)
return dbsession
def includeme(config):
"""
Initialize the model for a Pyramid app.
Activate this setup using ``config.include('midway.models')``.
"""
settings = config.get_settings()
settings['tm.manager_hook'] = 'pyramid_tm.explicit_manager'
# use pyramid_tm to hook the transaction lifecycle to the request
config.include('pyramid_tm')
# use pyramid_retry to retry a request when transient exceptions occur
config.include('pyramid_retry')
session_factory = get_session_factory(get_engine(settings))
config.registry['dbsession_factory'] = session_factory
# make request.dbsession available for use in Pyramid
config.add_request_method(
# r.tm is the transaction manager used by pyramid_tm
lambda r: get_tm_session(session_factory, r.tm),
'dbsession',
reify=True
)
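# A hedged usage sketch for a view callable once includeme() has run; the view
# name and the query are illustrative only, but `Horse` is the model imported
# above:
#
#     def list_horses(request):
#         return {'horses': request.dbsession.query(Horse).all()}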
|
the-stack_0_11456 | #!/usr/bin/python3
import logging
import os
import json
import uuid
import datetime
import urllib.request
from utils import getConfig
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
LOG_FILE = os.path.join(BASE_DIR, 'output.log')
logger = logging.getLogger('transatlanticTorrentExpress')
logger.setLevel(logging.DEBUG)
if not os.path.isfile(LOG_FILE):
print('Log file does not exist yet, creating in project folder')
f = open(LOG_FILE, 'w+')
f.close()
fh = logging.FileHandler(LOG_FILE)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
class ESHandler(logging.Handler):
def __init__(self, *args, **kwargs):
self.host = kwargs.get('host')
self.port = kwargs.get('port') or 9200
self.date = datetime.date.today()
self.sessionID = uuid.uuid4()
logging.StreamHandler.__init__(self)
def emit(self, record):
self.format(record)
indexURL = 'http://{}:{}/transatlantic_torrent_express-{}/_doc'.format(self.host, self.port, self.date.strftime('%Y.%m.%d'))
doc = {
'severity': record.levelname,
'message': record.message,
'@timestamp': int(record.created*1000),
'sessionID': str(self.sessionID)
}
if hasattr(record, 'es'):
for param in record.es.values():
if ': {}'.format(param) in record.message:
doc['message'] = record.message.replace(': {}'.format(str(param)), '')
doc = {**record.es, **doc}
payload = json.dumps(doc).encode('utf8')
req = urllib.request.Request(indexURL, data=payload,
headers={'content-type': 'application/json'})
response = urllib.request.urlopen(req)
response = response.read().decode('utf8')
return response
class ElasticFieldParameterAdapter(logging.LoggerAdapter):
def __init__(self, logger, extra={}):
super().__init__(logger, extra)
def process(self, msg, kwargs):
if kwargs == {}:
return (msg, kwargs)
extra = kwargs.get("extra", {})
extra.update({"es": kwargs.pop("es", True)})
kwargs["extra"] = extra
return (msg, kwargs)
config = getConfig()
esHost = config['ELASTIC']['host']
esPort = config['ELASTIC']['port']
eh = ESHandler(host=esHost, port=esPort)
eh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)8s | %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
logger.addHandler(eh)
logger = ElasticFieldParameterAdapter(logger)
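# Rough usage sketch (the field name in the `es` dict is an arbitrary example,
# not part of this module): the adapter moves the dict onto the log record, and
# ESHandler.emit() merges it into the Elasticsearch document.
#
#     logger.info('Added torrent: {}'.format(name), es={'torrentName': name})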
|
the-stack_0_11457 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition
"""MXNet symbol frontend."""
from __future__ import absolute_import as _abs
import json
import tvm
from .. import ir_pass
from .. import expr as _expr
from .. import op as _op
from ... import nd as _nd
from .common import StrAttrsDict
from .nnvm_common import _rename, _binop_scalar, _rbinop_scalar, _reduce
from .nnvm_common import _arg_reduce, _init_op, _softmax_op, _cast
from .nnvm_common import _clip, _transpose, _upsampling
from .nnvm_common import _elemwise_sum, _reshape
from .nnvm_common import _warn_not_used
__all__ = ['from_mxnet']
_activation_map = {
"sigmoid": _op.sigmoid,
"tanh" : _op.tanh,
"relu" : _op.nn.relu
}
def _mx_fully_connected(inputs, attrs):
import mxnet as mx
units = attrs.get_int("num_hidden")
use_bias = not attrs.get_bool("no_bias", False)
try:
_ = mx.sym.FullyConnected(mx.sym.var("x"), num_hidden=1, flatten=True)
has_flatten = True
except mx.base.MXNetError:
# no flatten attribute in old mxnet
has_flatten = False
use_flatten = attrs.get_bool("flatten", True)
if has_flatten and use_flatten:
inputs[0] = _op.nn.batch_flatten(inputs[0])
res = _op.nn.dense(inputs[0], inputs[1], units=units)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=-1)
return res
def _get_channel_axis(layout, op_name):
if layout == "NCHW":
return 1
if layout == "NHWC":
return 3
raise tvm.error.OpAttributeInvalid(
'Value {} in attribute "layout" of operator {} is not valid.'.format(layout, op_name))
def _mx_activations(inputs, attrs):
act_type = attrs.get_str("act_type")
assert len(inputs) == 1
if act_type == "softrelu":
def _stable_softrelu(x):
# log(1 + exp(-abs(x))) + relu(x)
one = _expr.const(1, dtype="float32")
exp_neg_abs_x = _op.exp(_op.negative(_op.abs(x)))
return _op.add(_op.log(_op.add(one, exp_neg_abs_x)),
_op.nn.relu(x))
return _stable_softrelu(inputs[0])
if act_type not in _activation_map:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported for frontend MXNet.'.format(act_type))
return _activation_map[act_type](inputs[0])
def _mx_compare(new_op, wrapper):
def impl(inputs, attrs):
dtype = ir_pass.infer_type(inputs[0]).checked_type.dtype
return wrapper(new_op)(inputs, attrs).astype(dtype)
return impl
def _mx_conv2d(inputs, attrs):
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) != 2:
raise tvm.error.OpAttributeInvalid(
'Non-2D kernels are not supported for operator Conv2D.')
data_layout = attrs.get_str("layout", "NCHW")
channel_axis = _get_channel_axis(data_layout, "conv2d")
if "kernel_layout" in attrs.attrs:
kernel_layout = attrs.get_str("kernel_layout")
else:
kernel_layout = "HWIO" if data_layout == "NHWC" else "OIHW"
new_attrs = {}
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["kernel_size"] = kernel_size
new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0))
new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1, 1))
new_attrs["groups"] = attrs.get_int("num_group", 1)
new_attrs["data_layout"] = data_layout
new_attrs["kernel_layout"] = kernel_layout
use_bias = not attrs.get_bool("no_bias", False)
res = _op.nn.conv2d(inputs[0], inputs[1], **new_attrs)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
return res
def _mx_conv2d_transpose(inputs, attrs):
if "target_shape" in attrs.attrs:
raise tvm.error.OpAttributeUnimplemented(
'Attribute "target_shape" is not supported for operator Conv2D-transpose.')
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) != 2:
raise tvm.error.OpAttributeInvalid(
'Non-2D kernels are not supported for operator Conv2D-transpose.')
data_layout = attrs.get_str("layout", "NCHW")
channel_axis = _get_channel_axis(data_layout, "conv2d_transpose")
if "kernel_layout" in attrs.attrs:
kernel_layout = attrs.get_str("kernel_layout")
else:
kernel_layout = "HWIO" if data_layout == "NHWC" else "OIHW"
new_attrs = {}
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["kernel_size"] = kernel_size
new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1))
new_attrs["output_padding"] = attrs.get_int_tuple("adj", (0, 0))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0))
new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1, 1))
new_attrs["groups"] = attrs.get_int("num_group", 1)
new_attrs["data_layout"] = data_layout
new_attrs["kernel_layout"] = kernel_layout
use_bias = not attrs.get_bool("no_bias", False)
res = _op.nn.conv2d_transpose(inputs[0], inputs[1], **new_attrs)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
return res
def _mx_pooling(inputs, attrs):
global_pool = attrs.get_bool("global_pool", False)
pool_type = attrs.get_str("pool_type")
def _pool2d(new_op, is_avg):
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) != 2:
raise tvm.error.OpAttributeInvalid(
'Only 2D kernels are supported for operator Pool2D.')
new_attrs = {}
new_attrs["pool_size"] = kernel_size
new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0))
new_attrs["ceil_mode"] = (attrs.get_str("pooling_convention", "valid") == "full")
if is_avg:
new_attrs["count_include_pad"] = attrs.get_bool("count_include_pad", True)
return new_op(inputs[0], **new_attrs)
if pool_type == "max":
if global_pool:
return _op.nn.global_max_pool2d(inputs[0])
return _pool2d(_op.nn.max_pool2d, False)
if pool_type == "avg":
if global_pool:
return _op.nn.global_avg_pool2d(inputs[0])
return _pool2d(_op.nn.avg_pool2d, True)
raise tvm.error.OpNotImplemented(
'Operator {} Pooling is not supported for frontend MXNet.'.format(pool_type.capitalize()))
def _mx_adaptive_avg_pooling(inputs, attrs):
output_size = attrs.get_int_tuple("output_size", [])
return _op.contrib.adaptive_avg_pool2d(inputs[0], output_size)
def _mx_dropout(inputs, attrs):
rate = attrs.get_float("p", 0.5)
return _op.nn.dropout(inputs[0], rate=rate)
def _mx_BlockGrad(inputs, attrs): #pylint: disable=unused-argument
return inputs
def _mx_batch_norm(inputs, attrs):
if attrs.get_bool("output_mean_var", False):
raise tvm.error.OpAttributeUnimplemented(
'Attribute "output_mean_var" is not supported for operator Batch Norm.')
if attrs.get_bool("use_global_stats", False):
_warn_not_used("use_global_stats", "batch_norm")
new_attrs = {}
new_attrs["axis"] = attrs.get_int("axis", 1)
new_attrs["epsilon"] = attrs.get_float("eps", 0.001)
new_attrs["center"] = True
new_attrs["scale"] = not attrs.get_bool("fix_gamma", False)
return _op.nn.batch_norm(*inputs, **new_attrs)
def _mx_slice(inputs, attrs):
new_attrs = {}
begin = attrs.get_int_tuple('begin', None)
end = attrs.get_int_tuple('end', None)
stride = attrs.get_int_tuple('step', None)
if begin is None:
raise tvm.error.OpAttributeRequired(
'Attribute "begin" not found in operator Slice.')
if end is None:
raise tvm.error.OpAttributeRequired(
'Attribute "end" not found in operator Slice.')
if None in begin:
raise tvm.error.OpAttributeInvalid(
'Value None in attribute "begin" of operator Slice is not valid.')
if None in end:
raise tvm.error.OpAttributeInvalid(
'Value None in attribute "end" of operator Slice is not valid.')
new_attrs = {'begin': begin, 'end': end}
if stride is not None:
new_attrs['strides'] = stride
return _op.strided_slice(inputs[0], **new_attrs)
def _mx_slice_like(inputs, attrs):
assert len(inputs) == 2
new_attrs = {}
new_attrs["axes"] = attrs.get_int_tuple("axes", None)
return _op.slice_like(*inputs, **new_attrs)
def _mx_slice_axis(inputs, attrs):
assert len(inputs) == 1
shape = ir_pass.infer_type(inputs[0]).checked_type.shape
axis = attrs.get_int("axis")
ax_beg = attrs.get_int("begin")
ax_end = attrs.get_str("end")
if axis < 0:
axis += len(shape)
assert 0 <= axis < len(shape)
if ax_end == "None":
ax_end = int(shape[axis])
else:
ax_end = int(ax_end)
if ax_beg < 0:
ax_beg += int(shape[axis])
if ax_end < 0:
ax_end += int(shape[axis])
assert 0 <= ax_beg < int(shape[axis])
assert ax_beg < ax_end <= int(shape[axis])
begin = []
end = []
for i, dim in enumerate(shape):
if i != axis:
begin.append(0)
end.append(dim)
else:
begin.append(ax_beg)
end.append(ax_end)
return _op.strided_slice(inputs[0], begin, end)
def _mx_split(inputs, attrs):
axis = attrs.get_int("axis", 1)
new_attrs = {}
new_attrs["indices_or_sections"] = attrs.get_int("num_outputs")
new_attrs["axis"] = axis
res = _op.split(inputs[0], **new_attrs)
if attrs.get_bool("squeeze_axis", False):
return tuple([_op.squeeze(x, axis=[axis]) for x in res])
return res
def _mx_softmax_activation(inputs, attrs):
mode = attrs.get_str("mode", "instance")
axis = 0 if mode == "instance" else 1
return _op.nn.softmax(inputs[0], axis=axis)
def _mx_softmax_output(inputs, attrs):
if attrs.get_bool("multi_output", False):
return _op.nn.softmax(inputs[0], axis=1)
return _op.nn.softmax(inputs[0])
def _mx_concat(inputs, attrs):
axis = attrs.get_int("dim", 1)
return _op.concatenate(tuple(inputs), axis=axis)
def _mx_stack(inputs, attrs):
axis = attrs.get_int("axis", 0)
return _op.stack(tuple(inputs), axis=axis)
def _mx_expand_dims(inputs, attrs):
axis = attrs.get_int("axis")
return _op.expand_dims(inputs[0], axis=axis)
def _mx_leaky_relu(inputs, attrs):
act_type = attrs.get_str("act_type")
if act_type == "leaky":
return _op.nn.leaky_relu(inputs[0], alpha=attrs.get_float("slope", 0.25))
if act_type == "prelu":
assert len(inputs) == 2
return _op.nn.prelu(*inputs)
if act_type == "elu":
# -slope * relu(1-exp(x)) + relu(x)
slope = attrs.get_float("slope", 0.25)
one = _expr.const(1, dtype="float32")
x = inputs[0]
mslope = _op.nn.relu(_op.subtract(one, _op.exp(x)))
mslope = _op.multiply(mslope, _expr.const(-slope, dtype="float32"))
return _op.add(mslope, _op.nn.relu(x))
if act_type == "rrelu":
# NOTE this is only converted for inference.
lower_bound = attrs.get_float("lower_bound")
upper_bound = attrs.get_float("upper_bound")
alpha = (lower_bound + upper_bound) / 2.0
return _op.nn.leaky_relu(inputs[0], alpha=alpha)
raise tvm.error.OpNotImplemented(
'Operator {} is not supported for frontend MXNet.'.format(act_type))
def _mx_make_power(power):
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
scalar = _expr.const(power, dtype=None)
# Note: int maps to "int32", float maps to "float32"
return _op.power(inputs[0], scalar)
return _impl
def _mx_make_exponent(base):
# exp(b, x) = e^b * e^x
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
scalar = _op.exp(_expr.const(base, dtype="float32"))
return _op.multiply(inputs[0], scalar)
return _impl
def _mx_make_logarithm(base):
# log(b, x) = log(x) / log(b)
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
scalar = _op.log(_expr.const(base, dtype="float32"))
return _op.divide(inputs[0], scalar)
return _impl
def _mx_expm1():
# exp_minus_1 x = exp(x) - 1
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
one = _expr.const(1, dtype="float32")
        return _op.subtract(_op.exp(inputs[0]), one)
return _impl
def _mx_log1p():
# 1_plus_log x = log(x + 1)
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
one = _expr.const(1, dtype="float32")
return _op.log(_op.add(inputs[0], one))
return _impl
def _mx_lrn(inputs, attrs):
new_attrs = {}
new_attrs["alpha"] = attrs.get_float("alpha", 0.0001)
new_attrs["beta"] = attrs.get_float("beta", 0.75)
new_attrs["bias"] = attrs.get_float("knorm", 2)
# NCHW format and normalization along channel axis
new_attrs["axis"] = 1
new_attrs["size"] = attrs.get_int("nsize")
assert len(inputs) == 1
return _op.nn.lrn(inputs[0], **new_attrs)
def _mx_multibox_prior(inputs, attrs):
new_attrs = {}
new_attrs["sizes"] = attrs.get_float_tuple("sizes", (1.0, ))
new_attrs["steps"] = attrs.get_float_tuple("steps", (-1.0, -1.0))
new_attrs["offsets"] = attrs.get_float_tuple("offsets", (0.5, 0.5))
new_attrs["ratios"] = attrs.get_float_tuple("ratios", (1.0, ))
new_attrs["clip"] = attrs.get_bool("clip", False)
return _op.vision.multibox_prior(inputs[0], **new_attrs)
def _mx_multibox_detection(inputs, attrs):
new_attrs0 = {}
new_attrs0["clip"] = attrs.get_bool("clip", True)
new_attrs0["threshold"] = attrs.get_float("threshold", 0.01)
new_attrs0["variances"] = attrs.get_float_tuple("variances", (0.1, 0.1,
0.2, 0.2))
new_attrs1 = {}
new_attrs1["return_indices"] = False
new_attrs1["iou_threshold"] = attrs.get_float("nms_threshold", 0.5)
new_attrs1["force_suppress"] = attrs.get_bool("force_suppress", False)
new_attrs1["top_k"] = attrs.get_int("nms_topk", -1)
ret = _op.vision.multibox_transform_loc(inputs[0], inputs[1],
inputs[2], **new_attrs0)
return _op.vision.non_max_suppression(ret[0], ret[1], **new_attrs1)
def _mx_batch_dot(inputs, attrs):
assert len(inputs) == 2
a, b = inputs
transpose_a = attrs.get_bool("transpose_a", False)
transpose_b = attrs.get_bool("transpose_b", False)
if transpose_a is True:
msg = 'Value {} in attribute "transpose_a" of operator batch_dot ' \
'is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(transpose_a))
if transpose_b is False:
b = _op.transpose(b, axes=[0, 2, 1])
return _op.nn.batch_matmul(a, b)
def _mx_arange(inputs, attrs):
assert len(inputs) == 0
if attrs.get_int("repeat", 1) != 1:
raise tvm.error.OpAttributeUnimplemented(
'Attribute "repeat" is not supported in operator arange.')
new_attrs = {}
new_attrs["start"] = attrs.get_float("start", 0)
new_attrs["stop"] = attrs.get_float("stop")
new_attrs["step"] = attrs.get_float("step", 1)
new_attrs["dtype"] = attrs.get_str("dtype", "float32")
return _op.arange(**new_attrs)
def _mx_repeat(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["repeats"] = attrs.get_int("repeats")
new_attrs["axis"] = attrs.get_int("axis", 0)
return _op.repeat(inputs[0], **new_attrs)
def _mx_tile(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["reps"] = attrs.get_int_tuple("reps")
return _op.tile(inputs[0], **new_attrs)
def _mx_take(inputs, attrs):
assert len(inputs) == 2
mode = attrs.get_str("mode", "clip")
if mode == "raise":
raise tvm.error.OpAttributeUnimplemented("take with raise mode is not supported yet")
axis = attrs.get_int("axis", 0)
return _op.take(inputs[0], inputs[1].astype("int32"), axis, mode)
def _mx_reverse(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["axis"] = attrs.get_int("axis")
return _op.reverse(inputs[0], **new_attrs)
def _mx_roi_align(inputs, attrs):
new_attrs = {}
new_attrs["pooled_size"] = attrs.get_int_tuple("pooled_size")
new_attrs["spatial_scale"] = attrs.get_float("spatial_scale")
new_attrs["sample_ratio"] = attrs.get_int("sample_ratio", -1)
new_attrs["layout"] = "NCHW"
return _op.vision.roi_align(inputs[0], inputs[1], **new_attrs)
def _mx_resize(inputs, attrs):
scale_height = attrs.get_float("scale_height", None)
scale_width = attrs.get_float("scale_width", None)
height = attrs.get_int("height", 1)
width = attrs.get_int("width", 1)
shape = ir_pass.infer_type(inputs[0]).checked_type.shape
if scale_height is not None:
height = (scale_height * shape[2]).astype("int32")
if scale_width is not None:
width = (scale_width * shape[3]).astype("int32")
size = (height, width)
return _op.image.resize(inputs[0], size, align_corners=True)
def _mx_roi_pooling(inputs, attrs):
new_attrs = {}
new_attrs["pooled_size"] = attrs.get_int_tuple("pooled_size")
new_attrs["spatial_scale"] = attrs.get_float("spatial_scale")
new_attrs["layout"] = "NCHW"
return _op.vision.roi_pool(inputs[0], inputs[1], **new_attrs)
def _mx_proposal(inputs, attrs):
new_attrs = {}
new_attrs["scales"] = attrs.get_float_tuple("scales", (4.0, 8.0, 16.0, 32.0))
new_attrs["ratios"] = attrs.get_float_tuple("ratios", (0.5, 1.0, 2.0))
new_attrs["feature_stride"] = attrs.get_int("feature_stride", 16)
new_attrs["threshold"] = attrs.get_float("threshold", 0.7)
new_attrs["rpn_pre_nms_top_n"] = attrs.get_int("rpn_pre_nms_top_n", 6000)
new_attrs["rpn_post_nms_top_n"] = attrs.get_int("rpn_post_nms_top_n", 300)
new_attrs["rpn_min_size"] = attrs.get_int("rpn_min_size", 16)
new_attrs["iou_loss"] = attrs.get_bool("iou_loss", False)
assert not attrs.get_bool("output_score", False), "proposal doesn't support output score"
return _op.vision.proposal(inputs[0], inputs[1], inputs[2], **new_attrs)
def _mx_box_nms(inputs, attrs):
force_suppress = attrs.get_bool("force_suppress", False)
iou_thresh = attrs.get_float('overlap_thresh', 0.5)
top_k = attrs.get_int('topk', -1)
valid_thresh = attrs.get_float('valid_thresh', 0)
coord_start = attrs.get_int('coord_start', 2)
score_index = attrs.get_int('score_index', 1)
id_index = attrs.get_int('id_index', -1)
in_format = attrs.get_str('in_format', 'corner')
out_format = attrs.get_str('out_format', 'corner')
if in_format != 'corner':
raise tvm.error.OpAttributeInvalid(
'Value of attribute "in_format" must equal "corner" for operator box_nms.')
if out_format != 'corner':
raise tvm.error.OpAttributeInvalid(
'Value of attribute "out_format" must equal "corner" for operator box_nms.')
ret = _op.vision.get_valid_counts(inputs[0], score_threshold=valid_thresh)
nms_out = _op.vision.non_max_suppression(ret[1],
ret[0],
iou_threshold=iou_thresh,
force_suppress=force_suppress,
top_k=top_k,
coord_start=coord_start,
score_index=score_index,
id_index=id_index,
return_indices=False,
invalid_to_bottom=True)
return nms_out
def _mx_l2_normalize(inputs, attrs):
new_attrs = {}
mode = attrs.get_str('mode', 'instance')
if mode != 'channel':
raise tvm.error.OpAttributeInvalid(
'Value of attribute "mode" must equal "channel" for operator l2_normalize.')
new_attrs['eps'] = attrs.get_float('eps', 1e-10)
new_attrs['axis'] = [1]
return _op.nn.l2_normalize(inputs[0], **new_attrs)
def _mx_shape_array(inputs, attrs):
assert len(inputs) == 1
if attrs.get_int("lhs_begin", None) is not None:
raise tvm.error.OpAttributeUnimplemented("shape_array doesn't support lhs_begin")
if attrs.get_int("lhs_end", None) is not None:
raise tvm.error.OpAttributeUnimplemented("shape_array doesn't support lhs_end")
if attrs.get_int("rhs_begin", None) is not None:
raise tvm.error.OpAttributeUnimplemented("shape_array doesn't support rhs_begin")
if attrs.get_int("rhs_end", None) is not None:
raise tvm.error.OpAttributeUnimplemented("shape_array doesn't support rhs_end")
return _op.shape_of(inputs[0], dtype='int64')
def _mx_full(inputs, attrs):
assert len(inputs) == 0
val = attrs.get_float("value")
shape = attrs.get_int_tuple("shape")
dtype = attrs.get_str("dtype", "float32")
return _op.full(_expr.const(val, dtype), shape, dtype)
def _mx_squeeze(inputs, attrs):
assert len(inputs) == 1
axis = attrs.get_int_tuple("axis", None)
return _op.squeeze(inputs[0], axis)
def _mx_broadcast_axis(inputs, attrs):
assert len(inputs) == 1
axis = attrs.get_int_tuple("axis", [])
size = attrs.get_int_tuple("size", [])
assert len(axis) == len(size)
if len(axis) == 0:
return inputs[0]
src_shape = ir_pass.infer_type(inputs[0])._checked_type_.shape
tgt_shape = []
for i, dim in enumerate(src_shape):
if i not in axis:
tgt_shape.append(dim)
else:
assert int(dim) == 1
idx = axis.index(i)
tgt_shape.append(size[idx])
return _op.broadcast_to(inputs[0], tgt_shape)
def _mx_embedding(inputs, _):
assert len(inputs) == 2
indices, weight = inputs
return _op.take(weight, indices.astype('int32'), axis=0)
def _mx_smooth_l1(inputs, attrs):
scalar = attrs.get_float("scalar", 1.0)
scalar_sq = scalar * scalar
mask = _op.less(inputs[0], _expr.const(1.0 / scalar_sq, dtype='float32'))
return _op.where(mask,
_expr.const(scalar_sq / 2.0, dtype='float32') * inputs[0] * inputs[0],
_op.abs(inputs[0]) - _expr.const(0.5 / scalar_sq))
def _mx_deformable_convolution(inputs, attrs):
new_attrs = {}
assert attrs.get_bool("no_bias")
new_attrs["kernel_size"] = attrs.get_int_tuple("kernel")
new_attrs["strides"] = attrs.get_int_tuple("stride")
new_attrs["padding"] = attrs.get_int_tuple("pad")
new_attrs["dilation"] = attrs.get_int_tuple("dilate")
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["deformable_groups"] = attrs.get_int("num_deformable_group", 1)
new_attrs["groups"] = attrs.get_int("num_group", 1)
assert attrs.get_str("layout", "NCHW") == "NCHW", "Deformable conv2d only supports NCHW layout"
use_bias = not attrs.get_bool("no_bias", False)
res = _op.nn.deformable_conv2d(inputs[0], inputs[1], inputs[2], **new_attrs)
if use_bias:
assert len(inputs) == 4
res = _op.nn.bias_add(res, inputs[3])
return res
def _mx_argsort(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["axis"] = attrs.get_int("axis", -1)
new_attrs["is_ascend"] = attrs.get_bool("is_ascend", True)
new_attrs["dtype"] = attrs.get_str("dtype", "float32")
return _op.argsort(inputs[0], **new_attrs)
def _mx_rnn_param_concat(inputs, _):
# We don't need to concatenate RNN params because we will unravel the RNN op
return [inputs]
def _mx_rnn_layer(inputs, attrs):
def _rnn_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias, activation):
i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1)
h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), h2h_bias, axis=-1)
out = _activation_map[activation](i2h + h2h)
return out, [out]
def _gru_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias):
dtype = ir_pass.infer_type(data).checked_type.dtype
i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1)
h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), h2h_bias, axis=-1)
i2h_r, i2h_z, i2h = _op.split(i2h, indices_or_sections=3, axis=1)
h2h_r, h2h_z, h2h = _op.split(h2h, indices_or_sections=3, axis=1)
reset_gate = _activation_map["sigmoid"](i2h_r + h2h_r)
update_gate = _activation_map["sigmoid"](i2h_z + h2h_z)
next_h_tmp = _activation_map["tanh"](reset_gate * h2h + i2h)
next_h = (_expr.const(1, dtype) - update_gate) * next_h_tmp + update_gate * states[0]
return next_h, [next_h]
def _lstm_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias):
i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1)
h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), h2h_bias, axis=-1)
gates = i2h + h2h
slice_gates = _op.split(gates, indices_or_sections=4, axis=1)
in_gate = _activation_map["sigmoid"](slice_gates[0])
forget_gate = _activation_map["sigmoid"](slice_gates[1])
in_transform = _activation_map["tanh"](slice_gates[2])
out_gate = _activation_map["sigmoid"](slice_gates[3])
next_c = forget_gate * states[1] + in_gate * in_transform
next_h = out_gate * _activation_map["tanh"](next_c)
return next_h, [next_h, next_c]
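    # For reference, _lstm_cell above implements the standard LSTM update, with
    # the fused gates split in the order i, f, g, o:
    #   i, f, g, o = split(W_i x + b_i + W_h h + b_h)
    #   c' = sigmoid(f) * c + sigmoid(i) * tanh(g)
    #   h' = sigmoid(o) * tanh(c')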
num_layers = attrs.get_int("num_layers", 1)
mode = attrs.get_str("mode")
if mode.startswith("rnn"):
mode, activation = mode.split('_')
assert mode in ["rnn", "gru", "lstm"]
bidirectional = attrs.get_bool("bidirectional", False)
if bidirectional:
raise tvm.error.OpAttributeUnimplemented(
"Bidirectional RNN op is not supported yet")
layout = attrs.get_str("layout", "TNC")
if layout != "TNC":
raise tvm.error.OpAttributeUnimplemented(
"RNN with layout other than TNC is not supported yet")
num_states = 2 if mode == 'lstm' else 1
assert len(inputs) == num_states + 2
seq_data = inputs[0]
concat_weight = inputs[1]
concat_states = inputs[2:]
seq_len = int(ir_pass.infer_type(seq_data).checked_type.shape[0])
assert len(concat_weight) == num_layers * 4
weights = []
bias = []
states = []
for i in range(num_layers):
w = []
b = []
s = []
for j in range(2):
w.append(concat_weight[i*2 + j].args[0])
b.append(concat_weight[num_layers*2 + i*2 + j].args[0])
for state in concat_states:
s.append(_op.take(state, _expr.const(i, "int32"), axis=0))
weights.append(w)
bias.append(b)
states.append(s)
seq_output = []
for t in range(seq_len):
data = _op.take(seq_data, _expr.const(t, "int32"), axis=0)
for l in range(num_layers):
if mode == "rnn":
out, new_states = _rnn_cell(data, states[l], *weights[l], *bias[l], activation)
elif mode == "gru":
out, new_states = _gru_cell(data, states[l], *weights[l], *bias[l])
else: # mode == "lstm"
out, new_states = _lstm_cell(data, states[l], *weights[l], *bias[l])
states[l] = new_states
data = out
seq_output.append(out)
outputs = [_op.stack(seq_output, axis=0)]
for i in range(num_states):
outputs.append(_op.stack([s[i] for s in states], axis=0))
return outputs
# Note: due to attribute conversion constraint
# ops in the identity set must be attribute free
_identity_list = [
"log",
"exp",
"sqrt",
"floor",
"ceil",
"sigmoid",
"tanh",
"negative",
"reshape_like",
"zeros_like",
"ones_like",
"where",
"gather_nd",
]
_convert_map = {
"_copy" : _rename(_op.copy),
"relu" : _rename(_op.nn.relu),
"broadcast_add" : _rename(_op.add),
"broadcast_sub" : _rename(_op.subtract),
"broadcast_mul" : _rename(_op.multiply),
"broadcast_div" : _rename(_op.divide),
"broadcast_mod" : _rename(_op.mod),
"broadcast_maximum" : _rename(_op.maximum),
"broadcast_minimum" : _rename(_op.minimum),
"broadcast_equal" : _mx_compare(_op.equal, _rename),
"broadcast_not_equal" : _mx_compare(_op.not_equal, _rename),
"broadcast_greater" : _mx_compare(_op.greater, _rename),
"broadcast_greater_equal": _mx_compare(_op.greater_equal, _rename),
"broadcast_lesser" : _mx_compare(_op.less, _rename),
"broadcast_lesser_equal" : _mx_compare(_op.less_equal, _rename),
"elemwise_add" : _rename(_op.add),
"elemwise_sub" : _rename(_op.subtract),
"elemwise_mul" : _rename(_op.multiply),
"elemwise_div" : _rename(_op.divide),
"_maximum" : _rename(_op.maximum),
"_minimum" : _rename(_op.minimum),
"flatten" : _rename(_op.nn.batch_flatten),
"Flatten" : _rename(_op.nn.batch_flatten),
# scalar power
"square" : _mx_make_power(2),
"rsqrt" : _mx_make_power(-1/2),
"cbrt" : _mx_make_power(1/3),
"rcbrt" : _mx_make_power(-1/3),
"__pow_scalar__" : _binop_scalar(_op.power),
"_power_scalar" : _binop_scalar(_op.power),
"__rsub_scalar__" : _rbinop_scalar(_op.subtract),
"_rminus_scalar" : _rbinop_scalar(_op.subtract),
"__rdiv_scalar__" : _rbinop_scalar(_op.divide),
"_rdiv_scalar" : _rbinop_scalar(_op.divide),
"__rpow_scalar__" : _rbinop_scalar(_op.power),
# scalar op
"__add_scalar__" : _binop_scalar(_op.add),
"_plus_scalar" : _binop_scalar(_op.add),
"__sub_scalar__" : _binop_scalar(_op.subtract),
"_minus_scalar" : _binop_scalar(_op.subtract),
"__mul_scalar__" : _binop_scalar(_op.multiply),
"_mul_scalar" : _binop_scalar(_op.multiply),
"__div_scalar__" : _binop_scalar(_op.divide),
"_div_scalar" : _binop_scalar(_op.divide),
"log2" : _mx_make_logarithm(2),
"log10" : _mx_make_logarithm(10),
"log1p" : _mx_log1p,
"expm1" : _mx_expm1,
"_equal_scalar" : _mx_compare(_op.equal, _binop_scalar),
"_not_equal_scalar" : _mx_compare(_op.not_equal, _binop_scalar),
"_greater_scalar" : _mx_compare(_op.greater, _binop_scalar),
"_greater_equal_scalar" : _mx_compare(_op.greater_equal, _binop_scalar),
"_lesser_scalar" : _mx_compare(_op.less, _binop_scalar),
"_lesser_equal_scalar" : _mx_compare(_op.less_equal, _binop_scalar),
"_maximum_scalar" : _binop_scalar(_op.maximum),
"_minimum_scalar" : _binop_scalar(_op.minimum),
# reduction ops
"mean" : _reduce(_op.mean),
"max" : _reduce(_op.max),
"min" : _reduce(_op.min),
"sum" : _reduce(_op.sum),
"max_axis" : _reduce(_op.max),
"min_axis" : _reduce(_op.min),
"sum_axis" : _reduce(_op.sum),
"argmax" : _arg_reduce(_op.argmax),
"argmin" : _arg_reduce(_op.argmin),
# init ops
"_ones" : _init_op(_op.ones),
"_zeros" : _init_op(_op.zeros),
# softmax
"softmax" : _softmax_op(_op.nn.softmax),
"log_softmax" : _softmax_op(_op.nn.log_softmax),
"Softmax" : _softmax_op(_op.nn.softmax),
# per op specialization
"Reshape" : _reshape,
"reshape" : _reshape,
"Cast" : _cast,
"clip" : _clip,
"transpose" : _transpose,
"UpSampling" : _upsampling,
"add_n" : _elemwise_sum,
# MXNet specific implementations
"FullyConnected": _mx_fully_connected,
"Activation" : _mx_activations,
"Convolution" : _mx_conv2d,
"Convolution_v1": _mx_conv2d,
"Deconvolution" : _mx_conv2d_transpose,
"Pooling" : _mx_pooling,
"Pooling_v1" : _mx_pooling,
"Dropout" : _mx_dropout,
"BatchNorm" : _mx_batch_norm,
"BatchNorm_v1" : _mx_batch_norm,
"LRN" : _mx_lrn,
"L2Normalization" : _mx_l2_normalize,
"slice" : _mx_slice,
"slice_like" : _mx_slice_like,
"slice_axis" : _mx_slice_axis,
"SliceChannel" : _mx_split,
"split" : _mx_split,
"expand_dims" : _mx_expand_dims,
"Concat" : _mx_concat,
"concat" : _mx_concat,
"stack" : _mx_stack,
"batch_dot" : _mx_batch_dot,
"LeakyReLU" : _mx_leaky_relu,
"_arange" : _mx_arange,
"_full" : _mx_full,
"repeat" : _mx_repeat,
"tile" : _mx_tile,
"take" : _mx_take,
"reverse" : _mx_reverse,
"squeeze" : _mx_squeeze,
"broadcast_axis": _mx_broadcast_axis,
"BlockGrad" : _mx_BlockGrad,
"shape_array" : _mx_shape_array,
"Embedding" : _mx_embedding,
"argsort" : _mx_argsort,
"SoftmaxOutput" : _mx_softmax_output,
"SoftmaxActivation" : _mx_softmax_activation,
"smooth_l1" : _mx_smooth_l1,
# vision
"_contrib_BilinearResize2D" : _mx_resize,
"_contrib_MultiBoxPrior" : _mx_multibox_prior,
"_contrib_MultiBoxDetection" : _mx_multibox_detection,
"_contrib_ROIAlign" : _mx_roi_align,
"ROIPooling" : _mx_roi_pooling,
"_contrib_Proposal" : _mx_proposal,
"_contrib_MultiProposal" : _mx_proposal,
"_contrib_box_nms" : _mx_box_nms,
"_contrib_DeformableConvolution" : _mx_deformable_convolution,
"_contrib_AdaptiveAvgPooling2D" : _mx_adaptive_avg_pooling,
# NLP
"RNN" : _mx_rnn_layer,
"_rnn_param_concat" : _mx_rnn_param_concat,
# List of missing operators that are present in NNVMv1
# TODO(tvm-tvm): support all operators.
#
# "broadcast_to",
# "Crop" : _crop_like,
}
# set identity list
_convert_map.update({k : _rename(k) for k in _identity_list})
def _from_mxnet_impl(symbol, shape_dict, dtype_info):
"""Convert mxnet symbol to compatible relay Function.
Reconstruct a relay Function by traversing the mxnet symbol.
Parameters
----------
symbol : mxnet.sym.Symbol
Incompatible symbol from mxnet.
The op_name and attrs inside are not always compatible.
shape_dict : dict
Known parameter shapes
dtype_info : dict or str.
Known parameter dtypes
    Returns
-------
func : tvm.relay.Function
Converted relay Function
"""
assert symbol is not None
jgraph = json.loads(symbol.tojson())
jnodes = jgraph["nodes"]
node_map = {}
for nid, node in enumerate(jnodes):
children = [node_map[e[0]][e[1]] for e in node["inputs"]]
attrs = StrAttrsDict(node.get("attrs", {}))
node_name = node["name"]
op_name = node["op"]
if op_name == "null":
shape = shape_dict[node_name] if node_name in shape_dict else None
if isinstance(dtype_info, dict):
dtype = dtype_info[node_name] if node_name in dtype_info else "float32"
else:
dtype = dtype_info
node_map[nid] = [_expr.var(node_name, shape=shape, dtype=dtype)]
elif op_name in _convert_map:
res = _convert_map[op_name](children, attrs)
if isinstance(res, (_expr.TupleWrapper, tuple, list)):
pass
elif isinstance(res, _expr.Expr):
res = [res]
else:
raise RuntimeError("unexpected type %s" % type(res))
node_map[nid] = res
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend MXNet.'.format(op_name))
outputs = [node_map[e[0]][e[1]] for e in jgraph["heads"]]
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
func = _expr.Function(ir_pass.free_vars(outputs), outputs)
return func
def _update_shape_dtype(shape, dtype, params):
"""Update shape dtype given params information"""
shape = {} if shape is None else shape
if not params:
return shape, dtype
shape = shape.copy()
shape.update({k : v.shape for k, v in params.items()})
if isinstance(dtype, str):
for k, v in params.items():
if v.dtype != dtype:
raise ValueError(
"%s: dtype not expected %s vs %s" % (k, dtype, v.dtype))
else:
dtype = dtype.copy()
dtype.update({k : str(v.dtype) for k, v in params.items()})
return shape, dtype
def from_mxnet(symbol,
shape=None,
dtype="float32",
arg_params=None,
aux_params=None):
"""Convert from MXNet"s model into compatible relay Function.
Parameters
----------
symbol : mxnet.Symbol or mxnet.gluon.HybridBlock
MXNet symbol.
shape : dict of str to tuple, optional
The input shape to the graph
dtype : str or dict of str to str
The input types to the graph
arg_params : dict of str to mx.NDArray
The argument parameters in mxnet
aux_params : dict of str to mx.NDArray
The auxiliary parameters in mxnet
Returns
-------
sym : tvm.relay.Function
Compatible relay Function
params : dict of str to tvm.NDArray
The parameter dict to be used by nnvm
"""
try:
import mxnet as mx
except ImportError as e:
raise ImportError("{}. MXNet is required to parse symbols.".format(e))
if isinstance(symbol, mx.sym.Symbol):
params = {}
arg_params = arg_params if arg_params else {}
aux_params = aux_params if aux_params else {}
for k, v in arg_params.items():
params[k] = _nd.array(v.asnumpy())
for k, v in aux_params.items():
params[k] = _nd.array(v.asnumpy())
shape, dtype = _update_shape_dtype(shape, dtype, params)
sym = _from_mxnet_impl(symbol, shape, dtype)
elif isinstance(symbol, mx.gluon.HybridBlock):
if arg_params is not None or aux_params is not None:
raise ValueError("arg_params and aux_params ae not used when importing HybridBlock")
params = {}
for k, v in symbol.collect_params().items():
params[k] = _nd.array(v.data().asnumpy())
data = mx.sym.Variable("data")
sym = symbol(data)
if isinstance(sym, (list, tuple)):
sym = mx.sym.Group(sym)
shape, dtype = _update_shape_dtype(shape, dtype, params)
sym = _from_mxnet_impl(sym, shape, dtype)
elif isinstance(symbol, mx.gluon.Block):
raise NotImplementedError("Only Hybrid Blocks are supported now.")
else:
msg = "mxnet.Symbol or gluon.HybridBlock expected, got {}".format(type(symbol))
raise ValueError(msg)
return sym, params
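# A minimal usage sketch (hedged: the model below is only an example and
# requires mxnet's gluon model zoo to be installed):
#
#     import mxnet as mx
#     block = mx.gluon.model_zoo.vision.resnet18_v1(pretrained=True)
#     func, params = from_mxnet(block, shape={'data': (1, 3, 224, 224)})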
|
the-stack_0_11458 | # Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
logger = logging.getLogger(__name__)
import pdo.submitter.sawtooth.sawtooth_submitter as sw_sub
import pdo.submitter.ccf.ccf_submitter as ccf_sub
# -----------------------------------------------------------------
# Create a new Submitter
# -----------------------------------------------------------------
def create_submitter(ledger_config, *args, **kwargs) :
ledger_type = ledger_config.get('LedgerType', os.environ.get('PDO_LEDGER_TYPE'))
if ledger_type == 'sawtooth':
return sw_sub.SawtoothSubmitter(ledger_config, *args, **kwargs)
elif ledger_type == 'ccf':
return ccf_sub.CCFSubmitter(ledger_config, *args, **kwargs)
else:
logger.error("Invalid Ledger Type. Must be either 'sawtooth' or 'ccf'")
raise Exception("Invalid Ledger Type. Must be either 'sawtooth' or 'ccf'") |
the-stack_0_11459 | from PIL import Image, ImageFilter
import time
class MyGaussianBlur(ImageFilter.GaussianBlur):
name = "GaussianBlur"
    def __init__(self, size, radius=2, bounds=None):
        super().__init__()
        self.radius = radius
        self.bounds = bounds
        self.size = size
# print(size)
def filter(self, image):
print(1)
if self.bounds:
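            # Blur the four rectangular strips that surround self.bounds (top,
            # left, bottom, right) and paste them back, so that only the region
            # inside self.bounds stays sharp.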
bounds1 = (0, 0, self.size[0], self.bounds[1])
# print(bounds1)
clips = image.crop(bounds1).gaussian_blur(self.radius)
image.paste(clips, bounds1)
bounds2 = (0, self.bounds[1], self.bounds[0], self.bounds[3])
clips = image.crop(bounds2).gaussian_blur(self.radius)
image.paste(clips, bounds2)
bounds3 = (0, self.bounds[3], self.size[0], self.size[1])
clips = image.crop(bounds3).gaussian_blur(self.radius)
image.paste(clips, bounds3)
bounds4 = (self.bounds[2], self.bounds[1], self.size[0], self.bounds[3])
clips = image.crop(bounds4).gaussian_blur(self.radius)
image.paste(clips, bounds4)
return image
else:
return image.gaussian_blur(self.radius)
st = time.process_time()
image = Image.open('p.jpg')
bounds = (150, 130, 280, 250)
image = image.filter(MyGaussianBlur(image.size,radius=2, bounds=bounds))
# print(1)
# image = image.filter(MyGaussianBlur(radius=2, bounds=bounds2))
# image = image.filter(MyGaussianBlur(radius=2, bounds=bounds3))
# image = image.filter(MyGaussianBlur(radius=2, bounds=bounds4))
print(time.process_time() - st)
image.show()
|
the-stack_0_11460 | # enhancing matrices in Python
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
somapar = maior = somacoluna = 0
for l in range(0, 3):
for c in range(0, 3):
        matriz[l][c] = int(input(f'Enter a value for [{l}, {c}]: '))
print('-=' * 25)
for l in range(0, 3):
for c in range(0, 3):
print(f'[{matriz[l][c]:^5}]', end='')
        if matriz[l][c] % 2 == 0:
somapar += matriz[l][c]
print()
print('-=' * 25)
print(f'The sum of the even values is {somapar}')
for l in range(0, 3):
somacoluna += matriz[l][2]
print(f'The sum of the values in the third column is {somacoluna}')
for c in range(0, 3):
if c == 0:
maior = matriz[1][c]
elif matriz[1][c] > maior:
maior = matriz[1][c]
print(f'The largest value in the second row is {maior}')
|
the-stack_0_11461 | # -*- coding: utf-8 -*-
"""Train a CapsNet Network on the MNIST dataset.
See the corresponding paper for explanations of the network
@inproceedings{sabour2017dynamic,
title={Dynamic routing between capsules},
author={Sabour, Sara and Frosst, Nicholas and Hinton, Geoffrey E},
booktitle={Advances in Neural Information Processing Systems},
pages={3859--3869},
year={2017}
}
The network trains to an accuracy of >99% within a few epochs. Most of the epochs are needed to train the reconstruction network.
The implementation is based on the following code (thanks for this great and inspiring implementation!):
Author: Xifeng Guo, E-mail: `[email protected]`, Github: `https://github.com/XifengGuo/CapsNet-Keras`
with changes to incorporate the anysma package.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import argparse
import numpy as np
from PIL import Image
from keras import backend as K
from keras import layers, models, optimizers
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras import callbacks
from anysma import Capsule
from anysma.modules import InputModule
from anysma.modules.final import DynamicRouting
from anysma.modules.transformations import LinearTransformation
from anysma.utils.normalization_funcs import dynamic_routing_squash as squash
from anysma.callbacks import TensorBoard
from anysma.losses import margin_loss
from anysma.datasets import mnist
K.set_image_data_format('channels_last')
class Mask(layers.Layer):
"""
Mask all vectors except the best matching one.
"""
def __init__(self, **kwargs):
super(Mask, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
signals, prob = inputs
mask = K.one_hot(indices=K.argmax(prob, 1), num_classes=prob.get_shape().as_list()[1])
masked = K.batch_flatten(signals * K.expand_dims(mask, -1))
return masked
def compute_output_shape(self, input_shape):
return tuple([None, input_shape[0][1] * input_shape[0][2]])
def combine_images(generated_images, height=None, width=None):
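    # Tile a batch of single-channel images shaped (N, H, W, 1) into one
    # height x width grid and return it as a 2-D array.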
num = generated_images.shape[0]
if width is None and height is None:
width = int(math.sqrt(num))
height = int(math.ceil(float(num)/width))
elif width is not None and height is None: # height not given
height = int(math.ceil(float(num)/width))
elif height is not None and width is None: # width not given
width = int(math.ceil(float(num)/height))
shape = generated_images.shape[1:3]
image = np.zeros((height*shape[0], width*shape[1]),
dtype=generated_images.dtype)
for index, img in enumerate(generated_images):
i = int(index/width)
j = index % width
image[i*shape[0]:(i+1)*shape[0], j*shape[1]:(j+1)*shape[1]] = \
img[:, :, 0]
return image
def CapsNet(input_shape, n_class, routings):
""" Initialize the CapsNet"""
x = layers.Input(shape=input_shape)
# Layer 1: Just a conventional Conv2D layer
digitcaps = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)
# Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
primary_caps = Capsule(name='PrimaryCaps')
primary_caps.add(layers.Conv2D(filters=8 * 32, kernel_size=9, strides=2, padding='valid', name='primarycap_conv2d'))
primary_caps.add(layers.Reshape(target_shape=[-1, 8], name='primarycap_reshape'))
primary_caps.add(layers.Lambda(squash, name='primarycap_squash'))
digitcaps = primary_caps(digitcaps)
# Layer 3: Capsule layer. Routing algorithm works here.
digit_caps = Capsule(name='digitcaps', prototype_distribution=(1, n_class))
digit_caps.add(InputModule(signal_shape=None, dissimilarity_initializer='zeros', trainable=False))
digit_caps.add(LinearTransformation(output_dim=16, scope='local'))
digit_caps.add(DynamicRouting(iterations=routings, name='capsnet'))
digitcaps = digit_caps(digitcaps)
# Decoder network.
y = layers.Input(shape=(n_class,))
masked_by_y = Mask()([digitcaps[0], y]) # The true label is used to mask the output of capsule layer. For training
masked = Mask()([digitcaps[0], digitcaps[2]]) # Mask using the capsule with maximal length. For prediction
# Shared Decoder model in training and prediction
decoder = models.Sequential(name='decoder')
decoder.add(layers.Dense(512, activation='relu', input_dim=16 * n_class))
decoder.add(layers.Dense(1024, activation='relu'))
decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))
# Models for training and evaluation (prediction)
train_model = models.Model([x, y], [digitcaps[2], decoder(masked_by_y)])
eval_model = models.Model(x, [digitcaps[2], decoder(masked)])
# manipulate model
noise = layers.Input(shape=(n_class, 16))
noised_digitcaps = layers.Add()([digitcaps[0], noise])
masked_noised_y = Mask()([noised_digitcaps, y])
manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
return train_model, eval_model, manipulate_model
def train(model, data, args):
"""
Training
:param model: the model
:param data: a tuple containing training and testing data, like `((x_train, y_train), (x_test, y_test))`
:param args: arguments
:return: The trained model
"""
# unpacking the data
(x_train, y_train), (x_test, y_test) = data
# callbacks
log = callbacks.CSVLogger(args.save_dir + '/log.csv')
tb = TensorBoard(log_dir=args.save_dir + '/tensorboard-logs',
batch_size=args.batch_size, histogram_freq=int(args.debug))
checkpoint = callbacks.ModelCheckpoint(args.save_dir + '/weights-{epoch:02d}.h5', monitor='val_capsnet_acc',
save_best_only=True, save_weights_only=True, verbose=1)
# compile the model
model.compile(optimizer=optimizers.Adam(lr=args.lr),
loss=[margin_loss, 'mse'],
loss_weights=[1., args.lam_recon],
metrics={'capsnet': 'accuracy'})
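    # margin_loss follows Sabour et al. (2017); for each class k it computes
    #   L_k = T_k * max(0, m+ - ||v_k||)^2 + lambda * (1 - T_k) * max(0, ||v_k|| - m-)^2
    # The paper's defaults are m+ = 0.9, m- = 0.1, lambda = 0.5 (assumed here;
    # the actual constants live in anysma.losses).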
def train_generator(x, y, batch_size):
train_datagen = ImageDataGenerator(width_shift_range=2,
height_shift_range=2)
generator = train_datagen.flow(x, y, batch_size=batch_size)
while 1:
x_batch, y_batch = generator.next()
yield ([x_batch, y_batch], [y_batch, x_batch])
# Training with data augmentation. If shift_fraction=0., also no augmentation.
model.fit_generator(generator=train_generator(x_train, y_train, args.batch_size),
steps_per_epoch=int(y_train.shape[0] / args.batch_size),
epochs=args.epochs,
validation_data=[[x_test, y_test], [y_test, x_test]],
callbacks=[log, tb, checkpoint])
model.save_weights(args.save_dir + '/trained_model.h5')
print('Trained model saved to \'%s/trained_model.h5\'' % args.save_dir)
return model
def test(model, data, args):
x_test, y_test = data
y_pred, x_recon = model.predict(x_test, batch_size=args.batch_size)
print('-' * 30 + 'Begin: test' + '-' * 30)
print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1)) / y_test.shape[0])
img = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
image = img * 255
Image.fromarray(image.astype(np.uint8)).save(args.save_dir + "/real_and_recon.png")
print()
print('Reconstructed images are saved to %s/real_and_recon.png' % args.save_dir)
print('-' * 30 + 'End: test' + '-' * 30)
def manipulate_latent(model, data, args):
print('-' * 30 + 'Begin: manipulate' + '-' * 30)
x_test, y_test = data
index = np.argmax(y_test, 1) == args.digit
number = np.random.randint(low=0, high=sum(index) - 1)
x, y = x_test[index][number], y_test[index][number]
x, y = np.expand_dims(x, 0), np.expand_dims(y, 0)
noise = np.zeros([1, 10, 16])
x_recons = []
for dim in range(16):
for r in [-0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.05, 0.1, 0.15, 0.2, 0.25]:
tmp = np.copy(noise)
tmp[:, :, dim] = r
x_recon = model.predict([x, y, tmp])
x_recons.append(x_recon)
x_recons = np.concatenate(x_recons)
img = combine_images(x_recons, height=16)
image = img * 255
Image.fromarray(image.astype(np.uint8)).save(args.save_dir + '/manipulate-%d.png' % args.digit)
print('manipulated result saved to %s/manipulate-%d.png' % (args.save_dir, args.digit))
print('-' * 30 + 'End: manipulate' + '-' * 30)
def load_mnist():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.
y_train = to_categorical(y_train.astype('float32'))
y_test = to_categorical(y_test.astype('float32'))
return (x_train, y_train), (x_test, y_test)
if __name__ == "__main__":
# setting the hyper parameters
parser = argparse.ArgumentParser(description="Capsule Network on MNIST.")
parser.add_argument('--epochs', default=50, type=int)
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--lr', default=0.001, type=float,
help="Initial learning rate.")
parser.add_argument('--lam_recon', default=0.392, type=float,
help="The coefficient for the loss of decoder.")
parser.add_argument('-r', '--routings', default=3, type=int,
help="Number of iterations used in routing algorithm. should > 0.")
parser.add_argument('--debug', action='store_true',
help="Save weights by TensorBoard.")
parser.add_argument('--save_dir', default='./output')
parser.add_argument('-t', '--testing', action='store_true',
help="Test the trained model on testing dataset.")
parser.add_argument('--digit', default=5, type=int,
help="Digit to manipulate during test.")
parser.add_argument('-w', '--weights', default=None,
help="The path of the saved weights. Should be specified when testing.")
args = parser.parse_args()
print(args)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
# load data
(x_train, y_train), (x_test, y_test) = load_mnist()
# define model
model, eval_model, manipulate_model = CapsNet(input_shape=x_train.shape[1:],
n_class=len(np.unique(np.argmax(y_train, 1))),
routings=args.routings)
model.summary(line_length=200, positions=[.33, .6, .67, 1.])
# train or test
if args.weights is not None: # init the model weights with provided one
model.load_weights(args.weights)
if not args.testing:
train(model=model, data=((x_train, y_train), (x_test, y_test)), args=args)
else: # as long as weights are given, will run testing
if args.weights is None:
print('No weights are provided. Will test using random initialized weights.')
manipulate_latent(manipulate_model, (x_test, y_test), args)
test(model=eval_model, data=(x_test, y_test), args=args)
|
the-stack_0_11463 | # coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
# noqa: E241
from __future__ import print_function
from functools import wraps
import glob
import gzip
import itertools
import json
import os
import pipes
import re
import select
import shlex
import shutil
import struct
import subprocess
import sys
import time
import tempfile
import unittest
import uuid
from subprocess import PIPE, STDOUT
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner.py other')
from tools.shared import run_process, try_delete
from tools.shared import EMCC, EMXX, EMAR, EMRANLIB, PYTHON, FILE_PACKAGER, WINDOWS, LLVM_ROOT, EM_BUILD_VERBOSE
from tools.shared import CLANG_CC, CLANG_CXX, LLVM_AR, LLVM_DWARFDUMP
from tools.shared import NODE_JS, SPIDERMONKEY_ENGINE, JS_ENGINES, WASM_ENGINES, V8_ENGINE
from runner import RunnerCore, path_from_root, no_wasm_backend, no_fastcomp, is_slow_test, ensure_dir
from runner import needs_dlfcn, env_modify, no_windows, requires_native_clang, chdir, with_env_modify, create_test_file, parameterized
from jsrun import run_js
from tools import shared, building
import jsrun
import clang_native
import tools.line_endings
import tools.js_optimizer
import tools.tempfiles
import tools.duplicate_function_eliminator
scons_path = shared.which('scons')
emmake = shared.bat_suffix(path_from_root('emmake'))
emcmake = shared.bat_suffix(path_from_root('emcmake'))
emconfigure = shared.bat_suffix(path_from_root('emconfigure'))
emconfig = shared.bat_suffix(path_from_root('em-config'))
emsize = shared.bat_suffix(path_from_root('emsize'))
class temp_directory(object):
def __init__(self, dirname):
self.dir = dirname
def __enter__(self):
self.directory = tempfile.mkdtemp(prefix='emtest_temp_', dir=self.dir)
self.prev_cwd = os.getcwd()
os.chdir(self.directory)
print('temp_directory: ' + self.directory)
return self.directory
def __exit__(self, type, value, traceback):
os.chdir(self.prev_cwd)
def uses_canonical_tmp(func):
"""Decorator that signals the use of the canonical temp by a test method.
This decorator takes care of cleaning the directory after the
test to satisfy the leak detector.
"""
@wraps(func)
def decorated(self):
# Before running the test completely remove the canonical_tmp
if os.path.exists(self.canonical_temp_dir):
shutil.rmtree(self.canonical_temp_dir)
try:
func(self)
finally:
# Make sure the test isn't lying about the fact that it uses
# canonical_tmp
self.assertTrue(os.path.exists(self.canonical_temp_dir))
# Remove the temp dir in a try-finally, as otherwise if the
# test fails we would not clean it up, and if leak detection
# is set we will show that error instead of the actual one.
shutil.rmtree(self.canonical_temp_dir)
return decorated
def is_python3_version_supported():
"""Retuns True if the installed python3 version is supported by emscripten.
Note: Emscripten requires python3.5 or above since python3.4 and below do not
support circular dependencies."""
try:
print('is_python3_version_supported')
python3 = shared.which('python3')
print(' python3 =', python3)
output = run_process([python3, '--version'], stdout=PIPE).stdout
print(' output =', output, output.split())
output = output.split()[1]
# ignore final component which can contains non-integers (e.g 'rc1')
version = [int(x) for x in output.split('.')[:2]]
return version >= [3, 5]
except Exception:
# If anything goes wrong (no python3, unexpected output format), then we do
# not support this python3
return False
def encode_leb(number):
# TODO(sbc): handle larger numbers
assert(number < 255)
# pack the integer then take only the first (little end) byte
return struct.pack('<i', number)[:1]
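# e.g. encode_leb(5) == b'\x05'; for values below 128 the single little-endian
# byte is the same as the unsigned LEB128 encoding.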
def get_fastcomp_src_dir():
"""Locate fastcomp source tree by searching realtive to LLVM_ROOT."""
d = LLVM_ROOT
key_file = 'readme-emscripten-fastcomp.txt'
while d != os.path.dirname(d):
d = os.path.abspath(d)
# when the build directory lives below the source directory
if os.path.exists(os.path.join(d, key_file)):
return d
# when the build directory lives alongside the source directory
elif os.path.exists(os.path.join(d, 'src', key_file)):
return os.path.join(d, 'src')
else:
d = os.path.dirname(d)
return None
def parse_wasm(filename):
wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), filename], stdout=PIPE).stdout
imports = []
exports = []
funcs = []
for line in wat.splitlines():
line = line.strip()
if line.startswith('(import '):
line = line.strip('()')
name = line.split()[2].strip('"')
imports.append(name)
if line.startswith('(export '):
line = line.strip('()')
name = line.split()[1].strip('"')
exports.append(name)
if line.startswith('(func '):
line = line.strip('()')
name = line.split()[1].strip('"')
funcs.append(name)
return imports, exports, funcs
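# Sketch of how parse_wasm is typically consumed (illustrative only; the exact
# symbol names depend on build settings and are an assumption here):
#
#   imports, exports, funcs = parse_wasm('a.out.wasm')
#   self.assertIn('memory', exports)
#
# Each returned list holds the names taken from the corresponding
# (import ...), (export ...) and (func ...) entries of the disassembled wat.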
class other(RunnerCore):
# Utility to run a simple test in this suite. This receives a directory which
# should contain test.cpp and test.out files, compiles the cpp, and runs it
# to verify the output, with optional compile and run arguments.
# TODO: use in more places
def do_other_test(self, dirname, emcc_args=[], run_args=[]):
shutil.copyfile(path_from_root('tests', dirname, 'test.cpp'), 'test.cpp')
run_process([EMCC, 'test.cpp'] + emcc_args)
expected = open(path_from_root('tests', dirname, 'test.out')).read()
seen = run_js('a.out.js', args=run_args, stderr=PIPE, full_output=True) + '\n'
self.assertContained(expected, seen)
# Another utility to run a test in this suite. This receives a source file
# to compile, with optional compiler and execution flags.
# Output can be checked by verifying that literals are contained and that a list
# of regexes match. The return code can also be checked.
def do_smart_test(self, source, literals=[], regexes=[],
emcc_args=[], run_args=[], assert_returncode=0):
run_process([EMCC, source] + emcc_args)
seen = run_js('a.out.js', args=run_args, stderr=PIPE, full_output=True,
assert_returncode=assert_returncode) + '\n'
for literal in literals:
self.assertContained([literal], seen)
for regex in regexes:
self.assertTrue(re.search(regex, seen), 'Expected regex "%s" to match on:\n%s' % (regex, seen))
def run_on_pty(self, cmd):
master, slave = os.openpty()
output = []
try:
env = os.environ.copy()
env['TERM'] = 'xterm-color'
proc = subprocess.Popen(cmd, stdout=slave, stderr=slave, env=env)
while proc.poll() is None:
r, w, x = select.select([master], [], [], 1)
if r:
output.append(os.read(master, 1024))
return (proc.returncode, b''.join(output))
finally:
os.close(master)
os.close(slave)
def test_emcc_v(self):
for compiler in [EMCC, EMXX]:
# -v, without input files
proc = run_process([compiler, '-v'], stdout=PIPE, stderr=PIPE)
self.assertContained('clang version %s' % shared.expected_llvm_version(), proc.stderr)
self.assertContained('GNU', proc.stderr)
self.assertNotContained('this is dangerous', proc.stdout)
self.assertNotContained('this is dangerous', proc.stderr)
def test_emcc_generate_config(self):
for compiler in [EMCC, EMXX]:
config_path = './emscripten_config'
run_process([compiler, '--generate-config', config_path])
self.assertExists(config_path, 'A config file should have been created at %s' % config_path)
config_contents = open(config_path).read()
self.assertContained('EMSCRIPTEN_ROOT', config_contents)
self.assertContained('LLVM_ROOT', config_contents)
os.remove(config_path)
def test_emcc_output_mjs(self):
run_process([EMCC, '-o', 'hello_world.mjs', path_from_root('tests', 'hello_world.c')])
with open('hello_world.mjs') as f:
output = f.read()
self.assertContained('export default Module;', output)
# TODO(sbc): Test that this is actually runnable. We currently don't have
# any tests for EXPORT_ES6 but once we do this should be enabled.
# self.assertContained('hello, world!', run_js('hello_world.mjs'))
def test_emcc_out_file(self):
# Verify that "-ofile" works in addition to "-o" "file"
run_process([EMCC, '-c', '-ofoo.o', path_from_root('tests', 'hello_world.c')])
self.assertExists('foo.o')
run_process([EMCC, '-ofoo.js', 'foo.o'])
self.assertExists('foo.js')
@parameterized({
'c': [EMCC, '.c'],
'cxx': [EMXX, '.cpp']})
def test_emcc_basics(self, compiler, suffix):
# emcc src.cpp ==> writes a.out.js and a.out.wasm
run_process([compiler, path_from_root('tests', 'hello_world' + suffix)])
self.assertExists('a.out.js')
self.assertExists('a.out.wasm')
self.assertContained('hello, world!', run_js('a.out.js'))
# --version
output = run_process([compiler, '--version'], stdout=PIPE, stderr=PIPE)
output = output.stdout.replace('\r', '')
self.assertContained('emcc (Emscripten gcc/clang-like replacement)', output)
self.assertContained('''Copyright (C) 2014 the Emscripten authors (see AUTHORS.txt)
This is free and open source software under the MIT license.
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
''', output)
# --help
output = run_process([compiler, '--help'], stdout=PIPE, stderr=PIPE)
self.assertContained('Display this information', output.stdout)
self.assertContained('Most clang options will work', output.stdout)
# -dumpmachine
output = run_process([compiler, '-dumpmachine'], stdout=PIPE, stderr=PIPE)
self.assertContained(shared.get_llvm_target(), output.stdout)
# -dumpversion
output = run_process([compiler, '-dumpversion'], stdout=PIPE, stderr=PIPE)
self.assertEqual(shared.EMSCRIPTEN_VERSION, output.stdout.strip())
# properly report source code errors, and stop there
self.clear()
stderr = self.expect_fail([compiler, path_from_root('tests', 'hello_world_error' + suffix)])
self.assertNotContained('IOError', stderr) # no python stack
self.assertNotContained('Traceback', stderr) # no python stack
self.assertContained('error: invalid preprocessing directive', stderr)
self.assertContained(["error: use of undeclared identifier 'cheez", "error: unknown type name 'cheez'"], stderr)
self.assertContained('errors generated.', stderr.splitlines()[-2])
@parameterized({
'c': [EMCC, '.c'],
'cxx': [EMXX, '.cpp']})
def test_emcc_2(self, compiler, suffix):
# emcc src.cpp -c and emcc src.cpp -o src.[o|bc] ==> should give a .bc file
for args in [['-c'], ['-o', 'src.o'], ['-o', 'src.bc'], ['-o', 'src.so'], ['-O1', '-c', '-o', '/dev/null'], ['-O1', '-o', '/dev/null']]:
print('args:', args)
if '/dev/null' in args and WINDOWS:
print('skip because windows')
continue
target = args[1] if len(args) == 2 else 'hello_world.o'
self.clear()
run_process([compiler, path_from_root('tests', 'hello_world' + suffix)] + args)
if args[-1] == '/dev/null':
print('(no output)')
continue
syms = building.llvm_nm(target)
self.assertIn('main', syms.defs)
if self.is_wasm_backend():
# wasm backend will also have '__original_main' or such
self.assertEqual(len(syms.defs), 2)
else:
self.assertEqual(len(syms.defs), 1)
if target == 'js': # make sure emcc can recognize the target as a bitcode file
shutil.move(target, target + '.bc')
target += '.bc'
run_process([compiler, target, '-o', target + '.js'])
self.assertContained('hello, world!', run_js(target + '.js'))
@parameterized({
'c': [EMCC, '.c'],
'cxx': [EMXX, '.cpp']})
def test_emcc_3(self, compiler, suffix):
# handle singleton archives
run_process([compiler, '-c', path_from_root('tests', 'hello_world' + suffix), '-o', 'a.o'])
run_process([LLVM_AR, 'r', 'a.a', 'a.o'], stdout=PIPE, stderr=PIPE)
run_process([compiler, 'a.a'])
self.assertContained('hello, world!', run_js('a.out.js'))
if not self.is_wasm_backend():
# emcc src.ll ==> generates .js
self.clear()
run_process([compiler, path_from_root('tests', 'hello_world.ll')])
self.assertContained('hello, world!', run_js('a.out.js'))
# emcc [..] -o [path] ==> should work with absolute paths
for path in [os.path.abspath(os.path.join('..', 'file1.js')), os.path.join('b_dir', 'file2.js')]:
print(path)
os.chdir(self.get_dir())
self.clear()
print(os.listdir(os.getcwd()))
ensure_dir(os.path.join('a_dir', 'b_dir'))
os.chdir('a_dir')
# use single file so we don't have more files to clean up
run_process([compiler, path_from_root('tests', 'hello_world' + suffix), '-o', path, '-s', 'SINGLE_FILE=1'])
last = os.getcwd()
os.chdir(os.path.dirname(path))
self.assertContained('hello, world!', run_js(os.path.basename(path)))
os.chdir(last)
try_delete(path)
@parameterized({
'c': [EMCC],
'cxx': [EMXX]})
def test_emcc_4(self, compiler):
# Optimization: emcc src.cpp -o something.js [-Ox]. -O0 is the same as not specifying any optimization setting
for params, opt_level, bc_params, closure, has_malloc in [ # bc params are used after compiling to bitcode
(['-o', 'something.js'], 0, None, 0, 1),
(['-o', 'something.js', '-O0'], 0, None, 0, 0),
(['-o', 'something.js', '-O1'], 1, None, 0, 0),
(['-o', 'something.js', '-O1', '-g'], 1, None, 0, 0), # no closure since debug
(['-o', 'something.js', '-O2'], 2, None, 0, 1),
(['-o', 'something.js', '-O2', '-g'], 2, None, 0, 0),
(['-o', 'something.js', '-Os'], 2, None, 0, 1),
(['-o', 'something.js', '-O3'], 3, None, 0, 1),
# and, test compiling to bitcode first
(['-o', 'something.bc'], 0, [], 0, 0),
(['-o', 'something.bc', '-O0'], 0, [], 0, 0),
(['-o', 'something.bc', '-O1'], 1, ['-O1'], 0, 0),
(['-o', 'something.bc', '-O2'], 2, ['-O2'], 0, 0),
(['-o', 'something.bc', '-O3'], 3, ['-O3'], 0, 0),
(['-O1', '-o', 'something.bc'], 1, [], 0, 0),
# non-wasm
(['-s', 'WASM=0', '-o', 'something.js'], 0, None, 0, 1),
(['-s', 'WASM=0', '-o', 'something.js', '-O0'], 0, None, 0, 0),
(['-s', 'WASM=0', '-o', 'something.js', '-O1'], 1, None, 0, 0),
(['-s', 'WASM=0', '-o', 'something.js', '-O1', '-g'], 1, None, 0, 0), # no closure since debug
(['-s', 'WASM=0', '-o', 'something.js', '-O2'], 2, None, 0, 1),
(['-s', 'WASM=0', '-o', 'something.js', '-O2', '-g'], 2, None, 0, 0),
(['-s', 'WASM=0', '-o', 'something.js', '-Os'], 2, None, 0, 1),
(['-s', 'WASM=0', '-o', 'something.js', '-O3'], 3, None, 0, 1),
# and, test compiling to bitcode first
(['-s', 'WASM=0', '-o', 'something.bc'], 0, ['-s', 'WASM=0'], 0, 0),
(['-s', 'WASM=0', '-o', 'something.bc', '-O0'], 0, ['-s', 'WASM=0'], 0, 0),
(['-s', 'WASM=0', '-o', 'something.bc', '-O1'], 1, ['-s', 'WASM=0', '-O1'], 0, 0),
(['-s', 'WASM=0', '-o', 'something.bc', '-O2'], 2, ['-s', 'WASM=0', '-O2'], 0, 0),
(['-s', 'WASM=0', '-o', 'something.bc', '-O3'], 3, ['-s', 'WASM=0', '-O3'], 0, 0),
(['-s', 'WASM=0', '-O1', '-o', 'something.bc'], 1, ['-s', 'WASM=0'], 0, 0),
]:
if 'WASM=0' in params and self.is_wasm_backend():
continue
print(params, opt_level, bc_params, closure, has_malloc)
self.clear()
keep_debug = '-g' in params
args = [compiler, path_from_root('tests', 'hello_world_loop' + ('_malloc' if has_malloc else '') + '.cpp')] + params
print('..', args)
output = run_process(args, stdout=PIPE, stderr=PIPE)
assert len(output.stdout) == 0, output.stdout
if bc_params is not None:
self.assertExists('something.bc', output.stderr)
bc_args = [compiler, 'something.bc', '-o', 'something.js'] + bc_params
print('....', bc_args)
output = run_process(bc_args, stdout=PIPE, stderr=PIPE)
self.assertExists('something.js', output.stderr)
self.assertContained('hello, world!', run_js('something.js'))
# Verify optimization level etc. in the generated code
# XXX these are quite sensitive, and will need updating when code generation changes
generated = open('something.js').read()
main = self.get_func(generated, '_main') if 'function _main' in generated else generated
assert 'new Uint16Array' in generated and 'new Uint32Array' in generated, 'typed arrays 2 should be used by default'
assert 'SAFE_HEAP' not in generated, 'safe heap should not be used by default'
assert ': while(' not in main, 'when relooping we also js-optimize, so there should be no labelled whiles'
if closure:
if opt_level == 0:
assert '._main =' in generated, 'closure compiler should have been run'
elif opt_level >= 1:
assert '._main=' in generated, 'closure compiler should have been run (and output should be minified)'
else:
# closure has not been run, we can do some additional checks. TODO: figure out how to do these even with closure
assert '._main = ' not in generated, 'closure compiler should not have been run'
if keep_debug:
assert ('switch (label)' in generated or 'switch (label | 0)' in generated) == (opt_level <= 0), 'relooping should be in opt >= 1'
assert ('assert(STACKTOP < STACK_MAX' in generated) == (opt_level == 0), 'assertions should be in opt == 0'
if 'WASM=0' in params:
if opt_level >= 2 and '-g' in params:
assert re.search(r'HEAP8\[\$?\w+ ?\+ ?\(+\$?\w+ ?', generated) or re.search(r'HEAP8\[HEAP32\[', generated) or re.search(r'[i$]\d+ & ~\(1 << [i$]\d+\)', generated), 'eliminator should create compound expressions, and fewer one-time vars' # also in -O1, but easier to test in -O2
looks_unminified = ' = {}' in generated and ' = []' in generated
looks_minified = '={}' in generated and '=[]' in generated and ';var' in generated
assert not (looks_minified and looks_unminified)
if opt_level == 0 or '-g' in params:
assert looks_unminified
elif opt_level >= 2:
assert looks_minified
@no_wasm_backend('tests for asm.js optimizer')
@parameterized({
'c': [EMCC],
'cxx': [EMXX]})
def test_emcc_5(self, compiler):
# asm.js optimization levels
for params, test, text in [
(['-O2'], lambda generated: 'function addRunDependency' in generated, 'shell has unminified utilities'),
(['-O2', '--closure', '1'], lambda generated: 'function addRunDependency' not in generated and ';function' in generated, 'closure minifies the shell, removes whitespace'),
(['-O2', '--closure', '1', '-g1'], lambda generated: 'function addRunDependency' not in generated and ';function' not in generated, 'closure minifies the shell, -g1 makes it keep whitespace'),
(['-O2'], lambda generated: 'var b=0' in generated and 'function _main' not in generated, 'registerize/minify is run by default in -O2'),
(['-O2', '--minify', '0'], lambda generated: 'var b = 0' in generated and 'function _main' not in generated, 'minify is cancelled, but not registerize'),
(['-O2', '--js-opts', '0'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'js opts are cancelled'),
(['-O2', '-g'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'registerize/minify is cancelled by -g'),
(['-O2', '-g0'], lambda generated: 'var b=0' in generated and 'function _main' not in generated, 'registerize/minify is run by default in -O2 -g0'),
(['-O2', '-g1'], lambda generated: 'var b = 0' in generated and 'function _main' not in generated, 'compress is cancelled by -g1'),
(['-O2', '-g2'], lambda generated: ('var b = 0' in generated or 'var i1 = 0' in generated) and 'function _main' in generated, 'minify is cancelled by -g2'),
(['-O2', '-g3'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'registerize is cancelled by -g3'),
(['-O2', '--profiling'], lambda generated: ('var b = 0' in generated or 'var i1 = 0' in generated) and 'function _main' in generated, 'similar to -g2'),
(['-O2', '-profiling'], lambda generated: ('var b = 0' in generated or 'var i1 = 0' in generated) and 'function _main' in generated, 'similar to -g2'),
(['-O2', '--profiling-funcs'], lambda generated: 'var b=0' in generated and '"use asm";var a=' in generated and 'function _main' in generated, 'very minified, but retain function names'),
(['-O2', '-profiling-funcs'], lambda generated: 'var b=0' in generated and '"use asm";var a=' in generated and 'function _main' in generated, 'very minified, but retain function names'),
(['-O2'], lambda generated: 'var b=0' in generated and '"use asm";var a=' in generated and 'function _main' not in generated, 'very minified, no function names'),
# (['-O2', '-g4'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'same as -g3 for now'),
(['-s', 'INLINING_LIMIT=0'], lambda generated: 'function _dump' in generated, 'no inlining without opts'),
([], lambda generated: 'Module["_dump"]' not in generated, 'dump is not exported by default'),
(['-s', 'EXPORTED_FUNCTIONS=["_main", "_dump"]'], lambda generated: 'Module["_dump"] =' in generated, 'dump is now exported'),
(['--llvm-opts', '1'], lambda generated: '_puts(' in generated, 'llvm opts requested'),
([], lambda generated: '// Sometimes an existing Module' in generated, 'without opts, comments in shell code'),
(['-O2'], lambda generated: '// Sometimes an existing Module' not in generated, 'with opts, no comments in shell code'),
(['-O2', '-g2'], lambda generated: '// Sometimes an existing Module' not in generated, 'with -g2, no comments in shell code'),
(['-O2', '-g3'], lambda generated: '// Sometimes an existing Module' in generated, 'with -g3, yes comments in shell code'),
]:
print(params, text)
self.clear()
run_process([compiler, path_from_root('tests', 'hello_world_loop.cpp'), '-o', 'a.out.js', '-s', 'WASM=0'] + params)
self.assertContained('hello, world!', run_js('a.out.js'))
assert test(open('a.out.js').read()), text
def test_multiple_sources(self):
# Compiling two sources at a time should work.
cmd = [EMCC, '-c', path_from_root('tests', 'twopart_main.cpp'), path_from_root('tests', 'twopart_side.c')]
run_process(cmd)
# Object files should be generated by default in the current working
# directory, and not alongside the sources.
self.assertExists('twopart_main.o')
self.assertExists('twopart_side.o')
self.assertNotExists(path_from_root('tests', 'twopart_main.o'))
self.assertNotExists(path_from_root('tests', 'twopart_side.o'))
# But it is an error if '-o' is also specified.
self.clear()
err = self.expect_fail(cmd + ['-o', 'out.o'])
self.assertContained('cannot specify -o with -c/-S and multiple source files', err)
self.assertNotExists('twopart_main.o')
self.assertNotExists('twopart_side.o')
self.assertNotExists(path_from_root('tests', 'twopart_main.o'))
self.assertNotExists(path_from_root('tests', 'twopart_side.o'))
def test_combining_object_files(self):
# Compiling two files with -c will generate separate object files
run_process([EMCC, path_from_root('tests', 'twopart_main.cpp'), path_from_root('tests', 'twopart_side.c'), '-c'])
self.assertExists('twopart_main.o')
self.assertExists('twopart_side.o')
# Linking with just one of them is expected to fail
err = self.expect_fail([EMCC, 'twopart_main.o'])
self.assertContained('undefined symbol: theFunc', err)
# Linking with both should work
run_process([EMCC, 'twopart_main.o', 'twopart_side.o'])
self.assertContained('side got: hello from main, over', run_js('a.out.js'))
# Combining object files into another object should also work, using the `-r` flag
run_process([EMCC, '-r', 'twopart_main.o', 'twopart_side.o', '-o', 'combined.o'])
# We also support building without the `-r` flag but expect a warning
err = run_process([EMCC, 'twopart_main.o', 'twopart_side.o', '-o', 'combined2.o'], stderr=PIPE).stderr
self.assertBinaryEqual('combined.o', 'combined2.o')
self.assertContained('warning: Assuming object file output in the absence of `-c`', err)
# Should be two symbols (and in the wasm backend, also __original_main)
syms = building.llvm_nm('combined.o')
self.assertIn('main', syms.defs)
if self.is_wasm_backend():
self.assertEqual(len(syms.defs), 3)
else:
self.assertEqual(len(syms.defs), 2)
run_process([EMCC, 'combined.o', '-o', 'combined.o.js'])
self.assertContained('side got: hello from main, over', run_js('combined.o.js'))
def test_js_transform(self):
with open('t.py', 'w') as f:
f.write('''
import sys
f = open(sys.argv[1], 'a')
f.write('transformed!')
f.close()
''')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '--js-transform', '%s t.py' % (PYTHON)])
self.assertIn('transformed!', open('a.out.js').read())
@no_wasm_backend("wasm backend alwasy embedds memory")
def test_js_mem_file(self):
for opts in [0, 1, 2, 3]:
print('mem init in', opts)
self.clear()
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '-O' + str(opts)])
if opts >= 2:
self.assertExists('a.out.js.mem')
else:
self.assertNotExists('a.out.js.mem')
def test_emcc_asm_v_wasm(self):
for opts in ([], ['-O1'], ['-O2'], ['-O3']):
print('opts', opts)
for mode in ([], ['-s', 'WASM=0']):
self.clear()
wasm = '=0' not in str(mode)
print(' mode', mode, 'wasm?', wasm)
run_process([EMCC, path_from_root('tests', 'hello_world.c')] + opts + mode)
self.assertExists('a.out.js')
if wasm:
self.assertExists('a.out.wasm')
for engine in JS_ENGINES:
print(' engine', engine)
out = run_js('a.out.js', engine=engine, stderr=PIPE, full_output=True)
self.assertContained('hello, world!', out)
if not wasm and engine == SPIDERMONKEY_ENGINE:
self.validate_asmjs(out)
if not wasm and not self.is_wasm_backend():
src = open('a.out.js').read()
if opts == []:
self.assertContained('almost asm', src)
else:
self.assertContained('use asm', src)
def test_emcc_cflags(self):
output = run_process([EMCC, '--cflags'], stdout=PIPE)
flags = output.stdout.strip()
self.assertContained(' '.join(building.doublequote_spaces(shared.emsdk_cflags([], False))), flags)
output = run_process([EMXX, '--cflags'], stdout=PIPE)
flags = output.stdout.strip()
self.assertContained(' '.join(building.doublequote_spaces(shared.emsdk_cflags([], True))), flags)
# check they work
cmd = [CLANG_CXX, path_from_root('tests', 'hello_world.cpp')] + shlex.split(flags.replace('\\', '\\\\')) + ['-c', '-emit-llvm', '-o', 'a.bc']
run_process(cmd)
run_process([EMCC, 'a.bc'])
self.assertContained('hello, world!', run_js('a.out.js'))
def test_emcc_print_search_dirs(self):
result = run_process([EMCC, '-print-search-dirs'], stdout=PIPE, stderr=PIPE)
self.assertContained('programs: =', result.stdout)
self.assertContained('libraries: =', result.stdout)
def test_emar_em_config_flag(self):
# Test that the --em-config flag is accepted but not passed down to llvm-ar.
# We expand this in case the EM_CONFIG is ~/.emscripten (default)
config = os.path.expanduser(shared.EM_CONFIG)
proc = run_process([EMAR, '--em-config', config, '-version'], stdout=PIPE, stderr=PIPE)
self.assertEqual(proc.stderr, "")
self.assertContained('LLVM', proc.stdout)
def test_emsize(self):
with open(path_from_root('tests', 'other', 'test_emsize.out')) as expected_output:
expected = expected_output.read()
cmd = [emsize, path_from_root('tests', 'other', 'test_emsize.js')]
for command in [cmd, cmd + ['-format=sysv']]:
output = run_process(command, stdout=PIPE).stdout
self.assertContained(expected, output)
@parameterized({
# ('directory to the test', 'output filename', ['extra args to pass to
# CMake']) Testing all combinations would be too much work and the test
# would take 10 minutes+ to finish (CMake feature detection is slow), so
# combine multiple features into one to try to cover as much as possible
# while still keeping this test within a sensible time limit.
'js': ('target_js', 'test_cmake.js', ['-DCMAKE_BUILD_TYPE=Debug']),
'html': ('target_html', 'hello_world_gles.html', ['-DCMAKE_BUILD_TYPE=Release']),
'library': ('target_library', 'libtest_cmake.a', ['-DCMAKE_BUILD_TYPE=MinSizeRel']),
'static_cpp': ('target_library', 'libtest_cmake.a', ['-DCMAKE_BUILD_TYPE=RelWithDebInfo', '-DCPP_LIBRARY_TYPE=STATIC']),
'stdproperty': ('stdproperty', 'helloworld.js', [])
})
def test_cmake(self, test_dir, output_file, cmake_args):
# Test all supported generators.
if WINDOWS:
generators = ['MinGW Makefiles', 'NMake Makefiles']
else:
generators = ['Unix Makefiles', 'Ninja', 'Eclipse CDT4 - Ninja']
configurations = {'MinGW Makefiles' : {'build' : ['mingw32-make'] }, # noqa
'NMake Makefiles' : {'build' : ['nmake', '/NOLOGO']}, # noqa
'Unix Makefiles' : {'build' : ['make']}, # noqa
'Ninja' : {'build' : ['ninja']}, # noqa
'Eclipse CDT4 - Ninja': {'build' : ['ninja']}, # noqa
}
for generator in generators:
conf = configurations[generator]
if not shared.which(conf['build'][0]):
# Use simple test if applicable
print('Skipping %s test for CMake support; build tool not found: %s.' % (generator, conf['build'][0]))
continue
cmakelistsdir = path_from_root('tests', 'cmake', test_dir)
with temp_directory(self.get_dir()) as tempdirname:
# Run Cmake
cmd = [emcmake, 'cmake'] + cmake_args + ['-G', generator, cmakelistsdir]
env = os.environ.copy()
# https://github.com/emscripten-core/emscripten/pull/5145: Check that CMake works even if EMCC_SKIP_SANITY_CHECK=1 is passed.
if test_dir == 'target_html':
env['EMCC_SKIP_SANITY_CHECK'] = '1'
print(str(cmd))
ret = run_process(cmd, env=env, stdout=None if EM_BUILD_VERBOSE >= 2 else PIPE, stderr=None if EM_BUILD_VERBOSE >= 1 else PIPE)
if ret.stderr is not None and len(ret.stderr.strip()):
print(ret.stderr) # If there were any errors, print them directly to console for diagnostics.
if ret.stderr is not None and 'error' in ret.stderr.lower():
print('Failed command: ' + ' '.join(cmd))
print('Result:\n' + ret.stderr)
self.fail('cmake call failed!')
# Build
cmd = conf['build']
if EM_BUILD_VERBOSE >= 3 and 'Ninja' not in generator:
cmd += ['VERBOSE=1']
ret = run_process(cmd, stdout=None if EM_BUILD_VERBOSE >= 2 else PIPE)
if ret.stderr is not None and len(ret.stderr.strip()):
print(ret.stderr) # If there were any errors, print them directly to console for diagnostics.
if ret.stdout is not None and 'error' in ret.stdout.lower() and '0 error(s)' not in ret.stdout.lower():
print('Failed command: ' + ' '.join(cmd))
print('Result:\n' + ret.stdout)
self.fail('make failed!')
self.assertExists(tempdirname + '/' + output_file, 'building a cmake-generated Makefile failed to produce an output file %s!' % tempdirname + '/' + output_file)
# Run through node, if CMake produced a .js file.
if output_file.endswith('.js'):
ret = run_process(NODE_JS + [tempdirname + '/' + output_file], stdout=PIPE).stdout
self.assertTextDataIdentical(open(cmakelistsdir + '/out.txt').read().strip(), ret.strip())
# Test that the various CMAKE_xxx_COMPILE_FEATURES that are advertised for the Emscripten toolchain match with the actual language features that Clang supports.
# If we update LLVM version and this test fails, copy over the new advertised features from Clang and place them to cmake/Modules/Platform/Emscripten.cmake.
@no_windows('Skipped on Windows because CMake does not configure native Clang builds well on Windows.')
def test_cmake_compile_features(self):
with temp_directory(self.get_dir()):
cmd = ['cmake', '-DCMAKE_C_COMPILER=' + CLANG_CC, '-DCMAKE_CXX_COMPILER=' + CLANG_CXX, path_from_root('tests', 'cmake', 'stdproperty')]
print(str(cmd))
native_features = run_process(cmd, stdout=PIPE).stdout
with temp_directory(self.get_dir()):
cmd = [emcmake, 'cmake', path_from_root('tests', 'cmake', 'stdproperty')]
print(str(cmd))
emscripten_features = run_process(cmd, stdout=PIPE).stdout
native_features = '\n'.join([x for x in native_features.split('\n') if '***' in x])
emscripten_features = '\n'.join([x for x in emscripten_features.split('\n') if '***' in x])
self.assertTextDataIdentical(native_features, emscripten_features)
# Tests that it's possible to pass C++11 or GNU++11 build modes to CMake by building code that needs C++11 (embind)
def test_cmake_with_embind_cpp11_mode(self):
for args in [[], ['-DNO_GNU_EXTENSIONS=1']]:
with temp_directory(self.get_dir()) as tempdirname:
configure = [emcmake, 'cmake', path_from_root('tests', 'cmake', 'cmake_with_emval')] + args
print(str(configure))
run_process(configure)
build = ['cmake', '--build', '.']
print(str(build))
run_process(build)
ret = run_process(NODE_JS + [os.path.join(tempdirname, 'cpp_with_emscripten_val.js')], stdout=PIPE).stdout.strip()
if '-DNO_GNU_EXTENSIONS=1' in args:
self.assertTextDataIdentical('Hello! __STRICT_ANSI__: 1, __cplusplus: 201103', ret)
else:
self.assertTextDataIdentical('Hello! __STRICT_ANSI__: 0, __cplusplus: 201103', ret)
# Tests the EMSCRIPTEN_GENERATE_BITCODE_STATIC_LIBRARIES Emscripten CMake toolchain option
def test_cmake_bitcode_static_libraries(self):
if self.is_wasm_backend():
# Test that this option produces an error with the llvm backend
err = self.expect_fail([emcmake, 'cmake', path_from_root('tests', 'cmake', 'static_lib'), '-DEMSCRIPTEN_GENERATE_BITCODE_STATIC_LIBRARIES=ON'])
self.assertContained('EMSCRIPTEN_GENERATE_BITCODE_STATIC_LIBRARIES is not compatible with the', err)
return
# Test that building static libraries by default generates UNIX archives (.a, with the emar tool)
self.clear()
run_process([emcmake, 'cmake', path_from_root('tests', 'cmake', 'static_lib')])
run_process(['cmake', '--build', '.'])
self.assertTrue(building.is_ar('libstatic_lib.a'))
run_process([EMAR, 'x', 'libstatic_lib.a'])
found = False # hashing makes the object name random
for x in os.listdir('.'):
if x.endswith('.o'):
found = True
if self.is_wasm_backend():
assert building.is_wasm(x)
else:
assert building.is_bitcode(x)
assert found
# Test that passing the -DEMSCRIPTEN_GENERATE_BITCODE_STATIC_LIBRARIES=ON
# directive causes CMake to generate LLVM bitcode files as static libraries
# (.bc)
self.clear()
run_process([emcmake, 'cmake', '-DEMSCRIPTEN_GENERATE_BITCODE_STATIC_LIBRARIES=ON', path_from_root('tests', 'cmake', 'static_lib')])
run_process(['cmake', '--build', '.'])
if self.is_wasm_backend():
assert building.is_wasm('libstatic_lib.bc')
else:
assert building.is_bitcode('libstatic_lib.bc')
assert not building.is_ar('libstatic_lib.bc')
# Test that one is able to fake custom suffixes for static libraries.
# (sometimes projects want to emulate stuff, and do weird things like files
# with ".so" suffix which are in fact either ar archives or bitcode files)
self.clear()
run_process([emcmake, 'cmake', '-DSET_FAKE_SUFFIX_IN_PROJECT=1', path_from_root('tests', 'cmake', 'static_lib')])
run_process(['cmake', '--build', '.'])
assert building.is_ar('myprefix_static_lib.somecustomsuffix')
# Tests that the CMake variable EMSCRIPTEN_VERSION is properly provided to user CMake scripts
def test_cmake_emscripten_version(self):
run_process([emcmake, 'cmake', path_from_root('tests', 'cmake', 'emscripten_version')])
def test_system_include_paths(self):
# Verify that all default include paths are within `emscripten/system`
def verify_includes(stderr):
self.assertContained('<...> search starts here:', stderr)
assert stderr.count('End of search list.') == 1, stderr
start = stderr.index('<...> search starts here:')
end = stderr.index('End of search list.')
includes = stderr[start:end]
includes = [i.strip() for i in includes.splitlines()[1:-1]]
for i in includes:
if shared.Cache.dirname in i:
self.assertContained(shared.Cache.dirname, i)
else:
self.assertContained(path_from_root('system'), i)
err = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-v'], stderr=PIPE).stderr
verify_includes(err)
err = run_process([EMXX, path_from_root('tests', 'hello_world.cpp'), '-v'], stderr=PIPE).stderr
verify_includes(err)
def test_failure_error_code(self):
for compiler in [EMCC, EMXX]:
# Test that if one file is missing from the build, then emcc shouldn't succeed, and shouldn't produce an output file.
self.expect_fail([compiler, path_from_root('tests', 'hello_world.c'), 'this_file_is_missing.c', '-o', 'out.js'])
self.assertFalse(os.path.exists('out.js'))
def test_use_cxx(self):
create_test_file('empty_file', ' ')
dash_xc = run_process([EMCC, '-v', '-xc', 'empty_file'], stderr=PIPE).stderr
self.assertNotContained('-x c++', dash_xc)
dash_xcpp = run_process([EMCC, '-v', '-xc++', 'empty_file'], stderr=PIPE).stderr
self.assertContained('-x c++', dash_xcpp)
def test_cxx11(self):
for std in ['-std=c++11', '--std=c++11']:
for compiler in [EMCC, EMXX]:
run_process([compiler, std, path_from_root('tests', 'hello_cxx11.cpp')])
# Regression test for issue #4522: Incorrect CC vs CXX detection
def test_incorrect_c_detection(self):
# This auto-detection only works for the compile phase.
# For linking you need to use `em++` or pass `-x c++`
create_test_file('test.c', 'foo\n')
for compiler in [EMCC, EMXX]:
run_process([compiler, '-c', '--bind', '--embed-file', 'test.c', path_from_root('tests', 'hello_world.cpp')])
def test_odd_suffixes(self):
for suffix in ['CPP', 'c++', 'C++', 'cxx', 'CXX', 'cc', 'CC', 'i', 'ii']:
if self.is_wasm_backend() and suffix == 'ii':
# wasm backend treats .i and .ii specially and considers them already
# pre-processed. Because of this it strips all the -D command line
# flags, including the __EMSCRIPTEN__ define, which makes this fail
# to compile since libcxx/__config depends on __EMSCRIPTEN__.
continue
self.clear()
print(suffix)
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'test.' + suffix)
run_process([EMCC, self.in_dir('test.' + suffix)])
self.assertContained('hello, world!', run_js('a.out.js'))
for suffix in ['lo']:
self.clear()
print(suffix)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'binary.' + suffix])
run_process([EMCC, 'binary.' + suffix])
self.assertContained('hello, world!', run_js('a.out.js'))
@no_wasm_backend('asm.js minification')
def test_asm_minify(self):
def test(args):
run_process([EMCC, path_from_root('tests', 'hello_world_loop_malloc.cpp'), '-s', 'WASM=0'] + args)
self.assertContained('hello, world!', run_js('a.out.js'))
return open('a.out.js').read()
src = test([])
assert 'function _malloc' in src
src = test(['-O2', '-s', 'ASM_JS=1'])
normal_size = len(src)
print('normal', normal_size)
assert 'function _malloc' not in src
src = test(['-O2', '-s', 'ASM_JS=1', '--minify', '0'])
unminified_size = len(src)
print('unminified', unminified_size)
assert unminified_size > normal_size
assert 'function _malloc' not in src
src = test(['-O2', '-s', 'ASM_JS=1', '-g'])
debug_size = len(src)
print('debug', debug_size)
self.assertGreater(debug_size, unminified_size)
self.assertContained('function _malloc', src)
@no_wasm_backend('tests fastcomp extra assertions for function pointer errors - do we need these?')
def test_dangerous_func_cast(self):
src = r'''
#include <stdio.h>
typedef void (*voidfunc)();
int my_func() {
printf("my func\n");
return 10;
}
int main(int argc, char **argv) {
voidfunc fps[10];
for (int i = 0; i < 10; i++)
fps[i] = (i == argc) ? (void (*)())my_func : NULL;
fps[2 * (argc-1) + 1]();
return 0;
}
'''
create_test_file('src.c', src)
def test(args, expected):
print(args, expected)
run_process([EMCC, 'src.c'] + args, stderr=PIPE)
self.assertContained(expected, run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None))
if self.is_wasm_backend():
return
print('in asm.js')
run_process([EMCC, 'src.c', '-s', 'WASM=0'] + args, stderr=PIPE)
self.assertContained(expected, run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None))
# TODO: emulation function support in wasm is imperfect
print('with emulated function pointers in asm.js')
run_process([EMCC, '-s', 'WASM=0', 'src.c', '-s', 'ASSERTIONS=1'] + args + ['-s', 'EMULATED_FUNCTION_POINTERS=1'], stderr=PIPE)
out = run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None)
self.assertContained(expected, out)
# fastcomp. all asm, so it can't just work with wrong sigs. but,
# ASSERTIONS=2 gives much better info to debug
# Case 1: No useful info, but does mention ASSERTIONS
test(['-O1'], 'ASSERTIONS')
# Case 2: Some useful text
test(['-O1', '-s', 'ASSERTIONS=1'], [
'Invalid function pointer',
"called with signature 'v'. Perhaps this is an invalid value",
'Build with ASSERTIONS=2 for more info'
])
# Case 3: actually useful identity of the bad pointer, with comparisons to
# what it would be in other types/tables
test(['-O1', '-s', 'ASSERTIONS=2'], [
'Invalid function pointer',
"called with signature 'v'. Perhaps this is an invalid value",
'This pointer might make sense in another type signature:',
'Invalid function pointer',
"called with signature 'v'. Perhaps this is an invalid value",
"i: asm['_my_func']"
])
# Case 4: emulate so it works
test(['-O1', '-s', 'EMULATE_FUNCTION_POINTER_CASTS=1'], 'my func\n')
@no_wasm_backend('uses EMULATED_FUNCTION_POINTERS')
def test_emulate_function_pointer_casts_assertions_2(self):
# check empty tables work with assertions 2 in this mode (#6554)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EMULATED_FUNCTION_POINTERS=1', '-s', 'ASSERTIONS=2'])
def test_wl_linkflags(self):
# Test passing -L and -l via -Wl, arguments and -Wl, response files
create_test_file('main.cpp', '''
extern "C" void printey();
int main() {
printey();
return 0;
}
''')
create_test_file('libfile.cpp', '''
#include <stdio.h>
extern "C" void printey() {
printf("hello from lib\\n");
}
''')
create_test_file('linkflags.txt', '''
-L.
-lfoo
''')
run_process([EMCC, '-o', 'libfile.o', 'libfile.cpp'])
run_process([EMAR, 'cr', 'libfoo.a', 'libfile.o'])
run_process([EMCC, 'main.cpp', '-L.', '-lfoo'])
run_process([EMCC, 'main.cpp', '-Wl,-L.', '-Wl,-lfoo'])
run_process([EMCC, 'main.cpp', '-Wl,@linkflags.txt'])
def test_l_link(self):
# Linking with -lLIBNAME and -L/DIRNAME should work, also should work with spaces
create_test_file('main.cpp', '''
extern void printey();
int main() {
printey();
return 0;
}
''')
create_test_file('libfile.cpp', '''
#include <stdio.h>
void printey() {
printf("hello from lib\\n");
}
''')
ensure_dir('libdir')
libfile = self.in_dir('libdir', 'libfile.so')
aout = 'a.out.js'
def build(path, args):
run_process([EMCC, path] + args)
# Test linking the library built here by emcc
build('libfile.cpp', ['-c'])
shutil.move('libfile.o', libfile)
build('main.cpp', ['-L' + 'libdir', '-lfile'])
self.assertContained('hello from lib', run_js(aout))
# Also test execution with `-l c` and space-separated library linking syntax
os.remove(aout)
build('libfile.cpp', ['-c', '-l', 'c'])
shutil.move('libfile.o', libfile)
build('main.cpp', ['-L', 'libdir', '-l', 'file'])
self.assertContained('hello from lib', run_js(aout))
# Must not leave unneeded linker stubs
self.assertNotExists('a.out')
self.assertNotExists('a.exe')
def test_commons_link(self):
create_test_file('a.h', r'''
#if !defined(A_H)
#define A_H
extern int foo[8];
#endif
''')
create_test_file('a.c', r'''
#include "a.h"
int foo[8];
''')
create_test_file('main.c', r'''
#include <stdio.h>
#include "a.h"
int main() {
printf("|%d|\n", foo[0]);
return 0;
}
''')
run_process([EMCC, '-o', 'a.o', 'a.c'])
run_process([EMAR, 'rv', 'library.a', 'a.o'])
run_process([EMCC, '-o', 'main.o', 'main.c'])
run_process([EMCC, '-o', 'a.js', 'main.o', 'library.a'])
self.assertContained('|0|', run_js('a.js'))
@parameterized({
'expand_symlinks': [[]],
'no_canonical_prefixes': [['-no-canonical-prefixes']],
})
@no_windows('Windows does not support symlinks')
def test_symlink_points_to_bad_suffix(self, flags):
"""Tests compiling a symlink where foobar.c points to foobar.xxx.
In this case, we should always successfully compile the code."""
create_test_file('foobar.xxx', 'int main(){ return 0; }')
os.symlink('foobar.xxx', 'foobar.c')
run_process([EMCC, 'foobar.c', '-o', 'foobar.bc'] + flags)
@parameterized({
'expand_symlinks': ([], True),
'no_canonical_prefixes': (['-no-canonical-prefixes'], False),
})
@no_windows('Windows does not support symlinks')
def test_symlink_has_bad_suffix(self, flags, expect_success):
"""Tests compiling a symlink where foobar.xxx points to foobar.c.
In this case, setting -no-canonical-prefixes will result in a build failure
due to the inappropriate file suffix on foobar.xxx."""
create_test_file('foobar.c', 'int main(){ return 0; }')
os.symlink('foobar.c', 'foobar.xxx')
proc = run_process([EMCC, 'foobar.xxx', '-o', 'foobar.bc'] + flags, check=expect_success, stderr=PIPE)
if not expect_success:
self.assertNotEqual(proc.returncode, 0)
self.assertContained("unknown suffix", proc.stderr)
def test_multiply_defined_libsymbols(self):
lib_name = 'libA.c'
a2_name = 'a2.c'
b2_name = 'b2.c'
main_name = 'main.c'
create_test_file(lib_name, 'int mult() { return 1; }')
create_test_file(a2_name, 'void x() {}')
create_test_file(b2_name, 'void y() {}')
create_test_file(main_name, r'''
#include <stdio.h>
int mult();
int main() {
printf("result: %d\n", mult());
return 0;
}
''')
building.emcc(lib_name, output_filename='libA.so')
building.emcc(a2_name, ['-L.', '-lA'])
building.emcc(b2_name, ['-L.', '-lA'])
building.emcc(main_name, ['-L.', '-lA', a2_name + '.o', b2_name + '.o'], output_filename='a.out.js')
self.assertContained('result: 1', run_js('a.out.js'))
def test_multiply_defined_libsymbols_2(self):
a = "int x() { return 55; }"
a_name = 'a.c'
create_test_file(a_name, a)
b = "int y() { return 2; }"
b_name = 'b.c'
create_test_file(b_name, b)
c = "int z() { return 5; }"
c_name = 'c.c'
create_test_file(c_name, c)
main = r'''
#include <stdio.h>
int x();
int y();
int z();
int main() {
printf("result: %d\n", x() + y() + z());
return 0;
}
'''
main_name = 'main.c'
create_test_file(main_name, main)
building.emcc(a_name) # a.c.o
building.emcc(b_name) # b.c.o
building.emcc(c_name) # c.c.o
lib_name = 'libLIB.a'
building.emar('cr', lib_name, [a_name + '.o', b_name + '.o']) # libLIB.a with a and b
# a is in the lib AND in an .o, so should be ignored in the lib. We do still need b from the lib though
building.emcc(main_name, [a_name + '.o', c_name + '.o', '-L.', '-lLIB'], output_filename='a.out.js')
self.assertContained('result: 62', run_js('a.out.js'))
@no_wasm_backend('not relevant with lld')
def test_link_group(self):
lib_src_name = 'lib.c'
create_test_file(lib_src_name, 'int x() { return 42; }')
main_name = 'main.c'
create_test_file(main_name, r'''
#include <stdio.h>
int x();
int main() {
printf("result: %d\n", x());
return 0;
}
''')
building.emcc(lib_src_name) # lib.c.o
lib_name = 'libLIB.a'
building.emar('cr', lib_name, [lib_src_name + '.o']) # libLIB.a with lib.c.o
def test(lib_args, err_expected):
print(err_expected)
output = run_process([EMCC, main_name, '-o', 'a.out.js'] + lib_args, stdout=PIPE, stderr=PIPE, check=not err_expected)
if err_expected:
self.assertContained(err_expected, output.stderr)
else:
self.assertNotContained('undefined symbol', output.stderr)
out_js = 'a.out.js'
self.assertExists(out_js, output.stdout + '\n' + output.stderr)
self.assertContained('result: 42', run_js(out_js))
test(['-Wl,--start-group', lib_name, '-Wl,--start-group'], 'Nested --start-group, missing --end-group?')
test(['-Wl,--end-group', lib_name, '-Wl,--start-group'], '--end-group without --start-group')
test(['-Wl,--start-group', lib_name, '-Wl,--end-group'], None)
test(['-Wl,--start-group', lib_name], None)
print('embind test with groups')
main_name = 'main.cpp'
create_test_file(main_name, r'''
#include <stdio.h>
#include <emscripten/val.h>
using namespace emscripten;
extern "C" int x();
int main() {
int y = -x();
y = val::global("Math").call<int>("abs", y);
printf("result: %d\n", y);
return 0;
}
''')
test(['-Wl,--start-group', lib_name, '-Wl,--end-group', '--bind'], None)
def test_whole_archive(self):
# Verify that -Wl,--whole-archive includes the static constructor from the
# otherwise unreferenced library.
run_process([EMCC, '-c', '-o', 'main.o', path_from_root('tests', 'test_whole_archive', 'main.c')])
run_process([EMCC, '-c', '-o', 'testlib.o', path_from_root('tests', 'test_whole_archive', 'testlib.c')])
run_process([EMAR, 'crs', 'libtest.a', 'testlib.o'])
run_process([EMCC, '-Wl,--whole-archive', 'libtest.a', '-Wl,--no-whole-archive', 'main.o'])
self.assertContained('foo is: 42\n', run_js('a.out.js'))
run_process([EMCC, '-Wl,-whole-archive', 'libtest.a', '-Wl,-no-whole-archive', 'main.o'])
self.assertContained('foo is: 42\n', run_js('a.out.js'))
# Verify the --no-whole-archive prevents the inclusion of the ctor
run_process([EMCC, '-Wl,-whole-archive', '-Wl,--no-whole-archive', 'libtest.a', 'main.o'])
self.assertContained('foo is: 0\n', run_js('a.out.js'))
def test_link_group_bitcode(self):
create_test_file('1.c', r'''
int f(void);
int main() {
f();
return 0;
}
''')
create_test_file('2.c', r'''
#include <stdio.h>
int f() {
printf("Hello\n");
return 0;
}
''')
run_process([EMCC, '-o', '1.o', '1.c'])
run_process([EMCC, '-o', '2.o', '2.c'])
run_process([EMAR, 'crs', '2.a', '2.o'])
run_process([EMCC, '-o', 'out.bc', '-Wl,--start-group', '2.a', '1.o', '-Wl,--end-group'])
run_process([EMCC, 'out.bc'])
self.assertContained('Hello', run_js('a.out.js'))
@no_wasm_backend('lld resolves circular lib dependencies')
def test_circular_libs(self):
def tmp_source(name, code):
with open(name, 'w') as f:
f.write(code)
tmp_source('a.c', 'int z(); int x() { return z(); }')
tmp_source('b.c', 'int x(); int y() { return x(); } int z() { return 42; }')
tmp_source('c.c', 'int q() { return 0; }')
tmp_source('main.c', r'''
#include <stdio.h>
int y();
int main() {
printf("result: %d\n", y());
return 0;
}
''')
building.emcc('a.c') # a.c.o
building.emcc('b.c') # b.c.o
building.emcc('c.c')
building.emar('cr', 'libA.a', ['a.c.o', 'c.c.o'])
building.emar('cr', 'libB.a', ['b.c.o', 'c.c.o'])
args = ['main.c', '-o', 'a.out.js']
libs_list = ['libA.a', 'libB.a']
# 'libA.a' does not satisfy any symbols from main, so it will not be included,
# and there will be an undefined symbol.
err = self.expect_fail([EMCC] + args + libs_list)
self.assertContained('error: undefined symbol: x', err)
# -Wl,--start-group and -Wl,--end-group around the libs will cause a rescan
# of 'libA.a' after 'libB.a' adds undefined symbol "x", so a.c.o will now be
# included (and the link will succeed).
libs = ['-Wl,--start-group'] + libs_list + ['-Wl,--end-group']
run_process([EMCC] + args + libs)
self.assertContained('result: 42', run_js('a.out.js'))
# -( and -) should also work.
args = ['main.c', '-o', 'a2.out.js']
libs = ['-Wl,-('] + libs_list + ['-Wl,-)']
run_process([EMCC] + args + libs)
self.assertContained('result: 42', run_js('a2.out.js'))
# The fastcomp path will deliberately ignore duplicate input files in order
# to allow "libA.so" on the command line twice. The is not really .so support
# and the .so files are really bitcode.
@no_wasm_backend('tests legacy .so linking behviour')
@needs_dlfcn
def test_redundant_link(self):
lib = "int mult() { return 1; }"
lib_name = 'libA.c'
create_test_file(lib_name, lib)
main = r'''
#include <stdio.h>
int mult();
int main() {
printf("result: %d\n", mult());
return 0;
}
'''
main_name = 'main.c'
create_test_file(main_name, main)
building.emcc(lib_name, output_filename='libA.so')
building.emcc(main_name, ['libA.so', 'libA.so'], output_filename='a.out.js')
self.assertContained('result: 1', run_js('a.out.js'))
def test_dot_a_all_contents_invalid(self):
# check that we error if an object file in a .a is not valid bitcode.
# do not silently ignore native object files, which may have been
# built by mistake
create_test_file('native.c', 'int native() { return 5; }')
create_test_file('main.c', 'extern int native(); int main() { return native(); }')
run_process([CLANG_CC, 'native.c', '-target', 'x86_64-linux', '-c', '-o', 'native.o'])
run_process([EMAR, 'crs', 'libfoo.a', 'native.o'])
stderr = self.expect_fail([EMCC, 'main.c', 'libfoo.a'])
self.assertContained('unknown file type', stderr)
def test_export_all(self):
lib = r'''
#include <stdio.h>
void libf1() { printf("libf1\n"); }
void libf2() { printf("libf2\n"); }
'''
create_test_file('lib.c', lib)
create_test_file('main.js', '''
var Module = {
onRuntimeInitialized: function() {
_libf1();
_libf2();
}
};
''')
building.emcc('lib.c', ['-s', 'EXPORT_ALL', '-s', 'LINKABLE', '--pre-js', 'main.js'], output_filename='a.out.js')
self.assertContained('libf1\nlibf2\n', run_js('a.out.js'))
def test_export_all_and_exported_functions(self):
# EXPORT_ALL should not export library functions by default.
# This means that to export a library function you also need to explicitly
# list it in EXPORTED_FUNCTIONS.
lib = r'''
#include <stdio.h>
#include <emscripten.h>
EMSCRIPTEN_KEEPALIVE void libfunc() { puts("libfunc\n"); }
'''
create_test_file('lib.c', lib)
create_test_file('main.js', '''
var Module = {
onRuntimeInitialized: function() {
_libfunc();
__get_daylight();
}
};
''')
# __get_daylight should not be linked by default, even with EXPORT_ALL
building.emcc('lib.c', ['-s', 'EXPORT_ALL', '--pre-js', 'main.js'], output_filename='a.out.js')
err = run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None)
self.assertContained('__get_daylight is not defined', err)
building.emcc('lib.c', ['-s', "EXPORTED_FUNCTIONS=['__get_daylight']", '-s', 'EXPORT_ALL', '--pre-js', 'main.js'], output_filename='a.out.js')
self.assertContained('libfunc\n', run_js('a.out.js'))
def test_stdin(self):
def run_test():
for engine in JS_ENGINES:
if engine == V8_ENGINE:
continue # no stdin support in v8 shell
engine[0] = os.path.normpath(engine[0])
print(engine, file=sys.stderr)
# work around a bug in python's subprocess module
# (we'd use run_js() normally)
try_delete('out.txt')
cmd = jsrun.make_command(os.path.normpath('out.js'), engine)
cmd = ' '.join(building.doublequote_spaces(cmd))
if WINDOWS:
os.system('type "in.txt" | {} >out.txt'.format(cmd))
else: # posix
os.system('cat in.txt | {} > out.txt'.format(cmd))
self.assertContained('abcdef\nghijkl\neof', open('out.txt').read())
building.emcc(path_from_root('tests', 'module', 'test_stdin.c'), output_filename='out.js')
create_test_file('in.txt', 'abcdef\nghijkl')
run_test()
building.emcc(path_from_root('tests', 'module', 'test_stdin.c'),
['-O2', '--closure', '1'], output_filename='out.js')
run_test()
def test_ungetc_fscanf(self):
create_test_file('main.cpp', r'''
#include <stdio.h>
int main(int argc, char const *argv[])
{
char str[4] = {0};
FILE* f = fopen("my_test.input", "r");
if (f == NULL) {
printf("cannot open file\n");
return -1;
}
ungetc('x', f);
ungetc('y', f);
ungetc('z', f);
fscanf(f, "%3s", str);
printf("%s\n", str);
return 0;
}
''')
create_test_file('my_test.input', 'abc')
building.emcc('main.cpp', ['--embed-file', 'my_test.input'], output_filename='a.out.js')
self.assertContained('zyx', run_process(JS_ENGINES[0] + ['a.out.js'], stdout=PIPE, stderr=PIPE).stdout)
def test_abspaths(self):
# Includes with absolute paths are generally dangerous, things like -I/usr/.. will get to system local headers, not our portable ones.
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'main.c')
for args, expected in [(['-I/usr/something', '-Wwarn-absolute-paths'], True),
(['-L/usr/something', '-Wwarn-absolute-paths'], True),
(['-I/usr/something'], False),
(['-L/usr/something'], False),
(['-I/usr/something', '-Wno-warn-absolute-paths'], False),
(['-L/usr/something', '-Wno-warn-absolute-paths'], False),
(['-Isubdir/something', '-Wwarn-absolute-paths'], False),
(['-Lsubdir/something', '-Wwarn-absolute-paths'], False),
([], False)]:
print(args, expected)
proc = run_process([EMCC, 'main.c'] + args, stderr=PIPE)
WARNING = 'encountered. If this is to a local system header/library, it may cause problems (local system files make sense for compiling natively on your system, but not necessarily to JavaScript)'
self.assertContainedIf(WARNING, proc.stderr, expected)
def test_local_link(self):
# Linking a local library directly, like /usr/lib/libsomething.so, cannot work of course since it
# doesn't contain bitcode. However, when we see that, we should look for a bitcode file for that
# library in the -L paths and system/lib
create_test_file('main.cpp', '''
extern void printey();
int main() {
printey();
return 0;
}
''')
ensure_dir('subdir')
open(os.path.join('subdir', 'libfile.so'), 'w').write('this is not llvm bitcode!')
create_test_file('libfile.cpp', '''
#include <stdio.h>
void printey() {
printf("hello from lib\\n");
}
''')
run_process([EMCC, 'libfile.cpp', '-o', 'libfile.so'], stderr=PIPE)
run_process([EMCC, 'main.cpp', os.path.join('subdir', 'libfile.so'), '-L.'])
self.assertContained('hello from lib', run_js('a.out.js'))
def test_identical_basenames(self):
# Issue 287: files in different dirs but with the same basename get confused as the same,
# causing multiply defined symbol errors
ensure_dir('foo')
ensure_dir('bar')
open(os.path.join('foo', 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
open(os.path.join('bar', 'main.cpp'), 'w').write('''
#include <stdio.h>
void printey() { printf("hello there\\n"); }
''')
run_process([EMCC, os.path.join('foo', 'main.cpp'), os.path.join('bar', 'main.cpp')])
self.assertContained('hello there', run_js('a.out.js'))
# ditto with first creating .o files
try_delete('a.out.js')
run_process([EMCC, os.path.join('foo', 'main.cpp'), '-o', os.path.join('foo', 'main.o')])
run_process([EMCC, os.path.join('bar', 'main.cpp'), '-o', os.path.join('bar', 'main.o')])
run_process([EMCC, os.path.join('foo', 'main.o'), os.path.join('bar', 'main.o')])
self.assertContained('hello there', run_js('a.out.js'))
def test_main_a(self):
# if main() is in a .a, we need to pull in that .a
main_name = 'main.c'
create_test_file(main_name, r'''
#include <stdio.h>
extern int f();
int main() {
printf("result: %d.\n", f());
return 0;
}
''')
other_name = 'other.c'
create_test_file(other_name, r'''
#include <stdio.h>
int f() { return 12346; }
''')
run_process([EMCC, main_name, '-c', '-o', main_name + '.bc'])
run_process([EMCC, other_name, '-c', '-o', other_name + '.bc'])
run_process([EMAR, 'cr', main_name + '.a', main_name + '.bc'])
run_process([EMCC, other_name + '.bc', main_name + '.a'])
self.assertContained('result: 12346.', run_js('a.out.js'))
def test_multiple_archives_duplicate_basenames(self):
create_test_file('common.c', r'''
#include <stdio.h>
void a(void) {
printf("a\n");
}
''')
run_process([EMCC, 'common.c', '-c', '-o', 'common.o'])
try_delete('liba.a')
run_process([EMAR, 'rc', 'liba.a', 'common.o'])
create_test_file('common.c', r'''
#include <stdio.h>
void b(void) {
printf("b\n");
}
''')
run_process([EMCC, 'common.c', '-c', '-o', 'common.o'])
try_delete('libb.a')
run_process([EMAR, 'rc', 'libb.a', 'common.o'])
create_test_file('main.c', r'''
void a(void);
void b(void);
int main() {
a();
b();
}
''')
run_process([EMCC, 'main.c', '-L.', '-la', '-lb'])
self.assertContained('a\nb\n', run_js('a.out.js'))
def test_archive_duplicate_basenames(self):
ensure_dir('a')
create_test_file(os.path.join('a', 'common.c'), r'''
#include <stdio.h>
void a(void) {
printf("a\n");
}
''')
run_process([EMCC, os.path.join('a', 'common.c'), '-c', '-o', os.path.join('a', 'common.o')])
ensure_dir('b')
create_test_file(os.path.join('b', 'common.c'), r'''
#include <stdio.h>
void b(void) {
printf("b...\n");
}
''')
run_process([EMCC, os.path.join('b', 'common.c'), '-c', '-o', os.path.join('b', 'common.o')])
try_delete('liba.a')
run_process([EMAR, 'rc', 'liba.a', os.path.join('a', 'common.o'), os.path.join('b', 'common.o')])
# Verify that archive contains basenames with hashes to avoid duplication
text = run_process([EMAR, 't', 'liba.a'], stdout=PIPE).stdout
self.assertEqual(text.count('common'), 2)
for line in text.split('\n'):
# should not have huge hash names
self.assertLess(len(line), 20, line)
create_test_file('main.c', r'''
void a(void);
void b(void);
int main() {
a();
b();
}
''')
err = run_process([EMCC, 'main.c', '-L.', '-la'], stderr=PIPE).stderr
self.assertNotIn('archive file contains duplicate entries', err)
self.assertContained('a\nb...\n', run_js('a.out.js'))
# Using llvm-ar directly should cause duplicate basenames
try_delete('libdup.a')
run_process([LLVM_AR, 'rc', 'libdup.a', os.path.join('a', 'common.o'), os.path.join('b', 'common.o')])
text = run_process([EMAR, 't', 'libdup.a'], stdout=PIPE).stdout
self.assertEqual(text.count('common.o'), 2)
# With fastcomp we don't support duplicate members so this should generate
# a warning. With the wasm backend (lld) this is fully supported.
cmd = [EMCC, 'main.c', '-L.', '-ldup']
if self.is_wasm_backend():
run_process(cmd)
self.assertContained('a\nb...\n', run_js('a.out.js'))
else:
err = self.expect_fail(cmd)
self.assertIn('libdup.a: archive file contains duplicate entries', err)
self.assertIn('error: undefined symbol: a', err)
# others are not duplicates - the hashing keeps them separate
self.assertEqual(err.count('duplicate: '), 1)
self.assertContained('a\nb...\n', run_js('a.out.js'))
def test_export_from_archive(self):
export_name = 'this_is_an_entry_point'
full_export_name = '_' + export_name
# The wasm backend exports symbols without the leading '_'
if self.is_wasm_backend():
expect_export = export_name
else:
expect_export = full_export_name
create_test_file('export.c', r'''
#include <stdio.h>
void %s(void) {
printf("Hello, world!\n");
}
''' % export_name)
run_process([EMCC, 'export.c', '-c', '-o', 'export.o'])
run_process([EMAR, 'rc', 'libexport.a', 'export.o'])
create_test_file('main.c', r'''
int main() {
return 0;
}
''')
# Sanity check: the symbol should not be linked in if not requested.
run_process([EMCC, 'main.c', '-L.', '-lexport'])
self.assertFalse(self.is_exported_in_wasm(expect_export, 'a.out.wasm'))
# Sanity check: exporting without a definition does not cause it to appear.
# Note: exporting main prevents emcc from warning that it generated no code.
run_process([EMCC, 'main.c', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0', '-s', "EXPORTED_FUNCTIONS=['_main', '%s']" % full_export_name])
self.assertFalse(self.is_exported_in_wasm(expect_export, 'a.out.wasm'))
# Actual test: defining symbol in library and exporting it causes it to appear in the output.
run_process([EMCC, 'main.c', '-L.', '-lexport', '-s', "EXPORTED_FUNCTIONS=['%s']" % full_export_name])
self.assertTrue(self.is_exported_in_wasm(expect_export, 'a.out.wasm'))
def test_embed_file(self):
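# --embed-file should make somefile.txt readable from the compiled program,
# and embedding the same file twice must not error.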
create_test_file('somefile.txt', 'hello from a file with lots of data and stuff in it thank you very much')
create_test_file('main.cpp', r'''
#include <stdio.h>
int main() {
FILE *f = fopen("somefile.txt", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
return 0;
}
''')
run_process([EMCC, 'main.cpp', '--embed-file', 'somefile.txt'])
self.assertContained('|hello from a file wi|', run_js('a.out.js'))
# embedding the same file twice should not error
run_process([EMCC, 'main.cpp', '--embed-file', 'somefile.txt', '--embed-file', 'somefile.txt'])
self.assertContained('|hello from a file wi|', run_js('a.out.js'))
def test_embed_file_dup(self):
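# Embedding a directory that contains files with the same basename in different
# subdirectories should keep each file's contents distinct.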
ensure_dir(self.in_dir('tst', 'test1'))
ensure_dir(self.in_dir('tst', 'test2'))
open(self.in_dir('tst', 'aa.txt'), 'w').write('frist')
open(self.in_dir('tst', 'test1', 'aa.txt'), 'w').write('sacond')
open(self.in_dir('tst', 'test2', 'aa.txt'), 'w').write('thard')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
void print_file(const char *name) {
FILE *f = fopen(name, "r");
char buf[100];
memset(buf, 0, 100);
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
}
int main() {
print_file("tst/aa.txt");
print_file("tst/test1/aa.txt");
print_file("tst/test2/aa.txt");
return 0;
}
''')
run_process([EMCC, 'main.cpp', '--embed-file', 'tst'])
self.assertContained('|frist|\n|sacond|\n|thard|\n', run_js('a.out.js'))
def test_exclude_file(self):
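# --exclude-file '*.exe' should drop matching entries from the embedded 'tst'
# directory while keeping the non-matching ones.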
ensure_dir(self.in_dir('tst', 'abc.exe'))
ensure_dir(self.in_dir('tst', 'abc.txt'))
open(self.in_dir('tst', 'hello.exe'), 'w').write('hello')
open(self.in_dir('tst', 'hello.txt'), 'w').write('world')
open(self.in_dir('tst', 'abc.exe', 'foo'), 'w').write('emscripten')
open(self.in_dir('tst', 'abc.txt', 'bar'), 'w').write('!!!')
create_test_file('main.cpp', r'''
#include <stdio.h>
int main() {
if(fopen("tst/hello.exe", "rb")) printf("Failed\n");
if(!fopen("tst/hello.txt", "rb")) printf("Failed\n");
if(fopen("tst/abc.exe/foo", "rb")) printf("Failed\n");
if(!fopen("tst/abc.txt/bar", "rb")) printf("Failed\n");
return 0;
}
''')
run_process([EMCC, 'main.cpp', '--embed-file', 'tst', '--exclude-file', '*.exe'])
self.assertEqual(run_js('a.out.js').strip(), '')
def test_multidynamic_link(self):
# Normally, linking the same dynamic library in multiple times would error, since we link it statically, causing duplicate symbols
def test(link_cmd, lib_suffix=''):
print(link_cmd, lib_suffix)
self.clear()
ensure_dir('libdir')
create_test_file('main.cpp', r'''
#include <stdio.h>
extern void printey();
extern void printother();
int main() {
printf("*");
printey();
printf("\n");
printother();
printf("\n");
printf("*");
return 0;
}
''')
open(os.path.join('libdir', 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib");
}
''')
open(os.path.join('libdir', 'libother.cpp'), 'w').write('''
#include <stdio.h>
extern void printey();
void printother() {
printf("|");
printey();
printf("|");
}
''')
compiler = [EMCC]
# Build libfile normally into an .so
run_process(compiler + [os.path.join('libdir', 'libfile.cpp'), '-o', os.path.join('libdir', 'libfile.so' + lib_suffix)])
# Build libother and dynamically link it to libfile
run_process(compiler + [os.path.join('libdir', 'libother.cpp')] + link_cmd + ['-o', os.path.join('libdir', 'libother.so')])
# Build the main file, linking in both the libs
run_process(compiler + [os.path.join('main.cpp')] + link_cmd + ['-lother', '-c'])
print('...')
# The normal build system is over. We need to do an additional step to link in the dynamic libraries, since we ignored them before
run_process([EMCC, 'main.o'] + link_cmd + ['-lother', '-s', 'EXIT_RUNTIME=1'])
self.assertContained('*hello from lib\n|hello from lib|\n*', run_js('a.out.js'))
test(['-L' + 'libdir', '-lfile']) # -l, auto detection from library path
test(['-L' + 'libdir', self.in_dir('libdir', 'libfile.so.3.1.4.1.5.9')], '.3.1.4.1.5.9') # handle libX.so.1.2.3 as well
def test_js_link(self):
create_test_file('main.cpp', '''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
create_test_file('before.js', '''
var MESSAGE = 'hello from js';
// Module is initialized with empty object by default, so if there are no keys - nothing was run yet
if (Object.keys(Module).length) throw 'This code should run before anything else!';
''')
create_test_file('after.js', '''
out(MESSAGE);
''')
run_process([EMCC, 'main.cpp', '--pre-js', 'before.js', '--post-js', 'after.js', '-s', 'WASM_ASYNC_COMPILATION=0'])
self.assertContained('hello from main\nhello from js\n', run_js('a.out.js'))
def test_sdl_endianness(self):
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <SDL/SDL.h>
int main() {
printf("%d, %d, %d\n", SDL_BYTEORDER, SDL_LIL_ENDIAN, SDL_BIG_ENDIAN);
return 0;
}
''')
run_process([EMCC, 'main.cpp'])
self.assertContained('1234, 1234, 4321\n', run_js('a.out.js'))
def test_sdl2_mixer(self):
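# Smoke test that building with USE_SDL_MIXER=2 succeeds.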
building.emcc(path_from_root('tests', 'sdl2_mixer.c'), ['-s', 'USE_SDL_MIXER=2'], output_filename='a.out.js')
def test_libpng(self):
shutil.copyfile(path_from_root('tests', 'pngtest.png'), 'pngtest.png')
building.emcc(path_from_root('tests', 'pngtest.c'), ['--embed-file', 'pngtest.png', '-s', 'USE_ZLIB=1', '-s', 'USE_LIBPNG=1'], output_filename='a.out.js')
self.assertContained('TESTS PASSED', run_process(JS_ENGINES[0] + ['a.out.js'], stdout=PIPE, stderr=PIPE).stdout)
def test_libjpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
building.emcc(path_from_root('tests', 'jpeg_test.c'), ['--embed-file', 'screenshot.jpg', '-s', 'USE_LIBJPEG=1'], output_filename='a.out.js')
self.assertContained('Image is 600 by 450 with 3 components', run_js('a.out.js', args=['screenshot.jpg'], stdout=PIPE, stderr=PIPE))
def test_bullet(self):
building.emcc(path_from_root('tests', 'bullet_hello_world.cpp'), ['-s', 'USE_BULLET=1'], output_filename='a.out.js')
self.assertContained('BULLET RUNNING', run_process(JS_ENGINES[0] + ['a.out.js'], stdout=PIPE, stderr=PIPE).stdout)
def test_vorbis(self):
# This will also test if ogg compiles, because vorbis depends on ogg
building.emcc(path_from_root('tests', 'vorbis_test.c'), ['-s', 'USE_VORBIS=1'], output_filename='a.out.js')
self.assertContained('ALL OK', run_process(JS_ENGINES[0] + ['a.out.js'], stdout=PIPE, stderr=PIPE).stdout)
def test_bzip2(self):
building.emcc(path_from_root('tests', 'bzip2_test.c'), ['-s', 'USE_BZIP2=1'], output_filename='a.out.js')
self.assertContained("usage: unzcrash filename", run_process(JS_ENGINES[0] + ['a.out.js'], stdout=PIPE, stderr=PIPE).stdout)
def test_freetype(self):
# copy the Liberation Sans Bold truetype file located in the
# <emscripten_root>/tests/freetype to the compilation folder
shutil.copy2(path_from_root('tests/freetype', 'LiberationSansBold.ttf'), os.getcwd())
# build test program with the font file embed in it
building.emcc(path_from_root('tests', 'freetype_test.c'), ['-s', 'USE_FREETYPE=1', '--embed-file', 'LiberationSansBold.ttf'], output_filename='a.out.js')
# the test program will print an ascii representation of a bitmap where the
# 'w' character has been rendered using the Liberation Sans Bold font
expectedOutput = ' \n' + \
' \n' + \
' \n' + \
' \n' + \
'*** +***+ \n' + \
'***+ ***** +\n' + \
'+**+ ***** +\n' + \
'+*** +**+**+ *\n' + \
' ***+ ***+**+ +*\n' + \
' +**+ *** *** +*\n' + \
' +**++**+ +**+**\n' + \
' ***+**+ +**+**\n' + \
' ****** *****\n' + \
' +****+ +****\n' + \
' +****+ +****\n' + \
' **** ****'
self.assertContained(expectedOutput, run_process(JS_ENGINES[0] + ['a.out.js'], stdout=PIPE, stderr=PIPE).stdout)
def test_link_memcpy(self):
# memcpy can show up *after* optimizations, i.e. after our opportunity to link in libc, so it must be special-cased
create_test_file('main.cpp', r'''
#include <stdio.h>
int main(int argc, char **argv) {
int num = argc + 10;
char buf[num], buf2[num];
for (int i = 0; i < num; i++) {
buf[i] = i*i+i/3;
}
for (int i = 1; i < num; i++) {
buf[i] += buf[i-1];
}
for (int i = 0; i < num; i++) {
buf2[i] = buf[i];
}
for (int i = 1; i < num; i++) {
buf2[i] += buf2[i-1];
}
for (int i = 0; i < num; i++) {
printf("%d:%d\n", i, buf2[i]);
}
return 0;
}
''')
run_process([EMCC, '-O2', 'main.cpp'])
output = run_js('a.out.js', full_output=True, stderr=PIPE)
self.assertContained('''0:0
1:1
2:6
3:21
4:53
5:111
6:-49
7:98
8:55
9:96
10:-16
''', output)
self.assertNotContained('warning: library.js memcpy should not be running, it is only for testing!', output)
def test_undefined_function(self):
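# Listing an undefined name in EXPORTED_FUNCTIONS should fail the link unless
# ERROR_ON_UNDEFINED_SYMBOLS=0 is set.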
cmd = [EMCC, path_from_root('tests', 'hello_world.cpp')]
run_process(cmd)
# adding a missing symbol to EXPORTED_FUNCTIONS should cause failure
cmd += ['-s', "EXPORTED_FUNCTIONS=['foobar']"]
err = self.expect_fail(cmd)
self.assertContained('undefined exported function: "foobar"', err)
# setting ERROR_ON_UNDEFINED_SYMBOLS=0 suppresses error
cmd += ['-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0']
run_process(cmd)
def test_undefined_symbols(self):
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <SDL.h>
#include "SDL/SDL_opengl.h"
extern "C" {
void something();
void elsey();
}
int main() {
printf("%p", SDL_GL_GetProcAddress("glGenTextures")); // pull in gl proc stuff, avoid warnings on emulation funcs
something();
elsey();
return 0;
}
''')
for args in ([], ['-O1'], ['-s', 'MAX_WEBGL_VERSION=2']):
for action in ('WARN', 'ERROR', None):
for value in ([0, 1]):
try_delete('a.out.js')
print('checking "%s" %s=%s' % (args, action, value))
extra = ['-s', action + '_ON_UNDEFINED_SYMBOLS=%d' % value] if action else []
proc = run_process([EMCC, 'main.cpp'] + extra + args, stderr=PIPE, check=False)
print(proc.stderr)
if value or action is None:
# The default is that we error on undefined symbols
self.assertContained('error: undefined symbol: something', proc.stderr)
self.assertContained('error: undefined symbol: elsey', proc.stderr)
check_success = False
elif action == 'ERROR' and not value:
# Error disables, should only warn
self.assertContained('warning: undefined symbol: something', proc.stderr)
self.assertContained('warning: undefined symbol: elsey', proc.stderr)
self.assertNotContained('undefined symbol: emscripten_', proc.stderr)
check_success = True
elif action == 'WARN' and not value:
# Disabled warning should imply disabling errors
self.assertNotContained('undefined symbol', proc.stderr)
check_success = True
if check_success:
self.assertEqual(proc.returncode, 0)
self.assertTrue(os.path.exists('a.out.js'))
else:
self.assertNotEqual(proc.returncode, 0)
self.assertFalse(os.path.exists('a.out.js'))
def test_GetProcAddress_LEGACY_GL_EMULATION(self):
# without legacy gl emulation, getting a proc from there should fail
self.do_other_test(os.path.join('other', 'GetProcAddress_LEGACY_GL_EMULATION'), run_args=['0'], emcc_args=['-s', 'LEGACY_GL_EMULATION=0'])
# with it, it should work
self.do_other_test(os.path.join('other', 'GetProcAddress_LEGACY_GL_EMULATION'), run_args=['1'], emcc_args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_prepost(self):
create_test_file('main.cpp', '''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
create_test_file('pre.js', '''
var Module = {
preRun: function() { out('pre-run') },
postRun: function() { out('post-run') }
};
''')
run_process([EMCC, 'main.cpp', '--pre-js', 'pre.js', '-s', 'WASM_ASYNC_COMPILATION=0'])
self.assertContained('pre-run\nhello from main\npost-run\n', run_js('a.out.js'))
# addRunDependency during preRun should prevent main and post-run from
# running.
with open('pre.js', 'a') as f:
f.write('Module.preRun = function() { out("add-dep"); addRunDependency(); }\n')
run_process([EMCC, 'main.cpp', '--pre-js', 'pre.js', '-s', 'WASM_ASYNC_COMPILATION=0'])
output = run_js('a.out.js')
self.assertContained('add-dep\n', output)
self.assertNotContained('hello from main\n', output)
self.assertNotContained('post-run\n', output)
# noInitialRun prevents run
for no_initial_run, run_dep in [(0, 0), (1, 0), (0, 1)]:
print(no_initial_run, run_dep)
args = ['-s', 'WASM_ASYNC_COMPILATION=0', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["callMain"]']
if no_initial_run:
args += ['-s', 'INVOKE_RUN=0']
if run_dep:
create_test_file('pre.js', 'Module.preRun = function() { addRunDependency("test"); }')
create_test_file('post.js', 'removeRunDependency("test");')
args += ['--pre-js', 'pre.js', '--post-js', 'post.js']
run_process([EMCC, 'main.cpp'] + args)
output = run_js('a.out.js')
self.assertContainedIf('hello from main', output, not no_initial_run)
if no_initial_run:
# Calling main later should still work; the filesystem etc. must be set up.
print('call main later')
src = open('a.out.js').read()
src += '\nModule.callMain();\n'
create_test_file('a.out.js', src)
self.assertContained('hello from main', run_js('a.out.js'))
# Use postInit
create_test_file('pre.js', '''
var Module = {
preRun: function() { out('pre-run') },
postRun: function() { out('post-run') },
preInit: function() { out('pre-init') }
};
''')
run_process([EMCC, 'main.cpp', '--pre-js', 'pre.js'])
self.assertContained('pre-init\npre-run\nhello from main\npost-run\n', run_js('a.out.js'))
def test_prepost2(self):
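# Multiple --pre-js files are injected in order, so a later one can add handlers
# to the Module object defined by an earlier one.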
create_test_file('main.cpp', '''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
create_test_file('pre.js', '''
var Module = {
preRun: function() { out('pre-run') },
};
''')
create_test_file('pre2.js', '''
Module.postRun = function() { out('post-run') };
''')
run_process([EMCC, 'main.cpp', '--pre-js', 'pre.js', '--pre-js', 'pre2.js'])
self.assertContained('pre-run\nhello from main\npost-run\n', run_js('a.out.js'))
def test_prepre(self):
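# A later --pre-js can push additional callbacks onto the Module.preRun array
# set up by an earlier --pre-js.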
create_test_file('main.cpp', '''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
create_test_file('pre.js', '''
var Module = {
preRun: [function() { out('pre-run') }],
};
''')
create_test_file('pre2.js', '''
Module.preRun.push(function() { out('prepre') });
''')
run_process([EMCC, 'main.cpp', '--pre-js', 'pre.js', '--pre-js', 'pre2.js'])
self.assertContained('prepre\npre-run\nhello from main\n', run_js('a.out.js'))
def test_extern_prepost(self):
create_test_file('extern-pre.js', '''
// I am an external pre.
''')
create_test_file('extern-post.js', '''
// I am an external post.
''')
run_process([EMCC, '-O2', path_from_root('tests', 'hello_world.c'), '--extern-pre-js', 'extern-pre.js', '--extern-post-js', 'extern-post.js'])
# the files should be included, and externally - not as part of optimized
# code, so they are the very first and last things, and they are not
# minified.
with open('a.out.js') as output:
js = output.read()
pre = js.index('// I am an external pre.')
post = js.index('// I am an external post.')
# ignore some slack - newlines and other things. we just care about the
# big picture here
SLACK = 50
self.assertLess(pre, post)
self.assertLess(pre, SLACK)
self.assertGreater(post, len(js) - SLACK)
# make sure the slack is tiny compared to the whole program
self.assertGreater(len(js), 100 * SLACK)
@no_wasm_backend('depends on bc output')
def test_save_bc(self):
cmd = [EMCC, path_from_root('tests', 'hello_world_loop_malloc.cpp'), '--save-bc', 'my_bitcode.bc']
run_process(cmd)
assert 'hello, world!' in run_js('a.out.js')
self.assertExists('my_bitcode.bc')
try_delete('a.out.js')
building.llvm_dis('my_bitcode.bc', 'my_ll.ll')
run_process([EMCC, 'my_ll.ll', '-nostdlib', '-o', 'two.js'])
assert 'hello, world!' in run_js('two.js')
def test_js_optimizer(self):
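# Run each listed combination of js-optimizer/acorn-optimizer passes over its
# input file in tests/optimizer and compare the result against the expected
# output file(s).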
ACORN_PASSES = ['JSDCE', 'AJSDCE', 'applyImportAndExportNameChanges', 'emitDCEGraph', 'applyDCEGraphRemovals', 'growableHeap', 'unsignPointers', 'asanify']
for input, expected, passes in [
(path_from_root('tests', 'optimizer', 'eliminateDeadGlobals.js'), open(path_from_root('tests', 'optimizer', 'eliminateDeadGlobals-output.js')).read(),
['eliminateDeadGlobals']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-output.js')).read(),
['hoistMultiples', 'removeAssignsToUndefined', 'simplifyExpressions']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-output.js')).read(),
['asm', 'simplifyExpressions']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-si.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-si-output.js')).read(),
['simplifyIfs']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-regs.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-regs-output.js')).read(),
['registerize']),
(path_from_root('tests', 'optimizer', 'eliminator-test.js'), open(path_from_root('tests', 'optimizer', 'eliminator-test-output.js')).read(),
['eliminate']),
(path_from_root('tests', 'optimizer', 'safe-eliminator-test.js'), open(path_from_root('tests', 'optimizer', 'safe-eliminator-test-output.js')).read(),
['eliminateMemSafe']),
(path_from_root('tests', 'optimizer', 'asm-eliminator-test.js'), open(path_from_root('tests', 'optimizer', 'asm-eliminator-test-output.js')).read(),
['asm', 'eliminate']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-output.js')).read(),
['asm', 'registerize']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-harder.js'), [open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-harder-output.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-harder-output2.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-harder-output3.js')).read()],
['asm', 'registerizeHarder']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-min.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-min-output.js')).read(),
['asm', 'registerize', 'minifyLocals']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-minifyLocals.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-minifyLocals-output.js')).read(),
['minifyLocals']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre.js'), [open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre-output.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre-output2.js')).read()],
['asm', 'simplifyExpressions']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre-f32.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre-output-f32.js')).read(),
['asm', 'asmPreciseF32', 'simplifyExpressions', 'optimizeFrounds']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre-f32.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre-output-f32-nosimp.js')).read(),
['asm', 'asmPreciseF32', 'optimizeFrounds']),
(path_from_root('tests', 'optimizer', 'test-reduce-dead-float-return.js'), open(path_from_root('tests', 'optimizer', 'test-reduce-dead-float-return-output.js')).read(),
['asm', 'optimizeFrounds', 'registerizeHarder']),
(path_from_root('tests', 'optimizer', 'test-no-reduce-dead-float-return-to-nothing.js'), open(path_from_root('tests', 'optimizer', 'test-no-reduce-dead-float-return-to-nothing-output.js')).read(),
['asm', 'registerizeHarder']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-last.js'), [open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-lastOpts-output.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-lastOpts-output2.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-lastOpts-output3.js')).read()],
['asm', 'asmLastOpts']),
(path_from_root('tests', 'optimizer', 'asmLastOpts.js'), open(path_from_root('tests', 'optimizer', 'asmLastOpts-output.js')).read(),
['asm', 'asmLastOpts']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-last.js'), [open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-last-output.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-last-output2.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-last-output3.js')).read()],
['asm', 'asmLastOpts', 'last']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-relocate.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-relocate-output.js')).read(),
['asm', 'relocate']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-minlast.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-minlast-output.js')).read(),
['asm', 'minifyWhitespace', 'asmLastOpts', 'last']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-shiftsAggressive.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-shiftsAggressive-output.js')).read(),
['asm', 'aggressiveVariableElimination']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-localCSE.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-localCSE-output.js')).read(),
['asm', 'localCSE']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-ensureLabelSet.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-ensureLabelSet-output.js')).read(),
['asm', 'ensureLabelSet']),
(path_from_root('tests', 'optimizer', '3154.js'), open(path_from_root('tests', 'optimizer', '3154-output.js')).read(),
['asm', 'eliminate', 'registerize', 'asmLastOpts', 'last']),
(path_from_root('tests', 'optimizer', 'safeLabelSetting.js'), open(path_from_root('tests', 'optimizer', 'safeLabelSetting-output.js')).read(),
['asm', 'safeLabelSetting']), # eliminate, just enough to trigger asm normalization/denormalization
(path_from_root('tests', 'optimizer', 'null_if.js'), [open(path_from_root('tests', 'optimizer', 'null_if-output.js')).read(), open(path_from_root('tests', 'optimizer', 'null_if-output2.js')).read()],
['asm', 'registerizeHarder', 'asmLastOpts', 'minifyWhitespace']), # issue 3520
(path_from_root('tests', 'optimizer', 'null_else.js'), [open(path_from_root('tests', 'optimizer', 'null_else-output.js')).read(), open(path_from_root('tests', 'optimizer', 'null_else-output2.js')).read()],
['asm', 'registerizeHarder', 'asmLastOpts', 'minifyWhitespace']), # issue 3549
(path_from_root('tests', 'optimizer', 'test-js-optimizer-splitMemory.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-splitMemory-output.js')).read(),
['splitMemory']),
(path_from_root('tests', 'optimizer', 'JSDCE.js'), open(path_from_root('tests', 'optimizer', 'JSDCE-output.js')).read(),
['JSDCE']),
(path_from_root('tests', 'optimizer', 'JSDCE-hasOwnProperty.js'), open(path_from_root('tests', 'optimizer', 'JSDCE-hasOwnProperty-output.js')).read(),
['JSDCE']),
(path_from_root('tests', 'optimizer', 'JSDCE-fors.js'), open(path_from_root('tests', 'optimizer', 'JSDCE-fors-output.js')).read(),
['JSDCE']),
(path_from_root('tests', 'optimizer', 'AJSDCE.js'), open(path_from_root('tests', 'optimizer', 'AJSDCE-output.js')).read(),
['AJSDCE']),
(path_from_root('tests', 'optimizer', 'emitDCEGraph.js'), open(path_from_root('tests', 'optimizer', 'emitDCEGraph-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'emitDCEGraph2.js'), open(path_from_root('tests', 'optimizer', 'emitDCEGraph2-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'emitDCEGraph3.js'), open(path_from_root('tests', 'optimizer', 'emitDCEGraph3-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'emitDCEGraph4.js'), open(path_from_root('tests', 'optimizer', 'emitDCEGraph4-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'emitDCEGraph5.js'), open(path_from_root('tests', 'optimizer', 'emitDCEGraph5-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'minimal-runtime-applyDCEGraphRemovals.js'), open(path_from_root('tests', 'optimizer', 'minimal-runtime-applyDCEGraphRemovals-output.js')).read(),
['applyDCEGraphRemovals']),
(path_from_root('tests', 'optimizer', 'applyDCEGraphRemovals.js'), open(path_from_root('tests', 'optimizer', 'applyDCEGraphRemovals-output.js')).read(),
['applyDCEGraphRemovals']),
(path_from_root('tests', 'optimizer', 'applyImportAndExportNameChanges.js'), open(path_from_root('tests', 'optimizer', 'applyImportAndExportNameChanges-output.js')).read(),
['applyImportAndExportNameChanges']),
(path_from_root('tests', 'optimizer', 'applyImportAndExportNameChanges2.js'), open(path_from_root('tests', 'optimizer', 'applyImportAndExportNameChanges2-output.js')).read(),
['applyImportAndExportNameChanges']),
(path_from_root('tests', 'optimizer', 'minimal-runtime-emitDCEGraph.js'), open(path_from_root('tests', 'optimizer', 'minimal-runtime-emitDCEGraph-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'minimal-runtime-2-emitDCEGraph.js'), open(path_from_root('tests', 'optimizer', 'minimal-runtime-2-emitDCEGraph-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'standalone-emitDCEGraph.js'), open(path_from_root('tests', 'optimizer', 'standalone-emitDCEGraph-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'emittedJSPreservesParens.js'), open(path_from_root('tests', 'optimizer', 'emittedJSPreservesParens-output.js')).read(),
['asm']),
(path_from_root('tests', 'optimizer', 'test-growableHeap.js'), open(path_from_root('tests', 'optimizer', 'test-growableHeap-output.js')).read(),
['growableHeap']),
(path_from_root('tests', 'optimizer', 'test-unsignPointers.js'), open(path_from_root('tests', 'optimizer', 'test-unsignPointers-output.js')).read(),
['unsignPointers']),
(path_from_root('tests', 'optimizer', 'test-asanify.js'), open(path_from_root('tests', 'optimizer', 'test-asanify-output.js')).read(),
['asanify']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-minifyGlobals.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-minifyGlobals-output.js')).read(),
['minifyGlobals']),
]:
print(input, passes)
if not isinstance(expected, list):
expected = [expected]
expected = [out.replace('\n\n', '\n').replace('\n\n', '\n') for out in expected]
acorn = any(p in ACORN_PASSES for p in passes)
# test calling optimizer
if not acorn:
print(' js')
output = run_process(NODE_JS + [path_from_root('tools', 'js-optimizer.js'), input] + passes, stdin=PIPE, stdout=PIPE).stdout
else:
print(' acorn')
output = run_process(NODE_JS + [path_from_root('tools', 'acorn-optimizer.js'), input] + passes, stdin=PIPE, stdout=PIPE).stdout
def check_js(js, expected):
# print >> sys.stderr, 'chak\n==========================\n', js, '\n===========================\n'
if 'registerizeHarder' in passes:
# registerizeHarder is hard to test, as names vary by chance, nondeterministically FIXME
def fix(src):
if type(src) is list:
return list(map(fix, src))
src = '\n'.join([line for line in src.split('\n') if 'var ' not in line]) # ignore vars
def reorder(func):
def swap(func, stuff):
# emit EYE_ONE always before EYE_TWO, replacing i1,i2 or i2,i1 etc
for i in stuff:
if i not in func:
return func
indexes = [[i, func.index(i)] for i in stuff]
indexes.sort(key=lambda x: x[1])
for j in range(len(indexes)):
func = func.replace(indexes[j][0], 'STD_' + str(j))
return func
func = swap(func, ['i1', 'i2', 'i3'])
func = swap(func, ['i1', 'i2'])
func = swap(func, ['i4', 'i5'])
return func
src = 'function '.join(map(reorder, src.split('function ')))
return src
js = fix(js)
expected = fix(expected)
self.assertIdentical(expected, js.replace('\r\n', '\n').replace('\n\n', '\n').replace('\n\n', '\n'))
if input not in [ # blacklist of tests that are native-optimizer only
path_from_root('tests', 'optimizer', 'asmLastOpts.js'),
path_from_root('tests', 'optimizer', '3154.js')
]:
check_js(output, expected)
else:
print('(skip non-native)')
if not self.is_wasm_backend() and tools.js_optimizer.use_native(passes) and tools.js_optimizer.get_native_optimizer():
# test calling native
def check_json():
run_process(NODE_JS + [path_from_root('tools', 'js-optimizer.js'), output_temp, 'receiveJSON'], stdin=PIPE, stdout=open(output_temp + '.js', 'w'))
output = open(output_temp + '.js').read()
check_js(output, expected)
self.clear()
input_temp = 'temp.js'
output_temp = 'output.js'
shutil.copyfile(input, input_temp)
run_process(NODE_JS + [path_from_root('tools', 'js-optimizer.js'), input_temp, 'emitJSON'], stdin=PIPE, stdout=open(input_temp + '.js', 'w'))
original = open(input).read()
if '// EXTRA_INFO:' in original:
json = open(input_temp + '.js').read()
json += '\n' + original[original.find('// EXTRA_INFO:'):]
create_test_file(input_temp + '.js', json)
# last is only relevant when we emit JS
if 'last' not in passes and \
'null_if' not in input and 'null_else' not in input: # null-* tests are js optimizer or native, not a mixture (they mix badly)
print(' native (receiveJSON)')
output = run_process([tools.js_optimizer.get_native_optimizer(), input_temp + '.js'] + passes + ['receiveJSON', 'emitJSON'], stdin=PIPE, stdout=open(output_temp, 'w')).stdout
check_json()
print(' native (parsing JS)')
output = run_process([tools.js_optimizer.get_native_optimizer(), input] + passes + ['emitJSON'], stdin=PIPE, stdout=open(output_temp, 'w')).stdout
check_json()
print(' native (emitting JS)')
output = run_process([tools.js_optimizer.get_native_optimizer(), input] + passes, stdin=PIPE, stdout=PIPE).stdout
check_js(output, expected)
@no_fastcomp('wasm2js-only')
def test_js_optimizer_wasm2js(self):
# run the js optimizer in a similar way as wasm2js does
shutil.copyfile(path_from_root('tests', 'optimizer', 'wasm2js.js'), 'wasm2js.js')
run_process([PYTHON, path_from_root('tools', 'js_optimizer.py'), 'wasm2js.js', 'minifyNames', 'last'])
with open(path_from_root('tests', 'optimizer', 'wasm2js-output.js')) as expected:
with open('wasm2js.js.jsopt.js') as actual:
self.assertIdentical(expected.read(), actual.read())
def test_m_mm(self):
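# -M and -MM should print a make-style dependency rule ('foo.o: ...') instead of compiling.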
create_test_file('foo.c', '#include <emscripten.h>')
for opt in ['M', 'MM']:
proc = run_process([EMCC, 'foo.c', '-' + opt], stdout=PIPE, stderr=PIPE)
assert 'foo.o: ' in proc.stdout, '-%s failed to produce the right output: %s' % (opt, proc.stdout)
assert 'error' not in proc.stderr, 'Unexpected stderr: ' + proc.stderr
@uses_canonical_tmp
def test_emcc_debug_files(self):
for opts in [0, 1, 2, 3]:
for debug in [None, '1', '2']:
print(opts, debug)
if os.path.exists(self.canonical_temp_dir):
shutil.rmtree(self.canonical_temp_dir)
env = os.environ.copy()
if debug is None:
env.pop('EMCC_DEBUG', None)
else:
env['EMCC_DEBUG'] = debug
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-O' + str(opts)], stderr=PIPE, env=env)
if debug is None:
self.assertFalse(os.path.exists(self.canonical_temp_dir))
elif debug == '1':
if self.is_wasm_backend():
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-3-original.js'))
else:
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-0-linktime.bc'))
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-1-original.js'))
elif debug == '2':
if self.is_wasm_backend():
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-3-original.js'))
else:
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-0-basebc.bc'))
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-1-linktime.bc'))
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-2-original.js'))
def test_debuginfo(self):
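# When -g is given at link time, debug info should be kept: with the wasm backend
# ' -g ' is passed to wasm-emscripten-finalize, and with fastcomp strip-debug is skipped.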
for args, expect_debug in [
(['-O0'], False),
(['-O0', '-g'], True),
(['-O0', '-g4'], True),
(['-O1'], False),
(['-O1', '-g'], True),
(['-O2'], False),
(['-O2', '-g'], True),
]:
print(args, expect_debug)
err = run_process([EMCC, '-v', path_from_root('tests', 'hello_world.cpp')] + args, stdout=PIPE, stderr=PIPE).stderr
lines = err.splitlines()
if self.is_wasm_backend():
finalize = [l for l in lines if 'wasm-emscripten-finalize' in l][0]
if expect_debug:
self.assertIn(' -g ', finalize)
else:
self.assertNotIn(' -g ', finalize)
else:
if expect_debug:
self.assertNotIn('strip-debug', err)
else:
self.assertIn('strip-debug', err)
@no_fastcomp()
def test_debuginfo_line_tables_only(self):
def test(do_compile):
do_compile([])
no_size = os.path.getsize('a.out.wasm')
do_compile(['-gline-tables-only'])
line_size = os.path.getsize('a.out.wasm')
do_compile(['-g'])
full_size = os.path.getsize('a.out.wasm')
return (no_size, line_size, full_size)
def compile_to_object(compile_args):
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-c', '-o', 'a.out.wasm'] + compile_args)
no_size, line_size, full_size = test(compile_to_object)
self.assertLess(no_size, line_size)
self.assertLess(line_size, full_size)
def compile_to_executable(compile_args, link_args):
# compile with the specified args
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-c', '-o', 'a.o'] + compile_args)
# link with debug info
run_process([EMCC, 'a.o'] + link_args)
def compile_to_debug_executable(compile_args):
return compile_to_executable(compile_args, ['-g'])
no_size, line_size, full_size = test(compile_to_debug_executable)
self.assertLess(no_size, line_size)
self.assertLess(line_size, full_size)
def compile_to_release_executable(compile_args):
return compile_to_executable(compile_args, [])
no_size, line_size, full_size = test(compile_to_release_executable)
self.assertEqual(no_size, line_size)
self.assertEqual(line_size, full_size)
@no_fastcomp()
def test_dwarf(self):
def compile_with_dwarf(args, output):
# Test that -g enables dwarf info in object files and linked wasm
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-o', output, '-g'] + args)
def verify(output):
info = run_process([LLVM_DWARFDUMP, '--all', output], stdout=PIPE).stdout
self.assertIn('DW_TAG_subprogram', info) # Ensure there's a subprogram entry in .debug_info
self.assertIn('debug_line[0x', info) # Ensure there's a line table
compile_with_dwarf(['-c'], 'a.o')
verify('a.o')
compile_with_dwarf([], 'a.js')
verify('a.wasm')
@unittest.skipIf(not scons_path, 'scons not found in PATH')
@with_env_modify({'EMSCRIPTEN_ROOT': path_from_root()})
def test_scons(self):
# this test copies the site_scons directory alongside the test
shutil.copytree(path_from_root('tests', 'scons'), 'test')
shutil.copytree(path_from_root('tools', 'scons', 'site_scons'), os.path.join('test', 'site_scons'))
with chdir('test'):
run_process(['scons'])
output = run_js('scons_integration.js', assert_returncode=5)
self.assertContained('If you see this - the world is all right!', output)
@unittest.skipIf(not scons_path, 'scons not found in PATH')
@with_env_modify({'EMSCRIPTEN_TOOLPATH': path_from_root('tools', 'scons', 'site_scons'),
'EMSCRIPTEN_ROOT': path_from_root()})
def test_emscons(self):
# uses the emscons wrapper which requires EMSCRIPTEN_TOOLPATH to find
# site_scons
shutil.copytree(path_from_root('tests', 'scons'), 'test')
with chdir('test'):
run_process([path_from_root('emscons'), 'scons'])
output = run_js('scons_integration.js', assert_returncode=5)
self.assertContained('If you see this - the world is all right!', output)
def test_embind_fail(self):
out = self.expect_fail([EMCC, path_from_root('tests', 'embind', 'test_unsigned.cpp')])
self.assertContained("undefined symbol: _embind_register_function", out)
@is_slow_test
def test_embind(self):
environ = os.environ.copy()
environ['EMCC_CLOSURE_ARGS'] = environ.get('EMCC_CLOSURE_ARGS', '') + " --externs " + pipes.quote(path_from_root('tests', 'embind', 'underscore-externs.js'))
test_cases = [
(['--bind']),
(['--bind', '-O1']),
(['--bind', '-O2']),
(['--bind', '-O2', '-s', 'ALLOW_MEMORY_GROWTH=1', path_from_root('tests', 'embind', 'isMemoryGrowthEnabled=true.cpp')]),
]
without_utf8_args = ['-s', 'EMBIND_STD_STRING_IS_UTF8=0']
test_cases_without_utf8 = []
for args in test_cases:
test_cases_without_utf8.append((args + without_utf8_args))
test_cases += test_cases_without_utf8
test_cases.extend([(args[:] + ['-s', 'DYNAMIC_EXECUTION=0']) for args in test_cases])
# closure compiler doesn't work with DYNAMIC_EXECUTION=0
test_cases.append((['--bind', '-O2', '--closure', '1']))
for args in test_cases:
print(args)
self.clear()
testFiles = [
path_from_root('tests', 'embind', 'underscore-1.4.2.js'),
path_from_root('tests', 'embind', 'imvu_test_adapter.js'),
path_from_root('tests', 'embind', 'embind.test.js'),
]
run_process(
[EMCC, path_from_root('tests', 'embind', 'embind_test.cpp'),
'--pre-js', path_from_root('tests', 'embind', 'test.pre.js'),
'--post-js', path_from_root('tests', 'embind', 'test.post.js'),
'-s', 'WASM_ASYNC_COMPILATION=0',
'-s', 'IN_TEST_HARNESS=1'] + args,
env=environ)
if 'DYNAMIC_EXECUTION=0' in args:
with open('a.out.js') as js_binary_file:
js_binary_str = js_binary_file.read()
self.assertNotContained('new Function(', js_binary_str)
self.assertNotContained('eval(', js_binary_str)
with open('a.out.js', 'ab') as f:
for tf in testFiles:
f.write(open(tf, 'rb').read())
output = run_js('a.out.js', stdout=PIPE, stderr=PIPE, full_output=True)
self.assertNotContained('FAIL', output)
def test_emconfig(self):
output = run_process([emconfig, 'LLVM_ROOT'], stdout=PIPE).stdout.strip()
self.assertEqual(output, LLVM_ROOT)
# EMSCRIPTEN_ROOT is kind of special since it should always report the location of em-config
# itself (it's not configurable via the config file but driven by the location of arg0)
output = run_process([emconfig, 'EMSCRIPTEN_ROOT'], stdout=PIPE).stdout.strip()
self.assertEqual(output, os.path.dirname(emconfig))
invalid = 'Usage: em-config VAR_NAME'
# Don't accept variables that do not exist
output = self.expect_fail([emconfig, 'VAR_WHICH_DOES_NOT_EXIST']).strip()
self.assertEqual(output, invalid)
# Don't accept no arguments
output = self.expect_fail([emconfig]).strip()
self.assertEqual(output, invalid)
# Don't accept more than one variable
output = self.expect_fail([emconfig, 'LLVM_ROOT', 'EMCC']).strip()
self.assertEqual(output, invalid)
# Don't accept arbitrary python code
output = self.expect_fail([emconfig, 'sys.argv[1]']).strip()
self.assertEqual(output, invalid)
def test_link_s(self):
# -s OPT=VALUE can conflict with -s as a linker option. We warn and ignore
create_test_file('main.cpp', r'''
extern "C" {
void something();
}
int main() {
something();
return 0;
}
''')
create_test_file('supp.cpp', r'''
#include <stdio.h>
extern "C" {
void something() {
printf("yello\n");
}
}
''')
run_process([EMCC, 'main.cpp', '-o', 'main.o'])
run_process([EMCC, 'supp.cpp', '-o', 'supp.o'])
run_process([EMCC, 'main.o', '-s', 'supp.o', '-s', 'SAFE_HEAP=1'])
self.assertContained('yello', run_js('a.out.js'))
# Check that the valid -s option had an effect
self.assertContained('SAFE_HEAP', open('a.out.js').read())
def test_conftest_s_flag_passing(self):
create_test_file('conftest.c', r'''
int main() {
return 0;
}
''')
with env_modify({'EMMAKEN_JUST_CONFIGURE': '1'}):
cmd = [EMCC, '-s', 'ASSERTIONS=1', 'conftest.c', '-o', 'conftest']
output = run_process(cmd, stderr=PIPE)
self.assertNotContained('emcc: warning: treating -s as linker option', output.stderr)
self.assertExists('conftest')
def test_file_packager(self):
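# file_packager should reject --preload paths that resolve above the current
# directory, but accept relative paths that stay below it.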
ensure_dir('subdir')
create_test_file('data1.txt', 'data1')
os.chdir('subdir')
create_test_file('data2.txt', 'data2')
# relative path to below the current dir is invalid
stderr = self.expect_fail([PYTHON, FILE_PACKAGER, 'test.data', '--preload', '../data1.txt'])
self.assertContained('below the current directory', stderr)
# relative path that ends up under us is cool
proc = run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', '../subdir/data2.txt'], stderr=PIPE, stdout=PIPE)
self.assertGreater(len(proc.stdout), 0)
self.assertNotContained('below the current directory', proc.stderr)
# direct path leads to the same code being generated - relative path does not make us do anything different
proc2 = run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data2.txt'], stderr=PIPE, stdout=PIPE)
self.assertGreater(len(proc2.stdout), 0)
self.assertNotContained('below the current directory', proc2.stderr)
def clean(txt):
lines = txt.splitlines()
lines = [l for l in lines if 'PACKAGE_UUID' not in l and 'loadPackage({' not in l]
return ''.join(lines)
self.assertTextDataIdentical(clean(proc.stdout), clean(proc2.stdout))
# verify the '--separate-metadata' option produces a separate metadata file
os.chdir('..')
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data1.txt', '--preload', 'subdir/data2.txt', '--js-output=immutable.js', '--separate-metadata'])
self.assertExists('immutable.js.metadata')
# verify the output JS file is not touched when the metadata is separated
orig_timestamp = os.path.getmtime('immutable.js')
orig_content = open('immutable.js').read()
# ensure some time passes before running the packager again so that if it does touch the
# js file it will end up with a different timestamp.
time.sleep(1.0)
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data1.txt', '--preload', 'subdir/data2.txt', '--js-output=immutable.js', '--separate-metadata'])
# assert both file content and timestamp are the same as reference copy
self.assertTextDataIdentical(orig_content, open('immutable.js').read())
self.assertEqual(orig_timestamp, os.path.getmtime('immutable.js'))
# verify the content of metadata file is correct
with open('immutable.js.metadata') as f:
metadata = json.load(f)
self.assertEqual(len(metadata['files']), 2)
assert metadata['files'][0]['start'] == 0 and metadata['files'][0]['end'] == len('data1') and metadata['files'][0]['filename'] == '/data1.txt'
assert metadata['files'][1]['start'] == len('data1') and metadata['files'][1]['end'] == len('data1') + len('data2') and metadata['files'][1]['filename'] == '/subdir/data2.txt'
assert metadata['remote_package_size'] == len('data1') + len('data2')
# can only assert the uuid format is correct; the uuid's value is expected to differ between invocations
uuid.UUID(metadata['package_uuid'], version=4)
def test_file_packager_unicode(self):
unicode_name = 'unicode…☃'
try:
ensure_dir(unicode_name)
except OSError:
print("we failed to even create a unicode dir, so on this OS, we can't test this")
return
full = os.path.join(unicode_name, 'data.txt')
create_test_file(full, 'data')
proc = run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', full], stdout=PIPE, stderr=PIPE)
assert len(proc.stdout), proc.stderr
assert unicode_name in proc.stdout, proc.stdout
print(len(proc.stderr))
def test_file_packager_mention_FORCE_FILESYSTEM(self):
MESSAGE = 'Remember to build the main file with -s FORCE_FILESYSTEM=1 so that it includes support for loading this file package'
create_test_file('data.txt', 'data1')
# mention when running standalone
err = run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=PIPE, stderr=PIPE).stderr
self.assertContained(MESSAGE, err)
# do not mention from emcc
err = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '--preload-file', 'data.txt'], stdout=PIPE, stderr=PIPE).stderr
self.assertEqual(len(err), 0)
def test_headless(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'example.png')
run_process([EMCC, path_from_root('tests', 'sdl_headless.c'), '-s', 'HEADLESS=1'])
output = run_js('a.out.js', stderr=PIPE)
assert '''Init: 0
Font: 0x1
Sum: 0
you should see two lines of text in different colors and a blue rectangle
SDL_Quit called (and ignored)
done.
''' in output, output
def test_preprocess(self):
# Pass -Werror to prevent regressions such as https://github.com/emscripten-core/emscripten/pull/9661
out = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-E', '-Werror'], stdout=PIPE).stdout
self.assertNotExists('a.out.js')
self.assertNotExists('a.out')
# Test explicitly that the output contains a line typically written by the preprocessor.
self.assertContained('# 1 ', out)
self.assertContained('hello_world.c"', out)
self.assertContained('printf("hello, world!', out)
def test_syntax_only_valid(self):
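# -fsyntax-only should check the source without producing a.out.js.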
result = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-fsyntax-only'], stdout=PIPE, stderr=STDOUT)
self.assertEqual(result.stdout, '')
self.assertNotExists('a.out.js')
def test_syntax_only_invalid(self):
create_test_file('src.c', 'int main() {')
result = run_process([EMCC, 'src.c', '-fsyntax-only'], stdout=PIPE, check=False, stderr=STDOUT)
self.assertNotEqual(result.returncode, 0)
self.assertContained("src.c:1:13: error: expected '}'", result.stdout)
self.assertNotExists('a.out.js')
def test_demangle(self):
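# With DEMANGLE_SUPPORT=1, demangle() should turn Itanium-mangled names back into
# readable C++ signatures.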
create_test_file('src.cpp', '''
#include <stdio.h>
#include <emscripten.h>
void two(char c) {
EM_ASM(out(stackTrace()));
}
void one(int x) {
two(x % 17);
}
int main() {
EM_ASM(out(demangle('__Znwm'))); // check for no aborts
EM_ASM(out(demangle('_main')));
EM_ASM(out(demangle('__Z2f2v')));
EM_ASM(out(demangle('__Z12abcdabcdabcdi')));
EM_ASM(out(demangle('__ZL12abcdabcdabcdi')));
EM_ASM(out(demangle('__Z4testcsifdPvPiPc')));
EM_ASM(out(demangle('__ZN4test5moarrEcslfdPvPiPc')));
EM_ASM(out(demangle('__ZN4Waka1f12a234123412345pointEv')));
EM_ASM(out(demangle('__Z3FooIiEvv')));
EM_ASM(out(demangle('__Z3FooIidEvi')));
EM_ASM(out(demangle('__ZN3Foo3BarILi5EEEvv')));
EM_ASM(out(demangle('__ZNK10__cxxabiv120__si_class_type_info16search_below_dstEPNS_19__dynamic_cast_infoEPKvib')));
EM_ASM(out(demangle('__Z9parsewordRPKciRi')));
EM_ASM(out(demangle('__Z5multiwahtjmxyz')));
EM_ASM(out(demangle('__Z1aA32_iPA5_c')));
EM_ASM(out(demangle('__ZN21FWakaGLXFleeflsMarfooC2EjjjPKvbjj')));
EM_ASM(out(demangle('__ZN5wakaw2Cm10RasterBaseINS_6watwat9PolocatorEE8merbine1INS4_2OREEEvPKjj'))); // we get this wrong, but at least emit a '?'
one(17);
return 0;
}
''')
# full demangle support
run_process([EMCC, 'src.cpp', '-s', 'DEMANGLE_SUPPORT=1'])
output = run_js('a.out.js')
self.assertContained('''operator new(unsigned long)
_main
f2()
abcdabcdabcd(int)
abcdabcdabcd(int)
test(char, short, int, float, double, void*, int*, char*)
test::moarr(char, short, long, float, double, void*, int*, char*)
Waka::f::a23412341234::point()
void Foo<int>()
void Foo<int, double>(int)
void Foo::Bar<5>()
__cxxabiv1::__si_class_type_info::search_below_dst(__cxxabiv1::__dynamic_cast_info*, void const*, int, bool) const
parseword(char const*&, int, int&)
multi(wchar_t, signed char, unsigned char, unsigned short, unsigned int, unsigned long, long long, unsigned long long, ...)
a(int [32], char (*) [5])
FWakaGLXFleeflsMarfoo::FWakaGLXFleeflsMarfoo(unsigned int, unsigned int, unsigned int, void const*, bool, unsigned int, unsigned int)
void wakaw::Cm::RasterBase<wakaw::watwat::Polocator>::merbine1<wakaw::Cm::RasterBase<wakaw::watwat::Polocator>::OR>(unsigned int const*, unsigned int)
''', output)
# test for multiple functions in one stack trace
run_process([EMCC, 'src.cpp', '-s', 'DEMANGLE_SUPPORT=1', '-g'])
output = run_js('a.out.js')
self.assertIn('one(int)', output)
self.assertIn('two(char)', output)
def test_demangle_cpp(self):
create_test_file('src.cpp', '''
#include <stdio.h>
#include <emscripten.h>
#include <cxxabi.h>
#include <assert.h>
int main() {
char out[256];
int status = 1;
size_t length = 255;
abi::__cxa_demangle("_ZN4Waka1f12a234123412345pointEv", out, &length, &status);
assert(status == 0);
printf("%s\\n", out);
return 0;
}
''')
run_process([EMCC, 'src.cpp'])
output = run_js('a.out.js')
self.assertContained('Waka::f::a23412341234::point()', output)
# Test that malloc() -> OOM -> abort() -> stackTrace() -> jsStackTrace() -> demangleAll() -> demangle() -> malloc()
# cycle will not produce an infinite loop.
def test_demangle_malloc_infinite_loop_crash(self):
run_process([EMXX, path_from_root('tests', 'malloc_demangle_infinite_loop.cpp'), '-g', '-s', 'ABORTING_MALLOC=1', '-s', 'DEMANGLE_SUPPORT=1'])
output = run_js('a.out.js', assert_returncode=None, stderr=PIPE)
if output.count('Cannot enlarge memory arrays') > 2:
print(output)
assert(output.count('Cannot enlarge memory arrays') <= 2)
def test_module_exports_with_closure(self):
# This test checks that module.exports is retained when JavaScript is minified by compiling with --closure 1
# This is important because if module.exports is not present the Module object will not be visible to node.js
# Run with ./runner.py other.test_module_exports_with_closure
# First make sure test.js isn't present.
self.clear()
# compile with -O2 --closure 0
run_process([EMCC, path_from_root('tests', 'Module-exports', 'test.c'),
'-o', 'test.js', '-O2', '--closure', '0',
'--pre-js', path_from_root('tests', 'Module-exports', 'setup.js'),
'-s', 'EXPORTED_FUNCTIONS=["_bufferTest"]',
'-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["ccall", "cwrap"]',
'-s', 'WASM_ASYNC_COMPILATION=0'])
# Check that compilation was successful
self.assertExists('test.js')
test_js_closure_0 = open('test.js').read()
# Check that test.js compiled with --closure 0 contains "module['exports'] = Module;"
assert ("module['exports'] = Module;" in test_js_closure_0) or ('module["exports"]=Module' in test_js_closure_0) or ('module["exports"] = Module;' in test_js_closure_0)
# Check that main.js (which requires test.js) completes successfully when run in node.js
# in order to check that the exports are indeed functioning correctly.
shutil.copyfile(path_from_root('tests', 'Module-exports', 'main.js'), 'main.js')
if NODE_JS in JS_ENGINES:
self.assertContained('bufferTest finished', run_js('main.js'))
# Delete test.js again and check it's gone.
try_delete('test.js')
self.assertNotExists('test.js')
# compile with -O2 --closure 1
run_process([EMCC, path_from_root('tests', 'Module-exports', 'test.c'),
'-o', 'test.js', '-O2', '--closure', '1',
'--pre-js', path_from_root('tests', 'Module-exports', 'setup.js'),
'-s', 'EXPORTED_FUNCTIONS=["_bufferTest"]',
'-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["ccall", "cwrap"]',
'-s', 'WASM_ASYNC_COMPILATION=0'])
# Check that compilation was successful
self.assertExists('test.js')
test_js_closure_1 = open('test.js').read()
# Check that test.js compiled with --closure 1 contains "module.exports", we want to verify that
# "module['exports']" got minified to "module.exports" when compiling with --closure 1
self.assertContained("module.exports", test_js_closure_1)
# Check that main.js (which requires test.js) completes successfully when run in node.js
# in order to check that the exports are indeed functioning correctly.
if NODE_JS in JS_ENGINES:
self.assertContained('bufferTest finished', run_js('main.js', engine=NODE_JS))
def test_node_catch_exit(self):
# Test that in node.js exceptions are not caught if NODEJS_EXIT_CATCH=0
if NODE_JS not in JS_ENGINES:
return
create_test_file('count.c', '''
#include <string.h>
int count(const char *str) {
return (int)strlen(str);
}
''')
create_test_file('index.js', '''
const count = require('./count.js');
console.log(xxx); //< here is the ReferenceError
''')
reference_error_text = 'console.log(xxx); //< here is the ReferenceError'
run_process([EMCC, 'count.c', '-o', 'count.js'])
# Check that the ReferenceError is caught and rethrown and thus the original error line is masked
self.assertNotContained(reference_error_text,
run_js('index.js', engine=NODE_JS, stderr=STDOUT, assert_returncode=None))
run_process([EMCC, 'count.c', '-o', 'count.js', '-s', 'NODEJS_CATCH_EXIT=0'])
# Check that the ReferenceError is not caught, so we see the error properly
self.assertContained(reference_error_text,
run_js('index.js', engine=NODE_JS, stderr=STDOUT, assert_returncode=None))
def test_extra_exported_methods(self):
# Test with node.js that the EXTRA_EXPORTED_RUNTIME_METHODS setting is considered by libraries
if NODE_JS not in JS_ENGINES:
self.skipTest("node engine required for this test")
create_test_file('count.c', '''
#include <string.h>
int count(const char *str) {
return (int)strlen(str);
}
''')
create_test_file('index.js', '''
const count = require('./count.js');
console.log(count.FS_writeFile);
''')
reference_error_text = 'undefined'
run_process([EMCC, 'count.c', '-s', 'FORCE_FILESYSTEM=1', '-s',
'EXTRA_EXPORTED_RUNTIME_METHODS=["FS_writeFile"]', '-o', 'count.js'])
# Check that the Module.FS_writeFile exists
self.assertNotContained(reference_error_text,
run_js('index.js', engine=NODE_JS, stderr=STDOUT, assert_returncode=None))
run_process([EMCC, 'count.c', '-s', 'FORCE_FILESYSTEM=1', '-o', 'count.js'])
# Check that the Module.FS_writeFile is not exported
self.assertContained(reference_error_text,
run_js('index.js', engine=NODE_JS, stderr=STDOUT, assert_returncode=None))
def test_fs_stream_proto(self):
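# A FILE* obtained via fdopen() on a descriptor from open() should support
# fseek/ftell, used here to report the size of the embedded source file.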
open('src.cpp', 'wb').write(br'''
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <errno.h>
#include <string.h>
int main()
{
long file_size = 0;
int h = open("src.cpp", O_RDONLY, 0666);
if (0 != h)
{
FILE* file = fdopen(h, "rb");
if (0 != file)
{
fseek(file, 0, SEEK_END);
file_size = ftell(file);
fseek(file, 0, SEEK_SET);
}
else
{
printf("fdopen() failed: %s\n", strerror(errno));
return 10;
}
close(h);
printf("File size: %ld\n", file_size);
}
else
{
printf("open() failed: %s\n", strerror(errno));
return 10;
}
return 0;
}
''')
run_process([EMCC, 'src.cpp', '--embed-file', 'src.cpp'])
for engine in JS_ENGINES:
out = run_js('a.out.js', engine=engine, stderr=PIPE, full_output=True)
self.assertContained('File size: 724', out)
def test_proxyfs(self):
# This test supposes that 3 different programs share the same directory and files.
# The same JS object must not be used for each of them,
# but node's 'require' function caches JS objects.
# If we just load the same js file multiple times, as in the following code,
# these programs (m0, m1, m2) would share the same JS object.
#
# var m0 = require('./proxyfs_test.js');
# var m1 = require('./proxyfs_test.js');
# var m2 = require('./proxyfs_test.js');
#
# To separate the JS objects for each of them, the following 'require' calls use different js files.
#
# var m0 = require('./proxyfs_test.js');
# var m1 = require('./proxyfs_test1.js');
# var m2 = require('./proxyfs_test2.js');
#
create_test_file('proxyfs_test_main.js', r'''
var m0 = require('./proxyfs_test.js');
var m1 = require('./proxyfs_test1.js');
var m2 = require('./proxyfs_test2.js');
var section;
function print(str){
process.stdout.write(section+":"+str+":");
}
m0.FS.mkdir('/working');
m0.FS.mount(m0.PROXYFS,{root:'/',fs:m1.FS},'/working');
m0.FS.mkdir('/working2');
m0.FS.mount(m0.PROXYFS,{root:'/',fs:m2.FS},'/working2');
section = "child m1 reads and writes local file.";
print("m1 read embed");
m1.ccall('myreade','number',[],[]);
print("m1 write");console.log("");
m1.ccall('mywrite0','number',['number'],[1]);
print("m1 read");
m1.ccall('myread0','number',[],[]);
section = "child m2 reads and writes local file.";
print("m2 read embed");
m2.ccall('myreade','number',[],[]);
print("m2 write");console.log("");
m2.ccall('mywrite0','number',['number'],[2]);
print("m2 read");
m2.ccall('myread0','number',[],[]);
section = "child m1 reads local file.";
print("m1 read");
m1.ccall('myread0','number',[],[]);
section = "parent m0 reads and writes local and children's file.";
print("m0 read embed");
m0.ccall('myreade','number',[],[]);
print("m0 read m1");
m0.ccall('myread1','number',[],[]);
print("m0 read m2");
m0.ccall('myread2','number',[],[]);
section = "m0,m1 and m2 verify local files.";
print("m0 write");console.log("");
m0.ccall('mywrite0','number',['number'],[0]);
print("m0 read");
m0.ccall('myread0','number',[],[]);
print("m1 read");
m1.ccall('myread0','number',[],[]);
print("m2 read");
m2.ccall('myread0','number',[],[]);
print("m0 read embed");
m0.ccall('myreade','number',[],[]);
print("m1 read embed");
m1.ccall('myreade','number',[],[]);
print("m2 read embed");
m2.ccall('myreade','number',[],[]);
section = "parent m0 writes and reads children's files.";
print("m0 write m1");console.log("");
m0.ccall('mywrite1','number',[],[]);
print("m0 read m1");
m0.ccall('myread1','number',[],[]);
print("m0 write m2");console.log("");
m0.ccall('mywrite2','number',[],[]);
print("m0 read m2");
m0.ccall('myread2','number',[],[]);
print("m1 read");
m1.ccall('myread0','number',[],[]);
print("m2 read");
m2.ccall('myread0','number',[],[]);
print("m0 read m0");
m0.ccall('myread0','number',[],[]);
''')
create_test_file('proxyfs_pre.js', r'''
if (typeof Module === 'undefined') Module = {};
Module["noInitialRun"]=true;
noExitRuntime=true;
''')
create_test_file('proxyfs_embed.txt', r'''test
''')
create_test_file('proxyfs_test.c', r'''
#include <stdio.h>
int
mywrite1(){
FILE* out = fopen("/working/hoge.txt","w");
fprintf(out,"test1\n");
fclose(out);
return 0;
}
int
myread1(){
FILE* in = fopen("/working/hoge.txt","r");
char buf[1024];
int len;
if(in==NULL)
printf("open failed\n");
while(! feof(in)){
if(fgets(buf,sizeof(buf),in)==buf){
printf("%s",buf);
}
}
fclose(in);
return 0;
}
int
mywrite2(){
FILE* out = fopen("/working2/hoge.txt","w");
fprintf(out,"test2\n");
fclose(out);
return 0;
}
int
myread2(){
{
FILE* in = fopen("/working2/hoge.txt","r");
char buf[1024];
int len;
if(in==NULL)
printf("open failed\n");
while(! feof(in)){
if(fgets(buf,sizeof(buf),in)==buf){
printf("%s",buf);
}
}
fclose(in);
}
return 0;
}
int
mywrite0(int i){
FILE* out = fopen("hoge.txt","w");
fprintf(out,"test0_%d\n",i);
fclose(out);
return 0;
}
int
myread0(){
{
FILE* in = fopen("hoge.txt","r");
char buf[1024];
int len;
if(in==NULL)
printf("open failed\n");
while(! feof(in)){
if(fgets(buf,sizeof(buf),in)==buf){
printf("%s",buf);
}
}
fclose(in);
}
return 0;
}
int
myreade(){
{
FILE* in = fopen("proxyfs_embed.txt","r");
char buf[1024];
int len;
if(in==NULL)
printf("open failed\n");
while(! feof(in)){
if(fgets(buf,sizeof(buf),in)==buf){
printf("%s",buf);
}
}
fclose(in);
}
return 0;
}
''')
run_process([EMCC,
'-o', 'proxyfs_test.js', 'proxyfs_test.c',
'--embed-file', 'proxyfs_embed.txt', '--pre-js', 'proxyfs_pre.js',
'-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["ccall", "cwrap"]',
'-lproxyfs.js',
'-s', 'WASM_ASYNC_COMPILATION=0',
'-s', 'MAIN_MODULE=1',
'-s', 'EXPORT_ALL=1'])
# The following shutil.copyfile calls simply prevent node.js 'require' from returning a cached js-object.
# See https://nodejs.org/api/modules.html
shutil.copyfile('proxyfs_test.js', 'proxyfs_test1.js')
shutil.copyfile('proxyfs_test.js', 'proxyfs_test2.js')
out = run_js('proxyfs_test_main.js')
section = "child m1 reads and writes local file."
self.assertContained(section + ":m1 read embed:test", out)
self.assertContained(section + ":m1 write:", out)
self.assertContained(section + ":m1 read:test0_1", out)
section = "child m2 reads and writes local file."
self.assertContained(section + ":m2 read embed:test", out)
self.assertContained(section + ":m2 write:", out)
self.assertContained(section + ":m2 read:test0_2", out)
section = "child m1 reads local file."
self.assertContained(section + ":m1 read:test0_1", out)
section = "parent m0 reads and writes local and children's file."
self.assertContained(section + ":m0 read embed:test", out)
self.assertContained(section + ":m0 read m1:test0_1", out)
self.assertContained(section + ":m0 read m2:test0_2", out)
section = "m0,m1 and m2 verify local files."
self.assertContained(section + ":m0 write:", out)
self.assertContained(section + ":m0 read:test0_0", out)
self.assertContained(section + ":m1 read:test0_1", out)
self.assertContained(section + ":m2 read:test0_2", out)
self.assertContained(section + ":m0 read embed:test", out)
self.assertContained(section + ":m1 read embed:test", out)
self.assertContained(section + ":m2 read embed:test", out)
section = "parent m0 writes and reads children's files."
self.assertContained(section + ":m0 write m1:", out)
self.assertContained(section + ":m0 read m1:test1", out)
self.assertContained(section + ":m0 write m2:", out)
self.assertContained(section + ":m0 read m2:test2", out)
self.assertContained(section + ":m1 read:test1", out)
self.assertContained(section + ":m2 read:test2", out)
self.assertContained(section + ":m0 read m0:test0_0", out)
def test_dependency_file(self):
# Issue 1732: -MMD (and friends) create dependency files that need to be
# copied from the temporary directory.
create_test_file('test.cpp', r'''
#include "test.hpp"
void my_function()
{
}
''')
create_test_file('test.hpp', r'''
void my_function();
''')
run_process([EMCC, '-MMD', '-c', 'test.cpp', '-o', 'test.o'])
self.assertExists('test.d')
deps = open('test.d').read()
# Look for ': ' instead of just ':' to not confuse C:\path\ notation with make "target: deps" rule. Not perfect, but good enough for this test.
head, tail = deps.split(': ', 1)
assert 'test.o' in head, 'Invalid dependency target'
assert 'test.cpp' in tail and 'test.hpp' in tail, 'Invalid dependencies generated'
def test_dependency_file_2(self):
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'a.c')
run_process([EMCC, 'a.c', '-MMD', '-MF', 'test.d', '-c'])
self.assertContained(open('test.d').read(), 'a.o: a.c\n')
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'a.c')
run_process([EMCC, 'a.c', '-MMD', '-MF', 'test2.d', '-c', '-o', 'test.o'])
self.assertContained(open('test2.d').read(), 'test.o: a.c\n')
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'a.c')
ensure_dir('obj')
run_process([EMCC, 'a.c', '-MMD', '-MF', 'test3.d', '-c', '-o', 'obj/test.o'])
self.assertContained(open('test3.d').read(), 'obj/test.o: a.c\n')
def test_js_lib_quoted_key(self):
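# A JS library may use quoted object keys (like '<' or names containing spaces); the build should still succeed and run.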
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
__internal_data:{
'<' : 0,
'white space' : 1
},
printf__deps: ['__internal_data', 'fprintf']
});
''')
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '--js-library', 'lib.js'])
self.assertContained('hello, world!', run_js('a.out.js'))
def test_js_lib_exported(self):
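# A JS library function listed in EXPORTED_FUNCTIONS should be callable from C and also reachable from JS as Module['_jslibfunc'].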
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
jslibfunc: function(x) { return 2 * x }
});
''')
create_test_file('src.cpp', r'''
#include <emscripten.h>
#include <stdio.h>
extern "C" int jslibfunc(int x);
int main() {
printf("c calling: %d\n", jslibfunc(6));
EM_ASM({
out('js calling: ' + Module['_jslibfunc'](5) + '.');
});
}
''')
run_process([EMCC, 'src.cpp', '--js-library', 'lib.js', '-s', 'EXPORTED_FUNCTIONS=["_main", "_jslibfunc"]'])
self.assertContained('c calling: 12\njs calling: 10.', run_js('a.out.js'))
def test_js_lib_primitive_dep(self):
# Verify that primitive dependencies aren't generated in the output JS.
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
foo__deps: ['Int8Array', 'NonPrimitive'],
foo: function() {},
});
''')
create_test_file('main.c', r'''
void foo(void);
int main(int argc, char** argv) {
foo();
return 0;
}
''')
run_process([EMCC, '-O0', 'main.c', '--js-library', 'lib.js', '-s', 'WARN_ON_UNDEFINED_SYMBOLS=0'])
generated = open('a.out.js').read()
self.assertContained('missing function: NonPrimitive', generated)
self.assertNotContained('missing function: Int8Array', generated)
def test_js_lib_using_asm_lib(self):
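# jslibfunc depends (via __deps) on asmlibfunc, a library function marked __asm/__sig so it is compiled into the module and called as _asmlibfunc.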
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
jslibfunc__deps: ['asmlibfunc'],
jslibfunc: function(x) {
return 2 * _asmlibfunc(x);
},
asmlibfunc__asm: true,
asmlibfunc__sig: 'ii',
asmlibfunc: function(x) {
x = x | 0;
return x + 1 | 0;
}
});
''')
create_test_file('src.cpp', r'''
#include <stdio.h>
extern "C" int jslibfunc(int x);
int main() {
printf("c calling: %d\n", jslibfunc(6));
}
''')
run_process([EMCC, 'src.cpp', '--js-library', 'lib.js'])
self.assertContained('c calling: 14\n', run_js('a.out.js'))
def test_EMCC_BUILD_DIR(self):
# EMCC_BUILD_DIR env var contains the dir we were building in, when running the js compiler (e.g. when
# running a js library). We force the cwd to be src/ for technical reasons, so this lets you find out
# where you were.
create_test_file('lib.js', r'''
printErr('dir was ' + process.env.EMCC_BUILD_DIR);
''')
err = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '--js-library', 'lib.js'], stderr=PIPE).stderr
self.assertContained('dir was ' + os.path.realpath(os.path.normpath(self.get_dir())), err)
def test_float_h(self):
process = run_process([EMCC, path_from_root('tests', 'float+.c')], stdout=PIPE, stderr=PIPE)
assert process.returncode == 0, 'float.h should agree with our system: ' + process.stdout + '\n\n\n' + process.stderr
def test_output_is_dir(self):
ensure_dir('out_dir')
err = self.expect_fail([EMCC, '-c', path_from_root('tests', 'hello_world.c'), '-o', 'out_dir/'])
self.assertContained('error: unable to open output file', err)
def test_default_obj_ext(self):
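# Compiling with -c should produce a .o file by default; --default-obj-ext overrides the extension.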
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.c')])
self.assertExists('hello_world.o')
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.c'), '--default-obj-ext', 'obj'])
self.assertExists('hello_world.obj')
def test_doublestart_bug(self):
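# Adding and immediately removing a run dependency in preRun must not cause main() to run twice.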
create_test_file('code.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
void main_loop(void) {
static int cnt = 0;
if (++cnt >= 10) emscripten_cancel_main_loop();
}
int main(void) {
printf("This should only appear once.\n");
emscripten_set_main_loop(main_loop, 10, 0);
return 0;
}
''')
create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
run_process([EMCC, 'code.cpp', '--pre-js', 'pre.js'])
output = run_js('a.out.js')
assert output.count('This should only appear once.') == 1, output
def test_module_print(self):
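# A user-supplied Module.print should receive the program's stdout; here it throws so we can tell it was actually invoked.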
create_test_file('code.cpp', r'''
#include <stdio.h>
int main(void) {
printf("123456789\n");
return 0;
}
''')
create_test_file('pre.js', r'''
var Module = { print: function(x) { throw '<{(' + x + ')}>' } };
''')
run_process([EMCC, 'code.cpp', '--pre-js', 'pre.js'])
output = run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None)
assert r'<{(123456789)}>' in output, output
def test_precompiled_headers_warnings(self):
# Check that we don't get any underlying warnings from clang; this can happen if we
# pass any link flags when building a pch.
create_test_file('header.h', '#define X 5\n')
run_process([EMCC, '-Werror', '-xc++-header', 'header.h'])
def test_precompiled_headers(self):
for suffix in ['gch', 'pch']:
print(suffix)
self.clear()
create_test_file('header.h', '#define X 5\n')
run_process([EMCC, '-xc++-header', 'header.h', '-c'])
self.assertExists('header.h.gch') # default output is gch
if suffix != 'gch':
run_process([EMCC, '-xc++-header', 'header.h', '-o', 'header.h.' + suffix])
self.assertBinaryEqual('header.h.gch', 'header.h.' + suffix)
create_test_file('src.cpp', r'''
#include <stdio.h>
int main() {
printf("|%d|\n", X);
return 0;
}
''')
run_process([EMCC, 'src.cpp', '-include', 'header.h'])
output = run_js('a.out.js', stderr=PIPE, full_output=True)
self.assertContained('|5|', output)
# also verify that the gch is actually used
err = run_process([EMCC, 'src.cpp', '-include', 'header.h', '-Xclang', '-print-stats'], stderr=PIPE).stderr
self.assertTextDataContained('*** PCH/Modules Loaded:\nModule: header.h.' + suffix, err)
# and sanity check it is not mentioned when not
try_delete('header.h.' + suffix)
err = run_process([EMCC, 'src.cpp', '-include', 'header.h', '-Xclang', '-print-stats'], stderr=PIPE).stderr
self.assertNotContained('*** PCH/Modules Loaded:\nModule: header.h.' + suffix, err.replace('\r\n', '\n'))
# with specified target via -o
try_delete('header.h.' + suffix)
run_process([EMCC, '-xc++-header', 'header.h', '-o', 'my.' + suffix])
self.assertExists('my.' + suffix)
# -include-pch flag
run_process([EMCC, '-xc++-header', 'header.h', '-o', 'header.h.' + suffix])
run_process([EMCC, 'src.cpp', '-include-pch', 'header.h.' + suffix])
output = run_js('a.out.js')
self.assertContained('|5|', output)
@no_wasm_backend('tests extra fastcomp warnings on unaligned loads/stores, which matter a lot more in asm.js')
def test_warn_unaligned(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
struct packey {
char x;
int y;
double z;
} __attribute__((__packed__));
int main() {
volatile packey p;
p.x = 0;
p.y = 1;
p.z = 2;
return 0;
}
''')
output = run_process([EMCC, 'src.cpp', '-s', 'WASM=0', '-s', 'WARN_UNALIGNED=1', '-g'], stderr=PIPE)
self.assertContained('emcc: warning: unaligned store', output.stderr)
self.assertContained('emcc: warning: unaligned store', output.stderr)
self.assertContained('@line 11 "src.cpp"', output.stderr)
def test_LEGACY_VM_SUPPORT(self):
# when modern features are lacking, we can polyfill them or at least warn
create_test_file('pre.js', 'Math.imul = undefined;')
def test(expected, opts=[]):
print(opts)
result = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '--pre-js', 'pre.js'] + opts, stderr=PIPE, check=False)
if result.returncode == 0:
self.assertContained(expected, run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None))
else:
self.assertContained(expected, result.stderr)
# when legacy is needed, we show an error indicating so
test('build with LEGACY_VM_SUPPORT')
# legacy + disabling wasm works
if self.is_wasm_backend():
return
test('hello, world!', ['-s', 'LEGACY_VM_SUPPORT=1', '-s', 'WASM=0'])
def test_on_abort(self):
expected_output = 'Module.onAbort was called'
def add_on_abort_and_verify(extra=''):
with open('a.out.js') as f:
js = f.read()
with open('a.out.js', 'w') as f:
f.write("var Module = { onAbort: function() { console.log('%s') } };\n" % expected_output)
f.write(extra + '\n')
f.write(js)
self.assertContained(expected_output, run_js('a.out.js', assert_returncode=None))
# test direct abort() C call
create_test_file('src.c', '''
#include <stdlib.h>
int main() {
abort();
}
''')
run_process([EMCC, 'src.c', '-s', 'WASM_ASYNC_COMPILATION=0'])
add_on_abort_and_verify()
# test direct abort() JS call
create_test_file('src.c', '''
#include <emscripten.h>
int main() {
EM_ASM({ abort() });
}
''')
run_process([EMCC, 'src.c', '-s', 'WASM_ASYNC_COMPILATION=0'])
add_on_abort_and_verify()
# test throwing in an abort handler, and catching that
create_test_file('src.c', '''
#include <emscripten.h>
int main() {
EM_ASM({
try {
out('first');
abort();
} catch (e) {
out('second');
abort();
throw e;
}
});
}
''')
run_process([EMCC, 'src.c', '-s', 'WASM_ASYNC_COMPILATION=0'])
with open('a.out.js') as f:
js = f.read()
with open('a.out.js', 'w') as f:
f.write("var Module = { onAbort: function() { console.log('%s'); throw 're-throw'; } };\n" % expected_output)
f.write(js)
out = run_js('a.out.js', stderr=STDOUT, assert_returncode=None)
print(out)
self.assertContained(expected_output, out)
self.assertContained('re-throw', out)
self.assertContained('first', out)
self.assertContained('second', out)
self.assertEqual(out.count(expected_output), 2)
# test an abort during startup
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
os.remove('a.out.wasm') # trigger onAbort by intentionally causing startup to fail
add_on_abort_and_verify()
def test_no_exit_runtime(self):
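# With EXIT_RUNTIME=0 the runtime stays alive after main(), so global destructors ('going away') should not run, and the atexit/destructor code should not even appear in the output.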
create_test_file('code.cpp', r'''
#include <stdio.h>
template<int x>
struct Waste {
Waste() {
printf("coming around %d\n", x);
}
~Waste() {
printf("going away %d\n", x);
}
};
Waste<1> w1;
Waste<2> w2;
Waste<3> w3;
Waste<4> w4;
Waste<5> w5;
int main(int argc, char **argv) {
return 0;
}
''')
for wasm in [0, 1]:
for no_exit in [1, 0]:
for opts in [[], ['-O1'], ['-O2', '-g2'], ['-O2', '-g2', '--llvm-lto', '1']]:
if self.is_wasm_backend() and not wasm:
continue
print(wasm, no_exit, opts)
cmd = [EMCC] + opts + ['code.cpp', '-s', 'EXIT_RUNTIME=' + str(1 - no_exit), '-s', 'WASM=' + str(wasm)]
if wasm:
cmd += ['--profiling-funcs'] # for function names
run_process(cmd)
output = run_js('a.out.js', stderr=PIPE, full_output=True)
src = open('a.out.js').read()
if wasm:
src += '\n' + self.get_wasm_text('a.out.wasm')
exit = 1 - no_exit
print(' exit:', exit, 'opts:', opts)
self.assertContained('coming around', output)
self.assertContainedIf('going away', output, exit)
if not self.is_wasm_backend():
# The wasm backend uses atexit to register destructors when
# constructors are called. There is currently no way to exclude
# these destructors from the wasm binary.
assert ('atexit(' in src) == exit, 'atexit should not appear in src when EXIT_RUNTIME=0'
assert ('_ZN5WasteILi2EED' in src) == exit, 'destructors should not appear if no exit:\n' + src
def test_no_exit_runtime_warnings_flush(self):
# check we warn if there is unflushed info
create_test_file('code.c', r'''
#include <stdio.h>
int main(int argc, char **argv) {
printf("hello\n");
printf("world"); // no newline, not flushed
#if FLUSH
printf("\n");
#endif
}
''')
create_test_file('code.cpp', r'''
#include <iostream>
int main() {
using namespace std;
cout << "hello" << std::endl;
cout << "world"; // no newline, not flushed
#if FLUSH
std::cout << std::endl;
#endif
}
''')
for src in ['code.c', 'code.cpp']:
for no_exit in [0, 1]:
for assertions in [0, 1]:
for flush in [0, 1]:
# TODO: also check FILESYSTEM=0 here. it never worked though, buffered output was not emitted at shutdown
print(src, no_exit, assertions, flush)
cmd = [EMCC, src, '-s', 'EXIT_RUNTIME=%d' % (1 - no_exit), '-s', 'ASSERTIONS=%d' % assertions]
if flush:
cmd += ['-DFLUSH']
run_process(cmd)
output = run_js('a.out.js', stderr=PIPE, full_output=True)
exit = 1 - no_exit
self.assertContained('hello', output)
assert ('world' in output) == (exit or flush), 'unflushed content is shown only when exiting the runtime'
assert (no_exit and assertions and not flush) == ('stdio streams had content in them that was not flushed. you should set EXIT_RUNTIME to 1' in output), 'warning should be shown'
def test_fs_after_main(self):
for args in [[], ['-O1']]:
print(args)
run_process([EMCC, path_from_root('tests', 'fs_after_main.cpp')])
self.assertContained('Test passed.', run_js('a.out.js'))
@no_wasm_backend('tests fastcomp compiler flags')
def test_os_oz(self):
for arg, expect in [
('-O1', '-O1'),
('-O2', '-O3'),
('-Os', '-Os'),
('-Oz', '-Oz'),
('-O3', '-O3'),
]:
print(arg, expect)
proc = run_process([EMCC, '-v', path_from_root('tests', 'hello_world.cpp'), arg], stderr=PIPE)
self.assertContained(expect, proc.stderr)
self.assertContained('hello, world!', run_js('a.out.js'))
def test_oz_size(self):
sizes = {}
for name, args in [
('0', []),
('1', ['-O1']),
('2', ['-O2']),
('s', ['-Os']),
('z', ['-Oz']),
('3', ['-O3']),
]:
print(name, args)
self.clear()
run_process([EMCC, '-c', path_from_root('system', 'lib', 'dlmalloc.c')] + args)
sizes[name] = os.path.getsize('dlmalloc.o')
print(sizes)
opt_min = min(sizes['1'], sizes['2'], sizes['3'], sizes['s'], sizes['z'])
opt_max = max(sizes['1'], sizes['2'], sizes['3'], sizes['s'], sizes['z'])
# opt builds are all fairly close
self.assertLess(opt_max - opt_min, opt_max * 0.1)
# unopt build is quite a bit larger
self.assertGreater(sizes['0'], (1.20 * opt_max))
@no_wasm_backend('relies on ctor evaluation and dtor elimination')
def test_global_inits(self):
create_test_file('inc.h', r'''
#include <stdio.h>
template<int x>
struct Waste {
int state;
Waste() : state(10) {}
void test(int a) {
printf("%d\n", a + state);
}
~Waste() {
printf("going away %d\n", x);
}
};
Waste<3> *getMore();
''')
create_test_file('main.cpp', r'''
#include "inc.h"
Waste<1> mw1;
Waste<2> mw2;
int main(int argc, char **argv) {
printf("argc: %d\n", argc);
mw1.state += argc;
mw2.state += argc;
mw1.test(5);
mw2.test(6);
getMore()->test(0);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include "inc.h"
Waste<3> sw3;
Waste<3> *getMore() {
return &sw3;
}
''')
for opts, has_global in [
(['-O2', '-g', '-s', 'EXIT_RUNTIME=1'], True),
# no-exit-runtime removes the atexits, and then globalgce can work
# its magic to remove the global initializer entirely
(['-O2', '-g'], False),
(['-Os', '-g', '-s', 'EXIT_RUNTIME=1'], True),
(['-Os', '-g'], False),
(['-O2', '-g', '--llvm-lto', '1', '-s', 'EXIT_RUNTIME=1'], True),
(['-O2', '-g', '--llvm-lto', '1'], False),
]:
print(opts, has_global)
run_process([EMCC, 'main.cpp', '-c'] + opts)
run_process([EMCC, 'side.cpp', '-c'] + opts)
run_process([EMCC, 'main.o', 'side.o'] + opts)
run_js('a.out.js', stderr=PIPE, full_output=True)
src = open('a.out.js').read()
self.assertContained('argc: 1\n16\n17\n10\n', run_js('a.out.js'))
self.assertContainedIf('globalCtors', src, has_global)
# Tests that when there are only 0 or 1 global initializers, a grouped global initializer function will not be generated
# (that would just consume excess code size)
def test_no_global_inits(self):
create_test_file('one_global_initializer.cpp', r'''
#include <emscripten.h>
#include <stdio.h>
double t = emscripten_get_now();
int main() { printf("t:%d\n", (int)(t>0)); }
''')
run_process([EMCC, 'one_global_initializer.cpp'])
# Above file has one global initializer, should not generate a redundant grouped globalCtors function
self.assertNotContained('globalCtors', open('a.out.js').read())
self.assertContained('t:1', run_js('a.out.js'))
create_test_file('zero_global_initializers.cpp', r'''
#include <stdio.h>
int main() { printf("t:1\n"); }
''')
run_process([EMCC, 'zero_global_initializers.cpp'])
# Above file should have zero global initializers, should not generate any global initializer functions
self.assertNotContained('__GLOBAL__sub_', open('a.out.js').read())
self.assertContained('t:1', run_js('a.out.js'))
def test_implicit_func(self):
create_test_file('src.c', r'''
#include <stdio.h>
int main()
{
printf("hello %d\n", strnlen("waka", 2)); // Implicit declaration, no header, for strnlen
int (*my_strnlen)(char*, ...) = strnlen;
printf("hello %d\n", my_strnlen("shaka", 2));
return 0;
}
''')
IMPLICIT_WARNING = "warning: implicit declaration of function 'strnlen' is invalid in C99"
IMPLICIT_ERROR = "error: implicit declaration of function 'strnlen' is invalid in C99"
INCOMPATIBLE_WARNINGS = ('warning: incompatible pointer types', 'warning: incompatible function pointer types')
for opts, expected, compile_expected in [
([], None, [IMPLICIT_ERROR]),
(['-Wno-error=implicit-function-declaration'], ['hello '], [IMPLICIT_WARNING]), # turn error into warning
(['-Wno-implicit-function-declaration'], ['hello '], []), # turn error into nothing at all (runtime output is incorrect)
]:
print(opts, expected)
try_delete('a.out.js')
stderr = run_process([EMCC, 'src.c'] + opts, stderr=PIPE, check=False).stderr
for ce in compile_expected + [INCOMPATIBLE_WARNINGS]:
self.assertContained(ce, stderr)
if expected is None:
self.assertNotExists('a.out.js')
else:
output = run_js('a.out.js', stderr=PIPE, full_output=True)
for e in expected:
self.assertContained(e, output)
@no_wasm_backend('uses prebuilt .ll file')
def test_incorrect_static_call(self):
for wasm in [0, 1]:
for opts in [0, 1]:
for asserts in [0, 1]:
extra = []
if opts != 1 - asserts:
extra = ['-s', 'ASSERTIONS=' + str(asserts)]
cmd = [EMCC, path_from_root('tests', 'sillyfuncast2_noasm.ll'), '-O' + str(opts), '-s', 'WASM=' + str(wasm)] + extra
print(opts, asserts, wasm, cmd)
# Should not need to pipe stdout here but binaryen writes to stdout
# when it really should write to stderr.
stderr = run_process(cmd, stdout=PIPE, stderr=PIPE, check=False).stderr
assert ('unexpected' in stderr) == asserts, stderr
assert ("to 'doit'" in stderr) == asserts, stderr
@no_wasm_backend('fastcomp specific')
def test_llvm_lit(self):
grep_path = shared.which('grep')
if not grep_path:
self.skipTest('This test needs the "grep" tool in PATH. If you are using emsdk on Windows, you can obtain it via installing and activating the gnu package.')
llvm_src = get_fastcomp_src_dir()
if not llvm_src:
self.skipTest('llvm source tree not found')
LLVM_LIT = os.path.join(LLVM_ROOT, 'llvm-lit.py')
if not os.path.exists(LLVM_LIT):
LLVM_LIT = os.path.join(LLVM_ROOT, 'llvm-lit')
if not os.path.exists(LLVM_LIT):
self.skipTest('llvm-lit not found; fastcomp directory is most likely prebuilt')
cmd = [PYTHON, LLVM_LIT, '-v', os.path.join(llvm_src, 'test', 'CodeGen', 'JS')]
print(cmd)
run_process(cmd)
@requires_native_clang
def test_bad_triple(self):
# compile a minimal program, with as few dependencies as possible, as
# native building on CI may not always work well
create_test_file('minimal.cpp', 'int main() { return 0; }')
run_process([CLANG_CXX, 'minimal.cpp', '-target', 'x86_64-linux', '-c', '-emit-llvm', '-o', 'a.bc'] + clang_native.get_clang_native_args(), env=clang_native.get_clang_native_env())
# wasm backend will hard fail where as fastcomp only warns
if self.is_wasm_backend():
err = self.expect_fail([EMCC, 'a.bc'])
self.assertContained('machine type must be wasm32', err)
else:
err = run_process([EMCC, 'a.bc'], stderr=PIPE).stderr
assert 'warning' in err or 'WARNING' in err, err
assert 'incorrect target triple' in err or 'different target triples' in err, err
def test_valid_abspath(self):
# Test whether abspath warning appears
abs_include_path = os.path.abspath(self.get_dir())
err = run_process([EMCC, '-I%s' % abs_include_path, '-Wwarn-absolute-paths', path_from_root('tests', 'hello_world.c')], stderr=PIPE).stderr
warning = '-I or -L of an absolute path "-I%s" encountered. If this is to a local system header/library, it may cause problems (local system files make sense for compiling natively on your system, but not necessarily to JavaScript).' % abs_include_path
self.assertContained(warning, err)
# Passing an absolute path to a directory inside the emscripten tree is always ok and should not issue a warning.
abs_include_path = path_from_root('tests')
err = run_process([EMCC, '-I%s' % abs_include_path, '-Wwarn-absolute-paths', path_from_root('tests', 'hello_world.c')], stderr=PIPE).stderr
warning = '-I or -L of an absolute path "-I%s" encountered. If this is to a local system header/library, it may cause problems (local system files make sense for compiling natively on your system, but not necessarily to JavaScript).' % abs_include_path
self.assertNotContained(warning, err)
# Hide warning for this include path
err = run_process([EMCC, '--valid-abspath', abs_include_path, '-I%s' % abs_include_path, '-Wwarn-absolute-paths', path_from_root('tests', 'hello_world.c')], stderr=PIPE).stderr
self.assertNotContained(warning, err)
def test_valid_abspath_2(self):
if WINDOWS:
abs_include_path = 'C:\\nowhere\\at\\all'
else:
abs_include_path = '/nowhere/at/all'
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '--valid-abspath', abs_include_path, '-I%s' % abs_include_path]
print(' '.join(cmd))
run_process(cmd)
self.assertContained('hello, world!', run_js('a.out.js'))
def test_warn_dylibs(self):
shared_suffixes = ['.so', '.dylib', '.dll']
for suffix in ['.o', '.a', '.bc', '.so', '.lib', '.dylib', '.js', '.html']:
print(suffix)
err = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'out' + suffix], stderr=PIPE).stderr
warning = 'When Emscripten compiles to a typical native suffix for shared libraries (.so, .dylib, .dll) then it emits an object file. You should then compile that to an emscripten SIDE_MODULE (using that flag) with suffix .wasm (for wasm) or .js (for asm.js).'
self.assertContainedIf(warning, err, suffix in shared_suffixes)
def test_side_module_without_proper_target(self):
# SIDE_MODULE is only meaningful when compiling to wasm (or js+wasm)
# otherwise, we are just linking bitcode, and should show an error
for wasm in [0, 1]:
if self.is_wasm_backend() and not wasm:
continue
print(wasm)
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'SIDE_MODULE=1', '-o', 'a.so', '-s', 'WASM=%d' % wasm])
self.assertContained('SIDE_MODULE must only be used when compiling to an executable shared library, and not when emitting an object file', stderr)
@no_wasm_backend('asm.js optimizations')
def test_simplify_ifs(self):
def test(src, nums):
create_test_file('src.c', src)
for opts, ifs in [
[['-g2'], nums[0]],
[['--profiling'], nums[1]],
[['--profiling', '-g2'], nums[2]]
]:
print(opts, ifs)
if type(ifs) == int:
ifs = [ifs]
try_delete('a.out.js')
run_process([EMCC, 'src.c', '-O2', '-s', 'WASM=0'] + opts, stdout=PIPE)
src = open('a.out.js').read()
main = src[src.find('function _main'):src.find('\n}', src.find('function _main'))]
actual_ifs = main.count('if (')
assert actual_ifs in ifs, main + ' : ' + str([ifs, actual_ifs])
test(r'''
#include <stdio.h>
#include <string.h>
int main(int argc, char **argv) {
if (argc > 5 && strlen(argv[0]) > 1 && strlen(argv[1]) > 2) printf("halp");
return 0;
}
''', [3, 1, 1])
test(r'''
#include <stdio.h>
#include <string.h>
int main(int argc, char **argv) {
while (argc % 3 == 0) {
if (argc > 5 && strlen(argv[0]) > 1 && strlen(argv[1]) > 2) {
printf("halp");
argc++;
} else {
while (argc > 0) {
printf("%d\n", argc--);
}
}
}
return 0;
}
''', [8, [5, 7], [5, 7]])
test(r'''
#include <stdio.h>
#include <string.h>
int main(int argc, char **argv) {
while (argc % 17 == 0) argc *= 2;
if (argc > 5 && strlen(argv[0]) > 10 && strlen(argv[1]) > 20) {
printf("halp");
argc++;
} else {
printf("%d\n", argc--);
}
while (argc % 17 == 0) argc *= 2;
return argc;
}
''', [6, 3, 3])
test(r'''
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[]) {
if (getenv("A") && getenv("B")) {
printf("hello world\n");
} else {
printf("goodnight moon\n");
}
printf("and that's that\n");
return 0;
}
''', [[3, 2], 1, 1])
test(r'''
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[]) {
if (getenv("A") || getenv("B")) {
printf("hello world\n");
}
printf("and that's that\n");
return 0;
}
''', [[3, 2], 1, 1])
def test_symbol_map(self):
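# --emit-symbol-map should write a minified-name:original-name mapping, and the minified name for 'middle' should be what appears in wasm stack traces.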
UNMINIFIED_HEAP8 = 'var HEAP8 = new global.Int8Array'
UNMINIFIED_MIDDLE = 'function middle'
for opts in [['-O2'], ['-O3']]:
for wasm in [0, 1, 2]:
# -s WASM=2 is a WASM_BACKEND-only feature:
if wasm == 2 and not shared.Settings.WASM_BACKEND:
continue
print(opts, wasm)
self.clear()
create_test_file('src.c', r'''
#include <emscripten.h>
EM_JS(int, run_js, (), {
out(new Error().stack);
return 0;
});
EMSCRIPTEN_KEEPALIVE
void middle() {
if (run_js()) {
// fake recursion that is never reached, to avoid inlining in binaryen and LLVM
middle();
}
}
int main() {
EM_ASM({ _middle() });
}
''')
cmd = [EMCC, 'src.c', '--emit-symbol-map'] + opts
cmd += ['-s', 'WASM=%d' % wasm]
run_process(cmd)
# check that the map is correct
with open('a.out.js.symbols') as f:
symbols = f.read()
lines = [line.split(':') for line in symbols.strip().split('\n')]
minified_middle = None
for minified, full in lines:
# handle both fastcomp and wasm backend notation
if full == '_middle' or full == 'middle':
minified_middle = minified
break
self.assertNotEqual(minified_middle, None)
if wasm:
# stack traces are standardized enough that we can easily check that the
# minified name is actually in the output
stack_trace_reference = 'wasm-function[%s]' % minified_middle
out = run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None)
self.assertContained(stack_trace_reference, out)
# make sure there are no symbols in the wasm itself
wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), 'a.out.wasm'], stdout=PIPE).stdout
for func_start in ('(func $middle', '(func $_middle'):
self.assertNotContained(func_start, wat)
# check we don't keep unnecessary debug info with wasm2js when emitting
# a symbol map
if self.is_wasm_backend() and wasm == 0 and '-O' in str(opts):
with open('a.out.js') as f:
js = f.read()
self.assertNotContained(UNMINIFIED_HEAP8, js)
self.assertNotContained(UNMINIFIED_MIDDLE, js)
# verify those patterns would exist with more debug info
run_process(cmd + ['--profiling-funcs'])
with open('a.out.js') as f:
js = f.read()
self.assertContained(UNMINIFIED_HEAP8, js)
self.assertContained(UNMINIFIED_MIDDLE, js)
def test_bc_to_bc(self):
# emcc should 'process' bitcode to bitcode. build systems can request this if
# e.g. they assume our 'executable' extension is bc, and compile an .o to a .bc
# (the user would then need to build bc to js of course, but we need to actually
# emit the bc)
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.c')])
self.assertExists('hello_world.o')
run_process([EMCC, 'hello_world.o', '-o', 'hello_world.bc'])
self.assertExists('hello_world.o')
self.assertExists('hello_world.bc')
def test_bad_function_pointer_cast(self):
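# Calling through a function pointer cast to a wrong signature should trap or abort, unless EMULATE_FUNCTION_POINTER_CASTS makes the call succeed.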
create_test_file('src.cpp', r'''
#include <stdio.h>
typedef int (*callback) (int, ...);
int impl(int foo) {
printf("Hello, world.\n");
return 0;
}
int main() {
volatile callback f = (callback) impl;
f(0); /* This fails with or without additional arguments. */
return 0;
}
''')
for opts in [0, 1, 2]:
for safe in [0, 1]:
for emulate_casts in [0, 1]:
for emulate_fps in [0, 1]:
for relocatable in [0, 1]:
for wasm in [0, 1]:
if self.is_wasm_backend() and (not wasm or emulate_fps):
continue
if emulate_casts and self.is_wasm_backend() and relocatable:
# TODO('https://github.com/emscripten-core/emscripten/issues/8507')
continue
cmd = [EMCC, 'src.cpp', '-O' + str(opts)]
if not wasm:
cmd += ['-s', 'WASM=0']
if safe:
cmd += ['-s', 'SAFE_HEAP']
if emulate_casts:
cmd += ['-s', 'EMULATE_FUNCTION_POINTER_CASTS']
if emulate_fps:
cmd += ['-s', 'EMULATED_FUNCTION_POINTERS']
if relocatable:
cmd += ['-s', 'RELOCATABLE'] # disables asm-optimized safe heap
print(cmd)
run_process(cmd)
output = run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None)
if emulate_casts:
# success!
self.assertContained('Hello, world.', output)
else:
# otherwise, the error depends on the mode we are in
if self.is_wasm_backend() or (wasm and (relocatable or emulate_fps)):
# wasm trap raised by the vm
self.assertContained('function signature mismatch', output)
elif opts == 0 and safe and not wasm:
# non-wasm safe mode checks asm.js function table masks
self.assertContained('Function table mask error', output)
elif opts == 0:
# informative error message (assertions are enabled in -O0)
self.assertContained('Invalid function pointer', output)
else:
# non-informative error
self.assertContained(('abort(', 'exception'), output)
@no_wasm_backend('asm.js function table feature')
def test_aliased_func_pointers(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
int impl1(int foo) { return foo; }
float impla(float foo) { return foo; }
int impl2(int foo) { return foo+1; }
float implb(float foo) { return foo+1; }
int impl3(int foo) { return foo+2; }
float implc(float foo) { return foo+2; }
int main(int argc, char **argv) {
volatile void *f = (void*)impl1;
if (argc == 50) f = (void*)impla;
if (argc == 51) f = (void*)impl2;
if (argc == 52) f = (void*)implb;
if (argc == 53) f = (void*)impl3;
if (argc == 54) f = (void*)implc;
return (int)f;
}
''')
print('aliasing')
sizes_ii = {}
sizes_dd = {}
for alias in [None, 0, 1]:
cmd = [EMCC, 'src.cpp', '-O1', '-s', 'WASM=0']
if alias is not None:
cmd += ['-s', 'ALIASING_FUNCTION_POINTERS=' + str(alias)]
else:
alias = -1
print(cmd)
run_process(cmd)
src = open('a.out.js').read().split('\n')
for line in src:
if line.strip().startswith('var FUNCTION_TABLE_ii = '):
sizes_ii[alias] = line.count(',')
if line.strip().startswith('var FUNCTION_TABLE_dd = '):
sizes_dd[alias] = line.count(',')
print('ii', sizes_ii)
print('dd', sizes_dd)
for sizes in [sizes_ii, sizes_dd]:
self.assertEqual(sizes[-1], sizes[1]) # default is to alias
self.assertLess(sizes[1], sizes[0]) # without aliasing, we have more unique values and fat tables
def test_bad_export(self):
for m in ['', ' ']:
self.clear()
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EXPORTED_FUNCTIONS=["' + m + '_main"]']
print(cmd)
stderr = run_process(cmd, stderr=PIPE, check=False).stderr
if m:
self.assertContained('undefined exported function: " _main"', stderr)
else:
self.assertContained('hello, world!', run_js('a.out.js'))
def test_no_dynamic_execution(self):
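# With DYNAMIC_EXECUTION=0 the generated JS must not contain eval() or new Function, and emscripten_run_script() should abort (or warn with DYNAMIC_EXECUTION=2).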
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-O1', '-s', 'DYNAMIC_EXECUTION=0'])
self.assertContained('hello, world!', run_js('a.out.js'))
src = open('a.out.js').read()
self.assertNotContained('eval(', src)
self.assertNotContained('eval.', src)
self.assertNotContained('new Function', src)
try_delete('a.out.js')
# Test that --preload-file doesn't add a use of eval().
create_test_file('temp.txt', "foo\n")
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-O1',
'-s', 'DYNAMIC_EXECUTION=0', '--preload-file', 'temp.txt'])
src = open('a.out.js').read()
assert 'eval(' not in src
assert 'eval.' not in src
assert 'new Function' not in src
try_delete('a.out.js')
# Test that -s DYNAMIC_EXECUTION=0 and -s RELOCATABLE=1 are not allowed together.
self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-O1',
'-s', 'DYNAMIC_EXECUTION=0', '-s', 'RELOCATABLE=1'])
try_delete('a.out.js')
create_test_file('test.c', r'''
#include <emscripten/emscripten.h>
int main() {
emscripten_run_script("console.log('hello from script');");
return 0;
}
''')
# Test that emscripten_run_script() aborts when -s DYNAMIC_EXECUTION=0
run_process([EMCC, 'test.c', '-O1', '-s', 'DYNAMIC_EXECUTION=0'])
self.assertContained('DYNAMIC_EXECUTION=0 was set, cannot eval', run_js('a.out.js', assert_returncode=None, full_output=True, stderr=PIPE))
try_delete('a.out.js')
# Test that emscripten_run_script() posts a warning when -s DYNAMIC_EXECUTION=2
run_process([EMCC, 'test.c', '-O1', '-s', 'DYNAMIC_EXECUTION=2'])
self.assertContained('Warning: DYNAMIC_EXECUTION=2 was set, but calling eval in the following location:', run_js('a.out.js', assert_returncode=None, full_output=True, stderr=PIPE))
self.assertContained('hello from script', run_js('a.out.js', assert_returncode=None, full_output=True, stderr=PIPE))
try_delete('a.out.js')
def test_init_file_at_offset(self):
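# Seeking past the end of a new file before writing should leave a zero-filled gap: the start of the file reads as zeros and the size includes the gap.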
create_test_file('src.cpp', r'''
#include <stdio.h>
int main() {
int data = 0x12345678;
FILE *f = fopen("test.dat", "wb");
fseek(f, 100, SEEK_CUR);
fwrite(&data, 4, 1, f);
fclose(f);
int data2;
f = fopen("test.dat", "rb");
fread(&data2, 4, 1, f); // should read 0s, not that int we wrote at an offset
printf("read: %d\n", data2);
fseek(f, 0, SEEK_END);
long size = ftell(f); // should be 104, not 4
fclose(f);
printf("file size is %ld\n", size);
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('read: 0\nfile size is 104\n', run_js('a.out.js'))
def test_unlink(self):
self.do_other_test(os.path.join('other', 'unlink'))
def test_argv0_node(self):
create_test_file('code.cpp', r'''
#include <stdio.h>
int main(int argc, char **argv) {
printf("I am %s.\n", argv[0]);
return 0;
}
''')
run_process([EMCC, 'code.cpp'])
self.assertContained('I am ' + os.path.realpath(self.get_dir()).replace('\\', '/') + '/a.out.js', run_js('a.out.js').replace('\\', '/'))
def test_returncode(self):
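# The process exit code should match the program's return value or exit() argument whether or not EXIT_RUNTIME is set; exit() without EXIT_RUNTIME should also print a warning.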
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
#if CALL_EXIT
exit(CODE);
#else
return CODE;
#endif
}
''')
for code in [0, 123]:
for no_exit in [0, 1]:
for call_exit in [0, 1]:
for async_compile in [0, 1]:
run_process([EMCC, 'src.cpp', '-DCODE=%d' % code, '-s', 'EXIT_RUNTIME=%d' % (1 - no_exit), '-DCALL_EXIT=%d' % call_exit, '-s', 'WASM_ASYNC_COMPILATION=%d' % async_compile])
for engine in JS_ENGINES:
# async compilation can't return a code in d8
if async_compile and engine == V8_ENGINE:
continue
print(code, no_exit, call_exit, async_compile, engine)
proc = run_process(engine + ['a.out.js'], stderr=PIPE, check=False)
# we always emit the right exit code, whether we exit the runtime or not
self.assertEqual(proc.returncode, code)
msg = 'but EXIT_RUNTIME is not set, so halting execution but not exiting the runtime or preventing further async execution (build with EXIT_RUNTIME=1, if you want a true shutdown)'
if no_exit and call_exit:
self.assertContained(msg, proc.stderr)
else:
self.assertNotContained(msg, proc.stderr)
def test_emscripten_force_exit_NO_EXIT_RUNTIME(self):
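# emscripten_force_exit() should warn that it cannot shut down the runtime when the build does not set EXIT_RUNTIME.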
create_test_file('src.cpp', r'''
#include <emscripten.h>
int main() {
#if CALL_EXIT
emscripten_force_exit(0);
#endif
}
''')
for no_exit in [0, 1]:
for call_exit in [0, 1]:
run_process([EMCC, 'src.cpp', '-s', 'EXIT_RUNTIME=%d' % (1 - no_exit), '-DCALL_EXIT=%d' % call_exit])
print(no_exit, call_exit)
out = run_js('a.out.js', stdout=PIPE, stderr=PIPE, full_output=True)
assert ('emscripten_force_exit cannot actually shut down the runtime, as the build does not have EXIT_RUNTIME set' in out) == (no_exit and call_exit), out
def test_mkdir_silly(self):
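# Exercise mkdir()/opendir() with unusual paths: the root, the empty string, and unnormalized paths containing repeated slashes.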
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <dirent.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
int main(int argc, char **argv) {
printf("\n");
for (int i = 1; i < argc; i++) {
printf("%d:\n", i);
int ok = mkdir(argv[i], S_IRWXU|S_IRWXG|S_IRWXO);
printf(" make %s: %d\n", argv[i], ok);
DIR *dir = opendir(argv[i]);
printf(" open %s: %d\n", argv[i], dir != NULL);
if (dir) {
struct dirent *entry;
while ((entry = readdir(dir))) {
printf(" %s, %d\n", entry->d_name, entry->d_type);
}
}
}
}
''')
run_process([EMCC, 'src.cpp'])
# cannot create /, can open
self.assertContained(r'''
1:
make /: -1
open /: 1
., 4
.., 4
tmp, 4
home, 4
dev, 4
proc, 4
''', run_js('a.out.js', args=['/']))
# cannot create empty name, cannot open
self.assertContained(r'''
1:
make : -1
open : 0
''', run_js('a.out.js', args=['']))
# can create unnormalized path, can open
self.assertContained(r'''
1:
make /a//: 0
open /a//: 1
., 4
.., 4
''', run_js('a.out.js', args=['/a//']))
# can create child unnormalized
self.assertContained(r'''
1:
make /a: 0
open /a: 1
., 4
.., 4
2:
make /a//b//: 0
open /a//b//: 1
., 4
.., 4
''', run_js('a.out.js', args=['/a', '/a//b//']))
def test_stat_silly(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <errno.h>
#include <sys/stat.h>
int main(int argc, char **argv) {
for (int i = 1; i < argc; i++) {
const char *path = argv[i];
struct stat path_stat;
if (stat(path, &path_stat) != 0) {
printf("Failed to stat path: %s; errno=%d\n", path, errno);
} else {
printf("ok on %s\n", path);
}
}
}
''')
run_process([EMCC, 'src.cpp'])
# cannot stat ""
self.assertContained(r'''Failed to stat path: /a; errno=44
Failed to stat path: ; errno=44
''', run_js('a.out.js', args=['/a', '']))
def test_symlink_silly(self):
create_test_file('src.cpp', r'''
#include <dirent.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
int main(int argc, char **argv) {
if (symlink(argv[1], argv[2]) != 0) {
printf("Failed to symlink paths: %s, %s; errno=%d\n", argv[1], argv[2], errno);
} else {
printf("ok\n");
}
}
''')
run_process([EMCC, 'src.cpp'])
# cannot symlink with an empty source or destination path (a nonexistent target is fine)
self.assertContained(r'Failed to symlink paths: , abc; errno=44', run_js('a.out.js', args=['', 'abc']))
self.assertContained(r'Failed to symlink paths: , ; errno=44', run_js('a.out.js', args=['', '']))
self.assertContained(r'ok', run_js('a.out.js', args=['123', 'abc']))
self.assertContained(r'Failed to symlink paths: abc, ; errno=44', run_js('a.out.js', args=['abc', '']))
def test_rename_silly(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <errno.h>
int main(int argc, char **argv) {
if (rename(argv[1], argv[2]) != 0) {
printf("Failed to rename paths: %s, %s; errno=%d\n", argv[1], argv[2], errno);
} else {
printf("ok\n");
}
}
''')
run_process([EMCC, 'src.cpp'])
# cannot rename nonexistent or empty paths
self.assertContained(r'Failed to rename paths: , abc; errno=44', run_js('a.out.js', args=['', 'abc']))
self.assertContained(r'Failed to rename paths: , ; errno=44', run_js('a.out.js', args=['', '']))
self.assertContained(r'Failed to rename paths: 123, abc; errno=44', run_js('a.out.js', args=['123', 'abc']))
self.assertContained(r'Failed to rename paths: abc, ; errno=44', run_js('a.out.js', args=['abc', '']))
def test_readdir_r_silly(self):
create_test_file('src.cpp', r'''
#include <iostream>
#include <cstring>
#include <cerrno>
#include <unistd.h>
#include <fcntl.h>
#include <cstdlib>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/types.h>
using std::endl;
namespace
{
void check(const bool result)
{
if(not result) {
std::cout << "Check failed!" << endl;
throw "bad";
}
}
// Do a recursive directory listing of the directory whose path is specified
// by \a name.
void ls(const std::string& name, std::size_t indent = 0)
{
::DIR *dir;
struct ::dirent *entry;
if(indent == 0) {
std::cout << name << endl;
++indent;
}
// Make sure we can open the directory. This should also catch cases where
// the empty string is passed in.
if (not (dir = ::opendir(name.c_str()))) {
const int error = errno;
std::cout
<< "Failed to open directory: " << name << "; " << error << endl;
return;
}
// Just checking the sanity.
if (name.empty()) {
std::cout
<< "Managed to open a directory whose name was the empty string.."
<< endl;
check(::closedir(dir) != -1);
return;
}
// Iterate over the entries in the directory.
while ((entry = ::readdir(dir))) {
const std::string entryName(entry->d_name);
if (entryName == "." || entryName == "..") {
// Skip the dot entries.
continue;
}
const std::string indentStr(indent * 2, ' ');
if (entryName.empty()) {
std::cout
<< indentStr << "\"\": Found empty string as a "
<< (entry->d_type == DT_DIR ? "directory" : "file")
<< " entry!" << endl;
continue;
} else {
std::cout << indentStr << entryName
<< (entry->d_type == DT_DIR ? "/" : "") << endl;
}
if (entry->d_type == DT_DIR) {
// We found a subdirectory; recurse.
ls(std::string(name + (name == "/" ? "" : "/" ) + entryName),
indent + 1);
}
}
// Close our handle.
check(::closedir(dir) != -1);
}
void touch(const std::string &path)
{
const int fd = ::open(path.c_str(), O_CREAT | O_TRUNC, 0644);
check(fd != -1);
check(::close(fd) != -1);
}
}
int main()
{
check(::mkdir("dir", 0755) == 0);
touch("dir/a");
touch("dir/b");
touch("dir/c");
touch("dir/d");
touch("dir/e");
std::cout << "Before:" << endl;
ls("dir");
std::cout << endl;
// Attempt to delete entries as we walk the (single) directory.
::DIR * const dir = ::opendir("dir");
check(dir != NULL);
struct ::dirent *entry;
while((entry = ::readdir(dir)) != NULL) {
const std::string name(entry->d_name);
// Skip "." and "..".
if(name == "." || name == "..") {
continue;
}
// Unlink it.
std::cout << "Unlinking " << name << endl;
check(::unlink(("dir/" + name).c_str()) != -1);
}
check(::closedir(dir) != -1);
std::cout << "After:" << endl;
ls("dir");
std::cout << endl;
return 0;
}
''')
run_process([EMCC, 'src.cpp'])
# check the directory listing before and after unlinking entries while iterating
self.assertContained(r'''Before:
dir
a
b
c
d
e
Unlinking a
Unlinking b
Unlinking c
Unlinking d
Unlinking e
After:
dir
''', run_js('a.out.js', args=['', 'abc']))
def test_emversion(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
int main() {
printf("major: %d\n", __EMSCRIPTEN_major__);
printf("minor: %d\n", __EMSCRIPTEN_minor__);
printf("tiny: %d\n", __EMSCRIPTEN_tiny__);
}
''')
run_process([EMCC, 'src.cpp'])
expected = '''\
major: %d
minor: %d
tiny: %d
''' % (shared.EMSCRIPTEN_VERSION_MAJOR, shared.EMSCRIPTEN_VERSION_MINOR, shared.EMSCRIPTEN_VERSION_TINY)
self.assertContained(expected, run_js('a.out.js'))
def test_libc_files_without_syscalls(self):
# a program which includes FS due to libc js library support, but has no syscalls,
# so full FS support would normally be optimized out
create_test_file('src.cpp', r'''
#include <sys/time.h>
#include <stddef.h>
int main() {
return utimes(NULL, NULL);
}''')
run_process([EMCC, 'src.cpp'])
def test_syscall_without_filesystem(self):
# a program which includes a non-trivial syscall, but disables the filesystem.
create_test_file('src.c', r'''
#include <sys/time.h>
#include <stddef.h>
extern int __sys_openat(int);
int main() {
return __sys_openat(0);
}''')
run_process([EMCC, 'src.c', '-s', 'NO_FILESYSTEM=1'])
def test_dashS(self):
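# -S should emit an assembly output file named after the source (hello_world.s).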
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-S'])
self.assertExists('hello_world.s')
def test_dashS_stdout(self):
stdout = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-S', '-o', '-'], stdout=PIPE).stdout
self.assertEqual(os.listdir('.'), [])
self.assertContained('hello_world.c', stdout)
def test_emit_llvm(self):
# TODO(https://github.com/emscripten-core/emscripten/issues/9016):
# We shouldn't need to copy the file here but if we don't then emcc will
# internally clobber the hello_world.ll in tests.
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'hello_world.c')
run_process([EMCC, 'hello_world.c', '-S', '-emit-llvm'])
self.assertExists('hello_world.ll')
bitcode = open('hello_world.ll').read()
self.assertContained('target triple = "', bitcode)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-c', '-emit-llvm'])
self.assertTrue(building.is_bitcode('hello_world.bc'))
def test_dashE(self):
create_test_file('src.cpp', r'''#include <emscripten.h>
__EMSCRIPTEN_major__ __EMSCRIPTEN_minor__ __EMSCRIPTEN_tiny__ EMSCRIPTEN_KEEPALIVE
''')
def test(args=[]):
print(args)
out = run_process([EMCC, 'src.cpp', '-E'] + args, stdout=PIPE).stdout
self.assertContained('%d %d %d __attribute__((used))' % (shared.EMSCRIPTEN_VERSION_MAJOR, shared.EMSCRIPTEN_VERSION_MINOR, shared.EMSCRIPTEN_VERSION_TINY), out)
test()
test(['--bind'])
def test_dashE_respect_dashO(self):
# issue #3365
with_dash_o = run_process([EMXX, path_from_root('tests', 'hello_world.cpp'), '-E', '-o', 'ignored.js'], stdout=PIPE, stderr=PIPE).stdout
without_dash_o = run_process([EMXX, path_from_root('tests', 'hello_world.cpp'), '-E'], stdout=PIPE, stderr=PIPE).stdout
self.assertEqual(len(with_dash_o), 0)
self.assertNotEqual(len(without_dash_o), 0)
def test_dashM(self):
out = run_process([EMXX, path_from_root('tests', 'hello_world.cpp'), '-M'], stdout=PIPE).stdout
self.assertContained('hello_world.o:', out) # Verify output is just a dependency rule instead of bitcode or js
def test_dashM_respect_dashO(self):
# issue #3365
with_dash_o = run_process([EMXX, path_from_root('tests', 'hello_world.cpp'), '-M', '-o', 'ignored.js'], stdout=PIPE).stdout
without_dash_o = run_process([EMXX, path_from_root('tests', 'hello_world.cpp'), '-M'], stdout=PIPE).stdout
self.assertEqual(len(with_dash_o), 0)
self.assertNotEqual(len(without_dash_o), 0)
def test_malloc_implicit(self):
self.do_other_test(os.path.join('other', 'malloc_implicit'))
def test_switch64phi(self):
# issue 2539, fastcomp segfault on phi-i64 interaction
create_test_file('src.cpp', r'''
#include <cstdint>
#include <limits>
#include <cstdio>
//============================================================================
namespace
{
class int_adapter {
public:
typedef ::int64_t int_type;
int_adapter(int_type v = 0)
: value_(v)
{}
static const int_adapter pos_infinity()
{
return (::std::numeric_limits<int_type>::max)();
}
static const int_adapter neg_infinity()
{
return (::std::numeric_limits<int_type>::min)();
}
static const int_adapter not_a_number()
{
return (::std::numeric_limits<int_type>::max)()-1;
}
static bool is_neg_inf(int_type v)
{
return (v == neg_infinity().as_number());
}
static bool is_pos_inf(int_type v)
{
return (v == pos_infinity().as_number());
}
static bool is_not_a_number(int_type v)
{
return (v == not_a_number().as_number());
}
bool is_infinity() const
{
return (value_ == neg_infinity().as_number() ||
value_ == pos_infinity().as_number());
}
bool is_special() const
{
return(is_infinity() || value_ == not_a_number().as_number());
}
bool operator<(const int_adapter& rhs) const
{
if(value_ == not_a_number().as_number()
|| rhs.value_ == not_a_number().as_number()) {
return false;
}
if(value_ < rhs.value_) return true;
return false;
}
int_type as_number() const
{
return value_;
}
int_adapter operator-(const int_adapter& rhs)const
{
if(is_special() || rhs.is_special())
{
if (rhs.is_pos_inf(rhs.as_number()))
{
return int_adapter(1);
}
if (rhs.is_neg_inf(rhs.as_number()))
{
return int_adapter();
}
}
return int_adapter();
}
private:
int_type value_;
};
class time_iterator {
public:
time_iterator(int_adapter t, int_adapter d)
: current_(t),
offset_(d)
{}
time_iterator& operator--()
{
current_ = int_adapter(current_ - offset_);
return *this;
}
bool operator>=(const int_adapter& t)
{
return not (current_ < t);
}
private:
int_adapter current_;
int_adapter offset_;
};
void iterate_backward(const int_adapter *answers, const int_adapter& td)
{
int_adapter end = answers[0];
time_iterator titr(end, td);
std::puts("");
for (; titr >= answers[0]; --titr) {
}
}
}
int
main()
{
const int_adapter answer1[] = {};
iterate_backward(NULL, int_adapter());
iterate_backward(answer1, int_adapter());
}
''')
run_process([EMCC, 'src.cpp', '-O2', '-s', 'SAFE_HEAP=1'])
@parameterized({
'none': [{'EMCC_FORCE_STDLIBS': None}, False],
# forced libs is ok, they were there anyhow
'normal': [{'EMCC_FORCE_STDLIBS': 'libc,libc++abi,libc++'}, False],
# partial list, but ok since we grab them as needed
'partial': [{'EMCC_FORCE_STDLIBS': 'libc++'}, False],
# fail! not enough stdlibs
'partial_only': [{'EMCC_FORCE_STDLIBS': 'libc++,libc,libc++abi', 'EMCC_ONLY_FORCED_STDLIBS': '1'}, True],
# force all the needed stdlibs, so this works even though we ignore the input file
'full_only': [{'EMCC_FORCE_STDLIBS': 'libc,libc++abi,libc++,libpthread,libmalloc', 'EMCC_ONLY_FORCED_STDLIBS': '1'}, False],
})
def test_only_force_stdlibs(self, env, fail):
with env_modify(env):
run_process([EMXX, path_from_root('tests', 'hello_libcxx.cpp'), '-s', 'WARN_ON_UNDEFINED_SYMBOLS=0'])
if fail:
output = self.expect_fail(NODE_JS + ['a.out.js'], stdout=PIPE)
self.assertContained('missing function', output)
else:
self.assertContained('hello, world!', run_js('a.out.js'))
def test_only_force_stdlibs_2(self):
create_test_file('src.cpp', r'''
#include <iostream>
#include <stdexcept>
int main()
{
try {
throw std::exception();
std::cout << "got here" << std::endl;
}
catch (const std::exception& ex) {
std::cout << "Caught exception: " << ex.what() << std::endl;
}
}
''')
with env_modify({'EMCC_FORCE_STDLIBS': 'libc,libc++abi,libc++,libmalloc,libpthread', 'EMCC_ONLY_FORCED_STDLIBS': '1'}):
run_process([EMXX, 'src.cpp', '-s', 'DISABLE_EXCEPTION_CATCHING=0'])
self.assertContained('Caught exception: std::exception', run_js('a.out.js', stderr=PIPE))
def test_strftime_zZ(self):
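# strftime %z should produce an ISO 8601 UTC offset and %Z a timezone name for the current local time.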
create_test_file('src.cpp', r'''
#include <cerrno>
#include <cstring>
#include <ctime>
#include <iostream>
int main()
{
// Buffer to hold the current hour of the day. Format is HH + nul
// character.
char hour[3];
// Buffer to hold our ISO 8601 formatted UTC offset for the current
// timezone. Format is [+-]hhmm + nul character.
char utcOffset[6];
// Buffer to hold the timezone name or abbreviation. Just make it
// sufficiently large to hold most timezone names.
char timezone[128];
std::tm tm;
// Get the current timestamp.
const std::time_t now = std::time(NULL);
// What time is that here?
if (::localtime_r(&now, &tm) == NULL) {
const int error = errno;
std::cout
<< "Failed to get localtime for timestamp=" << now << "; errno=" << error
<< "; " << std::strerror(error) << std::endl;
return 1;
}
size_t result = 0;
// Get the formatted hour of the day.
if ((result = std::strftime(hour, 3, "%H", &tm)) != 2) {
const int error = errno;
std::cout
<< "Failed to format hour for timestamp=" << now << "; result="
<< result << "; errno=" << error << "; " << std::strerror(error)
<< std::endl;
return 1;
}
std::cout << "The current hour of the day is: " << hour << std::endl;
// Get the formatted UTC offset in ISO 8601 format.
if ((result = std::strftime(utcOffset, 6, "%z", &tm)) != 5) {
const int error = errno;
std::cout
<< "Failed to format UTC offset for timestamp=" << now << "; result="
<< result << "; errno=" << error << "; " << std::strerror(error)
<< std::endl;
return 1;
}
std::cout << "The current timezone offset is: " << utcOffset << std::endl;
// Get the formatted timezone name or abbreviation. We don't know how long
// this will be, so just expect some data to be written to the buffer.
if ((result = std::strftime(timezone, 128, "%Z", &tm)) == 0) {
const int error = errno;
std::cout
<< "Failed to format timezone for timestamp=" << now << "; result="
<< result << "; errno=" << error << "; " << std::strerror(error)
<< std::endl;
return 1;
}
std::cout << "The current timezone is: " << timezone << std::endl;
std::cout << "ok!\n";
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('ok!', run_js('a.out.js'))
def test_strptime_symmetry(self):
building.emcc(path_from_root('tests', 'strptime_symmetry.cpp'), output_filename='a.out.js')
self.assertContained('TEST PASSED', run_js('a.out.js'))
def test_truncate_from_0(self):
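# truncate() should be able to both shrink and grow a file, including growing it again after truncating to size 0.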
create_test_file('src.cpp', r'''
#include <cerrno>
#include <cstring>
#include <iostream>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
using std::endl;
//============================================================================
// :: Helpers
namespace
{
// Returns the size of the regular file specified as 'path'.
::off_t getSize(const char* const path)
{
// Stat the file and make sure that it's the expected size.
struct ::stat path_stat;
if (::stat(path, &path_stat) != 0) {
const int error = errno;
std::cout
<< "Failed to lstat path: " << path << "; errno=" << error << "; "
<< std::strerror(error) << endl;
return -1;
}
std::cout
<< "Size of file is: " << path_stat.st_size << endl;
return path_stat.st_size;
}
// Causes the regular file specified in 'path' to have a size of 'length'
// bytes.
void resize(const char* const path,
const ::off_t length)
{
std::cout
<< "Truncating file=" << path << " to length=" << length << endl;
if (::truncate(path, length) == -1)
{
const int error = errno;
std::cout
<< "Failed to truncate file=" << path << "; errno=" << error
<< "; " << std::strerror(error) << endl;
}
const ::off_t size = getSize(path);
if (size != length) {
std::cout
<< "Failed to truncate file=" << path << " to length=" << length
<< "; got size=" << size << endl;
}
}
// Helper to create a file with the given content.
void createFile(const std::string& path, const std::string& content)
{
std::cout
<< "Creating file: " << path << " with content=" << content << endl;
const int fd = ::open(path.c_str(), O_CREAT | O_WRONLY, 0644);
if (fd == -1) {
const int error = errno;
std::cout
<< "Failed to open file for writing: " << path << "; errno=" << error
<< "; " << std::strerror(error) << endl;
return;
}
if (::write(fd, content.c_str(), content.size()) != content.size()) {
const int error = errno;
std::cout
<< "Failed to write content=" << content << " to file=" << path
<< "; errno=" << error << "; " << std::strerror(error) << endl;
// Fall through to close FD.
}
::close(fd);
}
}
//============================================================================
// :: Entry Point
int main()
{
const char* const file = "/tmp/file";
createFile(file, "This is some content");
getSize(file);
resize(file, 32);
resize(file, 17);
resize(file, 0);
// Grow the file back to 32 bytes after truncating to 0 (the expected output below shows this succeeding).
resize(file, 32);
return 0;
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained(r'''Creating file: /tmp/file with content=This is some content
Size of file is: 20
Truncating file=/tmp/file to length=32
Size of file is: 32
Truncating file=/tmp/file to length=17
Size of file is: 17
Truncating file=/tmp/file to length=0
Size of file is: 0
Truncating file=/tmp/file to length=32
Size of file is: 32
''', run_js('a.out.js'))
def test_create_readonly(self):
create_test_file('src.cpp', r'''
#include <cerrno>
#include <cstring>
#include <iostream>
#include <fcntl.h>
#include <unistd.h>
using std::endl;
//============================================================================
// :: Helpers
namespace
{
// Helper to create a read-only file with content.
void readOnlyFile(const std::string& path, const std::string& content)
{
std::cout
<< "Creating file: " << path << " with content of size="
<< content.size() << endl;
const int fd = ::open(path.c_str(), O_CREAT | O_WRONLY, 0400);
if (fd == -1) {
const int error = errno;
std::cout
<< "Failed to open file for writing: " << path << "; errno=" << error
<< "; " << std::strerror(error) << endl;
return;
}
// Write the content to the file.
ssize_t result = 0;
if ((result = ::write(fd, content.data(), content.size()))
!= ssize_t(content.size()))
{
const int error = errno;
std::cout
<< "Failed to write to file=" << path << "; errno=" << error
<< "; " << std::strerror(error) << endl;
// Fall through to close the file.
}
else {
std::cout
<< "Data written to file=" << path << "; successfully wrote "
<< result << " bytes" << endl;
}
::close(fd);
}
}
//============================================================================
// :: Entry Point
int main()
{
const char* const file = "/tmp/file";
unlink(file);
readOnlyFile(file, "This content should get written because the file "
"does not yet exist and so, only the mode of the "
"containing directory will influence my ability to "
"create and open the file. The mode of the file only "
"applies to opening of the stream, not subsequent stream "
"operations after stream has opened.\n\n");
readOnlyFile(file, "This should not get written because the file already "
"exists and is read-only.\n\n");
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained(r'''Creating file: /tmp/file with content of size=292
Data written to file=/tmp/file; successfully wrote 292 bytes
Creating file: /tmp/file with content of size=79
Failed to open file for writing: /tmp/file; errno=2; Permission denied
''', run_js('a.out.js'))
def test_embed_file_large(self):
# If such long files are encoded on one line,
# they overflow the interpreter's limit
large_size = int(1500000)
create_test_file('large.txt', 'x' * large_size)
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <unistd.h>
int main()
{
FILE* fp = fopen("large.txt", "r");
if (fp) {
printf("ok\n");
fseek(fp, 0L, SEEK_END);
printf("%ld\n", ftell(fp));
} else {
printf("failed to open large file.txt\n");
}
return 0;
}
''')
run_process([EMCC, 'src.cpp', '--embed-file', 'large.txt'])
for engine in JS_ENGINES:
if engine == V8_ENGINE:
continue # ooms
print(engine)
self.assertContained('ok\n' + str(large_size) + '\n', run_js('a.out.js', engine=engine))
def test_force_exit(self):
create_test_file('src.cpp', r'''
#include <emscripten/emscripten.h>
namespace
{
extern "C"
EMSCRIPTEN_KEEPALIVE
void callback()
{
EM_ASM({ out('callback pre()') });
::emscripten_force_exit(42);
EM_ASM({ out('callback post()') });
}
}
int
main()
{
EM_ASM({ setTimeout(function() { out("calling callback()"); _callback() }, 100) });
::emscripten_exit_with_live_runtime();
return 123;
}
''')
run_process([EMCC, 'src.cpp'])
output = run_js('a.out.js', assert_returncode=42)
assert 'callback pre()' in output
assert 'callback post()' not in output
def test_bad_locale(self):
create_test_file('src.cpp', r'''
#include <locale.h>
#include <stdio.h>
#include <wctype.h>
int
main(const int argc, const char * const * const argv)
{
const char * const locale = (argc > 1 ? argv[1] : "C");
const char * const actual = setlocale(LC_ALL, locale);
if(actual == NULL) {
printf("%s locale not supported\n",
locale);
return 0;
}
printf("locale set to %s: %s\n", locale, actual);
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('locale set to C: C;C;C;C;C;C',
run_js('a.out.js', args=['C']))
self.assertContained('locale set to waka: waka;waka;waka;waka;waka;waka',
run_js('a.out.js', args=['waka']))
def test_browser_language_detection(self):
# Test HTTP Accept-Language parsing by simulating navigator.languages #8751
run_process([EMCC,
path_from_root('tests', 'test_browser_language_detection.c')])
self.assertContained('C.UTF-8', run_js('a.out.js'))
# Accept-Language: fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3
create_test_file('preamble.js', r'''navigator = {};
navigator.languages = [ "fr", "fr-FR", "en-US", "en" ];''')
run_process([EMCC, '--pre-js', 'preamble.js',
path_from_root('tests', 'test_browser_language_detection.c')])
self.assertContained('fr.UTF-8', run_js('a.out.js'))
# Accept-Language: fr-FR,fr;q=0.8,en-US;q=0.5,en;q=0.3
create_test_file('preamble.js', r'''navigator = {};
navigator.languages = [ "fr-FR", "fr", "en-US", "en" ];''')
run_process([EMCC, '--pre-js', 'preamble.js',
path_from_root('tests', 'test_browser_language_detection.c')])
self.assertContained('fr_FR.UTF-8', run_js('a.out.js'))
def test_js_main(self):
# Try to add a main() from JS at runtime. This is not supported (the
# compiler needs to know about main at compile time).
create_test_file('pre_main.js', r'''
var Module = {
'_main': function() {
}
};
''')
create_test_file('src.cpp', '')
run_process([EMCC, 'src.cpp', '--pre-js', 'pre_main.js'])
self.assertContained('compiled without a main, but one is present. if you added it from JS, use Module["onRuntimeInitialized"]',
run_js('a.out.js', assert_returncode=None, stderr=PIPE))
def test_js_malloc(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
for (var i = 0; i < 1000; i++) {
var ptr = Module._malloc(1024 * 1024); // only done in JS, but still must not leak
Module._free(ptr);
}
});
printf("ok.\n");
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('ok.', run_js('a.out.js', args=['C']))
def test_locale_wrong(self):
create_test_file('src.cpp', r'''
#include <locale>
#include <iostream>
#include <stdexcept>
int
main(const int argc, const char * const * const argv)
{
const char * const name = argc > 1 ? argv[1] : "C";
try {
const std::locale locale(name);
std::cout
<< "Constructed locale \"" << name << "\"\n"
<< "This locale is "
<< (locale == std::locale::global(locale) ? "" : "not ")
<< "the global locale.\n"
<< "This locale is " << (locale == std::locale::classic() ? "" : "not ")
<< "the C locale." << std::endl;
} catch(const std::runtime_error &ex) {
std::cout
<< "Can't construct locale \"" << name << "\": " << ex.what()
<< std::endl;
return 1;
} catch(...) {
std::cout
<< "FAIL: Unexpected exception constructing locale \"" << name << '\"'
<< std::endl;
return 127;
}
}
''')
run_process([EMCC, 'src.cpp', '-s', 'EXIT_RUNTIME=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0'])
self.assertContained('Constructed locale "C"\nThis locale is the global locale.\nThis locale is the C locale.', run_js('a.out.js', args=['C']))
self.assertContained('''Can't construct locale "waka": collate_byname<char>::collate_byname failed to construct for waka''', run_js('a.out.js', args=['waka'], assert_returncode=1))
def test_cleanup_os(self):
# issue 2644
def test(args, be_clean):
print(args)
self.clear()
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'a.c')
create_test_file('b.c', ' ')
run_process([EMCC, 'a.c', 'b.c'] + args)
clutter = glob.glob('*.o')
if be_clean:
assert len(clutter) == 0, 'should not leave clutter ' + str(clutter)
else:
assert len(clutter) == 2, 'should leave .o files'
test(['-o', 'c.bc'], True)
test(['-o', 'c.js'], True)
test(['-o', 'c.html'], True)
test(['-c'], False)
@no_wasm_backend('asm.js debug info')
def test_js_dash_g(self):
create_test_file('src.c', '''
#include <stdio.h>
#include <assert.h>
void checker(int x) {
x += 20;
assert(x < 15); // this is line 7!
}
int main() {
checker(10);
return 0;
}
''')
def check(has):
print(has)
lines = open('a.out.js').readlines()
lines = [line for line in lines if '___assert_fail(' in line or '___assert_func(' in line]
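# With -g, the asm.js output is expected to carry debug info as '//@line <N> "<file>"'
# annotations next to the relevant statements; that is the pattern grepped for below.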
found_line_num = any(('//@line 7 "' in line) for line in lines)
found_filename = any(('src.c"\n' in line) for line in lines)
assert found_line_num == has, 'Must have debug info with the line number'
assert found_filename == has, 'Must have debug info with the filename'
run_process([EMCC, '-s', 'WASM=0', 'src.c', '-g'])
check(True)
run_process([EMCC, '-s', 'WASM=0', 'src.c'])
check(False)
run_process([EMCC, '-s', 'WASM=0', 'src.c', '-g0'])
check(False)
run_process([EMCC, '-s', 'WASM=0', 'src.c', '-g0', '-g']) # later one overrides
check(True)
run_process([EMCC, '-s', 'WASM=0', 'src.c', '-g', '-g0']) # later one overrides
check(False)
def test_dash_g_bc(self):
def test(opts):
print(opts)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'a_.bc'] + opts)
sizes = {'_': os.path.getsize('a_.bc')}
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-g', '-o', 'ag.bc'] + opts)
sizes['g'] = os.path.getsize('ag.bc')
for i in range(0, 5):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-g' + str(i), '-o', 'a' + str(i) + '.bc'] + opts)
sizes[i] = os.path.getsize('a' + str(i) + '.bc')
print(' ', sizes)
assert sizes['_'] == sizes[0] == sizes[1] == sizes[2], 'no debug means no llvm debug info ' + str(sizes)
assert sizes['g'] == sizes[3] == sizes[4], '-g or -g4 means llvm debug info ' + str(sizes)
assert sizes['_'] < sizes['g'], 'llvm debug info has positive size ' + str(sizes)
test([])
test(['-O1'])
def test_no_filesystem(self):
FS_MARKER = 'var FS'
# fopen forces full filesystem support
run_process([EMCC, path_from_root('tests', 'hello_world_fopen.c'), '-s', 'ASSERTIONS=0'])
yes_size = os.path.getsize('a.out.js')
self.assertContained('hello, world!', run_js('a.out.js'))
self.assertContained(FS_MARKER, open('a.out.js').read())
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'ASSERTIONS=0'])
no_size = os.path.getsize('a.out.js')
self.assertContained('hello, world!', run_js('a.out.js'))
self.assertNotContained(FS_MARKER, open('a.out.js').read())
print('yes fs, no fs:', yes_size, no_size)
# ~100K of FS code is removed
self.assertGreater(yes_size - no_size, 90000)
self.assertLess(no_size, 360000)
def test_no_filesystem_libcxx(self):
run_process([EMCC, path_from_root('tests', 'hello_libcxx.cpp'), '-s', 'FILESYSTEM=0'])
self.assertContained('hello, world!', run_js('a.out.js'))
def test_no_nuthin(self):
# check FILESYSTEM is automatically set, and effective
def test(opts, absolute):
print('opts, absolute:', opts, absolute)
sizes = {}
def do(name, source, moar_opts):
self.clear()
# pad the name to a common length so that it doesn't affect the size of
# the output
padded_name = name + '_' * (20 - len(name))
run_process([EMCC, path_from_root('tests', source), '-o', padded_name + '.js'] + opts + moar_opts)
sizes[name] = os.path.getsize(padded_name + '.js')
if os.path.exists(padded_name + '.wasm'):
sizes[name] += os.path.getsize(padded_name + '.wasm')
self.assertContained('hello, world!', run_js(padded_name + '.js'))
do('normal', 'hello_world_fopen.c', [])
do('no_fs', 'hello_world.c', []) # without fopen, we should auto-detect we do not need full fs support and can do FILESYSTEM=0
do('no_fs_manual', 'hello_world.c', ['-s', 'FILESYSTEM=0'])
print(' ', sizes)
self.assertLess(sizes['no_fs'], sizes['normal'])
self.assertLess(sizes['no_fs'], absolute)
# manual can usually remove a tiny bit more
self.assertLess(sizes['no_fs_manual'], sizes['no_fs'] + 30)
test(['-s', 'ASSERTIONS=0'], 120000) # we don't care about code size with assertions
test(['-O1'], 91000)
test(['-O2'], 46000)
test(['-O3', '--closure', '1'], 17000)
# asm.js too
if not self.is_wasm_backend():
test(['-O3', '--closure', '1', '-s', 'WASM=0'], 36000)
test(['-O3', '--closure', '2', '-s', 'WASM=0'], 33000) # might change now and then
def test_no_browser(self):
BROWSER_INIT = 'var Browser'
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
self.assertNotContained(BROWSER_INIT, open('a.out.js').read())
run_process([EMCC, path_from_root('tests', 'browser_main_loop.c')]) # uses emscripten_set_main_loop, which needs Browser
self.assertContained(BROWSER_INIT, open('a.out.js').read())
def test_EXPORTED_RUNTIME_METHODS(self):
def test(opts, has, not_has):
print(opts, has, not_has)
self.clear()
# check without assertions, as with assertions we add stubs for the things we remove (which
# print nice error messages)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'ASSERTIONS=0'] + opts)
self.assertContained('hello, world!', run_js('a.out.js'))
src = open('a.out.js').read()
self.assertContained(has, src)
self.assertNotContained(not_has, src)
test([], 'Module["', 'Module["waka')
test(['-s', 'EXPORTED_RUNTIME_METHODS=[]'], '', 'Module["addRunDependency')
test(['-s', 'EXPORTED_RUNTIME_METHODS=["addRunDependency"]'], 'Module["addRunDependency', 'Module["waka')
test(['-s', 'EXPORTED_RUNTIME_METHODS=[]', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["addRunDependency"]'], 'Module["addRunDependency', 'Module["waka')
def test_stat_fail_alongtheway(self):
create_test_file('src.cpp', r'''
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
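// Note: the CHECK macro below stores errno into 'error' after evaluating each
// expression, so a following printf/CHECK can inspect the errno left by the
// previous call.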
#define CHECK(expression) \
if(!(expression)) { \
error = errno; \
printf("FAIL: %s\n", #expression); fail = 1; \
} else { \
error = errno; \
printf("pass: %s\n", #expression); \
} \
int
main()
{
int error;
int fail = 0;
CHECK(mkdir("path", 0777) == 0);
CHECK(close(open("path/file", O_CREAT | O_WRONLY, 0644)) == 0);
{
struct stat st;
CHECK(stat("path", &st) == 0);
CHECK(st.st_mode = 0777);
}
{
struct stat st;
CHECK(stat("path/nosuchfile", &st) == -1);
printf("info: errno=%d %s\n", error, strerror(error));
CHECK(error == ENOENT);
}
{
struct stat st;
CHECK(stat("path/file", &st) == 0);
CHECK(st.st_mode = 0666);
}
{
struct stat st;
CHECK(stat("path/file/impossible", &st) == -1);
printf("info: errno=%d %s\n", error, strerror(error));
CHECK(error == ENOTDIR);
}
{
struct stat st;
CHECK(lstat("path/file/impossible", &st) == -1);
printf("info: errno=%d %s\n", error, strerror(error));
CHECK(error == ENOTDIR);
}
return fail;
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained(r'''pass: mkdir("path", 0777) == 0
pass: close(open("path/file", O_CREAT | O_WRONLY, 0644)) == 0
pass: stat("path", &st) == 0
pass: st.st_mode = 0777
pass: stat("path/nosuchfile", &st) == -1
info: errno=44 No such file or directory
pass: error == ENOENT
pass: stat("path/file", &st) == 0
pass: st.st_mode = 0666
pass: stat("path/file/impossible", &st) == -1
info: errno=54 Not a directory
pass: error == ENOTDIR
pass: lstat("path/file/impossible", &st) == -1
info: errno=54 Not a directory
pass: error == ENOTDIR
''', run_js('a.out.js'))
def test_link_with_a_static(self):
create_test_file('x.c', r'''
int init_weakref(int a, int b) {
return a + b;
}
''')
create_test_file('y.c', r'''
static int init_weakref(void) { // inlined in -O2, not in -O0 where it shows up in llvm-nm as 't'
return 150;
}
int testy(void) {
return init_weakref();
}
''')
create_test_file('z.c', r'''
extern int init_weakref(int, int);
extern int testy(void);
int main(void) {
return testy() + init_weakref(5, 6);
}
''')
run_process([EMCC, 'x.c', '-o', 'x.o'])
run_process([EMCC, 'y.c', '-o', 'y.o'])
run_process([EMCC, 'z.c', '-o', 'z.o'])
try_delete('libtest.a')
run_process([EMAR, 'rc', 'libtest.a', 'y.o'])
run_process([EMAR, 'rc', 'libtest.a', 'x.o'])
run_process([EMRANLIB, 'libtest.a'])
for args in [[], ['-O2']]:
print('args:', args)
run_process([EMCC, 'z.o', 'libtest.a', '-s', 'EXIT_RUNTIME=1'] + args)
run_js('a.out.js', assert_returncode=161)
def test_link_with_bad_o_in_a(self):
# When building a .a, we force-include all the objects inside it, but some
# may not be valid bitcode, e.g. if they contain metadata or something else
# weird. We should just ignore those.
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.c'), '-o', 'hello_world.o'])
create_test_file('bad.obj', 'this is not a good file, it should be ignored!')
run_process([LLVM_AR, 'cr', 'libfoo.a', 'hello_world.o', 'bad.obj'])
run_process([EMCC, 'libfoo.a'])
self.assertContained('hello, world!', run_js('a.out.js'))
def test_require(self):
inname = path_from_root('tests', 'hello_world.c')
building.emcc(inname, args=['-s', 'ASSERTIONS=0'], output_filename='a.out.js')
output = run_process(NODE_JS + ['-e', 'require("./a.out.js")'], stdout=PIPE, stderr=PIPE)
assert output.stdout == 'hello, world!\n' and output.stderr == '', 'expected hello world on stdout and nothing on stderr, got\n===\nSTDOUT\n%s\n===\nSTDERR\n%s\n===\n' % (output.stdout, output.stderr)
def test_require_modularize(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MODULARIZE=1', '-s', 'ASSERTIONS=0'])
src = open('a.out.js').read()
self.assertContained('module.exports = Module;', src)
output = run_process(NODE_JS + ['-e', 'var m = require("./a.out.js"); m();'], stdout=PIPE, stderr=PIPE)
self.assertFalse(output.stderr)
self.assertEqual(output.stdout, 'hello, world!\n')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="NotModule"', '-s', 'ASSERTIONS=0'])
src = open('a.out.js').read()
self.assertContained('module.exports = NotModule;', src)
output = run_process(NODE_JS + ['-e', 'var m = require("./a.out.js"); m();'], stdout=PIPE, stderr=PIPE)
self.assertFalse(output.stderr)
self.assertEqual(output.stdout, 'hello, world!\n')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MODULARIZE=1'])
# We call require() twice to ensure it returns a wrapper function each time
output = run_process(NODE_JS + ['-e', 'require("./a.out.js")();var m = require("./a.out.js"); m();'], stdout=PIPE, stderr=PIPE)
self.assertFalse(output.stderr)
self.assertEqual(output.stdout, 'hello, world!\nhello, world!\n')
def test_define_modularize(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MODULARIZE=1', '-s', 'ASSERTIONS=0'])
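# Prepend 'var module = 0;' so the generated code does not take the CommonJS
# (module.exports) path and instead uses the AMD define() path, which the
# node invocation below stubs out.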
with open('a.out.js') as f:
src = 'var module = 0; ' + f.read()
create_test_file('a.out.js', src)
assert "define([], function() { return Module; });" in src
output = run_process(NODE_JS + ['-e', 'var m; (global.define = function(deps, factory) { m = factory(); }).amd = true; require("./a.out.js"); m();'], stdout=PIPE, stderr=PIPE)
assert output.stdout == 'hello, world!\n' and output.stderr == '', 'expected output, got\n===\nSTDOUT\n%s\n===\nSTDERR\n%s\n===\n' % (output.stdout, output.stderr)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="NotModule"', '-s', 'ASSERTIONS=0'])
with open('a.out.js') as f:
src = 'var module = 0; ' + f.read()
create_test_file('a.out.js', src)
assert "define([], function() { return NotModule; });" in src
output = run_process(NODE_JS + ['-e', 'var m; (global.define = function(deps, factory) { m = factory(); }).amd = true; require("./a.out.js"); m();'], stdout=PIPE, stderr=PIPE)
assert output.stdout == 'hello, world!\n' and output.stderr == '', 'expected output, got\n===\nSTDOUT\n%s\n===\nSTDERR\n%s\n===\n' % (output.stdout, output.stderr)
def test_EXPORT_NAME_with_html(self):
result = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'a.html', '-s', 'EXPORT_NAME=Other'], stdout=PIPE, check=False, stderr=STDOUT)
self.assertNotEqual(result.returncode, 0)
self.assertContained('Customizing EXPORT_NAME requires that the HTML be customized to use that name', result.stdout)
@no_wasm_backend('tests fastcomp specific passes')
def test_emcc_c_multi(self):
def test(args, llvm_opts=None):
print(args)
lib = r'''
int mult() { return 1; }
'''
lib_name = 'libA.c'
create_test_file(lib_name, lib)
main = r'''
#include <stdio.h>
int mult();
int main() {
printf("result: %d\n", mult());
return 0;
}
'''
main_name = 'main.c'
create_test_file(main_name, main)
err = run_process([EMCC, '-v', '-c', main_name, lib_name] + args, stderr=PIPE).stderr
VECTORIZE = '-disable-loop-vectorization'
if args:
assert err.count(VECTORIZE) == 2, err # specified twice, once per file
# corresponding to exactly once per invocation of optimizer
assert err.count(os.path.sep + 'opt') == 2, err
else:
assert err.count(VECTORIZE) == 0, err # no optimizations
run_process([EMCC, main_name.replace('.c', '.o'), lib_name.replace('.c', '.o')])
self.assertContained('result: 1', run_js('a.out.js'))
test([])
test(['-O2'], '-O3')
test(['-Oz'], '-Oz')
test(['-Os'], '-Os')
def test_export_all_3142(self):
create_test_file('src.cpp', r'''
typedef unsigned int Bit32u;
struct S_Descriptor {
Bit32u limit_0_15 :16;
Bit32u base_0_15 :16;
Bit32u base_16_23 :8;
};
class Descriptor
{
public:
Descriptor() { saved.fill[0]=saved.fill[1]=0; }
union {
S_Descriptor seg;
Bit32u fill[2];
} saved;
};
Descriptor desc;
''')
try_delete('a.out.js')
run_process([EMCC, 'src.cpp', '-O2', '-s', 'EXPORT_ALL'])
self.assertExists('a.out.js')
@no_wasm_backend('tests PRECISE_F32=1')
def test_f0(self):
run_process([EMCC, path_from_root('tests', 'fasta.cpp'), '-O2', '-s', 'PRECISE_F32=1', '-profiling', '-s', 'WASM=0'])
src = open('a.out.js').read()
assert ' = f0;' in src or ' = f0,' in src
def test_emmake_emconfigure(self):
def check(what, args, fail=True, expect=''):
args = [what] + args
print(what, args, fail, expect)
output = run_process(args, stdout=PIPE, stderr=PIPE, check=False)
assert ('is a helper for' in output.stderr) == fail
assert ('Typical usage' in output.stderr) == fail
self.assertContained(expect, output.stdout)
check(emmake, [])
check(emconfigure, [])
check(emmake, ['--version'])
check(emconfigure, ['--version'])
check(emmake, ['make'], fail=False)
check(emconfigure, ['configure'], fail=False)
check(emconfigure, ['./configure'], fail=False)
check(emcmake, ['cmake'], fail=False)
create_test_file('test.py', '''
import os
print(os.environ.get('CROSS_COMPILE'))
''')
check(emconfigure, [PYTHON, 'test.py'], expect=path_from_root('em'), fail=False)
check(emmake, [PYTHON, 'test.py'], expect=path_from_root('em'), fail=False)
create_test_file('test.py', '''
import os
print(os.environ.get('NM'))
''')
check(emconfigure, [PYTHON, 'test.py'], expect=shared.LLVM_NM, fail=False)
def test_emmake_python(self):
# simulates a configure/make script that looks for things like CC, AR, etc., and which we should
# not confuse by setting those vars to something containing `python X` as the script checks for
# the existence of an executable.
run_process([emmake, PYTHON, path_from_root('tests', 'emmake', 'make.py')])
def test_sdl2_config(self):
for args, expected in [
[['--version'], '2.0.0'],
[['--cflags'], '-s USE_SDL=2'],
[['--libs'], '-s USE_SDL=2'],
[['--cflags', '--libs'], '-s USE_SDL=2'],
]:
print(args, expected)
out = run_process([PYTHON, path_from_root('system', 'bin', 'sdl2-config')] + args, stdout=PIPE, stderr=PIPE).stdout
assert expected in out, out
print('via emmake')
out = run_process([emmake, 'sdl2-config'] + args, stdout=PIPE, stderr=PIPE).stdout
assert expected in out, out
def test_module_onexit(self):
create_test_file('src.cpp', r'''
#include <emscripten.h>
int main() {
EM_ASM({
Module['onExit'] = function(status) { out('exiting now, status ' + status) };
});
return 14;
}
''')
try_delete('a.out.js')
run_process([EMCC, 'src.cpp', '-s', 'EXIT_RUNTIME=1'])
self.assertContained('exiting now, status 14', run_js('a.out.js', assert_returncode=14))
def test_NO_aliasing(self):
# the NO_ prefix flips boolean options
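# e.g. '-s NO_EXIT_RUNTIME=0' should behave exactly like '-s EXIT_RUNTIME=1',
# which is what the three builds below compare.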
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EXIT_RUNTIME=1'])
exit_1 = open('a.out.js').read()
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'NO_EXIT_RUNTIME=0'])
no_exit_0 = open('a.out.js').read()
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EXIT_RUNTIME=0'])
exit_0 = open('a.out.js').read()
assert exit_1 == no_exit_0
assert exit_1 != exit_0
def test_underscore_exit(self):
create_test_file('src.cpp', r'''
#include <unistd.h>
int main() {
_exit(0); // should not end up in an infinite loop with non-underscore exit
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('', run_js('a.out.js', assert_returncode=0))
def test_file_packager_huge(self):
MESSAGE = 'warning: file packager is creating an asset bundle of 257 MB. this is very large, and browsers might have trouble loading it'
create_test_file('huge.dat', 'a' * (1024 * 1024 * 257))
create_test_file('tiny.dat', 'a')
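# huge.dat (257 MB) is presumably just over the packager's size-warning
# threshold, while tiny.dat is far below it, so only the former should warn.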
err = run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'tiny.dat'], stdout=PIPE, stderr=PIPE).stderr
self.assertNotContained(MESSAGE, err)
err = run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'huge.dat'], stdout=PIPE, stderr=PIPE).stderr
self.assertContained(MESSAGE, err)
self.clear()
def test_massive_alloc(self):
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
volatile int x = (int)malloc(1024 * 1024 * 1400);
return x == 0; // can't alloc it, but don't fail catastrophically, expect null
}
''')
run_process([EMCC, 'main.cpp', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'WASM=0'])
# we only care about the message regarding allocating over 1GB of memory
output = run_js('a.out.js', stderr=PIPE, full_output=True)
if self.is_wasm_backend():
self.assertContained('''Warning: Enlarging memory arrays, this is not fast! 16777216,1473314816\n''', output)
else:
self.assertContained('''Warning: Enlarging memory arrays, this is not fast! 16777216,1476395008\n''', output)
print('wasm')
run_process([EMCC, 'main.cpp', '-s', 'ALLOW_MEMORY_GROWTH=1'])
# no message about growth, just check return code
run_js('a.out.js', stderr=PIPE, full_output=True)
def test_failing_alloc(self):
for pre_fail, post_fail, opts in [
('', '', []),
('EM_ASM( Module.temp = HEAP32[DYNAMICTOP_PTR>>2] );', 'EM_ASM( assert(Module.temp === HEAP32[DYNAMICTOP_PTR>>2], "must not adjust DYNAMICTOP when an alloc fails!") );', []),
# also test non-wasm in normal mode
('', '', ['-s', 'WASM=0']),
('EM_ASM( Module.temp = HEAP32[DYNAMICTOP_PTR>>2] );', 'EM_ASM( assert(Module.temp === HEAP32[DYNAMICTOP_PTR>>2], "must not adjust DYNAMICTOP when an alloc fails!") );', ['-s', 'WASM=0']),
]:
for growth in [0, 1]:
for aborting_args in [[], ['-s', 'ABORTING_MALLOC=0'], ['-s', 'ABORTING_MALLOC=1']]:
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <assert.h>
#include <emscripten.h>
#define CHUNK_SIZE (10 * 1024 * 1024)
int main() {
std::vector<void*> allocs;
bool has = false;
while (1) {
printf("trying an allocation\n");
%s
void* curr = malloc(CHUNK_SIZE);
if (!curr) {
%s
break;
}
has = true;
printf("allocated another chunk, %%zu so far\n", allocs.size());
allocs.push_back(curr);
}
assert(has);
printf("an allocation failed!\n");
#ifdef SPLIT
return 0;
#endif
while (1) {
assert(allocs.size() > 0);
void *curr = allocs.back();
allocs.pop_back();
free(curr);
printf("freed one\n");
if (malloc(CHUNK_SIZE)) break;
}
printf("managed another malloc!\n");
}
''' % (pre_fail, post_fail))
args = [EMCC, 'main.cpp'] + opts + aborting_args
args += ['-s', 'TEST_MEMORY_GROWTH_FAILS=1'] # In this test, force memory growth to fail
if growth:
args += ['-s', 'ALLOW_MEMORY_GROWTH=1']
# growth disables aborting by default, but it can be overridden
aborting = 'ABORTING_MALLOC=1' in aborting_args or (not aborting_args and not growth)
print('test_failing_alloc', args, pre_fail)
run_process(args)
# growth also disables aborting
can_manage_another = not aborting
split = '-DSPLIT' in args
print('can manage another:', can_manage_another, 'split:', split, 'aborting:', aborting)
output = run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=0 if can_manage_another else None)
if can_manage_another:
self.assertContained('an allocation failed!\n', output)
if not split:
# We should fail eventually, then free, then succeed. (With -DSPLIT, the
# allocation may fail due to GC objects no longer being allocatable and we
# can't expect to recover deterministically, so there we only check that
# we get to the failure.)
self.assertContained('managed another malloc!\n', output)
else:
# we should see an abort
self.assertContained('abort(Cannot enlarge memory arrays', output)
if growth:
# when growth is enabled, the default is to not abort, so the message just explains how to get NULL instead of aborting
self.assertContained('If you want malloc to return NULL (0) instead of this abort, do not link with -s ABORTING_MALLOC=1', output)
else:
# when growth is not enabled, suggest 3 possible solutions (start with more memory, allow growth, or don't abort)
self.assertContained(('higher than the current value 16777216,', 'higher than the current value 33554432,'), output)
self.assertContained('compile with -s ALLOW_MEMORY_GROWTH=1 ', output)
self.assertContained('compile with -s ABORTING_MALLOC=0 ', output)
def test_failing_growth_2gb(self):
create_test_file('test.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
void* out;
int main() {
while (1) {
puts("loop...");
out = malloc(1024 * 1024);
if (!out) {
puts("done");
return 0;
}
}
}
''')
run_process([EMCC, '-O1', 'test.cpp', '-s', 'ALLOW_MEMORY_GROWTH'])
self.assertContained('done', run_js('a.out.js'))
def test_libcxx_minimal(self):
create_test_file('vector.cpp', r'''
#include <vector>
int main(int argc, char** argv) {
std::vector<void*> v;
for (int i = 0 ; i < argc; i++) {
v.push_back(nullptr);
}
return v.size();
}
''')
run_process([EMCC, '-O2', 'vector.cpp', '-o', 'vector.js'])
run_process([EMCC, '-O2', path_from_root('tests', 'hello_libcxx.cpp'), '-o', 'iostream.js'])
vector = os.path.getsize('vector.js')
iostream = os.path.getsize('iostream.js')
print(vector, iostream)
self.assertGreater(vector, 1000)
# we can strip out almost all of libcxx when just using vector
self.assertLess(2.25 * vector, iostream)
@no_wasm_backend('relies on EMULATED_FUNCTION_POINTERS')
def test_emulated_function_pointers(self):
create_test_file('src.c', r'''
#include <emscripten.h>
typedef void (*fp)();
int main(int argc, char **argv) {
volatile fp f = 0;
EM_ASM({
if (typeof FUNCTION_TABLE_v !== 'undefined') {
out('function table: ' + FUNCTION_TABLE_v);
} else {
out('no visible function tables');
}
});
if (f) f();
return 0;
}
''')
def test(args, expected):
print(args, expected)
run_process([EMCC, 'src.c', '-s', 'WASM=0'] + args, stderr=PIPE)
self.assertContained(expected, run_js('a.out.js'))
for opts in [0, 1, 2, 3]:
test(['-O' + str(opts)], 'no visible function tables')
test(['-O' + str(opts), '-s', 'EMULATED_FUNCTION_POINTERS=1'], 'function table: ')
@no_wasm_backend('relies on EMULATED_FUNCTION_POINTERS')
def test_emulated_function_pointers_2(self):
create_test_file('src.c', r'''
#include <emscripten.h>
typedef void (*fp)();
static void one() { EM_ASM( out('one') ); }
static void two() { EM_ASM( out('two') ); }
void test() {
volatile fp f = one;
f();
f = two;
f();
}
int main(int argc, char **argv) {
test();
// swap them!
EM_ASM_INT({
var one = $0;
var two = $1;
if (typeof FUNCTION_TABLE_v === 'undefined') {
out('no');
return;
}
var temp = FUNCTION_TABLE_v[one];
FUNCTION_TABLE_v[one] = FUNCTION_TABLE_v[two];
FUNCTION_TABLE_v[two] = temp;
}, (int)&one, (int)&two);
test();
return 0;
}
''')
flipped = 'one\ntwo\ntwo\none\n'
unchanged = 'one\ntwo\none\ntwo\n'
no_table = 'one\ntwo\nno\none\ntwo\n'
def test(args, expected):
print(args, expected.replace('\n', ' '))
run_process([EMCC, 'src.c', '-s', 'WASM=0'] + args)
self.assertContained(expected, run_js('a.out.js'))
for opts in [0, 1, 2]:
test(['-O' + str(opts)], no_table)
test(['-O' + str(opts), '-s', 'EMULATED_FUNCTION_POINTERS=1'], flipped)
test(['-O' + str(opts), '-s', 'EMULATED_FUNCTION_POINTERS=2'], flipped)
test(['-O' + str(opts), '-s', 'EMULATED_FUNCTION_POINTERS=1', '-s', 'RELOCATABLE=1'], flipped)
test(['-O' + str(opts), '-s', 'EMULATED_FUNCTION_POINTERS=2', '-s', 'RELOCATABLE=1'], unchanged) # with both of those, we optimize and you cannot flip them
test(['-O' + str(opts), '-s', 'MAIN_MODULE=1'], unchanged) # default for modules is optimized
test(['-O' + str(opts), '-s', 'MAIN_MODULE=1', '-s', 'EMULATED_FUNCTION_POINTERS=2'], unchanged)
test(['-O' + str(opts), '-s', 'MAIN_MODULE=1', '-s', 'EMULATED_FUNCTION_POINTERS=1'], flipped) # but you can disable that
def test_minimal_dynamic(self):
def run(wasm):
print('wasm?', wasm)
library_file = 'library.wasm' if wasm else 'library.js'
def test(main_args, library_args=[], expected='hello from main\nhello from library'):
print('testing', main_args, library_args)
self.clear()
create_test_file('library.c', r'''
#include <stdio.h>
void library_func() {
#ifdef USE_PRINTF
printf("hello from library: %p\n", &library_func);
#else
puts("hello from library");
#endif
}
''')
# -fno-builtin to prevent printf -> iprintf optimization
run_process([EMCC, 'library.c', '-fno-builtin', '-s', 'SIDE_MODULE=1', '-O2', '-o', library_file, '-s', 'WASM=' + str(wasm), '-s', 'EXPORT_ALL'] + library_args)
create_test_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
int main() {
puts("hello from main");
void *lib_handle = dlopen("%s", 0);
if (!lib_handle) {
puts("cannot load side module");
return 1;
}
typedef void (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x) puts("cannot find side function");
else x();
}
''' % library_file)
run_process([EMCC, 'main.c', '--embed-file', library_file, '-O2', '-s', 'WASM=' + str(wasm)] + main_args)
self.assertContained(expected, run_js('a.out.js', assert_returncode=None, stderr=STDOUT))
size = os.path.getsize('a.out.js')
if wasm:
size += os.path.getsize('a.out.wasm')
side_size = os.path.getsize(library_file)
print(' sizes:', size, side_size)
return (size, side_size)
def percent_diff(x, y):
small = min(x, y)
large = max(x, y)
return float(100 * large) / small - 100
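# e.g. percent_diff(100, 104) == 4.0; the result is always a non-negative percentage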
full = test(main_args=['-s', 'MAIN_MODULE=1'])
# printf is not used in main, but libc was linked in, so it's there
printf = test(main_args=['-s', 'MAIN_MODULE=1'], library_args=['-DUSE_PRINTF'])
# main module tests
# dce in main, and it fails since puts is not exported
dce = test(main_args=['-s', 'MAIN_MODULE=2'], expected=('cannot', 'undefined'))
# with exporting, it works
dce = test(main_args=['-s', 'MAIN_MODULE=2', '-s', 'EXPORTED_FUNCTIONS=["_main", "_puts"]'])
# printf is not used in main, and we dce, so we fail
dce_fail = test(main_args=['-s', 'MAIN_MODULE=2'], library_args=['-DUSE_PRINTF'], expected=('cannot', 'undefined'))
# exporting printf in main keeps it alive for the library
dce_save = test(main_args=['-s', 'MAIN_MODULE=2', '-s', 'EXPORTED_FUNCTIONS=["_main", "_printf", "_puts"]'], library_args=['-DUSE_PRINTF'])
self.assertLess(percent_diff(full[0], printf[0]), 4)
self.assertLess(percent_diff(dce[0], dce_fail[0]), 4)
self.assertLess(dce[0], 0.2 * full[0]) # big effect, 80%+ is gone
self.assertGreater(dce_save[0], 1.05 * dce[0]) # save exported all of printf
# side module tests
# mode 2, so dce in side, but library_func is not exported, so it is dce'd
side_dce_fail = test(main_args=['-s', 'MAIN_MODULE=1'], library_args=['-s', 'SIDE_MODULE=2'], expected='cannot find side function')
# mode 2, so dce in side, but library_func is explicitly exported, so it survives
side_dce_work = test(main_args=['-s', 'MAIN_MODULE=1'], library_args=['-s', 'SIDE_MODULE=2', '-s', 'EXPORTED_FUNCTIONS=["_library_func"]'], expected='hello from library')
self.assertLess(side_dce_fail[1], 0.95 * side_dce_work[1]) # removing that function saves a chunk
run(wasm=1)
if not self.is_wasm_backend():
run(wasm=0)
def test_ld_library_path(self):
create_test_file('hello1.c', r'''
#include <stdio.h>
void
hello1 ()
{
printf ("Hello1\n");
return;
}
''')
create_test_file('hello2.c', r'''
#include <stdio.h>
void
hello2 ()
{
printf ("Hello2\n");
return;
}
''')
create_test_file('hello3.c', r'''
#include <stdio.h>
void
hello3 ()
{
printf ("Hello3\n");
return;
}
''')
create_test_file('hello4.c', r'''
#include <stdio.h>
#include <math.h>
double
hello4 (double x)
{
printf ("Hello4\n");
return fmod(x, 2.0);
}
''')
create_test_file('pre.js', r'''
Module['preRun'].push(function (){
ENV['LD_LIBRARY_PATH']='/lib:/usr/lib';
});
''')
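# The pre.js above sets LD_LIBRARY_PATH so that the bare-name dlopen() calls in
# main.c can find the libraries embedded under /lib and /usr/lib below.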
create_test_file('main.c', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dlfcn.h>
int
main()
{
void *h;
void (*f) ();
double (*f2) (double);
h = dlopen ("libhello1.wasm", RTLD_NOW);
f = dlsym (h, "hello1");
f();
dlclose (h);
h = dlopen ("libhello2.wasm", RTLD_NOW);
f = dlsym (h, "hello2");
f();
dlclose (h);
h = dlopen ("libhello3.wasm", RTLD_NOW);
f = dlsym (h, "hello3");
f();
dlclose (h);
h = dlopen ("/usr/local/lib/libhello4.wasm", RTLD_NOW);
f2 = dlsym (h, "hello4");
double result = f2(5.5);
dlclose (h);
if (result == 1.5) {
printf("Ok\n");
}
return 0;
}
''')
run_process([EMCC, '-o', 'libhello1.wasm', 'hello1.c', '-s', 'SIDE_MODULE=1', '-s', 'EXPORT_ALL=1'])
run_process([EMCC, '-o', 'libhello2.wasm', 'hello2.c', '-s', 'SIDE_MODULE=1', '-s', 'EXPORT_ALL=1'])
run_process([EMCC, '-o', 'libhello3.wasm', 'hello3.c', '-s', 'SIDE_MODULE=1', '-s', 'EXPORT_ALL=1'])
run_process([EMCC, '-o', 'libhello4.wasm', 'hello4.c', '-s', 'SIDE_MODULE=1', '-s', 'EXPORT_ALL=1'])
run_process([EMCC, '-o', 'main.js', 'main.c', '-s', 'MAIN_MODULE=1', '-s', 'INITIAL_MEMORY=' + str(32 * 1024 * 1024),
'--embed-file', 'libhello1.wasm@/lib/libhello1.wasm',
'--embed-file', 'libhello2.wasm@/usr/lib/libhello2.wasm',
'--embed-file', 'libhello3.wasm@/libhello3.wasm',
'--embed-file', 'libhello4.wasm@/usr/local/lib/libhello4.wasm',
'--pre-js', 'pre.js'])
out = run_js('main.js')
self.assertContained('Hello1', out)
self.assertContained('Hello2', out)
self.assertContained('Hello3', out)
self.assertContained('Hello4', out)
self.assertContained('Ok', out)
def test_dlopen_rtld_global(self):
# This test checks RTLD_GLOBAL where a module is loaded before the module
# that provides a global it needs. In asm.js we use JS to create a
# redirection function. In wasm we just have wasm, so we need to
# introspect the wasm module. Browsers may add that eventually, or we
# could ship a little library that does it.
create_test_file('hello1.c', r'''
#include <stdio.h>
extern int hello1_val;
int hello1_val=3;
void
hello1 (int i)
{
printf ("hello1_val by hello1:%d\n",hello1_val);
printf ("Hello%d\n",i);
}
''')
create_test_file('hello2.c', r'''
#include <stdio.h>
extern int hello1_val;
extern void hello1 (int);
void
hello2 (int i)
{
void (*f) (int);
printf ("hello1_val by hello2:%d\n",hello1_val);
f = hello1;
f(i);
}
''')
create_test_file('main.c', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dlfcn.h>
int
main(int argc,char** argv)
{
void *h;
void *h2;
void (*f) (int);
h = dlopen ("libhello1.wasm", RTLD_NOW|RTLD_GLOBAL);
h2 = dlopen ("libhello2.wasm", RTLD_NOW|RTLD_GLOBAL);
f = dlsym (h, "hello1");
f(1);
f = dlsym (h2, "hello2");
f(2);
dlclose (h);
dlclose (h2);
return 0;
}
''')
run_process([EMCC, '-o', 'libhello1.js', 'hello1.c', '-s', 'SIDE_MODULE=1', '-s', 'EXPORT_ALL=1'])
run_process([EMCC, '-o', 'libhello2.js', 'hello2.c', '-s', 'SIDE_MODULE=1', '-s', 'EXPORT_ALL=1'])
run_process([EMCC, '-o', 'main.js', 'main.c', '-s', 'MAIN_MODULE=1',
'--embed-file', 'libhello1.wasm',
'--embed-file', 'libhello2.wasm'])
out = run_js('main.js')
self.assertContained('Hello1', out)
self.assertContained('Hello2', out)
self.assertContained('hello1_val by hello1:3', out)
self.assertContained('hello1_val by hello2:3', out)
@no_fastcomp()
def test_main_module_without_exceptions_message(self):
# A side module that needs exceptions needs a main module with that
# support enabled; show a clear message in that case.
create_test_file('side.cpp', r'''
#include <exception>
#include <stdio.h>
extern "C" void test_throw() {
try {
throw 42;
} catch(int x) {
printf("catch %d.\n", x);
return;
}
puts("bad location");
}
''')
create_test_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dlfcn.h>
typedef void (*voidf)();
int main() {
void* h = dlopen ("libside.wasm", RTLD_NOW|RTLD_GLOBAL);
assert(h);
voidf f = (voidf)dlsym(h, "test_throw");
assert(f);
f();
return 0;
}
''')
run_process([EMCC, '-o', 'libside.wasm', 'side.cpp', '-s', 'SIDE_MODULE=1', '-fexceptions'])
def build_main(args):
print(args)
with env_modify({'EMCC_FORCE_STDLIBS': 'libc++abi'}):
run_process([EMCC, 'main.cpp', '-s', 'MAIN_MODULE=1',
'--embed-file', 'libside.wasm'] + args)
build_main([])
out = run_js('a.out.js', assert_returncode=None, stderr=STDOUT)
self.assertContained('Exception catching is disabled, this exception cannot be caught.', out)
self.assertContained('note: in dynamic linking, if a side module wants exceptions, the main module must be built with that support', out)
build_main(['-fexceptions'])
out = run_js('a.out.js')
self.assertContained('catch 42', out)
def test_debug_asmLastOpts(self):
create_test_file('src.c', r'''
#include <stdio.h>
struct Dtlink_t
{ struct Dtlink_t* right; /* right child */
union
{ unsigned int _hash; /* hash value */
struct Dtlink_t* _left; /* left child */
} hl;
};
int treecount(register struct Dtlink_t* e)
{
return e ? treecount(e->hl._left) + treecount(e->right) + 1 : 0;
}
int main() {
printf("hello, world!\n");
}
''')
run_process([EMCC, 'src.c', '-s', 'EXPORTED_FUNCTIONS=["_main", "_treecount"]', '--minify', '0', '-g4', '-Oz'])
self.assertContained('hello, world!', run_js('a.out.js'))
@no_wasm_backend('MEM_INIT_METHOD not supported under wasm')
def test_meminit_crc(self):
create_test_file('src.c', r'''
#include <stdio.h>
int main() { printf("Mary had a little lamb.\n"); }
''')
run_process([EMCC, 'src.c', '--memory-init-file', '0', '-s', 'MEM_INIT_METHOD=2', '-s', 'ASSERTIONS=1', '-s', 'WASM=0'])
with open('a.out.js') as f:
d = f.read()
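# NOTE: the bare 'return' below skips the checksum-corruption checks that
# follow, so this currently only verifies that the build succeeds.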
return
self.assertContained('Mary had', d)
d = d.replace('Mary had', 'Paul had')
create_test_file('a.out.js', d)
out = run_js('a.out.js', assert_returncode=None, stderr=STDOUT)
self.assertContained('Assertion failed: memory initializer checksum', out)
def test_emscripten_print_double(self):
create_test_file('src.c', r'''
#include <stdio.h>
#include <assert.h>
#include <emscripten.h>
void test(double d) {
char buffer[100], buffer2[100];
unsigned len, len2, len3;
len = emscripten_print_double(d, NULL, -1);
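// With a NULL buffer the call is expected to just report how many characters
// would be written; the next call writes that many characters into 'buffer'.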
len2 = emscripten_print_double(d, buffer, len+1);
assert(len == len2);
buffer[len] = 0;
len3 = snprintf(buffer2, 100, "%g", d);
printf("|%g : %u : %s : %s : %d|\n", d, len, buffer, buffer2, len3);
}
int main() {
printf("\n");
test(0);
test(1);
test(-1);
test(1.234);
test(-1.234);
test(1.1234E20);
test(-1.1234E20);
test(1.1234E-20);
test(-1.1234E-20);
test(1.0/0.0);
test(-1.0/0.0);
}
''')
run_process([EMCC, 'src.c'])
out = run_js('a.out.js')
self.assertContained('''
|0 : 1 : 0 : 0 : 1|
|1 : 1 : 1 : 1 : 1|
|-1 : 2 : -1 : -1 : 2|
|1.234 : 5 : 1.234 : 1.234 : 5|
|-1.234 : 6 : -1.234 : -1.234 : 6|
|1.1234e+20 : 21 : 112340000000000000000 : 1.1234e+20 : 10|
|-1.1234e+20 : 22 : -112340000000000000000 : -1.1234e+20 : 11|
|1.1234e-20 : 10 : 1.1234e-20 : 1.1234e-20 : 10|
|-1.1234e-20 : 11 : -1.1234e-20 : -1.1234e-20 : 11|
|inf : 8 : Infinity : inf : 3|
|-inf : 9 : -Infinity : -inf : 4|
''', out)
def test_emscripten_scan_stack(self):
create_test_file('src.cpp', r'''
#include <set>
#include <emscripten.h>
#include <stdio.h>
#include <assert.h>
std::set<int> seenInts;
void scan(void* x, void* y) {
printf("scan\n");
int* p = (int*)x;
int* q = (int*)y;
// The callback sends us the [low, high) range.
assert(p < q);
// The range is of a reasonable size - not all of memory.
assert(q - p < 100);
while (p < q) {
seenInts.insert(*p);
p++;
}
}
int main() {
int x;
int* y = &x;
*y = 12345678;
emscripten_scan_stack(scan);
assert(seenInts.count(12345678));
puts("ok");
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('ok', run_js('a.out.js'))
def test_no_warn_exported_jslibfunc(self):
err = run_process([EMCC, path_from_root('tests', 'hello_world.c'),
'-s', 'DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=["alGetError"]',
'-s', 'EXPORTED_FUNCTIONS=["_main", "_alGetError"]'], stderr=PIPE).stderr
self.assertNotContained('function requested to be exported, but not implemented: "_alGetError"', err)
@no_wasm_backend()
def test_almost_asm_warning(self):
def run(args, expected):
print(args, expected)
err = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0'] + args, stderr=PIPE).stderr
if expected:
self.assertContained('[-Walmost-asm]', err)
else:
self.assertEqual(err, '')
run(['-O1', '-s', 'ALLOW_MEMORY_GROWTH=1'], True) # default
# suppress almost-asm warning manually
run(['-O1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-Wno-almost-asm'], False)
# last warning flag should "win"
run(['-O1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-Wno-almost-asm', '-Walmost-asm'], True)
def test_musl_syscalls(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
src = open('a.out.js').read()
# there should be no musl syscalls in hello world output
self.assertNotContained('__syscall', src)
@no_windows('posix-only')
def test_emcc_dev_null(self):
out = run_process([EMCC, '-dM', '-E', '-x', 'c', '/dev/null'], stdout=PIPE).stdout
self.assertContained('#define __EMSCRIPTEN__ 1', out) # all our defines should show up
def test_umask_0(self):
create_test_file('src.c', r'''
#include <sys/stat.h>
#include <stdio.h>
int main() {
umask(0);
printf("hello, world!\n");
}''')
run_process([EMCC, 'src.c'])
self.assertContained('hello, world!', run_js('a.out.js'))
def test_no_missing_symbols(self): # simple hello world should not show any missing symbols
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
# main() is implemented in C, and even if requested from JS, we should not warn
create_test_file('library_foo.js', '''
mergeInto(LibraryManager.library, {
my_js__deps: ['main'],
my_js: (function() {
return function() {
console.log("hello " + _nonexistingvariable);
};
}()),
});
''')
create_test_file('test.cpp', '''
#include <stdio.h>
#include <stdlib.h>
extern "C" {
extern void my_js();
}
int main() {
my_js();
return EXIT_SUCCESS;
}
''')
run_process([EMCC, 'test.cpp', '--js-library', 'library_foo.js'])
# but we do error on a missing js var
create_test_file('library_foo_missing.js', '''
mergeInto(LibraryManager.library, {
my_js__deps: ['main', 'nonexistingvariable'],
my_js: (function() {
return function() {
console.log("hello " + _nonexistingvariable);
};
}()),
});
''')
err = self.expect_fail([EMCC, 'test.cpp', '--js-library', 'library_foo_missing.js'])
self.assertContained('undefined symbol: nonexistingvariable', err)
# and also for missing C code, of course (without the --js-library, it's just a missing C method)
err = self.expect_fail([EMCC, 'test.cpp'])
self.assertContained('undefined symbol: my_js', err)
@no_fastcomp('fastcomp links in memset in JS in a hackish way')
def test_js_lib_to_system_lib(self):
# memset is in compiled code, so a js library __deps can't access it. it
# would need to be in deps_info.json or EXPORTED_FUNCTIONS
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
depper__deps: ['memset'],
depper: function(ptr) {
_memset(ptr, 'd'.charCodeAt(0), 10);
},
});
''')
create_test_file('test.cpp', r'''
#include <string.h>
#include <stdio.h>
extern "C" {
extern void depper(char*);
}
int main(int argc, char** argv) {
char buffer[11];
buffer[10] = '\0';
// call by a pointer, to force linking of memset, no llvm intrinsic here
volatile auto ptr = memset;
(*ptr)(buffer, 'a', 10);
depper(buffer);
puts(buffer);
}
''')
err = self.expect_fail([EMCC, 'test.cpp', '--js-library', 'lib.js'])
self.assertContained('_memset may need to be added to EXPORTED_FUNCTIONS if it arrives from a system library', err)
# without the dep, and with EXPORTED_FUNCTIONS, it works ok
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
depper: function(ptr) {
_memset(ptr, 'd'.charCodeAt(0), 10);
},
});
''')
run_process([EMCC, 'test.cpp', '--js-library', 'lib.js', '-s', 'EXPORTED_FUNCTIONS=[_main,_memset]'])
self.assertContained('dddddddddd', run_js('a.out.js'))
def test_realpath(self):
create_test_file('src.c', r'''
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#define TEST_PATH "/boot/README.txt"
int
main(int argc, char **argv)
{
errno = 0;
char *t_realpath_buf = realpath(TEST_PATH, NULL);
if (NULL == t_realpath_buf) {
perror("Resolve failed");
return 1;
} else {
printf("Resolved: %s\n", t_realpath_buf);
free(t_realpath_buf);
return 0;
}
}
''')
ensure_dir('boot')
create_test_file(os.path.join('boot', 'README.txt'), ' ')
run_process([EMCC, 'src.c', '--embed-file', 'boot'])
self.assertContained('Resolved: /boot/README.txt', run_js('a.out.js'))
def test_realpath_nodefs(self):
create_test_file('src.c', r'''
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <emscripten.h>
#define TEST_PATH "/working/TEST_NODEFS.txt"
int
main(int argc, char **argv)
{
errno = 0;
EM_ASM({
FS.mkdir('/working');
FS.mount(NODEFS, { root: '.' }, '/working');
});
char *t_realpath_buf = realpath(TEST_PATH, NULL);
if (NULL == t_realpath_buf) {
perror("Resolve failed");
return 1;
} else {
printf("Resolved: %s\n", t_realpath_buf);
free(t_realpath_buf);
return 0;
}
}
''')
create_test_file('TEST_NODEFS.txt', ' ')
run_process([EMCC, 'src.c', '-lnodefs.js'])
self.assertContained('Resolved: /working/TEST_NODEFS.txt', run_js('a.out.js'))
def test_realpath_2(self):
ensure_dir('Folder')
create_test_file('src.c', r'''
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
int testrealpath(const char* path) {
errno = 0;
char *t_realpath_buf = realpath(path, NULL);
if (NULL == t_realpath_buf) {
printf("Resolve failed: \"%s\"\n",path);fflush(stdout);
return 1;
} else {
printf("Resolved: \"%s\" => \"%s\"\n", path, t_realpath_buf);fflush(stdout);
free(t_realpath_buf);
return 0;
}
}
int main(int argc, char **argv)
{
// files:
testrealpath("testfile.txt");
testrealpath("Folder/testfile.txt");
testrealpath("testnonexistentfile.txt");
// folders
testrealpath("Folder");
testrealpath("/Folder");
testrealpath("./");
testrealpath("");
testrealpath("/");
return 0;
}
''')
create_test_file('testfile.txt', '')
create_test_file(os.path.join('Folder', 'testfile.txt'), '')
run_process([EMCC, 'src.c', '--embed-file', 'testfile.txt', '--embed-file', 'Folder'])
self.assertContained('''Resolved: "testfile.txt" => "/testfile.txt"
Resolved: "Folder/testfile.txt" => "/Folder/testfile.txt"
Resolve failed: "testnonexistentfile.txt"
Resolved: "Folder" => "/Folder"
Resolved: "/Folder" => "/Folder"
Resolved: "./" => "/"
Resolve failed: ""
Resolved: "/" => "/"
''', run_js('a.out.js'))
def test_no_warnings(self):
# build once before to make sure system libs etc. exist
run_process([EMCC, path_from_root('tests', 'hello_libcxx.cpp')])
# check that there is nothing in stderr for a regular compile
err = run_process([EMCC, path_from_root('tests', 'hello_libcxx.cpp')], stderr=PIPE).stderr
self.assertEqual(err, '')
@no_wasm_backend("llvm-lto is fastcomp only flag")
def test_llvm_lto(self):
sizes = {}
lto_levels = [0, 1, 2, 3]
for lto in lto_levels:
cmd = [EMCC, path_from_root('tests', 'hello_libcxx.cpp'), '-O2', '--llvm-lto', str(lto)]
if self.is_wasm_backend():
cmd += ['-flto']
print(cmd)
run_process(cmd)
self.assertContained('hello, world!', run_js('a.out.js'))
sizes[lto] = os.path.getsize('a.out.wasm')
print(sizes)
# LTO sizes should be distinct
for i in lto_levels:
assert sizes[i] not in set(sizes).difference(set([sizes[i]]))
# LTO should reduce code size
# Skip mode 2 because it has historically increased code size, but not always
self.assertLess(sizes[1], sizes[0])
if not self.is_wasm_backend():
self.assertLess(sizes[3], sizes[0])
def test_dlmalloc_modes(self):
create_test_file('src.cpp', r'''
#include <stdlib.h>
#include <stdio.h>
int main() {
void* c = malloc(1024);
free(c);
free(c);
printf("double-freed\n");
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('double-freed', run_js('a.out.js'))
# in debug mode, the double-free is caught
run_process([EMCC, 'src.cpp', '-s', 'ASSERTIONS=2'])
seen_error = False
out = '?'
try:
out = run_js('a.out.js')
except Exception:
seen_error = True
self.assertTrue(seen_error, out)
def test_mallocs(self):
def run(opts):
print(opts)
sizes = {}
for malloc, name in (
('dlmalloc', 'dlmalloc'),
(None, 'default'),
('emmalloc', 'emmalloc')
):
print(malloc, name)
cmd = [EMCC, path_from_root('tests', 'hello_libcxx.cpp'), '-o', 'a.out.js'] + opts
if malloc:
cmd += ['-s', 'MALLOC="%s"' % malloc]
print(cmd)
run_process(cmd)
sizes[name] = os.path.getsize('a.out.wasm')
print(sizes)
# dlmalloc is the default
self.assertEqual(sizes['dlmalloc'], sizes['default'])
# emmalloc is much smaller
self.assertLess(sizes['emmalloc'], sizes['dlmalloc'] - 5000)
run([])
run(['-O2'])
@no_fastcomp("fastcomp doesn't support 2GB+")
def test_emmalloc_2GB(self):
def test(args, text=None):
if text:
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MALLOC=emmalloc'] + args)
self.assertContained(text, stderr)
else:
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MALLOC=emmalloc'] + args)
test(['-s', 'INITIAL_MEMORY=2GB'], 'INITIAL_MEMORY must be less than 2GB due to current spec limitations')
# emmalloc allows growth by default (as the max size is fine), but not if
# a too-high max is set
test(['-s', 'ALLOW_MEMORY_GROWTH'])
test(['-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=1GB'])
test(['-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=3GB'], 'emmalloc only works on <2GB of memory. Use the default allocator, or decrease MAXIMUM_MEMORY')
@no_fastcomp("fastcomp doesn't support 2GB+")
def test_2GB_plus(self):
# when the heap size can be over 2GB, we rewrite pointers to be unsigned
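# (presumably via unsigned '>>> 0' pointer coercions in the JS glue, which makes
# the output slightly larger -- that size difference is what we measure below)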
def test(page_diff):
args = [EMCC, path_from_root('tests', 'hello_world.c'), '-O2', '-s', 'ALLOW_MEMORY_GROWTH']
if page_diff is not None:
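# 2**31 bytes is exactly 2 GB; page_diff shifts MAXIMUM_MEMORY by one 64 KB wasm page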
args += ['-s', 'MAXIMUM_MEMORY=%d' % (2**31 + page_diff * 64 * 1024)]
print(args)
run_process(args)
return os.path.getsize('a.out.js')
less = test(-1)
equal = test(0)
more = test(1)
none = test(None)
# exactly 2GB still doesn't require unsigned pointers, as we can't address
# the 2GB location in memory
self.assertEqual(less, equal)
self.assertLess(equal, more)
# not specifying maximum memory does not result in unsigned pointers, as the
# default maximum memory is 2GB.
self.assertEqual(less, none)
@no_fastcomp('depends on wasm-emscripten-finalize')
@parameterized({
'normal': (['-s', 'WASM_BIGINT=0'], 'testbind.js'),
'bigint': (['-s', 'WASM_BIGINT=1'], 'testbind_bigint.js'),
})
def test_sixtyfour_bit_return_value(self, args, bind_js):
# This test checks that the most significant 32 bits of a 64 bit long are correctly made available
# to native JavaScript applications that wish to interact with compiled code returning 64 bit longs.
# The MS 32 bits should be available in Runtime.getTempRet0() even when compiled with -O2 --closure 1
# Compile test.c and wrap it in a native JavaScript binding so we can call our compiled function from JS.
run_process([EMCC, path_from_root('tests', 'return64bit', 'test.c'),
'--pre-js', path_from_root('tests', 'return64bit', 'testbindstart.js'),
'--pre-js', path_from_root('tests', 'return64bit', bind_js),
'--post-js', path_from_root('tests', 'return64bit', 'testbindend.js'),
'-s', 'EXPORTED_FUNCTIONS=["_test_return64"]', '-o', 'test.js', '-O2',
'--closure', '1', '-g1', '-s', 'WASM_ASYNC_COMPILATION=0'] + args)
# Simple test program to load the test.js binding library and call the binding to the
# C function returning the 64 bit long.
create_test_file('testrun.js', '''
var test = require("./test.js");
test.runtest();
''')
# Run the test and confirm the output is as expected.
out = run_js('testrun.js', engine=NODE_JS + ['--experimental-wasm-bigint'])
self.assertContained('''\
input = 0xaabbccdd11223344
low = 5678
high = 1234
input = 0xabcdef1912345678
low = 5678
high = 1234
''', out)
def test_lib_include_flags(self):
run_process([EMCC] + '-l m -l c -I'.split() + [path_from_root('tests', 'include_test'), path_from_root('tests', 'lib_include_flags.c')])
def test_dash_s(self):
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s'])
self.assertContained('hello, world!', run_js('a.out.js'))
def test_dash_s_response_file_string(self):
create_test_file('response_file', '"MyModule"\n')
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'EXPORT_NAME=@response_file'])
def test_dash_s_response_file_list(self):
create_test_file('response_file', '["_main", "_malloc"]\n')
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'EXPORTED_FUNCTIONS=@response_file'])
  def test_dash_s_response_file_missing(self):
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'EXPORTED_FUNCTIONS=@foo'])
self.assertContained('error: foo: file not found parsing argument: EXPORTED_FUNCTIONS=@foo', err)
def test_dash_s_unclosed_quote(self):
# Unclosed quote
err = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), "-s", "TEST_KEY='MISSING_QUOTE"], stderr=PIPE, check=False).stderr
self.assertNotContained('AssertionError', err) # Do not mention that it is an assertion error
self.assertContained('unclosed opened quoted string. expected final character to be "\'"', err)
def test_dash_s_single_quote(self):
# Only one quote
err = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), "-s", "TEST_KEY='"], stderr=PIPE, check=False).stderr
self.assertNotContained('AssertionError', err) # Do not mention that it is an assertion error
self.assertContained('unclosed opened quoted string.', err)
def test_dash_s_unclosed_list(self):
# Unclosed list
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), "-s", "TEST_KEY=[Value1, Value2"])
self.assertNotContained('AssertionError', err) # Do not mention that it is an assertion error
self.assertContained('unclosed opened string list. expected final character to be "]"', err)
def test_dash_s_valid_list(self):
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), "-s", "TEST_KEY=[Value1, \"Value2\"]"])
self.assertNotContained('a problem occurred in evaluating the content after a "-s", specifically', err)
def test_dash_s_wrong_type(self):
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'EXPORTED_FUNCTIONS=foo'])
self.assertContained("error: setting `EXPORTED_FUNCTIONS` expects `<class 'list'>` but got `<class 'str'>`", err)
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'EXIT_RUNTIME=[foo,bar]'])
self.assertContained("error: setting `EXIT_RUNTIME` expects `<class 'int'>` but got `<class 'list'>`", err)
def test_dash_s_typo(self):
# with suggestions
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'DISABLE_EXCEPTION_CATCH=1'])
self.assertContained("Attempt to set a non-existent setting: 'DISABLE_EXCEPTION_CATCH'", stderr)
self.assertContained('did you mean one of DISABLE_EXCEPTION_CATCHING', stderr)
# no suggestions
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'CHEEZ=1'])
self.assertContained("perhaps a typo in emcc\'s -s X=Y notation?", stderr)
self.assertContained('(see src/settings.js for valid values)', stderr)
# suggestions do not include renamed legacy settings
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'ZBINARYEN_ASYNC_COMPILATION'])
self.assertContained("Attempt to set a non-existent setting: 'ZBINARYEN_ASYNC_COMPILATION'", stderr)
self.assertNotContained(' BINARYEN_ASYNC_COMPILATION', stderr)
def test_python_2_3(self):
# check emcc/em++ can be called by any python
def trim_py_suffix(filename):
"""remove .py from EMCC(=emcc.py)"""
return filename[:-3] if filename.endswith('.py') else filename
def run(python):
if python == 'python3':
has = is_python3_version_supported()
else:
has = shared.which(python) is not None
print(python, has)
if has:
print(' checking emcc.py...')
run_process([python, path_from_root('emcc.py'), '--version'], stdout=PIPE)
print(' checking em++.py...')
run_process([python, path_from_root('em++.py'), '--version'], stdout=PIPE)
run('python')
run('python2')
run('python3')
def test_zeroinit(self):
create_test_file('src.c', r'''
#include <stdio.h>
int buf[1048576];
int main() {
printf("hello, world! %d\n", buf[123456]);
return 0;
}
''')
run_process([EMCC, 'src.c', '-O2', '-g'])
size = os.path.getsize('a.out.wasm')
# size should be much smaller than the size of that zero-initialized buffer
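    # (buf is 4MB of zero-initialized global data; it should live in a
    # zero-filled region of linear memory rather than being embedded in the
    # binary as an explicit data segment)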
self.assertLess(size, 123456 / 2)
@no_wasm_backend('asm.js')
def test_separate_asm_warning(self):
# Test that -s PRECISE_F32=2 raises a warning that --separate-asm is implied.
stderr = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '-s', 'PRECISE_F32=2', '-o', 'a.html'], stderr=PIPE).stderr
self.assertContained('forcing separate asm output', stderr)
    # Test that -s PRECISE_F32=2 with --separate-asm does not emit the warning.
stderr = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '-s', 'PRECISE_F32=2', '-o', 'a.html', '--separate-asm'], stderr=PIPE).stderr
self.assertNotContained('forcing separate asm output', stderr)
    # Test that -s PRECISE_F32=1 does not emit the warning.
stderr = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '-s', 'PRECISE_F32=1', '-o', 'a.html'], stderr=PIPE).stderr
self.assertNotContained('forcing separate asm output', stderr)
# Manually doing separate asm should show a warning, if not targeting html
warning = '--separate-asm works best when compiling to HTML'
stderr = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '--separate-asm'], stderr=PIPE).stderr
self.assertContained(warning, stderr)
stderr = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '--separate-asm', '-o', 'a.html'], stderr=PIPE).stderr
self.assertNotContained(warning, stderr)
# test that the warning can be suppressed
stderr = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '--separate-asm', '-Wno-separate-asm'], stderr=PIPE).stderr
self.assertNotContained(warning, stderr)
def test_canonicalize_nan_warning(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
union U {
int x;
float y;
} a;
int main() {
a.x = 0x7FC01234;
printf("%f\n", a.y);
printf("0x%x\n", a.x);
return 0;
}
''')
stderr = run_process([EMCC, 'src.cpp', '-O1'], stderr=PIPE).stderr
if not self.is_wasm_backend():
self.assertContained("emcc: warning: cannot represent a NaN literal", stderr)
stderr = run_process([EMCC, 'src.cpp', '-O1', '-g'], stderr=PIPE).stderr
self.assertContained("emcc: warning: cannot represent a NaN literal", stderr)
self.assertContained('//@line 12 "src.cpp"', stderr)
else:
out = run_js('a.out.js')
self.assertContained('nan\n', out)
self.assertContained('0x7fc01234\n', out)
@no_wasm_backend('tests our python linking logic')
def test_link_response_file_does_not_force_absolute_paths(self):
with_space = 'with space'
ensure_dir(with_space)
create_test_file(os.path.join(with_space, 'main.cpp'), '''
int main() {
return 0;
}
''')
building.emcc(os.path.join(with_space, 'main.cpp'), ['-g'])
with chdir(with_space):
link_args = building.link(['main.cpp.o'], 'all.bc', just_calculate=True)
time.sleep(0.2) # Wait for Windows FS to release access to the directory
shutil.rmtree(with_space)
# We want only the relative path to be in the linker args, it should not be converted to an absolute path.
if hasattr(self, 'assertCountEqual'):
self.assertCountEqual(link_args, ['main.cpp.o'])
else:
# Python 2 compatibility
self.assertItemsEqual(link_args, ['main.cpp.o'])
def test_memory_growth_noasm(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-O2', '-s', 'ALLOW_MEMORY_GROWTH=1'])
src = open('a.out.js').read()
assert 'use asm' not in src
def test_EM_ASM_i64(self):
create_test_file('src.cpp', '''
#include <stdint.h>
#include <emscripten.h>
int main() {
EM_ASM({
out('inputs: ' + $0 + ', ' + $1 + '.');
}, int64_t(0x12345678ABCDEF1FLL));
}
''')
stderr = self.expect_fail([EMCC, 'src.cpp', '-Oz'])
if not self.is_wasm_backend():
self.assertContained('EM_ASM should not receive i64s as inputs, they are not valid in JS', stderr)
def test_eval_ctors_non_terminating(self):
for wasm in (1, 0):
if self.is_wasm_backend() and not wasm:
continue
print('wasm', wasm)
src = r'''
struct C {
C() {
volatile int y = 0;
while (y == 0) {}
}
};
C always;
int main() {}
'''
create_test_file('src.cpp', src)
run_process([EMCC, 'src.cpp', '-O2', '-s', 'EVAL_CTORS=1', '-profiling-funcs', '-s', 'WASM=%d' % wasm])
@no_wasm_backend('EVAL_CTORS is monolithic with the wasm backend')
def test_eval_ctors(self):
for wasm in (1, 0):
if self.is_wasm_backend() and not wasm:
continue
print('wasm', wasm)
print('check no ctors is ok')
# on by default in -Oz, but user-overridable
def get_size(args):
print('get_size', args)
run_process([EMCC, path_from_root('tests', 'hello_libcxx.cpp'), '-s', 'WASM=%d' % wasm] + args)
self.assertContained('hello, world!', run_js('a.out.js'))
if wasm:
codesize = self.count_wasm_contents('a.out.wasm', 'funcs')
memsize = self.count_wasm_contents('a.out.wasm', 'memory-data')
else:
codesize = os.path.getsize('a.out.js')
memsize = os.path.getsize('a.out.js.mem')
return (codesize, memsize)
def check_size(left, right):
# can't measure just the mem out of the wasm, so ignore [1] for wasm
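        # returns 0 if both sizes match, -1 if left has smaller code but
        # bigger mem (what EVAL_CTORS is expected to do), +1 for the reverse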
if left[0] == right[0] and left[1] == right[1]:
return 0
if left[0] < right[0] and left[1] > right[1]:
return -1 # smaller code, bigger mem
if left[0] > right[0] and left[1] < right[1]:
return 1
assert False, [left, right]
o2_size = get_size(['-O2'])
assert check_size(get_size(['-O2']), o2_size) == 0, 'deterministic'
assert check_size(get_size(['-O2', '-s', 'EVAL_CTORS=1']), o2_size) < 0, 'eval_ctors works if user asks for it'
oz_size = get_size(['-Oz'])
assert check_size(get_size(['-Oz']), oz_size) == 0, 'deterministic'
assert check_size(get_size(['-Oz', '-s', 'EVAL_CTORS=1']), oz_size) == 0, 'eval_ctors is on by default in oz'
assert check_size(get_size(['-Oz', '-s', 'EVAL_CTORS=0']), oz_size) == 1, 'eval_ctors can be turned off'
linkable_size = get_size(['-Oz', '-s', 'EVAL_CTORS=1', '-s', 'LINKABLE=1'])
assert check_size(get_size(['-Oz', '-s', 'EVAL_CTORS=0', '-s', 'LINKABLE=1']), linkable_size) == 1, 'noticeable difference in linkable too'
def test_eval_ctor_ordering(self):
# ensure order of execution remains correct, even with a bad ctor
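    # each ctor shifts the running total left by 4 bits and adds its own
    # value, so the hex digits of the final total record the execution order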
def test(p1, p2, p3, last, expected):
src = r'''
#include <stdio.h>
#include <stdlib.h>
volatile int total = 0;
struct C {
C(int x) {
volatile int y = x;
y++;
y--;
if (y == 0xf) {
printf("you can't eval me ahead of time\n"); // bad ctor
}
total <<= 4;
total += int(y);
}
};
C __attribute__((init_priority(%d))) c1(0x5);
C __attribute__((init_priority(%d))) c2(0x8);
C __attribute__((init_priority(%d))) c3(%d);
int main() {
printf("total is 0x%%x.\n", total);
}
''' % (p1, p2, p3, last)
create_test_file('src.cpp', src)
run_process([EMCC, 'src.cpp', '-O2', '-s', 'EVAL_CTORS=1', '-profiling-funcs', '-s', 'WASM=%d' % wasm])
self.assertContained('total is %s.' % hex(expected), run_js('a.out.js'))
shutil.copyfile('a.out.js', 'x' + hex(expected) + '.js')
if wasm:
shutil.copyfile('a.out.wasm', 'x' + hex(expected) + '.wasm')
return self.count_wasm_contents('a.out.wasm', 'funcs')
else:
return open('a.out.js').read().count('function _')
print('no bad ctor')
first = test(1000, 2000, 3000, 0xe, 0x58e) # noqa
second = test(3000, 1000, 2000, 0xe, 0x8e5) # noqa
third = test(2000, 3000, 1000, 0xe, 0xe58) # noqa
print(first, second, third)
assert first == second and second == third
print('with bad ctor')
first = test(1000, 2000, 3000, 0xf, 0x58f) # noqa; 2 will succeed
    second = test(3000, 1000, 2000, 0xf, 0x8f5) # noqa; 1 will succeed
third = test(2000, 3000, 1000, 0xf, 0xf58) # noqa; 0 will succeed
print(first, second, third)
assert first < second and second < third, [first, second, third]
@uses_canonical_tmp
@with_env_modify({'EMCC_DEBUG': '1'})
def test_eval_ctors_debug_output(self):
for wasm in (1, 0):
print('wasm', wasm)
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
external_thing: function() {}
});
''')
create_test_file('src.cpp', r'''
extern "C" void external_thing();
struct C {
C() { external_thing(); } // don't remove this!
};
C c;
int main() {}
''')
err = run_process([EMCC, 'src.cpp', '--js-library', 'lib.js', '-Oz', '-s', 'WASM=%d' % wasm], stderr=PIPE).stderr
if self.is_wasm_backend():
# disabled in the wasm backend
self.assertContained('Ctor evalling in the wasm backend is disabled', err)
self.assertNotContained('ctor_evaller: not successful', err) # with logging
else:
self.assertContained('external_thing', err) # the failing call should be mentioned
if not wasm and not self.is_wasm_backend(): # asm.js will show a stack trace
self.assertContained('ctorEval.js', err) # with a stack trace
self.assertContained('ctor_evaller: not successful', err) # with logging
def test_override_js_execution_environment(self):
create_test_file('main.cpp', r'''
#include <emscripten.h>
int main() {
EM_ASM({
out('environment is WEB? ' + ENVIRONMENT_IS_WEB);
out('environment is WORKER? ' + ENVIRONMENT_IS_WORKER);
out('environment is NODE? ' + ENVIRONMENT_IS_NODE);
out('environment is SHELL? ' + ENVIRONMENT_IS_SHELL);
});
}
''')
    # use SINGLE_FILE since we don't want this test to depend on the environment
    # being able to load a side .wasm file; with the wrong env we get very odd failures
run_process([EMCC, 'main.cpp', '-s', 'SINGLE_FILE=1'])
src = open('a.out.js').read()
envs = ['web', 'worker', 'node', 'shell']
for env in envs:
for engine in JS_ENGINES:
if engine == V8_ENGINE:
continue # ban v8, weird failures
actual = 'NODE' if engine == NODE_JS else 'SHELL'
print(env, actual, engine)
module = {'ENVIRONMENT': env}
if env != actual:
# avoid problems with arguments detection, which may cause very odd failures with the wrong environment code
module['arguments'] = []
curr = 'var Module = %s;\n' % str(module)
print(' ' + curr)
create_test_file('test.js', curr + src)
seen = run_js('test.js', engine=engine, stderr=PIPE, full_output=True, assert_returncode=None)
self.assertContained('Module.ENVIRONMENT has been deprecated. To force the environment, use the ENVIRONMENT compile-time option (for example, -s ENVIRONMENT=web or -s ENVIRONMENT=node', seen)
def test_override_c_environ(self):
create_test_file('pre.js', r'''
var Module = {
preRun: [function() { ENV.hello = 'world' }]
};
''')
create_test_file('src.cpp', r'''
#include <stdlib.h>
#include <stdio.h>
int main() {
printf("|%s|\n", getenv("hello"));
}
''')
run_process([EMCC, 'src.cpp', '--pre-js', 'pre.js'])
self.assertContained('|world|', run_js('a.out.js'))
create_test_file('pre.js', r'''
var Module = {
preRun: [function(module) { module.ENV.hello = 'world' }]
};
''')
run_process([EMCC, 'src.cpp', '--pre-js', 'pre.js', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["ENV"]'])
self.assertContained('|world|', run_js('a.out.js'))
run_process([EMCC, 'src.cpp', '--pre-js', 'pre.js', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["ENV"]', '-s', 'MODULARIZE=1'])
output = run_process(NODE_JS + ['-e', 'require("./a.out.js")();'], stdout=PIPE, stderr=PIPE)
self.assertContained('|world|', output.stdout)
def test_warn_no_filesystem(self):
WARNING = 'Filesystem support (FS) was not included. The problem is that you are using files from JS, but files were not used from C/C++, so filesystem support was not auto-included. You can force-include filesystem support with -s FORCE_FILESYSTEM=1'
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
seen = run_js('a.out.js', stderr=PIPE)
assert WARNING not in seen
def test(contents):
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({ %s });
printf("hello, world!\n");
return 0;
}
''' % contents)
run_process([EMCC, 'src.cpp'])
self.assertContained(WARNING, run_js('a.out.js', stderr=PIPE, assert_returncode=None))
# might appear in handwritten code
test("FS.init()")
test("FS.createPreloadedFile('waka waka, just warning check')")
test("FS.createDataFile('waka waka, just warning check')")
test("FS.analyzePath('waka waka, just warning check')")
test("FS.loadFilesFromDB('waka waka, just warning check')")
# might appear in filesystem code from a separate script tag
test("Module['FS_createDataFile']('waka waka, just warning check')")
test("Module['FS_createPreloadedFile']('waka waka, just warning check')")
# text is in the source when needed, but when forcing FS, it isn't there
run_process([EMCC, 'src.cpp'])
self.assertContained(WARNING, open('a.out.js').read())
run_process([EMCC, 'src.cpp', '-s', 'FORCE_FILESYSTEM=1']) # forcing FS means no need
self.assertNotContained(WARNING, open('a.out.js').read())
run_process([EMCC, 'src.cpp', '-s', 'ASSERTIONS=0']) # no assertions, no need
self.assertNotContained(WARNING, open('a.out.js').read())
run_process([EMCC, 'src.cpp', '-O2']) # optimized, so no assertions
self.assertNotContained(WARNING, open('a.out.js').read())
def test_warn_module_print_err(self):
ERROR = 'was not exported. add it to EXTRA_EXPORTED_RUNTIME_METHODS (see the FAQ)'
def test(contents, expected, args=[]):
create_test_file('src.cpp', r'''
#include <emscripten.h>
int main() {
EM_ASM({ %s });
return 0;
}
''' % contents)
run_process([EMCC, 'src.cpp'] + args)
self.assertContained(expected, run_js('a.out.js', stderr=STDOUT, assert_returncode=None))
# error shown (when assertions are on)
test("Module.print('x')", ERROR)
test("Module['print']('x')", ERROR)
test("Module.printErr('x')", ERROR)
test("Module['printErr']('x')", ERROR)
# when exported, all good
test("Module['print']('print'); Module['printErr']('err'); ", 'print\nerr', ['-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print", "printErr"]'])
def test_warn_unexported_main(self):
WARNING = 'main() is in the input files, but "_main" is not in EXPORTED_FUNCTIONS, which means it may be eliminated as dead code. Export it if you want main() to run.'
proc = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EXPORTED_FUNCTIONS=[]'], stderr=PIPE)
self.assertContained(WARNING, proc.stderr)
############################################################
# Function eliminator tests
############################################################
def normalize_line_endings(self, input):
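    # collapsing '\n\n' twice handles runs of up to four consecutive newlines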
return input.replace('\r\n', '\n').replace('\n\n', '\n').replace('\n\n', '\n')
def get_file_contents(self, file):
file_contents = ""
with open(file) as fout:
file_contents = "".join(fout.readlines())
file_contents = self.normalize_line_endings(file_contents)
return file_contents
def function_eliminator_test_helper(self, input_file, expected_output_file, use_hash_info=False):
input_file = path_from_root('tests', 'optimizer', input_file)
expected_output_file = path_from_root('tests', 'optimizer', expected_output_file)
command = [path_from_root('tools', 'eliminate-duplicate-functions.js'), input_file, '--no-minimize-whitespace', '--use-asm-ast']
if use_hash_info:
command.append('--use-hash-info')
proc = run_process(NODE_JS + command, stdin=PIPE, stderr=PIPE, stdout=PIPE)
assert proc.stderr == '', proc.stderr
expected_output = self.get_file_contents(expected_output_file)
output = self.normalize_line_endings(proc.stdout)
self.assertIdentical(expected_output, output)
def test_function_eliminator_simple(self):
self.function_eliminator_test_helper('test-function-eliminator-simple.js',
'test-function-eliminator-simple-output.js')
def test_function_eliminator_replace_function_call(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-function-call.js',
'test-function-eliminator-replace-function-call-output.js')
def test_function_eliminator_replace_function_call_two_passes(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-function-call-output.js',
'test-function-eliminator-replace-function-call-two-passes-output.js')
def test_function_eliminator_replace_array_value(self):
output_file = 'output.js'
try:
shared.safe_copy(path_from_root('tests', 'optimizer', 'test-function-eliminator-replace-array-value.js'), output_file)
tools.duplicate_function_eliminator.run(output_file)
output_file_contents = self.get_file_contents(output_file)
expected_file_contents = self.get_file_contents(path_from_root('tests', 'optimizer', 'test-function-eliminator-replace-array-value-output.js'))
self.assertIdentical(expected_file_contents, output_file_contents)
finally:
tools.tempfiles.try_delete(output_file)
def test_function_eliminator_replace_object_value_assignment(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-object-value-assignment.js',
'test-function-eliminator-replace-object-value-assignment-output.js')
def test_function_eliminator_variable_clash(self):
self.function_eliminator_test_helper('test-function-eliminator-variable-clash.js',
'test-function-eliminator-variable-clash-output.js')
def test_function_eliminator_replace_variable_value(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-variable-value.js',
'test-function-eliminator-replace-variable-value-output.js')
  @no_wasm_backend('tests native asm.js optimizer, which is never built for the wasm backend')
def test_function_eliminator_double_parsed_correctly(self):
# This is a test that makes sure that when we perform final optimization on
# the JS file, doubles are preserved (and not converted to ints).
output_file = 'output.js'
try:
shared.safe_copy(path_from_root('tests', 'optimizer', 'test-function-eliminator-double-parsed-correctly.js'), output_file)
# Run duplicate function elimination
tools.duplicate_function_eliminator.run(output_file)
# Run last opts
shutil.move(tools.js_optimizer.run(output_file, ['last', 'asm']), output_file)
output_file_contents = self.get_file_contents(output_file)
# Compare
expected_file_contents = self.get_file_contents(path_from_root('tests', 'optimizer', 'test-function-eliminator-double-parsed-correctly-output.js'))
self.assertIdentical(expected_file_contents, output_file_contents)
finally:
tools.tempfiles.try_delete(output_file)
# Now do the same, but using a pre-generated equivalent function hash info that
# comes in handy for parallel processing
def test_function_eliminator_simple_with_hash_info(self):
self.function_eliminator_test_helper('test-function-eliminator-simple-with-hash-info.js',
'test-function-eliminator-simple-output.js',
use_hash_info=True)
def test_function_eliminator_replace_function_call_with_hash_info(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-function-call-with-hash-info.js',
'test-function-eliminator-replace-function-call-output.js',
use_hash_info=True)
def test_function_eliminator_replace_function_call_two_passes_with_hash_info(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-function-call-output-with-hash-info.js',
'test-function-eliminator-replace-function-call-two-passes-output.js',
use_hash_info=True)
def test_function_eliminator_replace_object_value_assignment_with_hash_info(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-object-value-assignment-with-hash-info.js',
'test-function-eliminator-replace-object-value-assignment-output.js',
use_hash_info=True)
def test_function_eliminator_variable_clash_with_hash_info(self):
self.function_eliminator_test_helper('test-function-eliminator-variable-clash-with-hash-info.js',
'test-function-eliminator-variable-clash-output.js',
use_hash_info=True)
def test_function_eliminator_replace_variable_value_with_hash_info(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-variable-value-with-hash-info.js',
'test-function-eliminator-replace-variable-value-output.js',
use_hash_info=True)
@no_wasm_backend('uses CYBERDWARF')
def test_cyberdwarf_pointers(self):
run_process([EMCC, path_from_root('tests', 'debugger', 'test_pointers.cpp'), '-Oz', '-s', 'CYBERDWARF=1',
'--pre-js', path_from_root('tests', 'debugger', 'test_preamble.js'), '-o', 'test_pointers.js'])
run_js('test_pointers.js')
@no_wasm_backend('uses CYBERDWARF')
def test_cyberdwarf_union(self):
run_process([EMCC, path_from_root('tests', 'debugger', 'test_union.cpp'), '-Oz', '-s', 'CYBERDWARF=1',
'--pre-js', path_from_root('tests', 'debugger', 'test_preamble.js'), '-o', 'test_union.js'])
run_js('test_union.js')
def test_source_file_with_fixed_language_mode(self):
create_test_file('src_tmp_fixed_lang', '''
#include <string>
#include <iostream>
int main() {
std::cout << "Test_source_fixed_lang_hello" << std::endl;
return 0;
}
''')
run_process([EMCC, '-Wall', '-x', 'c++', 'src_tmp_fixed_lang'])
self.assertContained("Test_source_fixed_lang_hello", run_js('a.out.js'))
stderr = self.expect_fail([EMCC, '-Wall', 'src_tmp_fixed_lang'])
self.assertContained("Input file has an unknown suffix, don't know what to do with it!", stderr)
def test_disable_inlining(self):
create_test_file('test.c', r'''
#include <stdio.h>
void foo() {
printf("foo\n");
}
int main() {
foo();
return 0;
}
''')
# Without the 'INLINING_LIMIT=1', -O2 inlines foo()
cmd = [EMCC, 'test.c', '-O2', '-o', 'test.bc', '-s', 'INLINING_LIMIT=1']
if self.is_wasm_backend():
cmd += ['-flto']
run_process(cmd)
    # If foo() had been wrongly inlined above, internalizing foo and running
    # global DCE would eliminate it
building.llvm_opt('test.bc', ['-internalize', '-internalize-public-api-list=main', '-globaldce'], 'test2.bc')
    # For this test to be successful, foo() shouldn't have been inlined above and
# foo() should be in the function list
syms = building.llvm_nm('test2.bc', include_internal=True)
assert 'foo' in syms.defs, 'foo() should not be inlined'
@no_wasm_backend('--separate-asm')
def test_output_eol(self):
# --separate-asm only makes sense without wasm (no asm.js with wasm)
for params in [[], ['--separate-asm', '-s', 'WASM=0'], ['--proxy-to-worker'], ['--proxy-to-worker', '--separate-asm', '-s', 'WASM=0']]:
for output_suffix in ['html', 'js']:
for eol in ['windows', 'linux']:
files = ['a.js']
if '--separate-asm' in params:
files += ['a.asm.js']
if output_suffix == 'html':
files += ['a.html']
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'a.' + output_suffix, '--output_eol', eol] + params
run_process(cmd)
for f in files:
print(str(cmd) + ' ' + str(params) + ' ' + eol + ' ' + f)
assert os.path.isfile(f)
if eol == 'linux':
expected_ending = '\n'
else:
expected_ending = '\r\n'
ret = tools.line_endings.check_line_endings(f, expect_only=expected_ending)
assert ret == 0
for f in files:
try_delete(f)
@no_wasm_backend('asm2wasm specific')
@uses_canonical_tmp
def test_binaryen_opts(self):
with env_modify({'EMCC_DEBUG': '1'}):
for args, expect_js_opts, expect_wasm_opts, expect_only_wasm in [
([], False, False, True),
(['-O0'], False, False, True),
(['-O1'], False, True, True),
(['-O2'], False, True, True),
(['-O2', '--js-opts', '1'], True, True, False), # user asked
(['-O2', '-s', 'EVAL_CTORS=1'], False, True, True), # ctor evaller turned off since only-wasm
(['-O3'], False, True, True),
(['-Os'], False, True, True),
(['-Oz'], False, True, True), # ctor evaller turned off since only-wasm
]:
try_delete('a.out.js')
try_delete('a.out.wasm')
try_delete('a.out.wat')
cmd = [EMCC, path_from_root('tests', 'core', 'test_i64.c')] + args
print(args, 'js opts:', expect_js_opts, 'only-wasm:', expect_only_wasm, ' ', ' '.join(cmd))
err = run_process(cmd, stdout=PIPE, stderr=PIPE).stderr
assert expect_js_opts == ('applying js optimization passes:' in err), err
if not self.is_wasm_backend():
assert expect_only_wasm == ('-emscripten-only-wasm' in err and '--wasm-only' in err), err # check both flag to fastcomp and to asm2wasm
wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), 'a.out.wasm'], stdout=PIPE).stdout
# i64s
i64s = wat.count('(i64.')
print(' seen i64s:', i64s)
assert expect_only_wasm == (i64s > 30), 'i64 opts can be emitted in only-wasm mode, but not normally' # note we emit a few i64s even without wasm-only, when we replace udivmoddi (around 15 such)
selects = wat.count('(select')
print(' seen selects:', selects)
if expect_wasm_opts:
# when optimizing we should create selects
self.assertGreater(selects, 15)
else:
# when not optimizing for size we should not
self.assertEqual(selects, 0)
# asm2wasm opt line
asm2wasm_line = [line for line in err.split('\n') if 'asm2wasm' in line]
asm2wasm_line = '' if not asm2wasm_line else asm2wasm_line[0]
if '-O0' in args or '-O' not in str(args):
assert '-O' not in asm2wasm_line, 'no opts should be passed to asm2wasm: ' + asm2wasm_line
else:
opts_str = args[0]
assert opts_str.startswith('-O')
assert opts_str in asm2wasm_line, 'expected opts: ' + asm2wasm_line
@no_wasm_backend('fastcomp specific')
def test_binaryen_and_precise_f32(self):
for args, expect in [
([], True),
(['-s', 'PRECISE_F32=0'], True), # disabled, but no asm.js, so we definitely want f32
(['-s', 'PRECISE_F32=1'], True),
(['-s', 'PRECISE_F32=2'], True),
]:
print(args, expect)
try_delete('a.out.js')
err = run_process([EMCC, '-v', path_from_root('tests', 'hello_world.cpp'), '-s', 'BINARYEN=1'] + args, stderr=PIPE).stderr
assert expect == (' -emscripten-precise-f32' in err), err
self.assertContained('hello, world!', run_js('a.out.js'))
def test_binaryen_names(self):
sizes = {}
for args, expect_names in [
([], False),
(['-g'], True),
(['-O1'], False),
(['-O2'], False),
(['-O2', '-g'], True),
(['-O2', '-g1'], False),
(['-O2', '-g2'], True),
(['-O2', '--profiling'], True),
(['-O2', '--profiling-funcs'], True),
]:
print(args, expect_names)
try_delete('a.out.js')
# we use dlmalloc here, as emmalloc has a bunch of asserts that contain the text "malloc" in them, which makes counting harder
run_process([EMCC, path_from_root('tests', 'hello_world.cpp')] + args + ['-s', 'MALLOC="dlmalloc"'])
code = open('a.out.wasm', 'rb').read()
if expect_names:
# name section adds the name of malloc (there is also another one for the export)
self.assertEqual(code.count(b'malloc'), 2)
else:
# should be just malloc for the export
self.assertEqual(code.count(b'malloc'), 1)
sizes[str(args)] = os.path.getsize('a.out.wasm')
print(sizes)
self.assertLess(sizes["['-O2']"], sizes["['-O2', '--profiling-funcs']"], 'when -profiling-funcs, the size increases due to function names')
def test_binaryen_warn_mem(self):
# if user changes INITIAL_MEMORY at runtime, the wasm module may not accept the memory import if it is too big/small
create_test_file('pre.js', 'var Module = { INITIAL_MEMORY: 50 * 1024 * 1024 };\n')
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'INITIAL_MEMORY=' + str(16 * 1024 * 1024), '--pre-js', 'pre.js', '-s', 'WASM_ASYNC_COMPILATION=0'])
out = run_js('a.out.js', full_output=True, stderr=PIPE, assert_returncode=None)
self.assertContained('LinkError', out)
self.assertContained('Memory size incompatibility issues may be due to changing INITIAL_MEMORY at runtime to something too large. Use ALLOW_MEMORY_GROWTH to allow any size memory (and also make sure not to set INITIAL_MEMORY at runtime to something smaller than it was at compile time).', out)
self.assertNotContained('hello, world!', out)
# and with memory growth, all should be good
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'INITIAL_MEMORY=' + str(16 * 1024 * 1024), '--pre-js', 'pre.js', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'WASM_ASYNC_COMPILATION=0'])
self.assertContained('hello, world!', run_js('a.out.js'))
@no_wasm_backend('asm.js specific')
def test_binaryen_asmjs_outputs(self):
# Test that an .asm.js file is outputted exactly when it is requested.
for args, output_asmjs in [
([], False),
(['-s', 'MAIN_MODULE=2'], False),
]:
with temp_directory(self.get_dir()) as temp_dir:
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-o', os.path.join(temp_dir, 'a.js')] + args
print(' '.join(cmd))
run_process(cmd)
if output_asmjs:
self.assertExists(os.path.join(temp_dir, 'a.asm.js'))
self.assertNotExists(os.path.join(temp_dir, 'a.temp.asm.js'))
    # Test that outputting to .wasm does not nuke an existing .asm.js file, if
    # the user wants to manually dual-deploy both to the same directory.
with temp_directory(self.get_dir()) as temp_dir:
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '-o', os.path.join(temp_dir, 'a.js'), '--separate-asm']
print(' '.join(cmd))
run_process(cmd)
self.assertExists(os.path.join(temp_dir, 'a.asm.js'))
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-o', os.path.join(temp_dir, 'a.js')]
print(' '.join(cmd))
run_process(cmd)
self.assertExists(os.path.join(temp_dir, 'a.asm.js'))
self.assertExists(os.path.join(temp_dir, 'a.wasm'))
self.assertNotExists(os.path.join(temp_dir, 'a.temp.asm.js'))
def test_binaryen_mem(self):
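    # expected values are in wasm pages of 64KB: 20971520 bytes -> 320 pages,
    # 41943040 bytes -> 640 pages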
for args, expect_initial, expect_max in [
(['-s', 'INITIAL_MEMORY=20971520'], 320, 320),
(['-s', 'INITIAL_MEMORY=20971520', '-s', 'ALLOW_MEMORY_GROWTH=1'], 320, None),
(['-s', 'INITIAL_MEMORY=20971520', '-s', 'MAXIMUM_MEMORY=41943040'], 320, 640),
(['-s', 'INITIAL_MEMORY=20971520', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=41943040'], 320, 640),
]:
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=1', '-O2'] + args
print(' '.join(cmd))
run_process(cmd)
wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), 'a.out.wasm'], stdout=PIPE).stdout
      for line in wat.split('\n'):
if '(import "env" "memory" (memory ' in line:
parts = line.strip().replace('(', '').replace(')', '').split(' ')
print(parts)
self.assertEqual(parts[5], str(expect_initial))
if not expect_max:
self.assertEqual(len(parts), 6)
else:
self.assertEqual(parts[6], str(expect_max))
def test_invalid_mem(self):
# A large amount is fine, multiple of 16MB or not
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=33MB'])
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=32MB'])
# But not in asm.js
if not self.is_wasm_backend():
ret = self.expect_fail([EMCC, '-s', 'WASM=0', path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=33MB'])
self.assertContained('INITIAL_MEMORY must be a multiple of 16MB', ret)
# A tiny amount is fine in wasm
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=65536', '-s', 'TOTAL_STACK=1024'])
# And the program works!
self.assertContained('hello, world!', run_js('a.out.js'))
# But not in asm.js
if not self.is_wasm_backend():
ret = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=65536', '-s', 'WASM=0'])
self.assertContained('INITIAL_MEMORY must be at least 16MB', ret)
# Must be a multiple of 64KB
ret = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=33554433']) # 32MB + 1 byte
self.assertContained('INITIAL_MEMORY must be a multiple of 64KB', ret)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MAXIMUM_MEMORY=33MB'])
ret = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MAXIMUM_MEMORY=34603009']) # 33MB + 1 byte
self.assertContained('MAXIMUM_MEMORY must be a multiple of 64KB', ret)
def test_invalid_output_dir(self):
ret = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-o', os.path.join('NONEXISTING_DIRECTORY', 'out.js')])
self.assertContained('specified output file (NONEXISTING_DIRECTORY%sout.js) is in a directory that does not exist' % os.path.sep, ret)
def test_binaryen_ctors(self):
# ctor order must be identical to js builds, deterministically
create_test_file('src.cpp', r'''
#include <stdio.h>
struct A {
A() { puts("constructing A!"); }
};
A a;
struct B {
B() { puts("constructing B!"); }
};
B b;
int main() {}
''')
run_process([EMCC, 'src.cpp'])
correct = run_js('a.out.js')
for args in [[], ['-s', 'RELOCATABLE=1']]:
print(args)
run_process([EMCC, 'src.cpp', '-s', 'WASM=1', '-o', 'b.out.js'] + args)
seen = run_js('b.out.js')
assert correct == seen, correct + '\n vs \n' + seen
# test debug info and debuggability of JS output
@uses_canonical_tmp
def test_binaryen_debug(self):
with env_modify({'EMCC_DEBUG': '1'}):
for args, expect_dash_g, expect_emit_text, expect_clean_js, expect_whitespace_js, expect_closured in [
(['-O0'], False, False, False, True, False),
(['-O0', '-g1'], False, False, False, True, False),
(['-O0', '-g2'], True, False, False, True, False), # in -g2+, we emit -g to asm2wasm so function names are saved
(['-O0', '-g'], True, True, False, True, False),
(['-O0', '--profiling-funcs'], True, False, False, True, False),
(['-O1'], False, False, False, True, False),
(['-O2'], False, False, True, False, False),
(['-O2', '-g1'], False, False, True, True, False),
(['-O2', '-g'], True, True, False, True, False),
(['-O2', '--closure', '1'], False, False, True, False, True),
(['-O2', '--closure', '1', '-g1'], False, False, True, True, True),
(['-O2', '--js-opts', '1'], False, False, True, False, False),
]:
print(args, expect_dash_g, expect_emit_text)
try_delete('a.out.wat')
cmd = [EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'WASM=1'] + args
print(' '.join(cmd))
err = run_process(cmd, stdout=PIPE, stderr=PIPE).stderr
if not self.is_wasm_backend():
asm2wasm_line = [x for x in err.split('\n') if 'asm2wasm' in x][0]
asm2wasm_line = asm2wasm_line.strip() + ' ' # ensure it ends with a space, for simpler searches below
print('|' + asm2wasm_line + '|')
assert expect_dash_g == (' -g ' in asm2wasm_line)
assert expect_emit_text == (' -S ' in asm2wasm_line)
if expect_emit_text:
text = open('a.out.wat').read()
assert ';;' in text, 'must see debug info comment'
assert 'hello_world.cpp:12' in text, 'must be file:line info'
js = open('a.out.js').read()
assert expect_clean_js == ('// ' not in js), 'cleaned-up js must not have comments'
assert expect_whitespace_js == ('{\n ' in js), 'whitespace-minified js must not have excess spacing'
assert expect_closured == ('var a;' in js or 'var a,' in js or 'var a=' in js or 'var a ' in js), 'closured js must have tiny variable names'
@uses_canonical_tmp
def test_binaryen_ignore_implicit_traps(self):
sizes = []
with env_modify({'EMCC_DEBUG': '1'}):
for args, expect in [
([], False),
(['-s', 'BINARYEN_IGNORE_IMPLICIT_TRAPS=1'], True),
]:
print(args, expect)
cmd = [EMCC, path_from_root('tests', 'hello_libcxx.cpp'), '-s', 'WASM=1', '-O3'] + args
print(' '.join(cmd))
err = run_process(cmd, stdout=PIPE, stderr=PIPE).stderr
self.assertContainedIf('--ignore-implicit-traps ', err, expect)
sizes.append(os.path.getsize('a.out.wasm'))
print('sizes:', sizes)
# sizes must be different, as the flag has an impact
self.assertEqual(len(set(sizes)), 2)
@no_fastcomp('BINARYEN_EXTRA_PASSES is used to optimize only in the wasm backend (fastcomp uses flags to asm2wasm)')
def test_binaryen_passes_extra(self):
def build(args=[]):
return run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-O3'] + args, stdout=PIPE).stdout
build()
base_size = os.path.getsize('a.out.wasm')
out = build(['-s', 'BINARYEN_EXTRA_PASSES="--metrics"'])
# and --metrics output appears
self.assertContained('[funcs]', out)
# adding --metrics should not affect code size
self.assertEqual(base_size, os.path.getsize('a.out.wasm'))
def assertFileContents(self, filename, contents):
contents = contents.replace('\r', '')
if os.environ.get('EMTEST_REBASELINE'):
with open(filename, 'w') as f:
f.write(contents)
return
if not os.path.exists(filename):
self.fail('Test expectation file not found: ' + filename + '.\n' +
'Run with EMTEST_REBASELINE to generate.')
expected_content = open(filename).read()
message = "Run with EMTEST_REBASELINE=1 to automatically update expectations"
self.assertTextDataIdentical(expected_content, contents, message,
filename, filename + '.new')
def run_metadce_test(self, filename, args, expected_exists, expected_not_exists, expected_size,
check_sent=True, check_imports=True, check_exports=True, check_funcs=True):
size_slack = 0.05
# in -Os, -Oz, we remove imports wasm doesn't need
print('Running metadce test: %s:' % filename, args, expected_exists,
expected_not_exists, expected_size, check_sent, check_imports, check_exports, check_funcs)
filename = path_from_root('tests', 'other', 'metadce', filename)
def clean_arg(arg):
return arg.replace('-', '')
def args_to_filename(args):
result = ''
for a in args:
if a == '-s':
continue
a = a.replace('-', '')
a = a.replace('=1', '')
a = a.replace('=[]', '_NONE')
a = a.replace('=', '_')
if a:
result += '_' + a
return result
expected_basename = os.path.splitext(filename)[0]
if not self.is_wasm_backend():
expected_basename += '_fastcomp'
expected_basename += args_to_filename(args)
run_process([EMCC, filename, '-g2'] + args)
# find the imports we send from JS
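    # the emitted JS contains an object of roughly this shape (a sketch):
    #   asmLibraryArg = { "abort": _abort, "emscripten_memcpy_big": ..., ... };
    # we extract just the property names below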
js = open('a.out.js').read()
start = js.find('asmLibraryArg = ')
end = js.find('}', start) + 1
start = js.find('{', start)
relevant = js[start + 2:end - 2]
relevant = relevant.replace(' ', '').replace('"', '').replace("'", '').split(',')
sent = [x.split(':')[0].strip() for x in relevant]
sent = [x for x in sent if x]
sent.sort()
for exists in expected_exists:
self.assertIn(exists, sent)
for not_exists in expected_not_exists:
self.assertNotIn(not_exists, sent)
wasm_size = os.path.getsize('a.out.wasm')
if expected_size is not None:
ratio = abs(wasm_size - expected_size) / float(expected_size)
print(' seen wasm size: %d (expected: %d), ratio to expected: %f' % (wasm_size, expected_size, ratio))
self.assertLess(ratio, size_slack)
imports, exports, funcs = parse_wasm('a.out.wasm')
imports.sort()
exports.sort()
funcs.sort()
    # filter out _NNN suffixes that can result from bitcode linking when
    # internal symbol names collide.
def strip_numeric_suffixes(funcname):
parts = funcname.split('_')
while parts:
if parts[-1].isdigit():
parts.pop()
else:
break
return '_'.join(parts)
funcs = [strip_numeric_suffixes(f) for f in funcs]
if check_sent:
sent_file = expected_basename + '.sent'
sent_data = '\n'.join(sent) + '\n'
self.assertFileContents(sent_file, sent_data)
if check_imports:
filename = expected_basename + '.imports'
data = '\n'.join(imports) + '\n'
self.assertFileContents(filename, data)
if check_exports:
filename = expected_basename + '.exports'
data = '\n'.join(exports) + '\n'
self.assertFileContents(filename, data)
if check_funcs:
filename = expected_basename + '.funcs'
data = '\n'.join(funcs) + '\n'
self.assertFileContents(filename, data)
@parameterized({
'O0': ([], [], ['waka'], 9766), # noqa
'O1': (['-O1'], [], ['waka'], 7886), # noqa
'O2': (['-O2'], [], ['waka'], 7871), # noqa
# in -O3, -Os and -Oz we metadce, and they shrink it down to the minimal output we want
'O3': (['-O3'], [], [], 85), # noqa
'Os': (['-Os'], [], [], 85), # noqa
'Oz': (['-Oz'], [], [], 85), # noqa
'Os_mr': (['-Os', '-s', 'MINIMAL_RUNTIME'], [], [], 85), # noqa
})
@no_fastcomp()
def test_metadce_minimal(self, *args):
self.run_metadce_test('minimal.c', *args)
@parameterized({
'O0': ([], ['abort'], ['waka'], 22712), # noqa
'O1': (['-O1'], ['abort'], ['waka'], 10450), # noqa
'O2': (['-O2'], ['abort'], ['waka'], 10440), # noqa
# in -O3, -Os and -Oz we metadce, and they shrink it down to the minimal output we want
'O3': (['-O3'], [], [], 55), # noqa
'Os': (['-Os'], [], [], 55), # noqa
'Oz': (['-Oz'], [], [], 55), # noqa
})
@no_wasm_backend()
def test_metadce_minimal_fastcomp(self, *args):
self.run_metadce_test('minimal.c', *args)
@parameterized({
'noexcept': (['-O2'], [], ['waka'], 218988), # noqa
# exceptions increases code size significantly
'except': (['-O2', '-fexceptions'], [], ['waka'], 279827), # noqa
# exceptions does not pull in demangling by default, which increases code size
'mangle': (['-O2', '-fexceptions',
'-s', 'DEMANGLE_SUPPORT'], [], ['waka'], 408028), # noqa
})
@no_fastcomp()
def test_metadce_cxx(self, *args):
self.run_metadce_test('hello_libcxx.cpp', *args)
@parameterized({
'normal': (['-O2'], ['abort'], ['waka'], 186423),
'emulated_function_pointers':
(['-O2', '-s', 'EMULATED_FUNCTION_POINTERS=1'], ['abort'], ['waka'], 188310),
})
@no_wasm_backend()
def test_metadce_cxx_fastcomp(self, *args):
# test on libc++: see effects of emulated function pointers
self.run_metadce_test('hello_libcxx.cpp', *args)
@parameterized({
'O0': ([], [], ['waka'], 22849), # noqa
'O1': (['-O1'], [], ['waka'], 10533), # noqa
'O2': (['-O2'], [], ['waka'], 10256), # noqa
'O3': (['-O3'], [], [], 1999), # noqa; in -O3, -Os and -Oz we metadce
'Os': (['-Os'], [], [], 2010), # noqa
'Oz': (['-Oz'], [], [], 2004), # noqa
# finally, check what happens when we export nothing. wasm should be almost empty
'export_nothing':
(['-Os', '-s', 'EXPORTED_FUNCTIONS=[]'], [], [], 61), # noqa
# we don't metadce with linkable code! other modules may want stuff
# don't compare the # of functions in a main module, which changes a lot
    # TODO(sbc): Investigate why the number of exports is an order of magnitude
# larger for wasm backend.
'main_module_2': (['-O3', '-s', 'MAIN_MODULE=2'], [], [], 10652, True, True, True, False), # noqa
})
@no_fastcomp()
def test_metadce_hello(self, *args):
self.run_metadce_test('hello_world.cpp', *args)
@parameterized({
'O0': ([], ['abort'], ['waka'], 42701), # noqa
'O1': (['-O1'], ['abort'], ['waka'], 13199), # noqa
'O2': (['-O2'], ['abort'], ['waka'], 12425), # noqa
'O3': (['-O3'], [], [], 2045), # noqa; in -O3, -Os and -Oz we metadce
'Os': (['-Os'], [], [], 2064), # noqa
'Oz': (['-Oz'], [], [], 2045), # noqa
# finally, check what happens when we export nothing. wasm should be almost empty
'export_nothing':
(['-Os', '-s', 'EXPORTED_FUNCTIONS=[]'], [], [], 8), # noqa; totally empty!
# we don't metadce with linkable code! other modules may want stuff
# don't compare the # of functions in a main module, which changes a lot
'main_module_2': (['-O3', '-s', 'MAIN_MODULE=2'], [], [], 10017), # noqa
})
@no_wasm_backend()
def test_metadce_hello_fastcomp(self, *args):
self.run_metadce_test('hello_world.cpp', *args)
@parameterized({
'O3': ('mem.c', ['-O3'],
[], [], 6100), # noqa
# argc/argv support code etc. is in the wasm
'O3_standalone': ('mem.c', ['-O3', '-s', 'STANDALONE_WASM'],
[], [], 6309), # noqa
# without argc/argv, no support code for them is emitted
'O3_standalone_narg': ('mem_no_argv.c', ['-O3', '-s', 'STANDALONE_WASM'],
[], [], 6309), # noqa
# without main, no support code for argc/argv is emitted either
'O3_standalone_lib': ('mem_no_main.c', ['-O3', '-s', 'STANDALONE_WASM', '--no-entry'],
[], [], 6309), # noqa
# Growth support code is in JS, no significant change in the wasm
'O3_grow': ('mem.c', ['-O3', '-s', 'ALLOW_MEMORY_GROWTH'],
[], [], 6098), # noqa
# Growth support code is in the wasm
'O3_grow_standalone': ('mem.c', ['-O3', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'STANDALONE_WASM'],
[], [], 6449), # noqa
# without argc/argv, no support code for them is emitted, even with lto
'O3_standalone_narg_flto':
('mem_no_argv.c', ['-O3', '-s', 'STANDALONE_WASM', '-flto'],
[], [], 4971), # noqa
})
@no_fastcomp()
def test_metadce_mem(self, filename, *args):
self.run_metadce_test(filename, *args)
@parameterized({
'O3': ('libcxxabi_message.cpp', ['-O3'],
[], [], 128), # noqa
# argc/argv support code etc. is in the wasm
'O3_standalone': ('libcxxabi_message.cpp', ['-O3', '-s', 'STANDALONE_WASM'],
[], [], 174), # noqa
})
@no_fastcomp()
def test_metadce_libcxxabi_message(self, filename, *args):
self.run_metadce_test(filename, *args)
# ensures runtime exports work, even with metadce
def test_extra_runtime_exports(self):
exports = ['stackSave', 'stackRestore', 'stackAlloc', 'FS']
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'WASM=1', '-Os', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=%s' % str(exports)])
js = open('a.out.js').read()
for export in exports:
assert ('Module["%s"]' % export) in js, export
def test_legalize_js_ffi(self):
# test disabling of JS FFI legalization
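    # legalization rewrites signatures at the JS boundary: i64 params/results
    # become pairs of i32s (with the upper half passed via tempRet0) and f32
    # becomes f64; LEGALIZE_JS_FFI=0 keeps the original wasm signatures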
wasm_dis = os.path.join(building.get_binaryen_bin(), 'wasm-dis')
for (args, js_ffi) in [
(['-s', 'LEGALIZE_JS_FFI=1', '-s', 'SIDE_MODULE=1', '-O1', '-s', 'EXPORT_ALL=1'], True),
(['-s', 'LEGALIZE_JS_FFI=0', '-s', 'SIDE_MODULE=1', '-O1', '-s', 'EXPORT_ALL=1'], False),
(['-s', 'LEGALIZE_JS_FFI=0', '-s', 'SIDE_MODULE=1', '-O0', '-s', 'EXPORT_ALL=1'], False),
(['-s', 'LEGALIZE_JS_FFI=0', '-s', 'WARN_ON_UNDEFINED_SYMBOLS=0', '-O0'], False),
]:
if self.is_wasm_backend() and 'SIDE_MODULE=1' in args:
continue
print(args)
try_delete('a.out.wasm')
try_delete('a.out.wat')
cmd = [EMCC, path_from_root('tests', 'other', 'ffi.c'), '-g', '-o', 'a.out.js'] + args
print(' '.join(cmd))
run_process(cmd)
run_process([wasm_dis, 'a.out.wasm', '-o', 'a.out.wat'])
text = open('a.out.wat').read()
# remove internal comments and extra whitespace
text = re.sub(r'\(;[^;]+;\)', '', text)
text = re.sub(r'\$var\$*.', '', text)
text = re.sub(r'param \$\d+', 'param ', text)
text = re.sub(r' +', ' ', text)
      # TODO: remove the unnecessary ".*" in e_* regexes after binaryen #2510 lands
e_add_f32 = re.search(r'func \$_?add_f .*\(param f32\) \(param f32\) \(result f32\)', text)
i_i64_i32 = re.search(r'import .*"_?import_ll" .*\(param i32 i32\) \(result i32\)', text)
i_f32_f64 = re.search(r'import .*"_?import_f" .*\(param f64\) \(result f64\)', text)
i_i64_i64 = re.search(r'import .*"_?import_ll" .*\(param i64\) \(result i64\)', text)
i_f32_f32 = re.search(r'import .*"_?import_f" .*\(param f32\) \(result f32\)', text)
e_i64_i32 = re.search(r'func \$_?add_ll .*\(param i32\) \(param i32\) \(param i32\) \(param i32\) \(result i32\)', text)
e_f32_f64 = re.search(r'func \$legalstub\$_?add_f .*\(param f64\) \(param f64\) \(result f64\)', text)
e_i64_i64 = re.search(r'func \$_?add_ll .*\(param i64\) \(param i64\) \(result i64\)', text)
assert e_add_f32, 'add_f export missing'
if js_ffi:
assert i_i64_i32, 'i64 not converted to i32 in imports'
assert i_f32_f64, 'f32 not converted to f64 in imports'
assert not i_i64_i64, 'i64 not converted to i32 in imports'
assert not i_f32_f32, 'f32 not converted to f64 in imports'
assert e_i64_i32, 'i64 not converted to i32 in exports'
assert not e_f32_f64, 'f32 not converted to f64 in exports'
assert not e_i64_i64, 'i64 not converted to i64 in exports'
else:
assert not i_i64_i32, 'i64 converted to i32 in imports'
assert not i_f32_f64, 'f32 converted to f64 in imports'
assert i_i64_i64, 'i64 converted to i32 in imports'
assert i_f32_f32, 'f32 converted to f64 in imports'
assert not e_i64_i32, 'i64 converted to i32 in exports'
assert not e_f32_f64, 'f32 converted to f64 in exports'
assert e_i64_i64, 'i64 converted to i64 in exports'
def test_no_legalize_js_ffi(self):
# test minimal JS FFI legalization for invoke and dyncalls
if self.is_wasm_backend():
self.skipTest('not testing legalize with main module and wasm backend')
wasm_dis = os.path.join(building.get_binaryen_bin(), 'wasm-dis')
for (args, js_ffi) in [
(['-s', 'LEGALIZE_JS_FFI=0', '-s', 'MAIN_MODULE=2', '-O3', '-s', 'DISABLE_EXCEPTION_CATCHING=0'], False),
]:
print(args)
try_delete('a.out.wasm')
try_delete('a.out.wat')
with env_modify({'EMCC_FORCE_STDLIBS': 'libc++'}):
cmd = [EMCC, path_from_root('tests', 'other', 'noffi.cpp'), '-g', '-o', 'a.out.js'] + args
print(' '.join(cmd))
run_process(cmd)
run_process([wasm_dis, 'a.out.wasm', '-o', 'a.out.wat'])
text = open('a.out.wat').read()
# remove internal comments and extra whitespace
text = re.sub(r'\(;[^;]+;\)', '', text)
text = re.sub(r'\$var\$*.', '', text)
text = re.sub(r'param \$\d+', 'param ', text)
text = re.sub(r' +', ' ', text)
# print("text: %s" % text)
i_legalimport_i64 = re.search(r'\(import.*\$legalimport\$invoke_j.*', text)
e_legalstub_i32 = re.search(r'\(func.*\$legalstub\$dyn.*\(result i32\)', text)
assert i_legalimport_i64, 'legal import not generated for invoke call'
assert e_legalstub_i32, 'legal stub not generated for dyncall'
def test_export_aliasee(self):
# build side module
args = ['-s', 'SIDE_MODULE=1']
cmd = [EMCC, path_from_root('tests', 'other', 'alias', 'side.c'), '-g', '-o', 'side.wasm'] + args
print(' '.join(cmd))
run_process(cmd)
# build main module
args = ['-s', 'EXPORTED_FUNCTIONS=["_main", "_foo"]', '-s', 'MAIN_MODULE=2', '-s', 'EXIT_RUNTIME=1', '-lnodefs.js']
cmd = [EMCC, path_from_root('tests', 'other', 'alias', 'main.c'), '-o', 'main.js'] + args
print(' '.join(cmd))
run_process(cmd)
# run the program
self.assertContained('success', run_js('main.js'))
def test_sysconf_phys_pages(self):
def run(args, expected):
if self.is_wasm_backend() and 'WASM=0' in args:
return
cmd = [EMCC, path_from_root('tests', 'unistd', 'sysconf_phys_pages.c')] + args
print(str(cmd))
run_process(cmd)
result = run_js('a.out.js').strip()
self.assertEqual(result, str(expected) + ', errno: 0')
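    # expectations are in units of the 16384-byte page size that emscripten's
    # sysconf reports: 16MB default heap -> 1024 pages, 32MB -> 2048, and with
    # growth the 2GB maximum -> 131072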
run([], 1024)
run(['-s', 'INITIAL_MEMORY=32MB'], 2048)
run(['-s', 'INITIAL_MEMORY=32MB', '-s', 'ALLOW_MEMORY_GROWTH=1'], (2 * 1024 * 1024 * 1024) // 16384)
run(['-s', 'INITIAL_MEMORY=32MB', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'WASM=0'], (2 * 1024 * 1024 * 1024) // 16384)
def test_wasm_target_and_STANDALONE_WASM(self):
# STANDALONE_WASM means we never minify imports and exports.
for opts, potentially_expect_minified_exports_and_imports in (
([], False),
(['-s', 'STANDALONE_WASM'], False),
(['-O2'], False),
(['-O3'], True),
(['-O3', '-s', 'STANDALONE_WASM'], False),
(['-Os'], True),
):
if 'STANDALONE_WASM' in opts and not self.is_wasm_backend():
continue
# targeting .wasm (without .js) means we enable STANDALONE_WASM automatically, and don't minify imports/exports
for target in ('out.js', 'out.wasm'):
expect_minified_exports_and_imports = potentially_expect_minified_exports_and_imports and target.endswith('.js')
standalone = target.endswith('.wasm') or 'STANDALONE_WASM' in opts
print(opts, potentially_expect_minified_exports_and_imports, target, ' => ', expect_minified_exports_and_imports, standalone)
self.clear()
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-o', target] + opts)
self.assertExists('out.wasm')
if target.endswith('.wasm'):
# only wasm requested
self.assertNotExists('out.js')
wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), 'out.wasm'], stdout=PIPE).stdout
wat_lines = wat.split('\n')
exports = [line.strip().split(' ')[1].replace('"', '') for line in wat_lines if "(export " in line]
imports = [line.strip().split(' ')[2].replace('"', '') for line in wat_lines if "(import " in line]
exports_and_imports = exports + imports
print(' exports', exports)
print(' imports', imports)
if expect_minified_exports_and_imports:
assert 'a' in exports_and_imports
else:
assert 'a' not in exports_and_imports
assert 'memory' in exports_and_imports or 'fd_write' in exports_and_imports, 'some things are not minified anyhow'
# verify the wasm runs with the JS
if target.endswith('.js'):
self.assertContained('hello, world!', run_js('out.js'))
# verify a standalone wasm
if standalone and self.is_wasm_backend():
for engine in WASM_ENGINES:
print(engine)
self.assertContained('hello, world!', run_js('out.wasm', engine=engine))
def test_wasm_targets_side_module(self):
# side modules do allow a wasm target
for opts, target in [([], 'a.out.wasm'), (['-o', 'lib.wasm'], 'lib.wasm')]:
# specified target
print('building: ' + target)
self.clear()
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'SIDE_MODULE=1'] + opts)
for x in os.listdir('.'):
assert not x.endswith('.js'), 'we should not emit js when making a wasm side module: ' + x
self.assertIn(b'dylink', open(target, 'rb').read())
@no_fastcomp('test wasm object files')
def test_wasm_backend_lto(self):
# test building of non-wasm-object-files libraries, building with them, and running them
src = path_from_root('tests', 'hello_libcxx.cpp')
# test codegen in lto mode, and compare to normal (wasm object) mode
for args in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os'], ['-Oz']]:
print(args)
print('wasm in object')
run_process([EMXX, src] + args + ['-c', '-o', 'hello_obj.o'])
self.assertTrue(building.is_wasm('hello_obj.o'))
self.assertFalse(building.is_bitcode('hello_obj.o'))
print('bitcode in object')
run_process([EMXX, src] + args + ['-c', '-o', 'hello_bitcode.o', '-flto'])
self.assertFalse(building.is_wasm('hello_bitcode.o'))
self.assertTrue(building.is_bitcode('hello_bitcode.o'))
print('use bitcode object (LTO)')
run_process([EMXX, 'hello_bitcode.o'] + args + ['-flto'])
self.assertContained('hello, world!', run_js('a.out.js'))
print('use bitcode object (non-LTO)')
run_process([EMXX, 'hello_bitcode.o'] + args)
self.assertContained('hello, world!', run_js('a.out.js'))
print('use native object (LTO)')
run_process([EMXX, 'hello_obj.o'] + args + ['-flto'])
self.assertContained('hello, world!', run_js('a.out.js'))
print('use native object (non-LTO)')
run_process([EMXX, 'hello_obj.o'] + args)
self.assertContained('hello, world!', run_js('a.out.js'))
@parameterized({
'except': [],
'noexcept': ['-s', 'DISABLE_EXCEPTION_CATCHING=0']
})
@no_fastcomp('test wasm object files')
def test_wasm_backend_lto_libcxx(self, *args):
run_process([EMXX, path_from_root('tests', 'hello_libcxx.cpp'), '-flto'] + list(args))
@no_fastcomp('wasm backend lto specific')
def test_lto_flags(self):
for flags, expect_bitcode in [
([], False),
(['-flto'], True),
(['-flto=thin'], True),
(['-s', 'WASM_OBJECT_FILES=0'], True),
(['-s', 'WASM_OBJECT_FILES=1'], False),
]:
run_process([EMCC, path_from_root('tests', 'hello_world.cpp')] + flags + ['-c', '-o', 'a.o'])
seen_bitcode = building.is_bitcode('a.o')
self.assertEqual(expect_bitcode, seen_bitcode, 'must emit LTO-capable bitcode when flags indicate so (%s)' % str(flags))
def test_wasm_nope(self):
for opts in [[], ['-O2']]:
print(opts)
# check we show a good error message if there is no wasm support
create_test_file('pre.js', 'WebAssembly = undefined;\n')
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '--pre-js', 'pre.js'] + opts)
out = run_js('a.out.js', stderr=STDOUT, assert_returncode=None)
self.assertContained('no native wasm support detected', out)
def test_jsrun(self):
print(NODE_JS)
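    # reset the engine-check cache so that both the initial check and the cache-hit
    # path below are exercised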
jsrun.WORKING_ENGINES = {}
# Test that engine check passes
self.assertTrue(jsrun.check_engine(NODE_JS))
# Run it a second time (cache hit)
self.assertTrue(jsrun.check_engine(NODE_JS))
# Test that engine check fails
bogus_engine = ['/fake/inline4']
self.assertFalse(jsrun.check_engine(bogus_engine))
self.assertFalse(jsrun.check_engine(bogus_engine))
# Test the other possible way (list vs string) to express an engine
if type(NODE_JS) is list:
engine2 = NODE_JS[0]
else:
engine2 = [NODE_JS]
self.assertTrue(jsrun.check_engine(engine2))
# Test that run_js requires the engine
run_js(path_from_root('tests', 'hello_world.js'), NODE_JS)
caught_exit = 0
try:
run_js(path_from_root('tests', 'hello_world.js'), bogus_engine)
except SystemExit as e:
caught_exit = e.code
self.assertEqual(1, caught_exit, 'Did not catch SystemExit with bogus JS engine')
def test_error_on_missing_libraries(self):
# -llsomenonexistingfile is an error by default
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-lsomenonexistingfile'])
if self.is_wasm_backend():
self.assertContained('wasm-ld: error: unable to find library -lsomenonexistingfile', err)
else:
self.assertContained('emcc: cannot find library "somenonexistingfile"', err)
  # Tests that if the user accidentally attempts to link native object code, we show an error
def test_native_link_error_message(self):
run_process([CLANG_CC, '-c', path_from_root('tests', 'hello_123.c'), '-o', 'hello_123.o'])
err = self.expect_fail([EMCC, 'hello_123.o', '-o', 'hello_123.js'])
self.assertContained('hello_123.o is not a valid input', err)
  # Tests that we give a clear error when INITIAL_MEMORY is not enough for static initialization + stack
def test_clear_error_on_massive_static_data(self):
with open('src.cpp', 'w') as f:
f.write('''
char muchData[128 * 1024];
int main() {
return (int)&muchData;
}
''')
err = self.expect_fail([EMCC, 'src.cpp', '-s', 'TOTAL_STACK=1KB', '-s', 'INITIAL_MEMORY=64KB'])
if self.is_wasm_backend():
self.assertContained('wasm-ld: error: initial memory too small', err)
else:
self.assertContained('Memory is not large enough for static data (134000) plus the stack (1024), please increase INITIAL_MEMORY (65536)', err)
def test_o_level_clamp(self):
for level in [3, 4, 20]:
err = run_process([EMCC, '-O' + str(level), path_from_root('tests', 'hello_world.c')], stderr=PIPE).stderr
self.assertContainedIf("optimization level '-O" + str(level) + "' is not supported; using '-O3' instead", err, level > 3)
  # Tests that if the user specifies multiple -o output directives, the last one takes precedence
def test_multiple_o_files(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'a.js', '-o', 'b.js'])
assert os.path.isfile('b.js')
assert not os.path.isfile('a.js')
# Tests that Emscripten-provided header files can be cleanly included in C code
def test_include_system_header_in_c(self):
for std in [[], ['-std=c89']]: # Test oldest C standard, and the default C standard
for directory, headers in [
        ('emscripten', ['dom_pk_codes.h', 'em_asm.h', 'emscripten.h', 'fetch.h', 'html5.h', 'key_codes.h', 'threading.h', 'trace.h']), # This directory also has bind.h, val.h and wire.h, which require C++11
('AL', ['al.h', 'alc.h']),
('EGL', ['egl.h', 'eglplatform.h']),
('GL', ['freeglut_std.h', 'gl.h', 'glew.h', 'glfw.h', 'glu.h', 'glut.h']),
('GLES', ['gl.h', 'glplatform.h']),
('GLES2', ['gl2.h', 'gl2platform.h']),
('GLES3', ['gl3.h', 'gl3platform.h', 'gl31.h', 'gl32.h']),
('GLFW', ['glfw3.h']),
('KHR', ['khrplatform.h'])]:
for h in headers:
inc = '#include <' + directory + '/' + h + '>'
print(inc)
create_test_file('a.c', inc)
create_test_file('b.c', inc)
run_process([EMCC] + std + ['a.c', 'b.c'])
@is_slow_test
def test_single_file(self):
for (single_file_enabled,
meminit1_enabled,
debug_enabled,
closure_enabled,
wasm_enabled) in itertools.product([True, False], repeat=5):
# skip unhelpful option combinations
if wasm_enabled and meminit1_enabled:
continue
if closure_enabled and debug_enabled:
continue
expect_wasm = wasm_enabled
expect_meminit = meminit1_enabled and not wasm_enabled
expect_wat = debug_enabled and wasm_enabled and not self.is_wasm_backend()
cmd = [EMCC, path_from_root('tests', 'hello_world.c')]
if single_file_enabled:
expect_meminit = False
expect_wasm = False
cmd += ['-s', 'SINGLE_FILE=1']
if meminit1_enabled:
cmd += ['--memory-init-file', '1']
if debug_enabled:
cmd += ['-g']
if closure_enabled:
cmd += ['--closure', '1']
if not wasm_enabled:
cmd += ['-s', 'WASM=0']
self.clear()
def do_test(cmd):
print(' '.join(cmd))
run_process(cmd)
print(os.listdir('.'))
assert expect_meminit == (os.path.exists('a.out.mem') or os.path.exists('a.out.js.mem'))
assert expect_wasm == os.path.exists('a.out.wasm')
assert expect_wat == os.path.exists('a.out.wat')
self.assertContained('hello, world!', run_js('a.out.js'))
do_test(cmd)
# additional combinations that are not part of the big product()
if self.is_wasm_backend() and debug_enabled:
separate_dwarf_cmd = cmd + ['-gseparate-dwarf']
if wasm_enabled:
do_test(separate_dwarf_cmd)
self.assertExists('a.out.wasm.debug.wasm')
else:
self.expect_fail(separate_dwarf_cmd)
def test_emar_M(self):
create_test_file('file1', ' ')
create_test_file('file2', ' ')
run_process([EMAR, 'cr', 'file1.a', 'file1'])
run_process([EMAR, 'cr', 'file2.a', 'file2'])
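    # 'emar -M' reads an MRI librarian script from stdin; the script below merges the
    # two archives into combined.a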
run_process([EMAR, '-M'], input='''create combined.a
addlib file1.a
addlib file2.a
save
end
''')
result = run_process([EMAR, 't', 'combined.a'], stdout=PIPE).stdout
self.assertContained('file1', result)
self.assertContained('file2', result)
def test_emar_duplicate_inputs(self):
    # Verify that we can supply the same input multiple times without
    # confusing emar.py:
# See https://github.com/emscripten-core/emscripten/issues/9733
create_test_file('file1', ' ')
run_process([EMAR, 'cr', 'file1.a', 'file1', 'file1'])
# Temporarily disabled to allow this llvm change to roll
# https://reviews.llvm.org/D69665
@no_windows('Temporarily disabled under windows')
def test_emar_response_file(self):
    # Test that special characters such as single quotes in filenames survive being
    # sent via a response file
create_test_file("file'1", ' ')
create_test_file("file'2", ' ')
building.emar('cr', 'libfoo.a', ("file'1", "file'2"))
def test_archive_empty(self):
    # This test was added because we had an issue with the AUTO_ARCHIVE_INDEXES failing on empty
    # archives (which inherently don't have indexes).
run_process([EMAR, 'crS', 'libfoo.a'])
run_process([EMCC, '-Werror', 'libfoo.a', path_from_root('tests', 'hello_world.c')])
def test_archive_no_index(self):
create_test_file('foo.c', 'int foo = 1;')
run_process([EMCC, '-c', 'foo.c'])
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.c')])
# The `S` flag means don't add an archive index
run_process([EMAR, 'crS', 'libfoo.a', 'foo.o'])
    # The llvm backend (like GNU ld and lld) doesn't support linking archives with no index.
# However we have logic that will automatically add indexes (unless running with
# NO_AUTO_ARCHIVE_INDEXES).
if self.is_wasm_backend():
stderr = self.expect_fail([EMCC, '-s', 'NO_AUTO_ARCHIVE_INDEXES', 'libfoo.a', 'hello_world.o'])
self.assertContained('libfoo.a: archive has no index; run ranlib to add one', stderr)
# The default behavior is to add archive indexes automatically.
run_process([EMCC, 'libfoo.a', 'hello_world.o'])
@no_fastcomp('AUTO_ARCHIVE_INDEXES only applies to wasm backend')
def test_archive_non_objects(self):
create_test_file('file.txt', 'test file')
# llvm-nm has issues with files that start with two or more null bytes since it thinks they
# are COFF files. Ensure that we correctly ignore such files when we process them.
create_test_file('zeros.bin', '\0\0\0\0')
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.c')])
# No index added.
    # --format=darwin (the default on OSX) has a strange issue where it adds extra
    # newlines to files: https://bugs.llvm.org/show_bug.cgi?id=42562
run_process([EMAR, 'crS', '--format=gnu', 'libfoo.a', 'file.txt', 'zeros.bin', 'hello_world.o'])
run_process([EMCC, path_from_root('tests', 'hello_world.c'), 'libfoo.a'])
def test_flag_aliases(self):
def assert_aliases_match(flag1, flag2, flagarg, extra_args=[]):
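      # build once with each aliased setting and require byte-identical JS and wasm output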
results = {}
for f in (flag1, flag2):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', f + '=' + flagarg] + extra_args)
with open('a.out.js') as out:
results[f + '.js'] = out.read()
with open('a.out.wasm', 'rb') as out:
results[f + '.wasm'] = out.read()
self.assertEqual(results[flag1 + '.js'], results[flag2 + '.js'], 'js results should be identical')
self.assertEqual(results[flag1 + '.wasm'], results[flag2 + '.wasm'], 'wasm results should be identical')
assert_aliases_match('INITIAL_MEMORY', 'TOTAL_MEMORY', '16777216')
assert_aliases_match('INITIAL_MEMORY', 'TOTAL_MEMORY', '64MB')
assert_aliases_match('MAXIMUM_MEMORY', 'WASM_MEM_MAX', '16777216', ['-s', 'ALLOW_MEMORY_GROWTH'])
assert_aliases_match('MAXIMUM_MEMORY', 'BINARYEN_MEM_MAX', '16777216', ['-s', 'ALLOW_MEMORY_GROWTH'])
def test_IGNORE_CLOSURE_COMPILER_ERRORS(self):
create_test_file('pre.js', r'''
// make closure compiler very very angry
var dupe = 1;
var dupe = 2;
function Node() {
throw 'Node is a DOM thing too, and use the ' + dupe;
}
function Node() {
throw '(duplicate) Node is a DOM thing too, and also use the ' + dupe;
}
''')
def test(check, extra=[]):
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-O2', '--closure', '1', '--pre-js', 'pre.js'] + extra
proc = run_process(cmd, check=check, stderr=PIPE)
if not check:
self.assertNotEqual(proc.returncode, 0)
return proc
WARNING = 'Variable dupe declared more than once'
proc = test(check=False)
self.assertContained(WARNING, proc.stderr)
proc = test(check=True, extra=['-s', 'IGNORE_CLOSURE_COMPILER_ERRORS=1'])
self.assertNotContained(WARNING, proc.stderr)
def test_closure_full_js_library(self):
# test for closure errors in the entire JS library
# We must ignore various types of errors that are expected in this situation, as we
# are including a lot of JS without corresponding compiled code for it. This still
# lets us catch all other errors.
with env_modify({'EMCC_CLOSURE_ARGS': '--jscomp_off undefinedVars'}):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-O1', '--closure', '1', '-g1', '-s', 'INCLUDE_FULL_LIBRARY=1', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0'])
# Tests --closure-args command line flag
def test_closure_externs(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '--closure', '1', '--pre-js', path_from_root('tests', 'test_closure_externs_pre_js.js'), '--closure-args', '--externs "' + path_from_root('tests', 'test_closure_externs.js') + '"'])
def test_toolchain_profiler(self):
environ = os.environ.copy()
environ['EM_PROFILE_TOOLCHAIN'] = '1'
# replaced subprocess functions should not cause errors
run_process([EMCC, path_from_root('tests', 'hello_world.c')], env=environ)
def test_noderawfs(self):
fopen_write = open(path_from_root('tests', 'asmfs', 'fopen_write.cpp')).read()
create_test_file('main.cpp', fopen_write)
run_process([EMCC, 'main.cpp', '-s', 'NODERAWFS=1'])
self.assertContained("read 11 bytes. Result: Hello data!", run_js('a.out.js'))
# NODERAWFS should directly write on OS file system
self.assertEqual("Hello data!", open('hello_file.txt').read())
def test_noderawfs_disables_embedding(self):
expected = '--preload-file and --embed-file cannot be used with NODERAWFS which disables virtual filesystem'
base = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'NODERAWFS=1']
err = self.expect_fail(base + ['--preload-file', 'somefile'])
self.assertContained(expected, err)
err = self.expect_fail(base + ['--embed-file', 'somefile'])
self.assertContained(expected, err)
def test_node_code_caching(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'),
'-s', 'NODE_CODE_CACHING',
'-s', 'WASM_ASYNC_COMPILATION=0'])
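    # with NODE_CODE_CACHING the generated JS saves a serialized, compiled form of the wasm
    # module in an a.out.wasm.*.cached file next to the binary, so later runs can reuse it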
def get_cached():
cached = glob.glob('a.out.wasm.*.cached')
if not cached:
return None
self.assertEqual(len(cached), 1)
return cached[0]
# running the program makes it cache the code
self.assertFalse(get_cached())
self.assertEqual('hello, world!', run_js('a.out.js').strip())
self.assertTrue(get_cached(), 'should be a cache file')
# hard to test it actually uses it to speed itself up, but test that it
# does try to deserialize it at least
with open(get_cached(), 'w') as f:
f.write('waka waka')
ERROR = 'NODE_CODE_CACHING: failed to deserialize, bad cache file?'
self.assertContained(ERROR, run_js('a.out.js', stderr=PIPE, full_output=True))
# we cached proper code after showing that error
with open(get_cached(), 'rb') as f:
self.assertEqual(f.read().count(b'waka'), 0)
self.assertNotContained(ERROR, run_js('a.out.js', stderr=PIPE, full_output=True))
def test_autotools_shared_check(self):
env = os.environ.copy()
env['LC_ALL'] = 'C'
expected = ': supported targets:.* elf'
for python in [PYTHON, 'python', 'python2', 'python3']:
if not shared.which(python):
continue
if python == 'python3' and not is_python3_version_supported():
continue
print(python)
out = run_process([python, path_from_root('emcc.py'), '--help'], stdout=PIPE, env=env).stdout
assert re.search(expected, out)
def test_ioctl_window_size(self):
self.do_other_test(os.path.join('other', 'ioctl', 'window_size'))
def test_fd_closed(self):
self.do_other_test(os.path.join('other', 'fd_closed'))
def test_fflush(self):
# fflush without the full filesystem won't quite work
self.do_other_test(os.path.join('other', 'fflush'))
def test_fflush_fs(self):
# fflush with the full filesystem will flush from libc, but not the JS logging, which awaits a newline
self.do_other_test(os.path.join('other', 'fflush_fs'), emcc_args=['-s', 'FORCE_FILESYSTEM=1'])
def test_fflush_fs_exit(self):
# on exit, we can send out a newline as no more code will run
self.do_other_test(os.path.join('other', 'fflush_fs_exit'), emcc_args=['-s', 'FORCE_FILESYSTEM=1', '-s', 'EXIT_RUNTIME=1'])
def test_extern_weak(self):
self.do_other_test(os.path.join('other', 'extern_weak'))
if not self.is_wasm_backend(): # TODO: wasm backend main module
self.do_other_test(os.path.join('other', 'extern_weak'), emcc_args=['-s', 'MAIN_MODULE=1', '-DLINKABLE'])
def test_main_module_without_main(self):
create_test_file('pre.js', r'''
var Module = {
onRuntimeInitialized: function() {
Module._foo();
}
};
''')
create_test_file('src.c', r'''
#include <emscripten.h>
EMSCRIPTEN_KEEPALIVE void foo() {
EM_ASM({ console.log("bar") });
}
''')
run_process([EMCC, 'src.c', '--pre-js', 'pre.js', '-s', 'MAIN_MODULE=2'])
self.assertContained('bar', run_js('a.out.js'))
def test_js_optimizer_parse_error(self):
# check we show a proper understandable error for JS parse problems
create_test_file('src.cpp', r'''
#include <emscripten.h>
int main() {
EM_ASM({
var x = !<->5.; // wtf
});
}
''')
stderr = self.expect_fail([EMCC, 'src.cpp', '-O2'])
# wasm backend output doesn't have spaces in the EM_ASM function bodies
self.assertContained(('''
var ASM_CONSTS = [function() { var x = !<->5.; }];
^
''', '''
1024: function() {var x = !<->5.;}
^
'''), stderr)
@no_fastcomp('wasm2js only')
def test_js_optimizer_chunk_size_determinism(self):
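    # the JS optimizer splits its work into chunks; the emitted code must not depend on
    # the chunk size chosen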
def build():
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-O3', '-s', 'WASM=0'])
with open('a.out.js') as f:
# FIXME: newline differences can exist, ignore for now
return f.read().replace('\n', '')
normal = build()
with env_modify({
'EMCC_JSOPT_MIN_CHUNK_SIZE': '1',
'EMCC_JSOPT_MAX_CHUNK_SIZE': '1'
}):
tiny = build()
with env_modify({
'EMCC_JSOPT_MIN_CHUNK_SIZE': '4294967296',
'EMCC_JSOPT_MAX_CHUNK_SIZE': '4294967296'
}):
huge = build()
self.assertIdentical(normal, tiny)
self.assertIdentical(normal, huge)
def test_EM_ASM_ES6(self):
create_test_file('src.cpp', r'''
#include <emscripten.h>
int main() {
EM_ASM({
var x = (a, b) => 5; // valid ES6
async function y() {} // valid ES2017
out('hello!');
});
}
''')
run_process([EMCC, 'src.cpp', '-O2'])
self.assertContained('hello!', run_js('a.out.js'))
def test_check_sourcemapurl(self):
if not self.is_wasm():
self.skipTest('only supported with wasm')
run_process([EMCC, path_from_root('tests', 'hello_123.c'), '-g4', '-o', 'a.js', '--source-map-base', 'dir/'])
output = open('a.wasm', 'rb').read()
# has sourceMappingURL section content and points to 'dir/a.wasm.map' file
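    # a wasm custom section stores its name (and here also the URL payload) as
    # LEB128-length-prefixed strings, so search for that exact byte sequence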
source_mapping_url_content = encode_leb(len('sourceMappingURL')) + b'sourceMappingURL' + encode_leb(len('dir/a.wasm.map')) + b'dir/a.wasm.map'
self.assertEqual(output.count(source_mapping_url_content), 1)
# make sure no DWARF debug info sections remain - they would just waste space
self.assertNotIn(b'.debug_', output)
def test_check_source_map_args(self):
# -g4 is needed for source maps; -g is not enough
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-g'])
self.assertNotExists('a.out.wasm.map')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-g4'])
self.assertExists('a.out.wasm.map')
@parameterized({
'normal': [],
'profiling': ['--profiling'] # -g4 --profiling should still emit a source map; see #8584
})
def test_check_sourcemapurl_default(self, *args):
print(args)
if not self.is_wasm():
self.skipTest('only supported with wasm')
try_delete('a.wasm.map')
run_process([EMCC, path_from_root('tests', 'hello_123.c'), '-g4', '-o', 'a.js'] + list(args))
output = open('a.wasm', 'rb').read()
# has sourceMappingURL section content and points to 'a.wasm.map' file
source_mapping_url_content = encode_leb(len('sourceMappingURL')) + b'sourceMappingURL' + encode_leb(len('a.wasm.map')) + b'a.wasm.map'
self.assertIn(source_mapping_url_content, output)
def test_wasm_sourcemap(self):
    # The no_main.c will be read (from a relative location) due to the '--sources' option specified below
shutil.copyfile(path_from_root('tests', 'other', 'wasm_sourcemap', 'no_main.c'), 'no_main.c')
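    # run wasm-sourcemap.py directly, using a pre-generated llvm-dwarfdump output
    # (foo.wasm.dump) instead of running dwarfdump on foo.wasm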
wasm_map_cmd = [PYTHON, path_from_root('tools', 'wasm-sourcemap.py'),
'--sources', '--prefix', '=wasm-src://',
'--load-prefix', '/emscripten/tests/other/wasm_sourcemap=.',
'--dwarfdump-output',
path_from_root('tests', 'other', 'wasm_sourcemap', 'foo.wasm.dump'),
'-o', 'a.out.wasm.map',
path_from_root('tests', 'other', 'wasm_sourcemap', 'foo.wasm'),
'--basepath=' + os.getcwd()]
run_process(wasm_map_cmd)
output = open('a.out.wasm.map').read()
# has "sources" entry with file (includes also `--prefix =wasm-src:///` replacement)
self.assertIn('wasm-src:///emscripten/tests/other/wasm_sourcemap/no_main.c', output)
# has "sourcesContent" entry with source code (included with `-s` option)
self.assertIn('int foo()', output)
# has some entries
self.assertRegexpMatches(output, r'"mappings":\s*"[A-Za-z0-9+/]')
def test_wasm_sourcemap_dead(self):
wasm_map_cmd = [PYTHON, path_from_root('tools', 'wasm-sourcemap.py'),
'--dwarfdump-output',
path_from_root('tests', 'other', 'wasm_sourcemap_dead', 't.wasm.dump'),
'-o', 'a.out.wasm.map',
path_from_root('tests', 'other', 'wasm_sourcemap_dead', 't.wasm'),
'--basepath=' + os.getcwd()]
run_process(wasm_map_cmd, stdout=PIPE, stderr=PIPE)
output = open('a.out.wasm.map').read()
# has only two entries
self.assertRegexpMatches(output, r'"mappings":\s*"[A-Za-z0-9+/]+,[A-Za-z0-9+/]+"')
@no_fastcomp()
def test_wasm_sourcemap_relative_paths(self):
def test(infile, source_map_added_dir=''):
expected_source_map_path = 'a.cpp'
if source_map_added_dir:
expected_source_map_path = source_map_added_dir + '/' + expected_source_map_path
print(infile, expected_source_map_path)
shutil.copyfile(path_from_root('tests', 'hello_123.c'), infile)
infiles = [
infile,
os.path.abspath(infile),
'./' + infile
]
for curr in infiles:
print(' ', curr)
run_process([EMCC, curr, '-g4'])
with open('a.out.wasm.map', 'r') as f:
self.assertIn('"%s"' % expected_source_map_path, str(f.read()))
test('a.cpp')
ensure_dir('inner')
test('inner/a.cpp', 'inner')
@no_fastcomp('dwarf')
def test_separate_dwarf(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-g'])
self.assertExists('a.out.wasm')
self.assertNotExists('a.out.wasm.debug.wasm')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-gseparate-dwarf'])
self.assertExists('a.out.wasm')
self.assertExists('a.out.wasm.debug.wasm')
self.assertLess(os.path.getsize('a.out.wasm'), os.path.getsize('a.out.wasm.debug.wasm'))
# the special section should also exist, that refers to the side debug file
with open('a.out.wasm', 'rb') as f:
wasm = f.read()
self.assertIn(b'external_debug_info', wasm)
self.assertIn(b'a.out.wasm.debug.wasm', wasm)
@no_fastcomp('dwarf')
def test_separate_dwarf_with_filename(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-gseparate-dwarf=with_dwarf.wasm'])
self.assertNotExists('a.out.wasm.debug.wasm')
self.assertExists('with_dwarf.wasm')
# the correct notation is to have exactly one '=' and in the right place
for invalid in ('-gseparate-dwarf=x=', '-gseparate-dwarfy=', '-gseparate-dwarf-hmm'):
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), invalid])
self.assertContained('invalid -gseparate-dwarf=FILENAME notation', stderr)
def test_wasm_producers_section(self):
# no producers section by default
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
with open('a.out.wasm', 'rb') as f:
self.assertNotIn('clang', str(f.read()))
size = os.path.getsize('a.out.wasm')
if self.is_wasm_backend():
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EMIT_PRODUCERS_SECTION=1'])
with open('a.out.wasm', 'rb') as f:
self.assertIn('clang', str(f.read()))
size_with_section = os.path.getsize('a.out.wasm')
self.assertLess(size, size_with_section)
def test_html_preprocess(self):
test_file = path_from_root('tests', 'module', 'test_stdin.c')
output_file = 'test_stdin.html'
shell_file = path_from_root('tests', 'module', 'test_html_preprocess.html')
run_process([EMCC, '-o', output_file, test_file, '--shell-file', shell_file, '-s', 'ASSERTIONS=0'], stdout=PIPE, stderr=PIPE)
output = open(output_file).read()
self.assertContained("""<style>
/* Disable preprocessing inside style block as syntax is ambiguous with CSS */
#include {background-color: black;}
#if { background-color: red;}
#else {background-color: blue;}
#endif {background-color: green;}
#xxx {background-color: purple;}
</style>
T1:(else) ASSERTIONS != 1
T2:ASSERTIONS != 1
T3:ASSERTIONS < 2
T4:(else) ASSERTIONS <= 1
T5:(else) ASSERTIONS
T6:!ASSERTIONS""", output)
run_process([EMCC, '-o', output_file, test_file, '--shell-file', shell_file, '-s', 'ASSERTIONS=1'], stdout=PIPE, stderr=PIPE)
output = open(output_file).read()
self.assertContained("""<style>
/* Disable preprocessing inside style block as syntax is ambiguous with CSS */
#include {background-color: black;}
#if { background-color: red;}
#else {background-color: blue;}
#endif {background-color: green;}
#xxx {background-color: purple;}
</style>
T1:ASSERTIONS == 1
T2:(else) ASSERTIONS == 1
T3:ASSERTIONS < 2
T4:(else) ASSERTIONS <= 1
T5:ASSERTIONS
T6:(else) !ASSERTIONS""", output)
run_process([EMCC, '-o', output_file, test_file, '--shell-file', shell_file, '-s', 'ASSERTIONS=2'], stdout=PIPE, stderr=PIPE)
output = open(output_file).read()
self.assertContained("""<style>
/* Disable preprocessing inside style block as syntax is ambiguous with CSS */
#include {background-color: black;}
#if { background-color: red;}
#else {background-color: blue;}
#endif {background-color: green;}
#xxx {background-color: purple;}
</style>
T1:(else) ASSERTIONS != 1
T2:ASSERTIONS != 1
T3:(else) ASSERTIONS >= 2
T4:ASSERTIONS > 1
T5:ASSERTIONS
T6:(else) !ASSERTIONS""", output)
  # Tests that Emscripten-compiled applications can be run with node from a relative path that is different from the current working directory.
def test_node_js_run_from_different_directory(self):
ensure_dir('subdir')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', os.path.join('subdir', 'a.js'), '-O3'])
ret = run_process(NODE_JS + [os.path.join('subdir', 'a.js')], stdout=PIPE).stdout
self.assertContained('hello, world!', ret)
# Tests that a pthreads + modularize build can be run in node js
@no_fastcomp('node pthreads only supported on wasm backend')
def test_node_js_pthread_module(self):
# create module loader script
moduleLoader = 'moduleLoader.js'
moduleLoaderContents = '''
const test_module = require("./module");
test_module().then((test_module_instance) => {
test_module_instance._main();
process.exit(0);
});
'''
ensure_dir('subdir')
create_test_file(os.path.join('subdir', moduleLoader), moduleLoaderContents)
# build hello_world.c
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', os.path.join('subdir', 'module.js'), '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=test_module', '-s', 'ENVIRONMENT=worker,node'])
# run the module
ret = run_process(NODE_JS + ['--experimental-wasm-threads'] + [os.path.join('subdir', moduleLoader)], stdout=PIPE).stdout
self.assertContained('hello, world!', ret)
@no_windows('node system() does not seem to work, see https://github.com/emscripten-core/emscripten/pull/10547')
def test_node_js_system(self):
run_process([EMCC, '-DENV_NODE', path_from_root('tests', 'system.c'), '-o', 'a.js', '-O3'])
ret = run_process(NODE_JS + ['a.js'], stdout=PIPE).stdout
self.assertContained('OK', ret)
def test_is_bitcode(self):
fname = 'tmp.o'
with open(fname, 'wb') as f:
f.write(b'foo')
self.assertFalse(building.is_bitcode(fname))
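    # a bitcode wrapper file starts with the magic 0x0B17C0DE (written little-endian below),
    # with the raw 'BC'-prefixed bitcode embedded after the wrapper header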
with open(fname, 'wb') as f:
f.write(b'\xDE\xC0\x17\x0B')
f.write(16 * b'\x00')
f.write(b'BC')
self.assertTrue(building.is_bitcode(fname))
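    # a raw (unwrapped) bitcode file starts directly with the 'BC' magic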
with open(fname, 'wb') as f:
f.write(b'BC')
self.assertTrue(building.is_bitcode(fname))
def test_is_ar(self):
fname = 'tmp.a'
with open(fname, 'wb') as f:
f.write(b'foo')
self.assertFalse(building.is_ar(fname))
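    # '!<arch>\n' is the global header magic that every ar archive starts with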
with open(fname, 'wb') as f:
f.write(b'!<arch>\n')
self.assertTrue(building.is_ar(fname))
def test_emcc_parsing(self):
create_test_file('src.c', r'''
#include <stdio.h>
void a() { printf("a\n"); }
void b() { printf("b\n"); }
void c() { printf("c\n"); }
void d() { printf("d\n"); }
''')
create_test_file('response', r'''[
"_a",
"_b",
"_c",
"_d"
]
''')
for export_arg, expected in [
# extra space at end - should be ignored
("EXPORTED_FUNCTIONS=['_a', '_b', '_c', '_d' ]", ''),
# extra newline in response file - should be ignored
("EXPORTED_FUNCTIONS=@response", ''),
# stray slash
("EXPORTED_FUNCTIONS=['_a', '_b', \\'_c', '_d']", '''undefined exported function: "\\\\'_c'"'''),
# stray slash
("EXPORTED_FUNCTIONS=['_a', '_b',\\ '_c', '_d']", '''undefined exported function: "\\\\ '_c'"'''),
# stray slash
('EXPORTED_FUNCTIONS=["_a", "_b", \\"_c", "_d"]', 'undefined exported function: "\\\\"_c""'),
# stray slash
('EXPORTED_FUNCTIONS=["_a", "_b",\\ "_c", "_d"]', 'undefined exported function: "\\\\ "_c"'),
# missing comma
('EXPORTED_FUNCTIONS=["_a", "_b" "_c", "_d"]', 'undefined exported function: "_b" "_c"'),
]:
print(export_arg)
proc = run_process([EMCC, 'src.c', '-s', export_arg], stdout=PIPE, stderr=PIPE, check=not expected)
print(proc.stderr)
if not expected:
self.assertFalse(proc.stderr)
else:
self.assertNotEqual(proc.returncode, 0)
self.assertContained(expected, proc.stderr)
@no_fastcomp('uses new ASYNCIFY')
def test_asyncify_escaping(self):
proc = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'ASYNCIFY=1', '-s', "ASYNCIFY_ONLY=[DOS_ReadFile(unsigned short, unsigned char*, unsigned short*, bool)]"], stdout=PIPE, stderr=PIPE)
self.assertContained('emcc: ASYNCIFY list contains an item without balanced parentheses', proc.stderr)
self.assertContained(' DOS_ReadFile(unsigned short', proc.stderr)
self.assertContained('Try to quote the entire argument', proc.stderr)
@no_fastcomp('uses new ASYNCIFY')
def test_asyncify_response_file(self):
return self.skipTest(' TODO remove the support for multiple binaryen versions warning output ("function name" vs "pattern" etc).')
create_test_file('a.txt', r'''[
"DOS_ReadFile(unsigned short, unsigned char*, unsigned short*, bool)"
]
''')
    proc = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'ASYNCIFY=1', '-s', "ASYNCIFY_ONLY=@a.txt"], stdout=PIPE, stderr=PIPE)
# we should parse the response file properly, and then issue a proper warning for the missing function
self.assertContained(
'Asyncify onlylist contained a non-matching pattern: DOS_ReadFile(unsigned short, unsigned char*, unsigned short*, bool)',
proc.stderr)
# Sockets and networking
def test_inet(self):
self.do_run(open(path_from_root('tests', 'sha1.c')).read(), 'SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6')
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
int main() {
printf("*%x,%x,%x,%x,%x,%x*\n", htonl(0xa1b2c3d4), htonl(0xfe3572e0), htonl(0x07abcdf0), htons(0xabcd), ntohl(0x43211234), ntohs(0xbeaf));
in_addr_t i = inet_addr("190.180.10.78");
printf("%x\n", i);
return 0;
}
'''
self.do_run(src, '*d4c3b2a1,e07235fe,f0cdab07,cdab,34122143,afbe*\n4e0ab4be\n')
def test_inet2(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
int main() {
struct in_addr x, x2;
int *y = (int*)&x;
*y = 0x12345678;
printf("%s\n", inet_ntoa(x));
int r = inet_aton(inet_ntoa(x), &x2);
printf("%s\n", inet_ntoa(x2));
return 0;
}
'''
self.do_run(src, '120.86.52.18\n120.86.52.18\n')
def test_inet3(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
int main() {
char dst[64];
struct in_addr x, x2;
int *y = (int*)&x;
*y = 0x12345678;
printf("%s\n", inet_ntop(AF_INET,&x,dst,sizeof dst));
int r = inet_aton(inet_ntoa(x), &x2);
printf("%s\n", inet_ntop(AF_INET,&x2,dst,sizeof dst));
return 0;
}
'''
self.do_run(src, '120.86.52.18\n120.86.52.18\n')
def test_inet4(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
void test(const char *test_addr, bool first=true){
char str[40];
struct in6_addr addr;
unsigned char *p = (unsigned char*)&addr;
int ret;
ret = inet_pton(AF_INET6,test_addr,&addr);
if(ret == -1) return;
if(ret == 0) return;
if(inet_ntop(AF_INET6,&addr,str,sizeof(str)) == NULL ) return;
printf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x - %s\n",
p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9],p[10],p[11],p[12],p[13],p[14],p[15],str);
if (first) test(str, false); // check again, on our output
}
int main(){
test("::");
test("::1");
test("::1.2.3.4");
test("::17.18.19.20");
test("::ffff:1.2.3.4");
test("1::ffff");
test("::255.255.255.255");
test("0:ff00:1::");
test("0:ff::");
test("abcd::");
test("ffff::a");
test("ffff::a:b");
test("ffff::a:b:c");
test("ffff::a:b:c:d");
test("ffff::a:b:c:d:e");
test("::1:2:0:0:0");
test("0:0:1:2:3::");
test("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff");
test("1::255.255.255.255");
//below should fail and not produce results..
test("1.2.3.4");
test("");
test("-");
printf("ok.\n");
}
'''
self.do_run(src, r'''0000:0000:0000:0000:0000:0000:0000:0000 - ::
0000:0000:0000:0000:0000:0000:0000:0000 - ::
0000:0000:0000:0000:0000:0000:0000:0001 - ::1
0000:0000:0000:0000:0000:0000:0000:0001 - ::1
0000:0000:0000:0000:0000:0000:0102:0304 - ::102:304
0000:0000:0000:0000:0000:0000:0102:0304 - ::102:304
0000:0000:0000:0000:0000:0000:1112:1314 - ::1112:1314
0000:0000:0000:0000:0000:0000:1112:1314 - ::1112:1314
0000:0000:0000:0000:0000:ffff:0102:0304 - ::ffff:1.2.3.4
0000:0000:0000:0000:0000:ffff:0102:0304 - ::ffff:1.2.3.4
0001:0000:0000:0000:0000:0000:0000:ffff - 1::ffff
0001:0000:0000:0000:0000:0000:0000:ffff - 1::ffff
0000:0000:0000:0000:0000:0000:ffff:ffff - ::ffff:ffff
0000:0000:0000:0000:0000:0000:ffff:ffff - ::ffff:ffff
0000:ff00:0001:0000:0000:0000:0000:0000 - 0:ff00:1::
0000:ff00:0001:0000:0000:0000:0000:0000 - 0:ff00:1::
0000:00ff:0000:0000:0000:0000:0000:0000 - 0:ff::
0000:00ff:0000:0000:0000:0000:0000:0000 - 0:ff::
abcd:0000:0000:0000:0000:0000:0000:0000 - abcd::
abcd:0000:0000:0000:0000:0000:0000:0000 - abcd::
ffff:0000:0000:0000:0000:0000:0000:000a - ffff::a
ffff:0000:0000:0000:0000:0000:0000:000a - ffff::a
ffff:0000:0000:0000:0000:0000:000a:000b - ffff::a:b
ffff:0000:0000:0000:0000:0000:000a:000b - ffff::a:b
ffff:0000:0000:0000:0000:000a:000b:000c - ffff::a:b:c
ffff:0000:0000:0000:0000:000a:000b:000c - ffff::a:b:c
ffff:0000:0000:0000:000a:000b:000c:000d - ffff::a:b:c:d
ffff:0000:0000:0000:000a:000b:000c:000d - ffff::a:b:c:d
ffff:0000:0000:000a:000b:000c:000d:000e - ffff::a:b:c:d:e
ffff:0000:0000:000a:000b:000c:000d:000e - ffff::a:b:c:d:e
0000:0000:0000:0001:0002:0000:0000:0000 - ::1:2:0:0:0
0000:0000:0000:0001:0002:0000:0000:0000 - ::1:2:0:0:0
0000:0000:0001:0002:0003:0000:0000:0000 - 0:0:1:2:3::
0000:0000:0001:0002:0003:0000:0000:0000 - 0:0:1:2:3::
ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff - ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff - ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
0001:0000:0000:0000:0000:0000:ffff:ffff - 1::ffff:ffff
0001:0000:0000:0000:0000:0000:ffff:ffff - 1::ffff:ffff
ok.
''')
def test_getsockname_unconnected_socket(self):
self.do_run(r'''
#include <sys/socket.h>
#include <stdio.h>
#include <assert.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
int main() {
int fd;
int z;
fd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
struct sockaddr_in adr_inet;
socklen_t len_inet = sizeof adr_inet;
z = getsockname(fd, (struct sockaddr *)&adr_inet, &len_inet);
if (z != 0) {
perror("getsockname error");
return 1;
}
char buffer[1000];
sprintf(buffer, "%s:%u", inet_ntoa(adr_inet.sin_addr), (unsigned)ntohs(adr_inet.sin_port));
const char *correct = "0.0.0.0:0";
printf("got (expected) socket: %s (%s), size %lu (%lu)\n", buffer, correct, strlen(buffer), strlen(correct));
assert(strlen(buffer) == strlen(correct));
assert(strcmp(buffer, correct) == 0);
puts("success.");
}
''', 'success.')
def test_getpeername_unconnected_socket(self):
self.do_run(r'''
#include <sys/socket.h>
#include <stdio.h>
#include <assert.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
int main() {
int fd;
int z;
fd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
struct sockaddr_in adr_inet;
socklen_t len_inet = sizeof adr_inet;
z = getpeername(fd, (struct sockaddr *)&adr_inet, &len_inet);
if (z != 0) {
perror("getpeername error");
return 1;
}
puts("unexpected success.");
}
''', 'getpeername error: Socket not connected', assert_returncode=None)
def test_getaddrinfo(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_getaddrinfo.c')).read(), 'success')
def test_getnameinfo(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_getnameinfo.c')).read(), 'success')
def test_gethostbyname(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_gethostbyname.c')).read(), 'success')
def test_getprotobyname(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_getprotobyname.c')).read(), 'success')
def test_socketpair(self):
self.do_run(r'''
#include <sys/socket.h>
#include <stdio.h>
int main() {
int fd[2];
int err;
err = socketpair(AF_INET, SOCK_STREAM, 0, fd);
if (err != 0) {
perror("socketpair error");
return 1;
}
puts("unexpected success.");
}
''', 'socketpair error: Function not implemented', assert_returncode=None)
def test_link(self):
self.do_run(r'''
#include <netdb.h>
#include <sys/types.h>
#include <sys/socket.h>
int main () {
void* thing = gethostbyname("bing.com");
ssize_t rval = recv (0, thing, 0, 0);
rval = send (0, thing, 0, 0);
return 0;
}''', '', force_c=True)
# This test verifies that function names embedded into the build with --js-library (JS functions imported to asm.js/wasm)
# are minified when -O3 is used
def test_js_function_names_are_minified(self):
def check_size(f, expected_size):
if not os.path.isfile(f):
return # Nonexistent file passes in this check
obtained_size = os.path.getsize(f)
print('size of generated ' + f + ': ' + str(obtained_size))
try_delete(f)
self.assertLess(obtained_size, expected_size)
run_process([PYTHON, path_from_root('tests', 'gen_many_js_functions.py'), 'library_long.js', 'main_long.c'])
for wasm in [['-s', 'WASM=1'], ['-s', 'WASM=0']]:
if self.is_wasm_backend() and 'WASM=0' in wasm:
continue
# Currently we rely on Closure for full minification of every appearance of JS function names.
# TODO: Add minification also for non-Closure users and add [] to this list to test minification without Closure.
for closure in [['--closure', '1']]:
args = [EMCC, '-O3', '--js-library', 'library_long.js', 'main_long.c', '-o', 'a.html'] + wasm + closure
print(' '.join(args))
run_process(args)
ret = run_process(NODE_JS + ['a.js'], stdout=PIPE).stdout
self.assertTextDataIdentical('Sum of numbers from 1 to 1000: 500500 (expected 500500)', ret.strip())
check_size('a.js', 150000)
check_size('a.wasm', 80000)
  # Checks that the invoke_*() wrappers that manage C++ exceptions will not be generated if exceptions are disabled
def test_no_invoke_functions_are_generated_if_exception_catching_is_disabled(self):
self.skipTest('Skipping other.test_no_invoke_functions_are_generated_if_exception_catching_is_disabled: Enable after new version of fastcomp has been tagged')
for args in [[], ['-s', 'WASM=0']]:
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'DISABLE_EXCEPTION_CATCHING=1', '-o', 'a.html'] + args)
output = open('a.js').read()
self.assertContained('_main', output) # Smoke test that we actually compiled
self.assertNotContained('invoke_', output)
# Verifies that only the minimal needed set of invoke_*() functions will be generated when C++ exceptions are enabled
def test_no_excessive_invoke_functions_are_generated_when_exceptions_are_enabled(self):
self.skipTest('Skipping other.test_no_excessive_invoke_functions_are_generated_when_exceptions_are_enabled: Enable after new version of fastcomp has been tagged')
for args in [[], ['-s', 'WASM=0']]:
run_process([EMCC, path_from_root('tests', 'invoke_i.cpp'), '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-o', 'a.html'] + args)
output = open('a.js').read()
self.assertContained('invoke_i', output)
self.assertNotContained('invoke_ii', output)
self.assertNotContained('invoke_v', output)
def test_emscripten_metadata(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
self.assertNotIn(b'emscripten_metadata', open('a.out.wasm', 'rb').read())
run_process([EMCC, path_from_root('tests', 'hello_world.c'),
'-s', 'EMIT_EMSCRIPTEN_METADATA'])
self.assertIn(b'emscripten_metadata', open('a.out.wasm', 'rb').read())
# make sure wasm executes correctly
ret = run_process(NODE_JS + ['a.out.js'], stdout=PIPE).stdout
self.assertTextDataIdentical('hello, world!\n', ret)
@parameterized({
'O0': (False, ['-O0']), # noqa
'O0_emit': (True, ['-O0', '-s', 'EMIT_EMSCRIPTEN_LICENSE']), # noqa
'O2': (False, ['-O2']), # noqa
'O2_emit': (True, ['-O2', '-s', 'EMIT_EMSCRIPTEN_LICENSE']), # noqa
'O2_js_emit': (True, ['-O2', '-s', 'EMIT_EMSCRIPTEN_LICENSE', '-s', 'WASM=0']), # noqa
'O2_closure': (False, ['-O2', '--closure', '1']), # noqa
'O2_closure_emit': (True, ['-O2', '-s', 'EMIT_EMSCRIPTEN_LICENSE', '--closure', '1']), # noqa
'O2_closure_js_emit': (True, ['-O2', '-s', 'EMIT_EMSCRIPTEN_LICENSE', '--closure', '1', '-s', 'WASM=0']), # noqa
})
@no_fastcomp('EMIT_EMSCRIPTEN_LICENSE is upstream only')
def test_emscripten_license(self, expect_license, args):
# fastcomp does not support the new license flag
if not self.is_wasm_backend():
expect_license = False
run_process([EMCC, path_from_root('tests', 'hello_world.c')] + args)
with open('a.out.js') as f:
js = f.read()
licenses_found = len(re.findall('Copyright [0-9]* The Emscripten Authors', js))
if expect_license:
self.assertNotEqual(licenses_found, 0, 'Unable to find license block in output file!')
self.assertEqual(licenses_found, 1, 'Found too many license blocks in the output file!')
else:
self.assertEqual(licenses_found, 0, 'Found a license block in the output file, but it should not have been there!')
# This test verifies that the generated exports from asm.js/wasm module only reference the
# unminified exported name exactly once. (need to contain the export name once for unminified
# access from calling code, and should not have the unminified name exist more than once, that
# would be wasteful for size)
def test_function_exports_are_small(self):
def test(wasm, closure, opt):
extra_args = wasm + opt + closure
print(extra_args)
args = [EMCC, path_from_root('tests', 'long_function_name_in_export.c'), '-o', 'a.html', '-s', 'ENVIRONMENT=web', '-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-Werror'] + extra_args
run_process(args)
output = open('a.js', 'r').read()
try_delete('a.js')
self.assertNotContained('asm["_thisIsAFunctionExportedFromAsmJsOrWasmWithVeryLongFunction"]', output)
# TODO: Add stricter testing when Wasm side is also optimized: (currently Wasm does still need
# to reference exports multiple times)
if 'WASM=1' not in wasm:
num_times_export_is_referenced = output.count('thisIsAFunctionExportedFromAsmJsOrWasmWithVeryLongFunction')
self.assertEqual(num_times_export_is_referenced, 1)
for closure in [[], ['--closure', '1']]:
for opt in [['-O2'], ['-O3'], ['-Os']]:
test(['-s', 'WASM=0'], closure, opt)
test(['-s', 'WASM=1', '-s', 'WASM_ASYNC_COMPILATION=0'], closure, opt)
def test_minimal_runtime_code_size(self):
smallest_code_size_args = ['-s', 'MINIMAL_RUNTIME=2',
'-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1',
'-s', 'ENVIRONMENT=web',
'-s', 'TEXTDECODER=2',
'-s', 'ABORTING_MALLOC=0',
'-s', 'ALLOW_MEMORY_GROWTH=0',
'-s', 'SUPPORT_ERRNO=0',
'-s', 'DECLARE_ASM_MODULE_EXPORTS=1',
'-s', 'MALLOC=emmalloc',
'-s', 'GL_EMULATE_GLES_VERSION_STRING_FORMAT=0',
'-s', 'GL_EXTENSIONS_IN_PREFIXED_FORMAT=0',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0',
'-s', 'GL_TRACK_ERRORS=0',
'-s', 'GL_SUPPORT_EXPLICIT_SWAP_CONTROL=0',
'-s', 'GL_POOL_TEMP_BUFFERS=0',
'-s', 'FAST_UNROLLED_MEMCPY_AND_MEMSET=0',
'-s', 'MIN_CHROME_VERSION=58',
'-s', 'NO_FILESYSTEM=1',
'--output_eol', 'linux',
'-Oz',
'--closure', '1',
'-DNDEBUG',
'-ffast-math']
asmjs = ['-s', 'WASM=0', '--separate-asm', '-s', 'ELIMINATE_DUPLICATE_FUNCTIONS=1', '--memory-init-file', '1']
wasm2js = ['-s', 'WASM=0', '--memory-init-file', '1']
hello_world_sources = [path_from_root('tests', 'small_hello_world.c'),
'-s', 'RUNTIME_FUNCS_TO_IMPORT=[]',
'-s', 'USES_DYNAMIC_ALLOC=0',
'-s', 'ASM_PRIMITIVE_VARS=[STACKTOP]']
random_printf_sources = [path_from_root('tests', 'hello_random_printf.c'),
'-s', 'RUNTIME_FUNCS_TO_IMPORT=[]',
'-s', 'USES_DYNAMIC_ALLOC=0',
'-s', 'ASM_PRIMITIVE_VARS=[STACKTOP]',
'-s', 'SINGLE_FILE=1']
hello_webgl_sources = [path_from_root('tests', 'minimal_webgl', 'main.cpp'),
path_from_root('tests', 'minimal_webgl', 'webgl.c'),
'--js-library', path_from_root('tests', 'minimal_webgl', 'library_js.js'),
'-s', 'RUNTIME_FUNCS_TO_IMPORT=[]',
'-s', 'USES_DYNAMIC_ALLOC=1', '-lwebgl.js',
'-s', 'MODULARIZE=1']
hello_webgl2_sources = hello_webgl_sources + ['-s', 'MAX_WEBGL_VERSION=2']
def print_percent(actual, expected):
if actual == expected:
return ''
return ' ({:+.2f}%)'.format((actual - expected) * 100.0 / expected)
for js in [False, True]:
for sources, name in [
[hello_world_sources, 'hello_world'],
[random_printf_sources, 'random_printf'],
[hello_webgl_sources, 'hello_webgl'],
[hello_webgl2_sources, 'hello_webgl2']
]:
outputs = ['a.html', 'a.js']
test_name = name
args = smallest_code_size_args[:]
if not self.is_wasm_backend():
test_name += '_fastcomp'
if js:
outputs += ['a.mem']
if self.is_wasm_backend():
args += wasm2js
test_name += '_wasm2js'
else:
args += asmjs
outputs += ['a.asm.js']
test_name += '_asmjs'
else:
outputs += ['a.wasm']
test_name += '_wasm'
if 'SINGLE_FILE=1' in sources:
outputs = ['a.html']
results_file = path_from_root('tests', 'code_size', test_name + '.json')
print('\n-----------------------------\n' + test_name)
expected_results = {}
try:
expected_results = json.loads(open(results_file, 'r').read())
except Exception:
if not os.environ.get('EMTEST_REBASELINE'):
raise
args = [EMCC, '-o', 'a.html'] + args + sources
print('\n' + ' '.join(args))
run_process(args)
print('\n')
def get_file_gzipped_size(f):
f_gz = f + '.gz'
with gzip.open(f_gz, 'wb') as gzf:
gzf.write(open(f, 'rb').read())
size = os.path.getsize(f_gz)
try_delete(f_gz)
return size
obtained_results = {}
total_output_size = 0
total_expected_size = 0
total_output_size_gz = 0
total_expected_size_gz = 0
for f in outputs:
f_gz = f + '.gz'
expected_size = expected_results[f] if f in expected_results else float('inf')
expected_size_gz = expected_results[f_gz] if f_gz in expected_results else float('inf')
size = os.path.getsize(f)
size_gz = get_file_gzipped_size(f)
obtained_results[f] = size
obtained_results[f_gz] = size_gz
if size != expected_size and (f.endswith('.js') or f.endswith('.html')):
print('Contents of ' + f + ': ')
print(open(f, 'r').read())
print('size of ' + f + ' == ' + str(size) + ', expected ' + str(expected_size) + ', delta=' + str(size - expected_size) + print_percent(size, expected_size))
print('size of ' + f_gz + ' == ' + str(size_gz) + ', expected ' + str(expected_size_gz) + ', delta=' + str(size_gz - expected_size_gz) + print_percent(size_gz, expected_size_gz))
# Hack: Generated .mem initializer files have different sizes on different
# platforms (Windows gives x, CircleCI Linux gives x-17 bytes, my home
# Linux gives x+2 bytes..). Likewise asm.js files seem to be affected by
          # the LLVM IR text names, which lead to asm.js names, which lead to
          # different code sizes, which lead to different relooper choices,
          # as a result leading to slightly different total code sizes.
          # TODO: identify what is causing this. Meanwhile allow some amount of slop
mem_slop = 10 if self.is_wasm_backend() else 50
if size <= expected_size + mem_slop and size >= expected_size - mem_slop:
size = expected_size
          # N.B. even though the test code above prints out gzip compressed sizes, regression testing is done against uncompressed sizes.
          # This is because optimizing for compressed sizes can be unpredictable and sometimes counterproductive.
total_output_size += size
total_expected_size += expected_size
total_output_size_gz += size_gz
total_expected_size_gz += expected_size_gz
obtained_results['total'] = total_output_size
obtained_results['total_gz'] = total_output_size_gz
print('Total output size=' + str(total_output_size) + ' bytes, expected total size=' + str(total_expected_size) + ', delta=' + str(total_output_size - total_expected_size) + print_percent(total_output_size, total_expected_size))
print('Total output size gzipped=' + str(total_output_size_gz) + ' bytes, expected total size gzipped=' + str(total_expected_size_gz) + ', delta=' + str(total_output_size_gz - total_expected_size_gz) + print_percent(total_output_size_gz, total_expected_size_gz))
if os.environ.get('EMTEST_REBASELINE'):
open(results_file, 'w').write(json.dumps(obtained_results, indent=2) + '\n')
else:
if total_output_size > total_expected_size:
print('Oops, overall generated code size regressed by ' + str(total_output_size - total_expected_size) + ' bytes!')
if total_output_size < total_expected_size:
print('Hey amazing, overall generated code size was improved by ' + str(total_expected_size - total_output_size) + ' bytes! Rerun test with other.test_minimal_runtime_code_size with EMTEST_REBASELINE=1 to update the expected sizes!')
self.assertEqual(total_output_size, total_expected_size)
  # Test that legacy settings which have been fixed to a specific value can no longer be changed.
def test_legacy_settings_forbidden_to_change(self):
stderr = self.expect_fail([EMCC, '-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=0', path_from_root('tests', 'hello_world.c')])
self.assertContained('MEMFS_APPEND_TO_TYPED_ARRAYS=0 is no longer supported', stderr)
run_process([EMCC, '-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1', path_from_root('tests', 'hello_world.c')])
run_process([EMCC, '-s', 'PRECISE_I64_MATH=2', path_from_root('tests', 'hello_world.c')])
@no_fastcomp('depends on wasm backend .a linking')
def test_jsmath(self):
run_process([EMCC, path_from_root('tests', 'other', 'jsmath.cpp'), '-Os', '-o', 'normal.js', '--closure', '0'])
normal_js_size = os.path.getsize('normal.js')
normal_wasm_size = os.path.getsize('normal.wasm')
run_process([EMCC, path_from_root('tests', 'other', 'jsmath.cpp'), '-Os', '-o', 'jsmath.js', '-s', 'JS_MATH', '--closure', '0'])
jsmath_js_size = os.path.getsize('jsmath.js')
jsmath_wasm_size = os.path.getsize('jsmath.wasm')
# js math increases JS size, but decreases wasm, and wins overall
    # (it would win more with closure, but no point in making the test slower)
self.assertLess(normal_js_size, jsmath_js_size)
self.assertLess(jsmath_wasm_size, normal_wasm_size)
self.assertLess(jsmath_js_size + jsmath_wasm_size, 0.90 * (normal_js_size + normal_wasm_size))
# js math has almost identical output, but misses some corner cases, 4 out of 34
normal = run_js('normal.js').splitlines()
jsmath = run_js('jsmath.js').splitlines()
assert len(normal) == len(jsmath)
diff = 0
for i in range(len(normal)):
if normal[i] != jsmath[i]:
diff += 1
self.assertEqual(diff, 4)
def test_strict_mode_hello_world(self):
    # Verify that strict mode can be used for a simple hello world program, both
    # via the environment variable EMCC_STRICT=1 and from the command line `-s STRICT`
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'STRICT=1']
run_process(cmd)
with env_modify({'EMCC_STRICT': '1'}):
self.do_run(open(path_from_root('tests', 'hello_world.c')).read(), 'hello, world!')
def test_legacy_settings(self):
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'SPLIT_MEMORY=0']
# By default warnings are not shown
stderr = run_process(cmd, stderr=PIPE).stderr
self.assertNotContained('warning', stderr)
    # Adding -Wlegacy-settings enables the warning
stderr = run_process(cmd + ['-Wlegacy-settings'], stderr=PIPE).stderr
self.assertContained('warning: use of legacy setting: SPLIT_MEMORY', stderr)
self.assertContained('[-Wlegacy-settings]', stderr)
def test_strict_mode_legacy_settings(self):
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'SPLIT_MEMORY=0']
run_process(cmd)
stderr = self.expect_fail(cmd + ['-s', 'STRICT=1'])
self.assertContained('legacy setting used in strict mode: SPLIT_MEMORY', stderr)
with env_modify({'EMCC_STRICT': '1'}):
stderr = self.expect_fail(cmd)
self.assertContained('legacy setting used in strict mode: SPLIT_MEMORY', stderr)
def test_strict_mode_legacy_settings_runtime(self):
# Verify that legacy settings are not accessible at runtime under strict
# mode.
self.set_setting('RETAIN_COMPILER_SETTINGS', 1)
src = r'''\
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("BINARYEN_METHOD: %s\n", (char*)emscripten_get_compiler_setting("BINARYEN_METHOD"));
return 0;
}
'''
self.do_run(src, 'BINARYEN_METHOD: native-wasm')
with env_modify({'EMCC_STRICT': '1'}):
self.do_run(src, 'invalid compiler setting: BINARYEN_METHOD')
self.set_setting('STRICT', 1)
self.do_run(src, 'invalid compiler setting: BINARYEN_METHOD')
def test_renamed_setting(self):
    # Verify that renamed settings are available by either name (when not in
    # strict mode).
self.set_setting('RETAIN_COMPILER_SETTINGS', 1)
src = r'''\
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("%d %d\n",
emscripten_get_compiler_setting("BINARYEN_ASYNC_COMPILATION"),
emscripten_get_compiler_setting("WASM_ASYNC_COMPILATION"));
return 0;
}
'''
# Setting the new name should set both
self.set_setting('WASM_ASYNC_COMPILATION', 0)
self.do_run(src, '0 0')
self.set_setting('WASM_ASYNC_COMPILATION', 1)
self.do_run(src, '1 1')
self.clear_setting('WASM_ASYNC_COMPILATION')
# Setting the old name should set both
self.set_setting('BINARYEN_ASYNC_COMPILATION', 0)
self.do_run(src, '0 0')
self.set_setting('BINARYEN_ASYNC_COMPILATION', 1)
self.do_run(src, '1 1')
def test_strict_mode_legacy_settings_library(self):
create_test_file('lib.js', r'''
#if SPLIT_MEMORY
#endif
''')
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'out.js', '--js-library', 'lib.js']
run_process(cmd)
self.assertContained('ReferenceError: SPLIT_MEMORY is not defined', self.expect_fail(cmd + ['-s', 'STRICT=1']))
with env_modify({'EMCC_STRICT': '1'}):
self.assertContained('ReferenceError: SPLIT_MEMORY is not defined', self.expect_fail(cmd))
def test_safe_heap_log(self):
self.set_setting('SAFE_HEAP')
self.set_setting('SAFE_HEAP_LOG')
self.set_setting('EXIT_RUNTIME')
src = open(path_from_root('tests', 'hello_world.c')).read()
self.do_run(src, 'SAFE_HEAP load: ')
if not self.is_wasm_backend():
self.set_setting('WASM', 0)
self.do_run(src, 'SAFE_HEAP load: ')
@no_fastcomp('iprintf/__small_printf are wasm-backend-only features')
def test_mini_printfs(self):
def test(code):
with open('src.c', 'w') as f:
f.write('''
#include <stdio.h>
void* unknown_value;
int main() {
%s
}
''' % code)
run_process([EMCC, 'src.c', '-O1'])
return os.path.getsize('a.out.wasm')
i = test('printf("%d", *(int*)unknown_value);')
f = test('printf("%f", *(double*)unknown_value);')
lf = test('printf("%Lf", *(long double*)unknown_value);')
both = test('printf("%d", *(int*)unknown_value); printf("%Lf", *(long double*)unknown_value);')
print(i, f, lf, both)
# iprintf is much smaller than printf with float support
self.assertGreater(i, f - 3400)
self.assertLess(i, f - 3000)
# __small_printf is somewhat smaller than printf with long double support
self.assertGreater(f, lf - 900)
self.assertLess(f, lf - 500)
# both is a little bigger still
self.assertGreater(lf, both - 100)
self.assertLess(lf, both - 50)
@parameterized({
'normal': ([], '''\
0.000051 => -5.123719529365189373493194580078e-05
0.000051 => -5.123719300544352718866300544498e-05
0.000051 => -5.123719300544352718866300544498e-05
'''),
'full_long_double': (['-s', 'PRINTF_LONG_DOUBLE'], '''\
0.000051 => -5.123719529365189373493194580078e-05
0.000051 => -5.123719300544352718866300544498e-05
0.000051 => -5.123719300544352710023893104250e-05
'''),
})
@no_fastcomp('float128 is wasm backend only')
def test_long_double_printing(self, args, expected):
create_test_file('src.cpp', r'''
#include <stdio.h>
int main(void) {
float f = 5.123456789e-5;
double d = 5.123456789e-5;
long double ld = 5.123456789e-5;
printf("%f => %.30e\n", f, f / (f - 1));
printf("%f => %.30e\n", d, d / (d - 1));
printf("%Lf => %.30Le\n", ld, ld / (ld - 1));
}
''')
run_process([EMCC, 'src.cpp'] + args)
self.assertContained(expected, run_js('a.out.js'))
# Tests that passing -s MALLOC=none will not include system malloc() to the build.
def test_malloc_none(self):
stderr = self.expect_fail([EMCC, path_from_root('tests', 'malloc_none.c'), '-s', 'MALLOC=none'])
self.assertContained('undefined symbol: malloc', stderr)
@parameterized({
'c': ['c'],
'cpp': ['cpp'],
})
@no_fastcomp('lsan not supported on fastcomp')
def test_lsan_leaks(self, ext):
self.do_smart_test(path_from_root('tests', 'other', 'test_lsan_leaks.' + ext),
emcc_args=['-fsanitize=leak', '-s', 'ALLOW_MEMORY_GROWTH=1'],
assert_returncode=None, literals=[
'Direct leak of 2048 byte(s) in 1 object(s) allocated from',
'Direct leak of 1337 byte(s) in 1 object(s) allocated from',
'Direct leak of 42 byte(s) in 1 object(s) allocated from',
])
@parameterized({
'c': ['c', [
r'in malloc.*a\.out\.wasm\+0x',
r'(?im)in f (|[/a-z\.]:).*/test_lsan_leaks\.c:6:21$',
r'(?im)in main (|[/a-z\.]:).*/test_lsan_leaks\.c:10:16$',
r'(?im)in main (|[/a-z\.]:).*/test_lsan_leaks\.c:12:3$',
r'(?im)in main (|[/a-z\.]:).*/test_lsan_leaks\.c:13:3$',
]],
'cpp': ['cpp', [
r'in operator new\[\]\(unsigned long\).*a\.out\.wasm\+0x',
r'(?im)in f\(\) (|[/a-z\.]:).*/test_lsan_leaks\.cpp:4:21$',
r'(?im)in main (|[/a-z\.]:).*/test_lsan_leaks\.cpp:8:16$',
r'(?im)in main (|[/a-z\.]:).*/test_lsan_leaks\.cpp:10:3$',
r'(?im)in main (|[/a-z\.]:).*/test_lsan_leaks\.cpp:11:3$',
]],
})
@no_fastcomp('lsan not supported on fastcomp')
def test_lsan_stack_trace(self, ext, regexes):
self.do_smart_test(path_from_root('tests', 'other', 'test_lsan_leaks.' + ext),
emcc_args=['-fsanitize=leak', '-s', 'ALLOW_MEMORY_GROWTH=1', '-g4'],
assert_returncode=None, literals=[
'Direct leak of 2048 byte(s) in 1 object(s) allocated from',
'Direct leak of 1337 byte(s) in 1 object(s) allocated from',
'Direct leak of 42 byte(s) in 1 object(s) allocated from',
], regexes=regexes)
@parameterized({
'c': ['c'],
'cpp': ['cpp'],
})
@no_fastcomp('lsan not supported on fastcomp')
def test_lsan_no_leak(self, ext):
self.do_smart_test(path_from_root('tests', 'other', 'test_lsan_no_leak.' + ext),
emcc_args=['-fsanitize=leak', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'ASSERTIONS=0'],
regexes=[r'^\s*$'])
@no_fastcomp('lsan not supported on fastcomp')
def test_lsan_no_stack_trace(self):
self.do_smart_test(path_from_root('tests', 'other', 'test_lsan_leaks.c'),
emcc_args=['-fsanitize=leak', '-s', 'ALLOW_MEMORY_GROWTH=1', '-DDISABLE_CONTEXT'],
assert_returncode=None, literals=[
'Direct leak of 3427 byte(s) in 3 object(s) allocated from:',
'SUMMARY: LeakSanitizer: 3427 byte(s) leaked in 3 allocation(s).',
])
@no_fastcomp('asan is not supported on fastcomp')
def test_asan_null_deref(self):
self.do_smart_test(path_from_root('tests', 'other', 'test_asan_null_deref.c'),
emcc_args=['-fsanitize=address', '-s', 'ALLOW_MEMORY_GROWTH=1'],
assert_returncode=None, literals=[
'AddressSanitizer: null-pointer-dereference on address',
])
@no_fastcomp('asan is not supported on fastcomp')
def test_asan_no_stack_trace(self):
self.do_smart_test(path_from_root('tests', 'other', 'test_lsan_leaks.c'),
emcc_args=['-fsanitize=address', '-s', 'ALLOW_MEMORY_GROWTH=1', '-DDISABLE_CONTEXT', '-s', 'EXIT_RUNTIME'],
assert_returncode=None, literals=[
'Direct leak of 3427 byte(s) in 3 object(s) allocated from:',
'SUMMARY: AddressSanitizer: 3427 byte(s) leaked in 3 allocation(s).',
])
@no_fastcomp('asan is not supported on fastcomp')
def test_asan_pthread_stubs(self):
self.do_smart_test(path_from_root('tests', 'other', 'test_asan_pthread_stubs.c'), emcc_args=['-fsanitize=address', '-s', 'ALLOW_MEMORY_GROWTH=1'])
@parameterized({
'async': ['-s', 'WASM_ASYNC_COMPILATION=1'],
'sync': ['-s', 'WASM_ASYNC_COMPILATION=0'],
})
@no_fastcomp('offset converter is not supported on fastcomp')
def test_offset_converter(self, *args):
self.do_smart_test(path_from_root('tests', 'other', 'test_offset_converter.c'),
emcc_args=['-s', 'USE_OFFSET_CONVERTER', '-g4'] + list(args), literals=['ok'])
@no_windows('ptys and select are not available on windows')
@no_fastcomp('fastcomp clang detects colors differently')
def test_build_error_color(self):
create_test_file('src.c', 'int main() {')
returncode, output = self.run_on_pty([EMCC, 'src.c'])
self.assertNotEqual(returncode, 0)
self.assertIn(b"\x1b[1msrc.c:1:13: \x1b[0m\x1b[0;1;31merror: \x1b[0m\x1b[1mexpected '}'\x1b[0m", output)
self.assertIn(b"\x1b[31merror: ", output)
@parameterized({
'fno_diagnostics_color': ['-fno-diagnostics-color'],
'fdiagnostics_color_never': ['-fdiagnostics-color=never'],
})
@no_windows('ptys and select are not available on windows')
def test_pty_no_color(self, flag):
with open('src.c', 'w') as f:
f.write('int main() {')
returncode, output = self.run_on_pty([EMCC, flag, 'src.c'])
self.assertNotEqual(returncode, 0)
self.assertNotIn(b'\x1b', output)
@no_fastcomp('sanitizers are not supported on fastcomp')
def test_sanitizer_color(self):
create_test_file('src.c', '''
#include <emscripten.h>
int main() {
int *p = 0, q;
EM_ASM({ Module.printWithColors = true; });
q = *p;
}
''')
run_process([EMCC, '-fsanitize=null', 'src.c'])
output = run_js('a.out.js', stderr=PIPE, full_output=True)
self.assertIn('\x1b[1msrc.c', output)
@no_fastcomp('main param optimizations are upstream-only')
def test_main_reads_params(self):
create_test_file('no.c', '''
int main() {
return 42;
}
''')
run_process([EMCC, 'no.c', '-O3', '-o', 'no.js'])
no = os.path.getsize('no.js')
create_test_file('yes.c', '''
int main(int argc, char **argv) {
return argc;
}
''')
run_process([EMCC, 'yes.c', '-O3', '-o', 'yes.js'])
yes = os.path.getsize('yes.js')
# not having to set up argc/argv allows us to avoid including a
# significant amount of JS for string support (which is not needed
# otherwise in such a trivial program).
self.assertLess(no, 0.95 * yes)
@no_fastcomp('not optimized in fastcomp')
def test_INCOMING_MODULE_JS_API(self):
def test(args):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-O3', '--closure', '1'] + args)
for engine in JS_ENGINES:
self.assertContained('hello, world!', run_js('a.out.js', engine=engine))
with open('a.out.js') as f:
# ignore \r which on windows can increase the size
return len(f.read().replace('\r', ''))
normal = test([])
changed = test(['-s', 'INCOMING_MODULE_JS_API=[]'])
print('sizes', normal, changed)
# Changing this option to [] should decrease code size.
self.assertLess(changed, normal)
# Check an absolute code size as well, with some slack.
self.assertLess(abs(changed - 5795), 150)
def test_llvm_includes(self):
self.build('#include <stdatomic.h>', self.get_dir(), 'atomics.c')
def test_mmap_and_munmap(self):
emcc_args = []
for f in ['data_ro.dat', 'data_rw.dat']:
create_test_file(f, 'Test file')
emcc_args.extend(['--embed-file', f])
self.do_other_test('mmap_and_munmap', emcc_args)
def test_mmap_and_munmap_anonymous(self):
self.do_other_test('mmap_and_munmap_anonymous', emcc_args=['-s', 'NO_FILESYSTEM'])
def test_mmap_memorygrowth(self):
self.do_other_test('mmap_memorygrowth', ['-s', 'ALLOW_MEMORY_GROWTH=1'])
def test_files_and_module_assignment(self):
# a pre-js can set Module to a new object or otherwise undo file preloading/
# embedding changes to Module.preRun. we show an error to avoid confusion
create_test_file('pre.js', 'Module = {};')
create_test_file('src.cpp', r'''
#include <stdio.h>
int main() {
printf("file exists: %d\n", !!fopen("src.cpp", "rb"));
}
''')
run_process([EMCC, 'src.cpp', '--pre-js', 'pre.js', '--embed-file', 'src.cpp'])
result = run_js('a.out.js', assert_returncode=None, stderr=PIPE, full_output=True)
self.assertContained('Module.preRun should exist because file support used it; did a pre-js delete it?', result)
def test_error(pre):
create_test_file('pre.js', pre)
run_process([EMCC, 'src.cpp', '--pre-js', 'pre.js', '--embed-file', 'src.cpp'])
result = run_js('a.out.js', assert_returncode=None, stderr=PIPE, full_output=True)
self.assertContained('All preRun tasks that exist before user pre-js code should remain after; did you replace Module or modify Module.preRun?', result)
# error if the user replaces Module or Module.preRun
test_error('Module = { preRun: [] };')
test_error('Module.preRun = [];')
@no_fastcomp('fastcomp defines this in the backend itself, so it is always on there')
def test_EMSCRIPTEN_and_STRICT(self):
# __EMSCRIPTEN__ is the proper define; we support EMSCRIPTEN for legacy
# code, unless STRICT is enabled.
create_test_file('src.c', '''
#ifndef EMSCRIPTEN
#error "not defined"
#endif
''')
run_process([EMCC, 'src.c', '-c'])
self.expect_fail([EMCC, 'src.c', '-s', 'STRICT', '-c'])
def test_exception_settings(self):
for catching, throwing, opts in itertools.product([0, 1], repeat=3):
cmd = [EMCC, path_from_root('tests', 'other', 'exceptions_modes_symbols_defined.cpp'), '-s', 'DISABLE_EXCEPTION_THROWING=%d' % (1 - throwing), '-s', 'DISABLE_EXCEPTION_CATCHING=%d' % (1 - catching), '-O%d' % opts]
print(cmd)
if not throwing and not catching:
self.assertContained('DISABLE_EXCEPTION_THROWING was set (likely due to -fno-exceptions), which means no C++ exception throwing support code is linked in, but such support is required', self.expect_fail(cmd))
elif not throwing and catching:
self.assertContained('DISABLE_EXCEPTION_THROWING was set (probably from -fno-exceptions) but is not compatible with enabling exception catching (DISABLE_EXCEPTION_CATCHING=0)', self.expect_fail(cmd))
else:
run_process(cmd)
@no_fastcomp('new clang feature')
def test_fignore_exceptions(self):
# the new clang flag -fignore-exceptions basically is the same as -s DISABLE_EXCEPTION_CATCHING=1,
# that is, it allows throwing, but emits no support code for catching.
run_process([EMCC, path_from_root('tests', 'other', 'exceptions_modes_symbols_defined.cpp'), '-s', 'DISABLE_EXCEPTION_CATCHING=0'])
enable_size = os.path.getsize('a.out.wasm')
run_process([EMCC, path_from_root('tests', 'other', 'exceptions_modes_symbols_defined.cpp'), '-s', 'DISABLE_EXCEPTION_CATCHING=1'])
disable_size = os.path.getsize('a.out.wasm')
run_process([EMCC, path_from_root('tests', 'other', 'exceptions_modes_symbols_defined.cpp'), '-s', '-fignore-exceptions'])
ignore_size = os.path.getsize('a.out.wasm')
self.assertGreater(enable_size, disable_size)
self.assertEqual(disable_size, ignore_size)
@no_fastcomp('assumes wasm object files')
def test_f_exception(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
int main () {
try {
throw 42;
} catch (int e) {
printf("CAUGHT: %d\n", e);
}
return 0;
}
''')
for compile_flags, link_flags, expect_caught in [
# exceptions are off by default
([], [], False),
# enabling exceptions at link and compile works
(['-fexceptions'], ['-fexceptions'], True),
# just compile isn't enough as the JS runtime lacks support
(['-fexceptions'], [], False),
# just link isn't enough as codegen didn't emit exceptions support
([], ['-fexceptions'], False),
]:
print(compile_flags, link_flags, expect_caught)
run_process([EMCC, 'src.cpp', '-c', '-o', 'src.o'] + compile_flags)
run_process([EMCC, 'src.o'] + link_flags)
result = run_js('a.out.js', assert_returncode=None, stderr=PIPE)
self.assertContainedIf('CAUGHT', result, expect_caught)
def test_assertions_on_internal_api_changes(self):
create_test_file('src.c', r'''
#include <emscripten.h>
int main(int argc, char **argv) {
EM_ASM({
try {
Module['read'];
out('it should not be there');
} catch(e) {
out('error: ' + e);
}
});
}
''')
run_process([EMCC, 'src.c', '-s', 'ASSERTIONS'])
self.assertContained('Module.read has been replaced with plain read', run_js('a.out.js'))
def test_assertions_on_incoming_module_api_changes(self):
create_test_file('pre.js', r'''
var Module = {
read: function() {}
}
''')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'ASSERTIONS', '--pre-js', 'pre.js'])
self.assertContained('Module.read option was removed', run_js('a.out.js', assert_returncode=None, stderr=PIPE))
def test_assertions_on_outgoing_module_api_changes(self):
create_test_file('src.cpp', r'''
#include <emscripten.h>
int main() {
EM_ASM({
console.log();
function check(name) {
try {
Module[name];
console.log("success: " + name);
} catch(e) {
}
}
check("read");
// TODO check("setWindowTitle");
check("wasmBinary");
check("arguments");
});
}
''')
run_process([EMCC, 'src.cpp', '-s', 'ASSERTIONS'])
self.assertContained('''
Module.read has been replaced with plain read_ (the initial value can be provided on Module, but after startup the value is only looked for on a local variable of that name)
Module.wasmBinary has been replaced with plain wasmBinary (the initial value can be provided on Module, but after startup the value is only looked for on a local variable of that name)
Module.arguments has been replaced with plain arguments_ (the initial value can be provided on Module, but after startup the value is only looked for on a local variable of that name)
''', run_js('a.out.js', assert_returncode=None, stderr=PIPE))
def test_assertions_on_ready_promise(self):
# check that when assertions are on we give useful error messages for
# mistakenly thinking the Promise is an instance. I.e., once you could do
# Module()._main to get an instance and the main function, but after
# the breaking change in #10697 Module() now returns a promise, and to get
# the instance you must use .then() to get a callback with the instance.
create_test_file('test.js', r'''
try {
Module()._main;
} catch(e) {
console.log(e);
}
try {
Module().onRuntimeInitialized = 42;
} catch(e) {
console.log(e);
}
''')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MODULARIZE', '-s', 'ASSERTIONS', '--extern-post-js', 'test.js'])
out = run_js('a.out.js')
self.assertContained('You are getting _main on the Promise object, instead of the instance. Use .then() to get called back with the instance, see the MODULARIZE docs in src/settings.js', out)
self.assertContained('You are setting onRuntimeInitialized on the Promise object, instead of the instance. Use .then() to get called back with the instance, see the MODULARIZE docs in src/settings.js', out)
def test_em_asm_duplicate_strings(self):
# We had a regression where two different EM_ASM strings from two different
# object files were de-duplicated in wasm-emscripten-finalize. This used to
# work when we used a zero-based index to store the JS strings, but once we
# switched to absolute addresses the string needs to exist twice in the JS
# file.
create_test_file('foo.c', '''
#include <emscripten.h>
void foo() {
EM_ASM({ console.log('Hello, world!'); });
}
''')
create_test_file('main.c', '''
#include <emscripten.h>
void foo();
int main() {
foo();
EM_ASM({ console.log('Hello, world!'); });
return 0;
}
''')
run_process([EMCC, '-c', 'foo.c'])
run_process([EMCC, '-c', 'main.c'])
run_process([EMCC, 'foo.o', 'main.o'])
self.assertContained('Hello, world!\nHello, world!\n', run_js('a.out.js'))
def test_em_asm_strict_c(self):
create_test_file('src.c', '''
#include <emscripten/em_asm.h>
int main() {
EM_ASM({ console.log('Hello, world!'); });
}
''')
result = run_process([EMCC, '-std=c11', 'src.c'], stderr=PIPE, check=False)
self.assertNotEqual(result.returncode, 0)
self.assertIn('EM_ASM does not work in -std=c* modes, use -std=gnu* modes instead', result.stderr)
def test_boost_graph(self):
self.do_smart_test(path_from_root('tests', 'test_boost_graph.cpp'),
emcc_args=['-s', 'USE_BOOST_HEADERS=1'],
assert_returncode=0)
@no_fastcomp('EM_ASM and setjmp works fine on fastcomp')
def test_setjmp_em_asm(self):
create_test_file('src.c', '''
#include <emscripten.h>
#include <setjmp.h>
int main() {
jmp_buf buf;
setjmp(buf);
EM_ASM({
console.log("hello world");
});
}
''')
result = run_process([EMCC, 'src.c'], stderr=PIPE, check=False)
self.assertNotEqual(result.returncode, 0)
self.assertIn('Cannot use EM_ASM* alongside setjmp/longjmp', result.stderr)
self.assertIn('Please consider using EM_JS, or move the EM_ASM into another function.', result.stderr)
def test_missing_stdlibs(self):
# Certain standard libraries are expected to be useable via -l flags but
# don't actually exist in our standard library path. Make sure we don't
# error out when linking with these flags.
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-lm', '-ldl', '-lrt', '-lpthread'])
@no_fastcomp('lld-specific')
def test_supported_linker_flags(self):
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,--print-map'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `--print-map`', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Xlinker', '--print-map'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `--print-map`', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,-rpath=foo'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `-rpath=foo`', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,-rpath-link,foo'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `-rpath-link`', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'),
'-Wl,--no-check-features,-mllvm,-debug'], stderr=PIPE).stderr
self.assertNotContained('warning: ignoring unsupported linker flag', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,-allow-shlib-undefined'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `-allow-shlib-undefined`', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,--allow-shlib-undefined'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `--allow-shlib-undefined`', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,-version-script,foo'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `-version-script`', out)
@no_fastcomp('lld-specific')
def test_linker_flags_pass_through(self):
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,--waka'])
self.assertContained('wasm-ld: error: unknown argument: --waka', err)
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Xlinker', '--waka'])
self.assertContained('wasm-ld: error: unknown argument: --waka', err)
def test_linker_flags_unused(self):
err = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-c', '-lbar'], stderr=PIPE).stderr
self.assertContained("warning: argument unused during compilation: '-lbar' [-Wunused-command-line-argument]", err)
def test_non_wasm_without_wasm_in_vm(self):
# Test that our non-wasm output does not depend on wasm support in the vm.
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'WASM=0'])
with open('a.out.js') as f:
js = f.read()
with open('a.out.js', 'w') as f:
f.write('var WebAssembly = null;\n' + js)
for engine in JS_ENGINES:
self.assertContained('hello, world!', run_js('a.out.js', engine=engine))
def test_compile_only_with_object_extension(self):
# Emscripten supports compiling to an object file when the output has an
# object extension.
# Most compilers require the `-c` to be explicit.
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-c', '-o', 'hello1.o'])
err = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-o', 'hello2.o'], stderr=PIPE).stderr
self.assertContained('warning: Assuming object file output in the absence of `-c`', err)
self.assertBinaryEqual('hello1.o', 'hello2.o')
def test_empty_output_extension(self):
# Default to JS output when no extension is present
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Werror', '-o', 'hello'])
self.assertContained('hello, world!', run_js('hello'))
def test_backwards_deps_in_archive(self):
# Test that JS dependencies from deps_info.json work for code linked via
# static archives using -l<name>
run_process([EMCC, path_from_root('tests', 'sockets', 'test_gethostbyname.c'), '-o', 'a.o'])
run_process([LLVM_AR, 'cr', 'liba.a', 'a.o'])
create_test_file('empty.c', 'static int foo = 0;')
run_process([EMCC, 'empty.c', '-la', '-L.'])
self.assertContained('success', run_js('a.out.js'))
def test_warning_flags(self):
create_test_file('not_object.bc', 'some text')
run_process([EMCC, '-c', '-o', 'hello.o', path_from_root('tests', 'hello_world.c')])
cmd = [EMCC, 'hello.o', 'not_object.bc', '-o', 'a.wasm']
# warning that is enabled by default
stderr = run_process(cmd, stderr=PIPE).stderr
self.assertContained('emcc: warning: not_object.bc is not a valid input file [-Winvalid-input]', stderr)
# -w to suppress warnings
stderr = run_process(cmd + ['-w'], stderr=PIPE).stderr
self.assertNotContained('warning', stderr)
# -Wno-invalid-input to suppress just this one warning
stderr = run_process(cmd + ['-Wno-invalid-input'], stderr=PIPE).stderr
self.assertNotContained('warning', stderr)
# with -Werror should fail
stderr = self.expect_fail(cmd + ['-Werror'])
self.assertContained('emcc: error: not_object.bc is not a valid input file [-Winvalid-input] [-Werror]', stderr)
# with -Werror + -Wno-error=<type> should only warn
stderr = run_process(cmd + ['-Werror', '-Wno-error=invalid-input'], stderr=PIPE).stderr
self.assertContained('emcc: warning: not_object.bc is not a valid input file [-Winvalid-input]', stderr)
# check that `-Werror=foo` also enables foo
stderr = self.expect_fail(cmd + ['-Werror=legacy-settings', '-s', 'TOTAL_MEMORY=1'])
self.assertContained('error: use of legacy setting: TOTAL_MEMORY (setting renamed to INITIAL_MEMORY) [-Wlegacy-settings] [-Werror]', stderr)
def test_emranlib(self):
create_test_file('foo.c', 'int foo = 1;')
create_test_file('bar.c', 'int bar = 2;')
run_process([EMCC, '-c', 'foo.c', 'bar.c'])
# Create a library with no archive map
run_process([EMAR, 'crS', 'liba.a', 'foo.o', 'bar.o'])
output = run_process([shared.LLVM_NM, '--print-armap', 'liba.a'], stdout=PIPE).stdout
self.assertNotContained('Archive map', output)
# Add an archive map
run_process([EMRANLIB, 'liba.a'])
output = run_process([shared.LLVM_NM, '--print-armap', 'liba.a'], stdout=PIPE).stdout
self.assertContained('Archive map', output)
def test_pthread_stub(self):
# Verify that programs containing pthread code can still be compiled even
# without enabling threads. This is possible because we link in
# libpthread_stub.a
create_test_file('pthread.c', '''
#include <pthread.h>
int main() {
pthread_atfork(NULL, NULL, NULL);
return 0;
}
''')
run_process([EMCC, 'pthread.c'])
def test_stdin_preprocess(self):
create_test_file('temp.h', '#include <string>')
outputStdin = run_process([EMCC, '-x', 'c++', '-dM', '-E', '-'], input="#include <string>", stdout=PIPE).stdout
outputFile = run_process([EMCC, '-x', 'c++', '-dM', '-E', 'temp.h'], stdout=PIPE).stdout
self.assertTextDataIdentical(outputStdin, outputFile)
def test_stdin_compile_only(self):
# Should fail without -x lang specifier
with open(path_from_root('tests', 'hello_world.cpp')) as f:
err = self.expect_fail([EMCC, '-c', '-'], input=f.read())
self.assertContained('error: -E or -x required when input is from standard input', err)
with open(path_from_root('tests', 'hello_world.cpp')) as f:
run_process([EMCC, '-c', '-o', 'out.o', '-x', 'c++', '-'], input=f.read())
self.assertExists('out.o')
# Same again but without an explicit output filename
with open(path_from_root('tests', 'hello_world.cpp')) as f:
run_process([EMCC, '-c', '-x', 'c++', '-'], input=f.read())
self.assertExists('-.o')
def test_stdin_compile_and_link(self):
with open(path_from_root('tests', 'hello_world.cpp')) as f:
run_process([EMCC, '-x', 'c++', '-'], input=f.read())
self.assertContained('hello, world!', run_js('a.out.js'))
def is_object_file(self, filename):
if self.is_wasm_backend():
return building.is_wasm(filename)
else:
return building.is_bitcode(filename)
def test_stdout_link(self):
# linking to stdout `-` doesn't work, we have no way to pass such an output filename
# through post-link tools such as binaryen.
err = self.expect_fail([EMCC, '-o', '-', path_from_root('tests', 'hello_world.cpp')])
self.assertContained('invalid output filename: `-`', err)
self.assertNotExists('-')
err = self.expect_fail([EMCC, '-o', '-foo', path_from_root('tests', 'hello_world.cpp')])
self.assertContained('invalid output filename: `-foo`', err)
self.assertNotExists('-foo')
def test_output_to_nowhere(self):
nowhere = 'NULL' if WINDOWS else '/dev/null'
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-o', nowhere, '-c'])
# Test that passing -s MIN_X_VERSION=-1 on the command line will result in browser X being not supported at all.
# I.e. -s MIN_X_VERSION=-1 is equal to -s MIN_X_VERSION=Infinity
def test_drop_support_for_browser(self):
# Test that -1 means "not supported"
run_process([EMCC, path_from_root('tests', 'test_html5.c'), '-s', 'MIN_IE_VERSION=-1'])
self.assertContained('allowsDeferredCalls: true', open('a.out.js').read())
self.assertNotContained('allowsDeferredCalls: JSEvents.isInternetExplorer()', open('a.out.js').read())
def test_errno_type(self):
create_test_file('errno_type.c', '''
#include <errno.h>
// Use of these constants in C preprocessor comparisons should work.
#if EPERM > 0
#define DAV1D_ERR(e) (-(e))
#else
#define DAV1D_ERR(e) (e)
#endif
''')
run_process([EMCC, 'errno_type.c'])
@no_fastcomp("uses standalone mode")
def test_standalone_syscalls(self):
run_process([EMCC, path_from_root('tests', 'other', 'standalone_syscalls', 'test.cpp'), '-o', 'test.wasm'])
with open(path_from_root('tests', 'other', 'standalone_syscalls', 'test.out')) as f:
expected = f.read()
for engine in WASM_ENGINES:
self.assertContained(expected, run_js('test.wasm', engine))
@no_windows('TODO: fix setjmp.h on clang on windows on ci')
@no_fastcomp("uses standalone mode")
def test_wasm2c_reactor(self):
# test compiling an unsafe library using wasm2c, then using it from a
# main program. this shows it is easy to use wasm2c as a sandboxing
# mechanism.
# first compile the library with emcc, getting a .c and .h
run_process([EMCC,
path_from_root('tests', 'other', 'wasm2c', 'unsafe-library.c'),
'-O3', '-o', 'lib.wasm', '-s', 'WASM2C', '--no-entry'])
# compile that .c to a native object
run_process([CLANG_CC, 'lib.wasm.c', '-c', '-O3', '-o', 'lib.o'])
# compile the main program natively normally, and link with the
# unsafe library
run_process([CLANG_CC,
path_from_root('tests', 'other', 'wasm2c', 'my-code.c'),
'-O3', 'lib.o', '-o', 'program.exe'])
output = run_process([os.path.abspath('program.exe')], stdout=PIPE).stdout
with open(path_from_root('tests', 'other', 'wasm2c', 'output.txt')) as f:
self.assertEqual(output, f.read())
@parameterized({
'wasm2js': (['-s', 'WASM=0'], ''),
'modularize': (['-s', 'MODULARIZE'], 'Module()'),
})
@no_fastcomp('wasm2js only')
def test_promise_polyfill(self, constant_args, extern_post_js):
def test(args):
# legacy browsers may lack Promise, which wasm2js depends on. see what
# happens when we kill the global Promise function.
create_test_file('extern-post.js', extern_post_js)
run_process([EMCC, path_from_root('tests', 'hello_world.cpp')] + constant_args + args + ['--extern-post-js', 'extern-post.js'])
with open('a.out.js') as f:
js = f.read()
with open('a.out.js', 'w') as f:
f.write('Promise = undefined;\n' + js)
return run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None)
# we fail without legacy support
self.assertNotContained('hello, world!', test([]))
# but work with it
self.assertContained('hello, world!', test(['-s', 'LEGACY_VM_SUPPORT']))
# Compile-test for -s USE_WEBGPU=1 and library_webgpu.js.
def test_webgpu_compiletest(self):
for args in [[], ['-s', 'ASSERTIONS=1']]:
run_process([EMCC, path_from_root('tests', 'webgpu_dummy.cpp'), '-s', 'USE_WEBGPU=1'] + args)
@no_fastcomp('lld only')
def test_signature_mismatch(self):
create_test_file('a.c', 'void foo(); int main() { foo(); return 0; }')
create_test_file('b.c', 'int foo() { return 1; }')
stderr = run_process([EMCC, 'a.c', 'b.c'], stderr=PIPE).stderr
self.assertContained('function signature mismatch: foo', stderr)
self.expect_fail([EMCC, '-Wl,--fatal-warnings', 'a.c', 'b.c'])
self.expect_fail([EMCC, '-s', 'STRICT', 'a.c', 'b.c'])
@no_fastcomp('lld only')
def test_lld_report_undefined(self):
create_test_file('main.c', 'void foo(); int main() { foo(); return 0; }')
stderr = self.expect_fail([EMCC, '-s', 'LLD_REPORT_UNDEFINED', 'main.c'])
self.assertContained('wasm-ld: error:', stderr)
self.assertContained('main_0.o: undefined symbol: foo', stderr)
@no_fastcomp('wasm backend only')
def test_4GB(self):
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=2GB'])
self.assertContained('INITIAL_MEMORY must be less than 2GB due to current spec limitations', stderr)
# Verifies that warning messages that Closure outputs are recorded to console
def test_closure_warnings(self):
proc = run_process([EMCC, path_from_root('tests', 'test_closure_warning.c'), '-O3', '--closure', '1', '-s', 'CLOSURE_WARNINGS=quiet'], stderr=PIPE)
self.assertNotContained('WARNING', proc.stderr)
proc = run_process([EMCC, path_from_root('tests', 'test_closure_warning.c'), '-O3', '--closure', '1', '-s', 'CLOSURE_WARNINGS=warn'], stderr=PIPE)
self.assertContained('WARNING - [JSC_REFERENCE_BEFORE_DECLARE] Variable referenced before declaration', proc.stderr)
self.expect_fail([EMCC, path_from_root('tests', 'test_closure_warning.c'), '-O3', '--closure', '1', '-s', 'CLOSURE_WARNINGS=error'])
@no_fastcomp('test wasm object files')
def test_bitcode_input(self):
# Verify that bitcode files are accepted as input
create_test_file('main.c', 'void foo(); int main() { return 0; }')
run_process([EMCC, '-emit-llvm', '-c', '-o', 'main.bc', 'main.c'])
self.assertTrue(building.is_bitcode('main.bc'))
run_process([EMCC, '-c', '-o', 'main.o', 'main.bc'])
self.assertTrue(building.is_wasm('main.o'))
def test_nostdlib(self):
# First ensure all the system libs are built
run_process([EMCC, path_from_root('tests', 'unistd', 'close.c')])
self.assertContained('undefined symbol:', self.expect_fail([EMCC, path_from_root('tests', 'unistd', 'close.c'), '-nostdlib']))
self.assertContained('undefined symbol:', self.expect_fail([EMCC, path_from_root('tests', 'unistd', 'close.c'), '-nodefaultlibs']))
# Build again but with explicit system libraries
libs = ['-lc', '-lcompiler_rt']
if self.is_wasm_backend():
libs.append('-lc_rt_wasm')
run_process([EMCC, path_from_root('tests', 'unistd', 'close.c'), '-nostdlib'] + libs)
run_process([EMCC, path_from_root('tests', 'unistd', 'close.c'), '-nodefaultlibs'] + libs)
def test_argument_match(self):
# Verify that emcc arguments match precisely. We had a bug where only the prefix
# was matched
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '--js-opts', '10'])
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '--js-optsXX'])
self.assertContained("error: unsupported option '--js-optsXX'", err)
def test_missing_argument(self):
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '--js-opts'])
self.assertContained("error: option '--js-opts' requires an argument", err)
def test_default_to_cxx(self):
create_test_file('foo.h', '#include <string.h>')
create_test_file('cxxfoo.h', '#include <string>')
# The default behaviour is to default to C++, which means the C++ header can be compiled even
# with emcc.
run_process([EMCC, '-c', 'cxxfoo.h'])
# But this means that C flags can't be passed (since we are assuming C++)
err = self.expect_fail([EMCC, '-std=gnu11', '-c', 'foo.h'])
self.assertContained("'-std=gnu11' not allowed with 'C++'", err)
# If we disable DEFAULT_TO_CXX the emcc can be used with cflags, but can't be used to build
# C++ headers
run_process([EMCC, '-std=gnu11', '-c', 'foo.h', '-s', 'DEFAULT_TO_CXX=0'])
err = self.expect_fail([EMCC, '-c', 'cxxfoo.h', '-s', 'DEFAULT_TO_CXX=0'])
self.assertContained("'string' file not found", err)
# Using em++ should always work for C++ headers
run_process([EMXX, '-c', 'cxxfoo.h', '-s', 'DEFAULT_TO_CXX=0'])
# Or using emcc with `-x c++`
run_process([EMCC, '-c', 'cxxfoo.h', '-s', 'DEFAULT_TO_CXX=0', '-x', 'c++-header'])
@parameterized({
'': ([],),
'minimal': (['-s', 'MINIMAL_RUNTIME'],),
})
def test_support_errno(self, args):
self.emcc_args += args
src = path_from_root('tests', 'core', 'test_support_errno.c')
output = path_from_root('tests', 'core', 'test_support_errno.out')
self.do_run_from_file(src, output)
size_default = os.path.getsize('src.c.o.js')
# Run the same test again but with SUPPORT_ERRNO disabled. This time we don't expect errno
# to be set after the failing syscall.
self.set_setting('SUPPORT_ERRNO', 0)
output = path_from_root('tests', 'core', 'test_support_errno_disabled.out')
self.do_run_from_file(src, output)
# Verify the JS output was smaller
self.assertLess(os.path.getsize('src.c.o.js'), size_default)
@no_fastcomp('no .s file support')
def test_assembly(self):
run_process([EMCC, '-c', path_from_root('tests', 'other', 'test_asm.s'), '-o', 'foo.o'])
src = path_from_root('tests', 'other', 'test_asm.c')
output = path_from_root('tests', 'other', 'test_asm.out')
self.emcc_args.append('foo.o')
self.do_run_from_file(src, output)
@no_fastcomp('no .s file support')
def test_assembly_preprocessed(self):
run_process([EMCC, '-c', path_from_root('tests', 'other', 'test_asm_cpp.S'), '-o', 'foo.o'])
src = path_from_root('tests', 'other', 'test_asm.c')
output = path_from_root('tests', 'other', 'test_asm.out')
self.emcc_args.append('foo.o')
self.do_run_from_file(src, output)
def test_export_global_address(self):
src = path_from_root('tests', 'other', 'test_export_global_address.c')
output = path_from_root('tests', 'other', 'test_export_global_address.out')
self.do_run_from_file(src, output)
@no_fastcomp('wasm-ld only')
def test_linker_version(self):
out = run_process([EMCC, '-Wl,--version'], stdout=PIPE).stdout
self.assertContained('LLD ', out)
# Tests that if a JS library function is missing, the linker will print out which function
# depended on the missing function.
def test_chained_js_error_diagnostics(self):
err = self.expect_fail([EMCC, path_from_root('tests', 'test_chained_js_error_diagnostics.c'), '--js-library', path_from_root('tests', 'test_chained_js_error_diagnostics.js')])
self.assertContained("error: undefined symbol: nonexistent_function (referenced by bar__deps: ['nonexistent_function'], referenced by foo__deps: ['bar'], referenced by top-level compiled C/C++ code)", err)
def test_xclang_flag(self):
create_test_file('foo.h', ' ')
run_process([EMCC, '-c', '-o', 'out.o', '-Xclang', '-include', '-Xclang', 'foo.h', path_from_root('tests', 'hello_world.c')])
def test_emcc_size_parsing(self):
create_test_file('foo.h', ' ')
err = self.expect_fail([EMCC, '-s', 'TOTAL_MEMORY=X'])
self.assertContained('error: invalid byte size `X`. Valid suffixes are: kb, mb, gb, tb', err)
err = self.expect_fail([EMCC, '-s', 'TOTAL_MEMORY=11PB'])
self.assertContained('error: invalid byte size `11PB`. Valid suffixes are: kb, mb, gb, tb', err)
def test_native_call_before_init(self):
self.set_setting('ASSERTIONS')
self.set_setting('EXPORTED_FUNCTIONS', ['_foo'])
self.add_pre_run('console.log("calling foo"); Module["_foo"]();')
self.build('#include <stdio.h>\nint foo() { puts("foo called"); return 3; }', self.get_dir(), 'foo.c')
err = self.expect_fail(NODE_JS + ['foo.c.o.js'], stdout=PIPE)
self.assertContained('native function `foo` called before runtime initialization', err)
def test_native_call_after_exit(self):
self.set_setting('ASSERTIONS')
self.set_setting('EXIT_RUNTIME')
self.add_on_exit('console.log("calling main again"); Module["_main"]();')
self.build('#include <stdio.h>\nint main() { puts("foo called"); return 0; }', self.get_dir(), 'foo.c')
err = self.expect_fail(NODE_JS + ['foo.c.o.js'], stdout=PIPE)
self.assertContained('native function `main` called after runtime exit', err)
|
the-stack_0_11464 | #!/usr/bin/env python
# Run this test like so:
# vtkpython TestLinePlot.py -D $VTK_DATA_ROOT \
# -B $VTK_DATA_ROOT/Baseline/Charts/
import os
import vtk
import vtk.test.Testing
import math
class TestLinePlot(vtk.test.Testing.vtkTest):
def testLinePlot(self):
"Test if line plots can be built with python"
# Set up a 2D scene, add an XY chart to it
view = vtk.vtkContextView()
view.GetRenderer().SetBackground(1.0,1.0,1.0)
view.GetRenderWindow().SetSize(400,300)
chart = vtk.vtkChartXY()
view.GetScene().AddItem(chart)
# Create a table with some points in it
table = vtk.vtkTable()
arrX = vtk.vtkFloatArray()
arrX.SetName("X Axis")
arrC = vtk.vtkFloatArray()
arrC.SetName("Cosine")
arrS = vtk.vtkFloatArray()
arrS.SetName("Sine")
arrS2 = vtk.vtkFloatArray()
arrS2.SetName("Sine2")
numPoints = 69
inc = 7.5 / (numPoints - 1)
for i in range(0,numPoints):
arrX.InsertNextValue(i*inc)
arrC.InsertNextValue(math.cos(i * inc) + 0.0)
arrS.InsertNextValue(math.sin(i * inc) + 0.0)
arrS2.InsertNextValue(math.sin(i * inc) + 0.5)
table.AddColumn(arrX)
table.AddColumn(arrC)
table.AddColumn(arrS)
table.AddColumn(arrS2)
# Now add the line plots with appropriate colors
line = chart.AddPlot(0)
line.SetInput(table,0,1)
line.SetColor(0,255,0,255)
line.SetWidth(1.0)
line = chart.AddPlot(0)
line.SetInput(table,0,2)
line.SetColor(255,0,0,255);
line.SetWidth(5.0)
line = chart.AddPlot(0)
line.SetInput(table,0,3)
line.SetColor(0,0,255,255);
line.SetWidth(4.0)
view.GetRenderWindow().SetMultiSamples(0)
img_file = "TestLinePlot.png"
vtk.test.Testing.compareImage(view.GetRenderWindow(),vtk.test.Testing.getAbsImagePath(img_file),threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestLinePlot, 'test')])
|
the-stack_0_11465 | from kashgari.corpus import ChineseDailyNerCorpus
from kashgari.embeddings import BERTEmbedding
import kashgari
from kashgari.tasks.labeling import BiLSTM_CRF_Model
"""
pip install tensorflow==1.15.3
pip install 'kashgari>=1.0.0,<2.0.0'
"""
"""
https://eliyar.biz/nlp_chinese_bert_ner/
"""
def main():
# train_x, train_y = ChineseDailyNerCorpus.load_data("train")
# valid_x, valid_y = ChineseDailyNerCorpus.load_data("validate")
ChineseDailyNerCorpus.__zip_file__name
test_x, test_y = ChineseDailyNerCorpus.load_data("test")
# print(f"train data count: {len(train_x)}")
# print(f"validate data count: {len(valid_x)}")
print(f"test data count: {len(test_x)}")
bert_embed = BERTEmbedding(
"models/chinese_L-12_H-768_A-12", task=kashgari.LABELING, sequence_length=100
)
model = BiLSTM_CRF_Model(bert_embed)
# model.fit(
# train_x,
# train_y,
# x_validate=valid_x,
# y_validate=valid_y,
# epochs=1,
# batch_size=512,
# )
model.save("models/ner.h5")
model.evaluate(test_x, test_y)
predictions = model.predict_classes(test_x)
print(predictions)
if "__main__" == __name__:
main()
|
the-stack_0_11469 | from core.himesis import Himesis
import uuid
class HMother2Woman(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule Mother2Woman.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMother2Woman, self).__init__(name='HMother2Woman', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """Mother2Woman"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Mother2Woman')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class Parent() node
self.add_node()
self.vs[3]["mm__"] = """Parent"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class Parent()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class Family() node
self.add_node()
self.vs[5]["mm__"] = """Family"""
self.vs[5]["attr1"] = """1"""
# match_contains node for class Family()
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# apply class Woman() node
self.add_node()
self.vs[7]["mm__"] = """Woman"""
self.vs[7]["attr1"] = """1"""
# apply_contains node for class Woman()
self.add_node()
self.vs[8]["mm__"] = """apply_contains"""
# match association Parent--family-->Family node
self.add_node()
self.vs[9]["attr1"] = """family"""
self.vs[9]["mm__"] = """directLink_S"""
# match association Family--mothers-->Parent node
self.add_node()
self.vs[10]["attr1"] = """mothers"""
self.vs[10]["mm__"] = """directLink_S"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class Parent()
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class Family()
(1,8), # applymodel -> apply_contains
(8,7), # apply_contains -> apply_class Woman()
(3,9), # match_class Parent() -> association family
(9,5), # association family -> match_class Family()
(5,10), # match_class Family() -> association mothers
(10,3), # association mothers -> match_class Parent()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((7,'fullName'),('concat',((3,'firstName'),(5,'lastName')))), ((7,'ApplyAttribute'),('constant','solveRef')), ]
|
the-stack_0_11470 | ##############################################################################
# Copyright (c) 2016 ZTE Corporation
# [email protected]
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# 09/06/2016: change for migration after refactoring
# 16/06/2016: Alignment of test name (JIRA: FUNCTEST-304)
##############################################################################
collections_old2New = {
# 'pod': 'pods',
# 'test_projects': 'projects',
# 'test_testcases': 'testcases',
# 'test_results': 'results'
}
fields_old2New = {
# 'test_results': [({}, {'creation_date': 'start_date'})]
}
docs_old2New = {
# 'test_results': [
# ({'criteria': 'failed'}, {'criteria': 'FAILED'}),
# ({'criteria': 'passed'}, {'criteria': 'PASS'})
# ]
# 'testcases': [
# ({'name': 'vPing'}, {'name': 'vping_ssh'}),
# ({'name': 'Tempest'}, {'name': 'tempest_smoke_serial'}),
# ({'name': 'Rally'}, {'name': 'rally_sanity'}),
# ({'name': 'ODL'}, {'name': 'odl'}),
# ({'name': 'vIMS'}, {'name': 'vims'}),
# ({'name': 'ONOS'}, {'name': 'onos'}),
# ({'name': 'vPing_userdata'}, {'name': 'vping_userdata'}),
# ({'name': 'ovno'}, {'name': 'ocl'})
# ],
# 'results': [
# ({'case_name': 'vPing'}, {'case_name': 'vping_ssh'}),
# ({'case_name': 'Tempest'}, {'case_name': 'tempest_smoke_serial'}),
# ({'case_name': 'Rally'}, {'case_name': 'rally_sanity'}),
# ({'case_name': 'ODL'}, {'case_name': 'odl'}),
# ({'case_name': 'vIMS'}, {'case_name': 'vims'}),
# ({'case_name': 'ONOS'}, {'case_name': 'onos'}),
# ({'case_name': 'vPing_userdata'}, {'case_name': 'vping_userdata'}),
# ({'case_name': 'ovno'}, {'case_name': 'ocl'})
# ]
'results': [
({'trust_indicator': 0},
{'trust_indicator': {'current': 0, 'histories': []}})
]
}
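# Usage sketch (not part of the original config): one way a migration runner might
# consume docs_old2New with a pymongo-style database handle. The handle, the use of
# update_many with $set, and this helper's name are illustrative assumptions; the
# real runner that reads this module lives elsewhere.
def _example_apply_doc_updates(db):
    for collection, changes in docs_old2New.items():
        for query, update in changes:
            # e.g. results: {'trust_indicator': 0} -> {'trust_indicator': {...}}
            db[collection].update_many(query, {"$set": update})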
|
the-stack_0_11472 | """
Robocopy backup script
In "execute_robocopy" replace the string "drive" with the actual drive letter.
Also, replace the path-placeholders with the actual paths.
Author: Fred Snyder
"""
# modules
import string
from glob import glob
from sys import argv
from sys import exit
from subprocess import call
# variable for the script name
script_name = "robocopy_backup.py"
# variable to turn on the robocopy /L (list files only) flag
# argv[0] is always the name of the script; guard against a missing argument
test_run_robocopy = len(argv) > 1 and argv[1].lower() == 'test'
# variable to include/exclude certain folders
exclude_folders = False
# Get the drive letter as user input
drive_letter = input("External backup drive letter: ")
print('Is this correct? Drive: ' + drive_letter)
confirm_drive_letter = input("Y/N ")
if confirm_drive_letter.lower() == 'n':
exit('Wrong drive letter.')
# function that runs the robocopy command
def execute_robocopy():
# robocopy variables
source = "drive:\\path\\to\\folder"
destination = drive_letter + ":\\path\\to\\folder"
logPath = drive_letter + ":\\backup.log"
log = "/LOG:" + logPath
# folders which are excluded from backup
# WARNING: no trailing slashes behind directories
if exclude_folders == True:
excludeDirs = ['drive:\\foldername', 'drive:\\$RECYCLE.BIN', 'drive:\\System Volume Information']
else:
excludeDirs = []
# files which are excluded from backup
excludeFiles = ["pagefile.sys", "thumbs.db", ".DS_Store", ".Spotlight-V100", ".Trashes" ]
# check if certain file exists
if len(glob("drive:\\filename*")) > 0:
excludeTEMP = glob("drive:\\filename*")[0]
excludeFiles.append(excludeTEMP[3:])
# create command list for subprocess.call
command = ["robocopy", source, destination, "/MIR"]
# check if script is running in test mode
if test_run_robocopy == True:
command.extend(["/L"])
command.extend([log])
command.extend(["/XD"] + excludeDirs)
command.extend(["/XF"] + excludeFiles)
# call the subprocess
call(command)
execute_robocopy()
|
the-stack_0_11473 | import math
import torch
import torch.fft
import torch.nn as nn
class AutoCorrelation(nn.Module):
"""AutoCorrelation Mechanism with the following two phases:
(1) period-based dependencies discovery (2) time delay aggregation This block can replace the self-attention family mechanism seamlessly.
"""
def __init__(self,
mask_flag=True,
factor=1,
scale=None,
attention_dropout=0.1,
output_attention=False):
super(AutoCorrelation, self).__init__()
self.factor = factor
self.scale = scale
self.mask_flag = mask_flag
self.output_attention = output_attention
self.dropout = nn.Dropout(attention_dropout)
def time_delay_agg_training(self, values, corr):
"""SpeedUp version of Autocorrelation (a batch-normalization style
design) This is for the training phase."""
head = values.shape[1]
channel = values.shape[2]
length = values.shape[3]
# find top k
top_k = int(self.factor * math.log(length))
mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)
index = torch.topk(torch.mean(mean_value, dim=0), top_k, dim=-1)[1]
weights = torch.stack([mean_value[:, index[i]] for i in range(top_k)],
dim=-1)
# update corr
tmp_corr = torch.softmax(weights, dim=-1)
# aggregation
tmp_values = values
delays_agg = torch.zeros_like(values).float()
for i in range(top_k):
pattern = torch.roll(tmp_values, -int(index[i]), -1)
delays_agg = delays_agg + pattern * (
tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(
1, head, channel, length))
return delays_agg
def time_delay_agg_inference(self, values, corr):
"""SpeedUp version of Autocorrelation (a batch-normalization style
design) This is for the inference phase."""
batch = values.shape[0]
head = values.shape[1]
channel = values.shape[2]
length = values.shape[3]
# index init
init_index = torch.arange(length).unsqueeze(0).unsqueeze(0).unsqueeze(
0).repeat(batch, head, channel, 1).cuda()
# find top k
top_k = int(self.factor * math.log(length))
mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)
weights = torch.topk(mean_value, top_k, dim=-1)[0]
delay = torch.topk(mean_value, top_k, dim=-1)[1]
# update corr
tmp_corr = torch.softmax(weights, dim=-1)
# aggregation
tmp_values = values.repeat(1, 1, 1, 2)
delays_agg = torch.zeros_like(values).float()
for i in range(top_k):
tmp_delay = init_index + delay[:, i].unsqueeze(1).unsqueeze(
1).unsqueeze(1).repeat(1, head, channel, length)
pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)
delays_agg = delays_agg + pattern * (
tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(
1, head, channel, length))
return delays_agg
def time_delay_agg_full(self, values, corr):
"""Standard version of Autocorrelation."""
batch = values.shape[0]
head = values.shape[1]
channel = values.shape[2]
length = values.shape[3]
# index init
init_index = torch.arange(length).unsqueeze(0).unsqueeze(0).unsqueeze(
0).repeat(batch, head, channel, 1).cuda()
# find top k
top_k = int(self.factor * math.log(length))
weights = torch.topk(corr, top_k, dim=-1)[0]
delay = torch.topk(corr, top_k, dim=-1)[1]
# update corr
tmp_corr = torch.softmax(weights, dim=-1)
# aggregation
tmp_values = values.repeat(1, 1, 1, 2)
delays_agg = torch.zeros_like(values).float()
for i in range(top_k):
tmp_delay = init_index + delay[..., i].unsqueeze(-1)
pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)
delays_agg = delays_agg + pattern * (
tmp_corr[..., i].unsqueeze(-1))
return delays_agg
def forward(self, queries, keys, values, attn_mask):
B, L, H, E = queries.shape
_, S, _, D = values.shape
if L > S:
zeros = torch.zeros_like(queries[:, :(L - S), :]).float()
values = torch.cat([values, zeros], dim=1)
keys = torch.cat([keys, zeros], dim=1)
else:
values = values[:, :L, :, :]
keys = keys[:, :L, :, :]
# period-based dependencies
q_fft = torch.fft.rfft(
queries.permute(0, 2, 3, 1).contiguous(), dim=-1)
k_fft = torch.fft.rfft(keys.permute(0, 2, 3, 1).contiguous(), dim=-1)
res = q_fft * torch.conj(k_fft)
corr = torch.fft.irfft(res, dim=-1)
# time delay agg
if self.training:
V = self.time_delay_agg_training(
values.permute(0, 2, 3, 1).contiguous(),
corr).permute(0, 3, 1, 2)
else:
V = self.time_delay_agg_inference(
values.permute(0, 2, 3, 1).contiguous(),
corr).permute(0, 3, 1, 2)
if self.output_attention:
return (V.contiguous(), corr.permute(0, 3, 1, 2))
else:
return (V.contiguous(), None)
class AutoCorrelationLayer(nn.Module):
def __init__(self,
correlation,
d_model,
n_heads,
d_keys=None,
d_values=None):
super(AutoCorrelationLayer, self).__init__()
d_keys = d_keys or (d_model // n_heads)
d_values = d_values or (d_model // n_heads)
self.inner_correlation = correlation
self.query_projection = nn.Linear(d_model, d_keys * n_heads)
self.key_projection = nn.Linear(d_model, d_keys * n_heads)
self.value_projection = nn.Linear(d_model, d_values * n_heads)
self.out_projection = nn.Linear(d_values * n_heads, d_model)
self.n_heads = n_heads
def forward(self, queries, keys, values, attn_mask):
B, L, _ = queries.shape
_, S, _ = keys.shape
H = self.n_heads
queries = self.query_projection(queries).view(B, L, H, -1)
keys = self.key_projection(keys).view(B, S, H, -1)
values = self.value_projection(values).view(B, S, H, -1)
out, attn = self.inner_correlation(queries, keys, values, attn_mask)
out = out.view(B, L, -1)
return self.out_projection(out), attn
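# --- Usage sketch (not part of the original module) ---
# A minimal CPU smoke test of the layer above; all sizes here are illustrative
# assumptions. The module is kept in training mode so the CPU-friendly
# time_delay_agg_training path is used (the inference/full paths above allocate
# their index tensors with .cuda()).
if __name__ == "__main__":
    layer = AutoCorrelationLayer(
        AutoCorrelation(factor=1, attention_dropout=0.1, output_attention=False),
        d_model=16,
        n_heads=4,
    )
    layer.train()
    x = torch.randn(2, 24, 16)  # (batch, sequence length, d_model)
    out, _ = layer(x, x, x, attn_mask=None)
    print(out.shape)  # expected: torch.Size([2, 24, 16])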
|
the-stack_0_11474 | import datetime
from rich.padding import Padding
from rich.panel import Panel
from rich.text import Text
from rich.console import Group
from .config import console_print, console, key_at_index
def toggle_timer(log, labels) -> None:
label_name = key_at_index(labels, log.cur_index)
if log.active_label == False:
log.start()
log.active_label = True
log_msg = f"[b green_yellow]Tracking for [b cyan1]'{label_name}'[/] initiated[/]"
log_msg = Padding(log_msg, (1, 0))
console.print(Padding(
Panel.fit(log_msg, style='green_yellow', title='Tracker Update'), (1, 0)))
else:
log.stop()
log.active_label = False
elapsed_time = log.stop_time - log.start_time
latest_log = [log.start_time, log.stop_time, elapsed_time]
if log.timestamp == False:
latest_log[0] = latest_log[1] = -1
labels[label_name].append(latest_log)
log_msg = f"[b deep_pink2]Tracking for [b cyan1]'{label_name}'[/] terminated[/]"
elapsed_time = str(datetime.timedelta(seconds=round(elapsed_time)))
session_len = Text(justify='center')
session_len.append('Session lasted for ', style='b bright_white')
session_len.append(f'{elapsed_time}', style='b orange1')
message_group = Padding(Group(log_msg, session_len, fit=True), (1, 0))
console.print(Padding(
Panel.fit(message_group, style='deep_pink2', title='Tracker Update'), (1, 0)))
def toggle_label(log, labels, num) -> None:
if log.active_label:
label_name = key_at_index(labels, log.cur_index)
console_print(
f'[b cyan1]{label_name}[/] is currently active.', style='info')
toggle_timer(log, labels)
log.cur_index = (log.cur_index + num) % len(labels)
label_name = key_at_index(labels, log.cur_index)
console_print(
f'Active label changed to [b cyan1]{label_name}[/]', style='info')
|
the-stack_0_11476 | #Semáforo peatonal
#Ernesto Tolocka 2021
#www.profetolocka.com.ar/pytrainer
#Normally stays green until a pedestrian presses teclaVerde, then it changes to yellow and then to red.
#After some time in red, it returns to the initial condition
from PyTrainer import *
from time import sleep
#Start with the green LED on and the others off
ledVerde.on ()
ledRojo.off ()
ledAmarillo.off ()
#On time for each LED
tiempoRojo = 4
tiempoVerde = 2
tiempoAmarillo = 0.5
#Repeat forever
while (True):
if (teclaVerde.value () == False):
ledVerde.off ()
#Make a "beep"
buzzer.on ()
sleep (0.1)
buzzer.off ()
#Turn on the yellow LED
ledAmarillo.on ()
sleep (tiempoAmarillo)
ledAmarillo.off ()
#Turn on the red LED
ledRojo.on ()
sleep (tiempoRojo)
ledRojo.off ()
#Return to normal mode
ledVerde.on ()
|
the-stack_0_11477 | from typing import Dict, List, Optional, Set
from chia.types.coin_record import CoinRecord
from chia.types.condition_with_args import ConditionWithArgs
from chia.util.clvm import int_from_bytes
from chia.util.condition_tools import ConditionOpcode
from chia.util.errors import Err
from chia.util.ints import uint32, uint64
from chia.types.blockchain_format.sized_bytes import bytes32
def blockchain_assert_my_coin_id(condition: ConditionWithArgs, unspent: CoinRecord) -> Optional[Err]:
"""
Checks if CoinID matches the id from the condition
"""
if unspent.coin.name() != condition.vars[0]:
return Err.ASSERT_MY_COIN_ID_FAILED
return None
def blockchain_assert_absolute_block_height_exceeds(
condition: ConditionWithArgs, prev_transaction_block_height: uint32
) -> Optional[Err]:
"""
Checks if the next block index exceeds the block index from the condition
"""
try:
expected_block_index = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
if prev_transaction_block_height < expected_block_index:
return Err.ASSERT_HEIGHT_ABSOLUTE_FAILED
return None
def blockchain_assert_relative_block_height_exceeds(
condition: ConditionWithArgs, unspent: CoinRecord, prev_transaction_block_height: uint32
) -> Optional[Err]:
"""
Checks if the coin age exceeds the age from the condition
"""
try:
expected_block_age = int_from_bytes(condition.vars[0])
expected_block_index = expected_block_age + unspent.confirmed_block_index
except ValueError:
return Err.INVALID_CONDITION
if prev_transaction_block_height < expected_block_index:
return Err.ASSERT_HEIGHT_RELATIVE_FAILED
return None
def blockchain_assert_absolute_time_exceeds(condition: ConditionWithArgs, timestamp):
"""
Checks if current time in millis exceeds the time specified in condition
"""
try:
expected_mili_time = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
current_time = timestamp
if current_time <= expected_mili_time:
return Err.ASSERT_SECONDS_ABSOLUTE_FAILED
return None
def blockchain_assert_relative_time_exceeds(condition: ConditionWithArgs, unspent: CoinRecord, timestamp):
"""
Checks if time since unspent creation in millis exceeds the time specified in condition
"""
try:
expected_mili_time = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
current_time = timestamp
if current_time <= expected_mili_time + unspent.timestamp:
return Err.ASSERT_SECONDS_RELATIVE_FAILED
return None
def blockchain_assert_announcement(condition: ConditionWithArgs, announcements: Set[bytes32]) -> Optional[Err]:
"""
Check if an announcement is included in the list of announcements
"""
announcement_hash = condition.vars[0]
if announcement_hash not in announcements:
return Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
return None
def blockchain_check_conditions_dict(
unspent: CoinRecord,
coin_announcement_names: Set[bytes32],
puzzle_announcement_names: Set[bytes32],
conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]],
prev_transaction_block_height: uint32,
timestamp: uint64,
) -> Optional[Err]:
"""
Check all conditions against current state.
"""
for con_list in conditions_dict.values():
cvp: ConditionWithArgs
for cvp in con_list:
error = None
if cvp.opcode is ConditionOpcode.ASSERT_MY_COIN_ID:
error = blockchain_assert_my_coin_id(cvp, unspent)
elif cvp.opcode is ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT:
error = blockchain_assert_announcement(cvp, coin_announcement_names)
elif cvp.opcode is ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT:
error = blockchain_assert_announcement(cvp, puzzle_announcement_names)
elif cvp.opcode is ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE:
error = blockchain_assert_absolute_block_height_exceeds(cvp, prev_transaction_block_height)
elif cvp.opcode is ConditionOpcode.ASSERT_HEIGHT_RELATIVE:
error = blockchain_assert_relative_block_height_exceeds(cvp, unspent, prev_transaction_block_height)
elif cvp.opcode is ConditionOpcode.ASSERT_SECONDS_ABSOLUTE:
error = blockchain_assert_absolute_time_exceeds(cvp, timestamp)
elif cvp.opcode is ConditionOpcode.ASSERT_SECONDS_RELATIVE:
error = blockchain_assert_relative_time_exceeds(cvp, unspent, timestamp)
if error:
return error
return None
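# --- Usage sketch (not part of the original module) ---
# Illustrative only: exercises the absolute-seconds check in isolation. It assumes
# ConditionWithArgs can be built positionally from an opcode and a list of byte
# arguments (mirroring how cvp.vars[0] is read above); the timestamps are made up.
def _example_seconds_check() -> None:
    cond = ConditionWithArgs(
        ConditionOpcode.ASSERT_SECONDS_ABSOLUTE,
        [(1_650_000_000).to_bytes(5, "big")],  # required minimum chain time
    )
    # Chain time is later than required, so no error is returned.
    assert blockchain_assert_absolute_time_exceeds(cond, uint64(1_700_000_000)) is None
    # Chain time is too early, so an Err is returned.
    assert blockchain_assert_absolute_time_exceeds(cond, uint64(1_600_000_000)) is not None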
|
the-stack_0_11480 | """Elasticsearch document model for django-elasticsearch-dsl
"""
from elasticsearch_dsl import analyzer
from django_elasticsearch_dsl import Document, fields, Keyword
from django_elasticsearch_dsl.registries import registry
from .models import ChildPage
booksearch_analyzer = analyzer(
"booksearch_analyzer",
tokenizer="standard",
filter=['lowercase', 'asciifolding', 'porter_stem'],
char_filter=['html_strip']
)
@registry.register_document
class ChildPageDocument(Document):
content = fields.TextField(attr='html_content',
analyzer=booksearch_analyzer)
title = fields.TextField(fields={'keyword': Keyword()})
author = fields.KeywordField(attr='author')
class Index:
name = 'booksearch'
settings = {'number_of_shards': 1,
'number_of_replicas': 1}
class Django:
model = ChildPage
fields = [
'page_number',
'parent_doc_id'
]
# Ignore auto updating of Elasticsearch when a model is saved
# or deleted:
# ignore_signals = True
# Don't perform an index refresh after every update (overrides global setting):
# auto_refresh = False
# Paginate the django queryset used to populate the index with the specified size
# (by default it uses the database driver's default setting)
# queryset_pagination = 5000
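# Usage sketch (not part of the original document definition): assumes the index has
# already been built (e.g. via `manage.py search_index --rebuild`) and relies on the
# standard django_elasticsearch_dsl Document.search() helper; this function's name and
# the returned tuple layout are illustrative assumptions.
def example_content_search(term):
    search = ChildPageDocument.search().query("match", content=term)
    return [(hit.title, hit.page_number) for hit in search]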
|
the-stack_0_11483 | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
import merlin.models.tf as ml
from merlin.models.data.synthetic import SyntheticData
targets = {"target": tf.cast(tf.random.uniform((100,), maxval=2, dtype=tf.int32), tf.float32)}
def test_binary_classification_head(testing_data: SyntheticData):
from merlin.models.tf.utils import testing_utils
body = ml.InputBlock(testing_data.schema).connect(ml.MLPBlock([64]))
model = body.connect(ml.BinaryClassificationTask("target"))
testing_utils.assert_loss_and_metrics_are_valid(model, (testing_data.tf_tensor_dict, targets))
def test_serialization_binary_classification_head(testing_data: SyntheticData):
from merlin.models.tf.utils import testing_utils
body = ml.InputBlock(testing_data.schema).connect(ml.MLPBlock([64]))
model = body.connect(ml.BinaryClassificationTask("target"))
copy_model = testing_utils.assert_serialization(model)
testing_utils.assert_loss_and_metrics_are_valid(
copy_model, (testing_data.tf_tensor_dict, targets)
)
|
the-stack_0_11485 | """
Tests for relationship detection.
"""
from itertools import chain
from typing import List, Optional, Tuple
import geopandas as gpd
import numpy as np
import pandas as pd
import pytest
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from shapely.geometry import MultiLineString
from shapely.prepared import PreparedGeometry
from fractopo.analysis.relationships import (
determine_crosscut_abutting_relationships,
determine_intersects,
determine_nodes_intersecting_sets,
plot_crosscut_abutting_relationships_plot,
)
from fractopo.general import prepare_geometry_traces
from tests import Helpers
@pytest.mark.parametrize(
"trace_series_two_sets, set_array, set_names_two_sets,"
"node_series_xy, buffer_value, assumed_intersections",
Helpers.test_determine_nodes_intersecting_sets_params,
)
def test_determine_nodes_intersecting_sets(
trace_series_two_sets: Tuple[gpd.GeoSeries, gpd.GeoSeries],
set_array: np.ndarray,
set_names_two_sets: Tuple[str, str],
node_series_xy: gpd.GeoSeries,
buffer_value: float,
assumed_intersections: Optional[List[bool]],
):
"""
Test determine_nodes_intersecting_sets.
"""
intersects_both_sets = determine_nodes_intersecting_sets(
trace_series_two_sets,
set_names_two_sets,
node_series_xy,
buffer_value,
)
assert isinstance(intersects_both_sets, list)
if assumed_intersections is not None:
assert len(assumed_intersections) == len(intersects_both_sets)
assert sum(assumed_intersections) == sum(intersects_both_sets)
assert assumed_intersections == intersects_both_sets
@pytest.mark.parametrize(
"trace_series",
Helpers.test_prepare_geometry_traces_params,
)
def test_prepare_geometry_traces(trace_series: gpd.GeoSeries):
"""
Test prepare_geometry_traces.
"""
prepared_traces = prepare_geometry_traces(trace_series)
assert isinstance(prepared_traces, PreparedGeometry)
assert isinstance(prepared_traces.context, MultiLineString)
assert all(
[prepared_traces.intersects(trace) for trace in trace_series.geometry.values]
)
@pytest.mark.parametrize(
"trace_series_two_sets, set_names_two_sets,"
"node_series_xy_intersects, node_types_xy_intersects, buffer_value",
Helpers.test_determine_intersects_params,
)
def test_determine_intersects(
trace_series_two_sets: Tuple[gpd.GeoSeries, gpd.GeoSeries],
set_names_two_sets: Tuple[str, str],
node_series_xy_intersects: gpd.GeoSeries,
node_types_xy_intersects: np.ndarray,
buffer_value: float,
):
"""
Test determine_intersects.
"""
assert isinstance(trace_series_two_sets, tuple)
assert isinstance(set_names_two_sets, tuple)
assert isinstance(node_series_xy_intersects, gpd.GeoSeries)
assert isinstance(node_types_xy_intersects, np.ndarray)
assert isinstance(buffer_value, float)
intersectframe = determine_intersects(
trace_series_two_sets=trace_series_two_sets,
set_names_two_sets=set_names_two_sets,
node_series_xy_intersects=node_series_xy_intersects,
node_types_xy_intersects=node_types_xy_intersects,
buffer_value=buffer_value,
)
assert isinstance(intersectframe, pd.DataFrame)
expected_cols = ["node", "nodeclass", "sets", "error"]
assert all(col in intersectframe.columns for col in expected_cols)
@pytest.mark.parametrize(
"trace_series, node_series, node_types, set_array, set_names, buffer_value, label",
Helpers.test_determine_crosscut_abutting_relationships_params,
)
def test_determine_crosscut_abutting_relationships(
trace_series: gpd.GeoSeries,
node_series: gpd.GeoSeries,
node_types: np.ndarray,
set_array: np.ndarray,
set_names: Tuple[str, ...],
buffer_value: float,
label: str,
):
"""
Test determine_crosscut_abutting_relationships.
"""
assert isinstance(trace_series, gpd.GeoSeries)
assert isinstance(node_series, gpd.GeoSeries)
assert isinstance(node_types, np.ndarray)
assert isinstance(set_array, np.ndarray)
assert isinstance(set_names, tuple)
assert isinstance(buffer_value, float)
assert isinstance(label, str)
relations_df = determine_crosscut_abutting_relationships(
trace_series=trace_series,
node_series=node_series,
node_types=node_types,
set_array=set_array,
set_names=set_names,
buffer_value=buffer_value,
label=label,
)
assert isinstance(relations_df, pd.DataFrame)
expected_cols = ["name", "sets", "x", "y", "y-reverse", "error-count"]
assert all(col in relations_df.columns for col in expected_cols)
def test_plot_crosscut_abutting_relationships_plot():
"""
Test plot_crosscut_abutting_relationships_plot.
"""
params = Helpers.test_determine_crosscut_abutting_relationships_params[0]
relations_df = determine_crosscut_abutting_relationships(*params)
set_array = params[3]
set_names = params[4]
assert isinstance(set_array, np.ndarray)
assert isinstance(set_names, tuple)
figs, fig_axes = plot_crosscut_abutting_relationships_plot(
relations_df=relations_df, set_array=set_array, set_names=set_names
)
assert all(isinstance(fig, Figure) for fig in figs)
assert all(isinstance(ax, Axes) for ax in chain(*fig_axes))
plt.close()
|
the-stack_0_11486 | from __future__ import unicode_literals # at top of module
import datetime
import json
import arrow
import pytest
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from marshmallow import ValidationError
from freezegun import freeze_time
from mock import patch
from lemur.certificates.service import create_csr
from lemur.certificates.views import * # noqa
from lemur.common import utils
from lemur.domains.models import Domain
from lemur.tests.vectors import VALID_ADMIN_API_TOKEN, VALID_ADMIN_HEADER_TOKEN, VALID_USER_HEADER_TOKEN, CSR_STR, \
INTERMEDIATE_CERT_STR, SAN_CERT_STR, SAN_CERT_KEY
def test_get_or_increase_name(session, certificate):
from lemur.certificates.models import get_or_increase_name
from lemur.tests.factories import CertificateFactory
serial = 'AFF2DB4F8D2D4D8E80FA382AE27C2333'
assert get_or_increase_name(certificate.name, certificate.serial) == '{0}-{1}'.format(certificate.name, serial)
certificate.name = 'test-cert-11111111'
assert get_or_increase_name(certificate.name, certificate.serial) == 'test-cert-11111111-' + serial
certificate.name = 'test-cert-11111111-1'
assert get_or_increase_name('test-cert-11111111-1', certificate.serial) == 'test-cert-11111111-1-' + serial
cert2 = CertificateFactory(name='certificate1-' + serial)
session.commit()
assert get_or_increase_name('certificate1', int(serial, 16)) == 'certificate1-{}-1'.format(serial)
def test_get_certificate_primitives(certificate):
from lemur.certificates.service import get_certificate_primitives
names = [x509.DNSName(x.name) for x in certificate.domains]
with freeze_time(datetime.date(year=2016, month=10, day=30)):
primitives = get_certificate_primitives(certificate)
assert len(primitives) == 25
def test_certificate_output_schema(session, certificate, issuer_plugin):
from lemur.certificates.schemas import CertificateOutputSchema
# Clear the cached attribute first
if 'parsed_cert' in certificate.__dict__:
del certificate.__dict__['parsed_cert']
# Make sure serialization parses the cert only once (uses cached 'parsed_cert' attribute)
with patch('lemur.common.utils.parse_certificate', side_effect=utils.parse_certificate) as wrapper:
data, errors = CertificateOutputSchema().dump(certificate)
assert data['issuer'] == 'LemurTrustUnittestsClass1CA2018'
assert wrapper.call_count == 1
def test_certificate_edit_schema(session):
from lemur.certificates.schemas import CertificateEditInputSchema
input_data = {'owner': '[email protected]'}
data, errors = CertificateEditInputSchema().load(input_data)
assert len(data['notifications']) == 3
def test_authority_key_identifier_schema():
from lemur.schemas import AuthorityKeyIdentifierSchema
input_data = {
'useKeyIdentifier': True,
'useAuthorityCert': True
}
data, errors = AuthorityKeyIdentifierSchema().load(input_data)
assert sorted(data) == sorted({
'use_key_identifier': True,
'use_authority_cert': True
})
assert not errors
data, errors = AuthorityKeyIdentifierSchema().dumps(data)
assert sorted(data) == sorted(json.dumps(input_data))
assert not errors
def test_certificate_info_access_schema():
from lemur.schemas import CertificateInfoAccessSchema
input_data = {'includeAIA': True}
data, errors = CertificateInfoAccessSchema().load(input_data)
assert not errors
assert data == {'include_aia': True}
data, errors = CertificateInfoAccessSchema().dump(data)
assert not errors
assert data == input_data
def test_subject_key_identifier_schema():
from lemur.schemas import SubjectKeyIdentifierSchema
input_data = {'includeSKI': True}
data, errors = SubjectKeyIdentifierSchema().load(input_data)
assert not errors
assert data == {'include_ski': True}
data, errors = SubjectKeyIdentifierSchema().dump(data)
assert not errors
assert data == input_data
def test_extension_schema(client):
from lemur.certificates.schemas import ExtensionSchema
input_data = {
'keyUsage': {
'useKeyEncipherment': True,
'useDigitalSignature': True
},
'extendedKeyUsage': {
'useServerAuthentication': True
},
'subjectKeyIdentifier': {
'includeSKI': True
}
}
data, errors = ExtensionSchema().load(input_data)
assert not errors
data, errors = ExtensionSchema().dump(data)
assert not errors
def test_certificate_input_schema(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': arrow.get(2018, 11, 9).isoformat(),
'validityEnd': arrow.get(2019, 11, 9).isoformat(),
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
assert data['authority'].id == authority.id
# make sure the defaults got set
assert data['common_name'] == 'test.example.com'
assert data['country'] == 'US'
assert data['location'] == 'Los Gatos'
assert len(data.keys()) == 19
def test_certificate_input_with_extensions(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'extensions': {
'keyUsage': {
'digital_signature': True
},
'extendedKeyUsage': {
'useClientAuthentication': True,
'useServerAuthentication': True
},
'subjectKeyIdentifier': {
'includeSKI': True
},
'subAltNames': {
'names': [
{'nameType': 'DNSName', 'value': 'test.example.com'}
]
}
},
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_certificate_out_of_range_date(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityYears': 100,
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert errors
input_data['validityStart'] = '2017-04-30T00:12:34.513631'
data, errors = CertificateInputSchema().load(input_data)
assert errors
input_data['validityEnd'] = '2018-04-30T00:12:34.513631'
data, errors = CertificateInputSchema().load(input_data)
assert errors
def test_certificate_valid_years(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityYears': 1,
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_certificate_valid_dates(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01',
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_certificate_cn_admin(client, authority, logged_in_admin):
"""Admin is exempt from CN/SAN domain restrictions."""
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': '*.admin-overrides-whitelist.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01',
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_certificate_allowed_names(client, authority, session, logged_in_user):
"""Test for allowed CN and SAN values."""
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'Names with spaces are not checked',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01',
'extensions': {
'subAltNames': {
'names': [
{'nameType': 'DNSName', 'value': 'allowed.example.com'},
{'nameType': 'IPAddress', 'value': '127.0.0.1'},
]
}
},
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_certificate_inactive_authority(client, authority, session, logged_in_user):
"""Cannot issue certificates with an inactive authority."""
from lemur.certificates.schemas import CertificateInputSchema
authority.active = False
session.add(authority)
input_data = {
'commonName': 'foo.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01',
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert errors['authority'][0] == "The authority is inactive."
def test_certificate_disallowed_names(client, authority, session, logged_in_user):
"""The CN and SAN are disallowed by LEMUR_WHITELISTED_DOMAINS."""
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': '*.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01',
'extensions': {
'subAltNames': {
'names': [
{'nameType': 'DNSName', 'value': 'allowed.example.com'},
{'nameType': 'DNSName', 'value': 'evilhacker.org'},
]
}
},
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert errors['common_name'][0].startswith("Domain *.example.com does not match whitelisted domain patterns")
assert (errors['extensions']['sub_alt_names']['names'][0]
.startswith("Domain evilhacker.org does not match whitelisted domain patterns"))
def test_certificate_sensitive_name(client, authority, session, logged_in_user):
"""The CN is disallowed by 'sensitive' flag on Domain model."""
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'sensitive.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01',
'dnsProvider': None,
}
session.add(Domain(name='sensitive.example.com', sensitive=True))
data, errors = CertificateInputSchema().load(input_data)
assert errors['common_name'][0].startswith("Domain sensitive.example.com has been marked as sensitive")
def test_create_basic_csr(client):
csr_config = dict(
common_name='example.com',
organization='Example, Inc.',
organizational_unit='Operations',
country='US',
state='CA',
location='A place',
owner='[email protected]',
key_type='RSA2048',
extensions=dict(names=dict(sub_alt_names=x509.SubjectAlternativeName([x509.DNSName('test.example.com'), x509.DNSName('test2.example.com')])))
)
csr, pem = create_csr(**csr_config)
csr = x509.load_pem_x509_csr(csr.encode('utf-8'), default_backend())
for name in csr.subject:
assert name.value in csr_config.values()
def test_csr_empty_san(client):
"""Test that an empty "names" list does not produce a CSR with empty SubjectAltNames extension.
The Lemur UI always submits this extension even when no alt names are defined.
"""
csr_text, pkey = create_csr(
common_name='daniel-san.example.com',
owner='[email protected]',
key_type='RSA2048',
extensions={'sub_alt_names': {'names': x509.SubjectAlternativeName([])}}
)
csr = x509.load_pem_x509_csr(csr_text.encode('utf-8'), default_backend())
with pytest.raises(x509.ExtensionNotFound):
csr.extensions.get_extension_for_class(x509.SubjectAlternativeName)
def test_csr_disallowed_cn(client, logged_in_user):
"""Domain name CN is disallowed via LEMUR_WHITELISTED_DOMAINS."""
from lemur.common import validators
request, pkey = create_csr(
common_name='evilhacker.org',
owner='[email protected]',
key_type='RSA2048',
)
with pytest.raises(ValidationError) as err:
validators.csr(request)
assert str(err.value).startswith('Domain evilhacker.org does not match whitelisted domain patterns')
def test_csr_disallowed_san(client, logged_in_user):
"""SAN name is disallowed by LEMUR_WHITELISTED_DOMAINS."""
from lemur.common import validators
request, pkey = create_csr(
common_name="CN with spaces isn't a domain and is thus allowed",
owner='[email protected]',
key_type='RSA2048',
extensions={'sub_alt_names': {'names': x509.SubjectAlternativeName([x509.DNSName('evilhacker.org')])}}
)
with pytest.raises(ValidationError) as err:
validators.csr(request)
assert str(err.value).startswith('Domain evilhacker.org does not match whitelisted domain patterns')
def test_get_name_from_arn(client):
from lemur.certificates.service import get_name_from_arn
arn = 'arn:aws:iam::11111111:server-certificate/mycertificate'
assert get_name_from_arn(arn) == 'mycertificate'
def test_get_account_number(client):
from lemur.certificates.service import get_account_number
arn = 'arn:aws:iam::11111111:server-certificate/mycertificate'
assert get_account_number(arn) == '11111111'
def test_mint_certificate(issuer_plugin, authority):
from lemur.certificates.service import mint
cert_body, private_key, chain, external_id, csr = mint(authority=authority, csr=CSR_STR)
assert cert_body == SAN_CERT_STR
def test_create_certificate(issuer_plugin, authority, user):
from lemur.certificates.service import create
cert = create(authority=authority, csr=CSR_STR, owner='[email protected]', creator=user['user'])
assert str(cert.not_after) == '2047-12-31T22:00:00+00:00'
assert str(cert.not_before) == '2017-12-31T22:00:00+00:00'
assert cert.issuer == 'LemurTrustUnittestsClass1CA2018'
assert cert.name == 'SAN-san.example.org-LemurTrustUnittestsClass1CA2018-20171231-20471231-AFF2DB4F8D2D4D8E80FA382AE27C2333'
cert = create(authority=authority, csr=CSR_STR, owner='[email protected]', name='ACustomName1', creator=user['user'])
assert cert.name == 'ACustomName1'
def test_reissue_certificate(issuer_plugin, authority, certificate):
from lemur.certificates.service import reissue_certificate
new_cert = reissue_certificate(certificate)
assert new_cert
def test_create_csr():
csr, private_key = create_csr(owner='[email protected]', common_name='ACommonName', organization='test', organizational_unit='Meters', country='US',
state='CA', location='Here', key_type='RSA2048')
assert csr
assert private_key
extensions = {'sub_alt_names': {'names': x509.SubjectAlternativeName([x509.DNSName('AnotherCommonName')])}}
csr, private_key = create_csr(owner='[email protected]', common_name='ACommonName', organization='test', organizational_unit='Meters', country='US',
state='CA', location='Here', extensions=extensions, key_type='RSA2048')
assert csr
assert private_key
def test_import(user):
from lemur.certificates.service import import_certificate
cert = import_certificate(body=SAN_CERT_STR, chain=INTERMEDIATE_CERT_STR, private_key=SAN_CERT_KEY, creator=user['user'])
assert str(cert.not_after) == '2047-12-31T22:00:00+00:00'
assert str(cert.not_before) == '2017-12-31T22:00:00+00:00'
assert cert.issuer == 'LemurTrustUnittestsClass1CA2018'
assert cert.name == 'SAN-san.example.org-LemurTrustUnittestsClass1CA2018-20171231-20471231-AFF2DB4F8D2D4D8E80FA382AE27C2333-2'
cert = import_certificate(body=SAN_CERT_STR, chain=INTERMEDIATE_CERT_STR, private_key=SAN_CERT_KEY, owner='[email protected]', name='ACustomName2', creator=user['user'])
assert cert.name == 'ACustomName2'
@pytest.mark.skip
def test_upload(user):
from lemur.certificates.service import upload
cert = upload(body=SAN_CERT_STR, chain=INTERMEDIATE_CERT_STR, private_key=SAN_CERT_KEY, owner='[email protected]', creator=user['user'])
assert str(cert.not_after) == '2040-01-01T20:30:52+00:00'
assert str(cert.not_before) == '2015-06-26T20:30:52+00:00'
assert cert.issuer == 'Example'
assert cert.name == 'long.lived.com-Example-20150626-20400101-3'
cert = upload(body=SAN_CERT_STR, chain=INTERMEDIATE_CERT_STR, private_key=SAN_CERT_KEY, owner='[email protected]', name='ACustomName', creator=user['user'])
assert 'ACustomName' in cert.name
# verify upload with a private key as a str
def test_upload_private_key_str(user):
from lemur.certificates.service import upload
cert = upload(body=SAN_CERT_STR, chain=INTERMEDIATE_CERT_STR, private_key=SAN_CERT_KEY, owner='[email protected]', name='ACustomName', creator=user['user'])
assert cert
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
('', 401)
])
def test_certificate_get_private_key(client, token, status):
assert client.get(api.url_for(Certificates, certificate_id=1), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
('', 401)
])
def test_certificate_get(client, token, status):
assert client.get(api.url_for(Certificates, certificate_id=1), headers=token).status_code == status
def test_certificate_get_body(client):
response_body = client.get(api.url_for(Certificates, certificate_id=1), headers=VALID_USER_HEADER_TOKEN).json
assert response_body['serial'] == '211983098819107449768450703123665283596'
assert response_body['serialHex'] == '9F7A75B39DAE4C3F9524C68B06DA6A0C'
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_post(client, token, status):
assert client.post(api.url_for(Certificates, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
('', 401)
])
def test_certificate_put(client, token, status):
assert client.put(api.url_for(Certificates, certificate_id=1), data={}, headers=token).status_code == status
def test_certificate_put_with_data(client, certificate, issuer_plugin):
resp = client.put(api.url_for(Certificates, certificate_id=certificate.id), data=json.dumps({'owner': '[email protected]', 'description': 'test', 'notify': True}), headers=VALID_ADMIN_HEADER_TOKEN)
assert resp.status_code == 200
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_delete(client, token, status):
assert client.delete(api.url_for(Certificates, certificate_id=1), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_patch(client, token, status):
assert client.patch(api.url_for(Certificates, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
('', 401)
])
def test_certificates_get(client, token, status):
assert client.get(api.url_for(CertificatesList), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
('', 401)
])
def test_certificates_post(client, token, status):
assert client.post(api.url_for(CertificatesList), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_put(client, token, status):
assert client.put(api.url_for(CertificatesList), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_delete(client, token, status):
assert client.delete(api.url_for(CertificatesList), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_patch(client, token, status):
assert client.patch(api.url_for(CertificatesList), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_credentials_post(client, token, status):
assert client.post(api.url_for(CertificatePrivateKey, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_credentials_put(client, token, status):
assert client.put(api.url_for(CertificatePrivateKey, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_credentials_delete(client, token, status):
assert client.delete(api.url_for(CertificatePrivateKey, certificate_id=1), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_credentials_patch(client, token, status):
assert client.patch(api.url_for(CertificatePrivateKey, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_upload_get(client, token, status):
assert client.get(api.url_for(CertificatesUpload), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
('', 401)
])
def test_certificates_upload_post(client, token, status):
assert client.post(api.url_for(CertificatesUpload), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_upload_put(client, token, status):
assert client.put(api.url_for(CertificatesUpload), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_upload_delete(client, token, status):
assert client.delete(api.url_for(CertificatesUpload), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_upload_patch(client, token, status):
assert client.patch(api.url_for(CertificatesUpload), data={}, headers=token).status_code == status
def test_sensitive_sort(client):
resp = client.get(api.url_for(CertificatesList) + '?sortBy=private_key&sortDir=asc', headers=VALID_ADMIN_HEADER_TOKEN)
assert "'private_key' is not sortable or filterable" in resp.json['message']
def test_boolean_filter(client):
resp = client.get(api.url_for(CertificatesList) + '?filter=notify;true', headers=VALID_ADMIN_HEADER_TOKEN)
assert resp.status_code == 200
# Also don't crash with invalid input (we currently treat that as false)
resp = client.get(api.url_for(CertificatesList) + '?filter=notify;whatisthis', headers=VALID_ADMIN_HEADER_TOKEN)
assert resp.status_code == 200
|
the-stack_0_11487 | from setuptools import setup, find_packages
with open('README.md', 'r', encoding='utf-8') as f:
long_description = f.read()
setup(
name="getpalette",
version="1.0.7",
description="Get color palette from images",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ssiyad/getpalette",
author="Sabu Siyad",
author_email="[email protected]",
license="MIT",
packages=find_packages(),
include_package_data=True,
install_requires=[
'Pillow==8.2.0',
'matplotlib==3.0.3',
'scipy==1.3.1',
'pandas==0.24.2'
],
entry_points={
"console_scripts": [
"getpalette = getpalette.__main__:main",
]
}
)
|
the-stack_0_11488 | from .illustration import plot_data_w_fluid, plot_mixing
from .activation import activation_fn_dispatcher
import json
import numpy as np
import sys
def do_measurements(ex, _config, _run, sim_info, pXs, pVs, acc, ms, fXs, fVs, plotting_this_iteration, save_all_data_this_iteration):
if acc is not None:
_run.log_scalar("Ratio activated", sum(acc)/len(acc), sim_info['time_step_index'])
_run.log_scalar("Mass ratio activated", sum(ms*acc)/sum(ms), sim_info['time_step_index'])
_run.log_scalar("Mass activated", sum(ms*acc), sim_info['time_step_index'])
if pVs is not None:
## Avg velocity
pV_avg = np.mean(pVs, axis=0)
_run.log_scalar("pv_avg_x", pV_avg[0], sim_info['time_step_index'])
_run.log_scalar("pv_avg_y", pV_avg[1], sim_info['time_step_index'])
if fVs is not None:
## Max Velocity
ind_fVs_max = np.argmax(np.linalg.norm(fVs,axis=1))
_run.log_scalar("vmax_x", fVs[ind_fVs_max,0], sim_info['time_step_index'])
_run.log_scalar("vmax_y", fVs[ind_fVs_max,1], sim_info['time_step_index'])
_run.log_scalar("x_vmax", fXs[ind_fVs_max,0], sim_info['time_step_index'])
_run.log_scalar("y_vmax", fXs[ind_fVs_max,1], sim_info['time_step_index'])
_run.log_scalar("x_vmax_c", fXs[ind_fVs_max,0]-(sim_info['x_max']+sim_info['x_min'])/2, sim_info['time_step_index'])
_run.log_scalar("y_vmax_c", fXs[ind_fVs_max,1]-(sim_info['x_max']+sim_info['x_min'])/2, sim_info['time_step_index'])
## Avg velocity
fV_avg = np.mean(fVs, axis=0)
_run.log_scalar("fv_avg_x", fV_avg[0], sim_info['time_step_index'])
_run.log_scalar("fv_avg_y", fV_avg[1], sim_info['time_step_index'])
## Avg velocity in activated area
w = activation_fn_dispatcher(_config, sim_info['t'])(fXs)
fV_acc_avg = np.average(fVs ,weights=w, axis=0)
_run.log_scalar("fv_acc_avg_x", fV_acc_avg[0], sim_info['time_step_index'])
_run.log_scalar("fv_acc_avg_y", fV_acc_avg[1], sim_info['time_step_index'])
if save_all_data_this_iteration:
d= {
'pXs' : pXs.tolist() ,
'pVs' : pVs.tolist() ,
'acc' : acc.tolist() ,
'ms' : ms.tolist() ,
'fXs' : fXs.tolist() ,
'fVs' : fVs.tolist() ,
'sim_info' : sim_info
}
dump_file_loc = f"{sim_info['data_dir']}/data_dump-{sim_info['time_step_index']}.json"
with open(dump_file_loc, 'w') as f:
json.dump(d,f, indent=4)
ex.add_artifact(dump_file_loc)
if plotting_this_iteration:
if _config.get('mixing_experiment',False):
plot_mixing(pXs,
sim_info,
image_folder=sim_info['data_dir'],
title=f"t={sim_info['t']:.3f}",
L=_config['L'],
fix_frame=True,
SAVEFIG=_config['SAVEFIG'],
ex=ex)
else:
plot_data_w_fluid(pXs, pVs, fXs, fVs,
sim_info,
image_folder=sim_info['data_dir'],
title=f"t={sim_info['t']:.3f}",
L=_config['L'],
fix_frame=True,
SAVEFIG=_config['SAVEFIG'],
ex=ex,
plot_particles=True,
plot_fluids=True,
side_by_side=True,
fluid_plot_type = 'quiver')
def do_one_timestep_correlation_measurement(ex, _config, _run, sim_info, pXs, pXs_old):
assert(pXs.shape==pXs_old.shape)
p1 = pXs.flatten()
p2 = pXs_old.flatten()
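    # corr below is the cosine similarity between the flattened particle-position
    # vectors of the current and the previous time step.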
corr = np.dot(p1,p2)/(np.linalg.norm(p1)*np.linalg.norm(p2))
_run.log_scalar("One timestep correlator", corr, sim_info['time_step_index'])
return corr
|
the-stack_0_11489 | import math
import torch
from torch.optim.optimizer import Optimizer
from torch.nn.utils import parameters_to_vector, vector_to_parameters
import torch.nn as nn
import torch.nn.functional as F
################################
## PyTorch Optimizer for VOGN ##
################################
required = object()
def update_input(self, input, output):
self.input = input[0].data
self.output = output
class VOGN(Optimizer):
"""Implements the VOGN algorithm. It uses the Generalized Gauss Newton (GGN)
approximation to the Hessian and a mean-field approximation. Note that this
optimizer does **not** support multiple model parameter groups. All model
parameters must use the same optimizer parameters.
model (nn.Module): network model
train_set_size (int): number of data points in the full training set
        (objective assumed to be of the form (1/M)*sum(-log p))
    lr (float, optional): learning rate (default: 1e-3)
    beta1 (float, optional): coefficient used for computing the running
        average of the gradient (default: 0.9)
    beta2 (float, optional): coefficient used for computing the running
        average of the squared gradient / GGN (default: 0.999)
    prior_prec (float, optional): prior precision on parameters
        (default: 1.0)
    prec_init (float, optional): initial precision for variational dist. q
        (default: 1.0)
    num_samples (int, optional): number of MC samples
        (default: 1)
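    Example (a minimal usage sketch, assuming a `model` nn.Module, one data
    batch (x, y) and torch.nn.functional imported as F; the closure must
    return the loss and the raw predictions without calling backward()):
        optimizer = VOGN(model, train_set_size=60000, lr=1e-3)
        def closure():
            logits = model(x)
            loss = F.cross_entropy(logits, y)
            return loss, logits
        loss, preds = optimizer.step(closure)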
"""
def __init__(self, model, train_set_size, lr=1e-3, beta1=0.9, beta2=0.999, prior_prec=1.0, prec_init=1.0,
num_samples=1):
if lr <= 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if prior_prec < 0.0:
raise ValueError("Invalid prior precision value: {}".format(prior_prec))
if prec_init < 0.0:
raise ValueError("Invalid initial s value: {}".format(prec_init))
if not 0.0 <= beta1 < 1.0:
raise ValueError("Invalid beta parameter: {}".format(beta1))
if not 0.0 <= beta2 < 1.0:
raise ValueError("Invalid beta parameter: {}".format(beta2))
if num_samples < 1:
raise ValueError("Invalid num_samples parameter: {}".format(num_samples))
if train_set_size < 1:
raise ValueError("Invalid number of training data points: {}".format(train_set_size))
defaults = dict(lr=lr, beta1=beta1, beta2=beta2, prior_prec=prior_prec, prec_init=prec_init,
num_samples=num_samples,
train_set_size=train_set_size)
self.train_modules = []
self.set_train_modules(model)
super(type(self), self).__init__(model.parameters(), defaults)
for module in self.train_modules:
module.register_forward_hook(update_input)
p = parameters_to_vector(self.param_groups[0]['params'])
# mean parameter of variational distribution.
self.state['mu'] = p.clone().detach()
self.state['mu_grad_avg'] = torch.zeros_like(p).detach()
# covariance parameter of variational distribution -- saved as the diagonal precision matrix.
self.state['Precision'] = torch.ones_like(p).mul_(defaults['prec_init']).detach()
def set_train_modules(self, module):
if len(list(module.children())) == 0:
if len(list(module.parameters())) != 0:
self.train_modules.append(module)
else:
for child in list(module.children()):
self.set_train_modules(child)
def step(self, closure):
"""Performs a single optimization step.
Arguments:
closure (callable): A closure that reevaluates the model
and returns the loss without doing the backward pass
"""
if closure is None:
raise RuntimeError(
'For now, VOGN only supports that the model/loss can be reevaluated inside the step function')
defaults = self.defaults
# We only support a single parameter group.
parameters = self.param_groups[0]['params']
Precision = self.state['Precision']
mu = self.state['mu']
GGN_hat = None
mu_grad_hat = None
loss_list = []
pred_list = []
for _ in range(defaults['num_samples']):
# Sample a parameter vector:
raw_noise = torch.normal(mean=torch.zeros_like(mu), std=1.0).detach()
p = torch.addcdiv(mu, 1., raw_noise, torch.sqrt(Precision))
vector_to_parameters(p, parameters)
# Store the loss
loss, preds = closure()
loss_list.append(loss.detach())
pred_list.append(preds.detach())
linear_combinations = []
# Store the pre-activations
for module in self.train_modules:
linear_combinations.append(module.output)
# Do the Backward pass for gradients and square gradients
linear_grad = torch.autograd.grad(loss, linear_combinations)
ggn = []
grad = []
N = 1
for i, module in enumerate(self.train_modules):
G = linear_grad[i].detach()
A = module.input.detach()
M = A.shape[0]
A = torch.cat([A] * N)
G *= M
G2 = torch.mul(G, G)
if isinstance(module, nn.BatchNorm1d):
A2 = torch.mul(A, A)
grad.append(torch.einsum('ij->j', torch.mul(G, A)))
ggn.append(torch.einsum('ij->j', torch.mul(G2, A2)))
if module.bias is not None:
grad.append(torch.einsum('ij->j', G))
ggn.append(torch.einsum('ij->j', G2))
if isinstance(module, nn.BatchNorm2d):
A2 = torch.mul(A, A)
grad.append(torch.einsum('ijkl->j', torch.mul(G, A)))
ggn.append(torch.einsum('ijkl->j', torch.mul(G2, A2)))
if module.bias is not None:
grad.append(torch.einsum('ijkl->j', G))
ggn.append(torch.einsum('ijkl->j', G2))
if isinstance(module, nn.Linear):
A2 = torch.mul(A, A)
grad.append(torch.einsum('ij,ik->jk', G, A))
ggn.append(torch.einsum('ij, ik->jk', G2, A2))
if module.bias is not None:
# A = torch.ones((M * N, 1), device=A.device)
grad.append(torch.einsum('ij->j', G))
ggn.append(torch.einsum('ij->j', G2))
if isinstance(module, nn.Conv2d):
A = F.unfold(A, kernel_size=module.kernel_size, dilation=module.dilation, padding=module.padding,
stride=module.stride)
A2 = torch.mul(A, A)
_, k, hw = A.shape
_, c, _, _ = G.shape
'''G = G.view(M, c, -1)
mean = torch.zeros((c, k), device=A.device)
mean.addbmm_(G, A)'''
_, c, _, _ = G.shape
G = G.view(M * N, c, -1)
G2 = G2.view(M * N, c, -1)
grad.append(torch.einsum('ijl,ikl->jk', G, A))
'''mean = torch.zeros((c, k), device=A.device)
mean.addbmm_(torch.mul(G, G), torch.mul(A, A))'''
ggn.append(torch.einsum('ijl,ikl->jk', G2, A2))
if module.bias is not None:
# A = torch.ones((M * N, 1, hw), device=A.device)
'''mean = torch.zeros((c, 1), device=A.device)
mean.addbmm_(G, A)'''
grad.append(torch.einsum('ijl->j', G).detach())
'''mean = torch.zeros((c, 1), device=A.device)
mean.addbmm_(torch.mul(G, G), torch.mul(A, A))'''
ggn.append(torch.einsum('ijl->j', G2).detach())
grad_vec = parameters_to_vector(grad).div(M).detach()
ggn_vec = parameters_to_vector(ggn).div(M).detach()
if mu_grad_hat is None:
mu_grad_hat = torch.zeros_like(grad_vec)
mu_grad_hat.add_(grad_vec)
if GGN_hat is None:
GGN_hat = torch.zeros_like(ggn_vec)
GGN_hat.add_(ggn_vec)
# Convert the parameter gradient to a single vector.
mu_grad_hat = mu_grad_hat.mul(defaults['train_set_size'] / defaults['num_samples'])
GGN_hat.mul_(defaults['train_set_size'] / defaults['num_samples'])
self.state['mu_grad_avg'].mul_(defaults['beta1']).add_(mu_grad_hat.mul(1 - defaults['beta1']))
# Get the mean loss over the number of samples
loss = torch.mean(torch.stack(loss_list))
#preds = torch.mean(torch.stack(pred_list))
# Update precision matrix
Precision = Precision.mul(defaults['beta2']) + GGN_hat.add(defaults['prior_prec']).mul(1 - defaults['beta2'])
self.state['Precision'] = Precision
# Update mean vector
mu.addcdiv_(-self.param_groups[0]['lr'], self.state['mu_grad_avg'] + torch.mul(mu, defaults['prior_prec']), Precision)
self.state['mu'] = mu
vector_to_parameters(self.state['mu'], self.param_groups[0]['params'])
return loss, pred_list
def get_mc_predictions(self, forward_function, inputs, ret_numpy=False, raw_noises=None, *args, **kwargs):
"""Returns Monte Carlo predictions.
Arguments:
forward_function (callable): The forward function of the model
that takes inputs and returns the outputs.
inputs (FloatTensor): The inputs to the model.
            raw_noises (list of FloatTensor, optional): pre-drawn standard-normal
                noise vectors, one per Monte Carlo sample. If None, a single
                zero-noise (i.e. mean) prediction is returned.
ret_numpy (bool): If true, the returned list contains numpy arrays,
otherwise it contains torch tensors.
"""
# We only support a single parameter group.
parameters = self.param_groups[0]['params']
predictions = []
Precision = self.state['Precision']
mu = self.state['mu']
if raw_noises is None:
raw_noises = [torch.zeros_like(mu)]
for raw_noise in raw_noises:
# Sample a parameter vector:
# raw_noise = torch.normal(mean=torch.zeros_like(mu), std=1.0)
p = torch.addcdiv(mu, 1., raw_noise, torch.sqrt(Precision))
vector_to_parameters(p, parameters)
# Call the forward computation function
outputs = forward_function(inputs, *args, **kwargs)
if ret_numpy:
outputs = outputs.data.cpu().numpy()
predictions.append(outputs)
vector_to_parameters(self.state['mu'], self.param_groups[0]['params'])
return predictions
def _kl_gaussian(self, p_mu, p_sigma, q_mu, q_sigma):
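        # Closed-form KL( N(p_mu, p_sigma^2) || N(q_mu, q_sigma^2) ) between
        # diagonal Gaussians, summed over all parameter dimensions.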
var_ratio = (p_sigma / q_sigma).pow(2)
t1 = ((p_mu - q_mu) / q_sigma).pow(2)
return 0.5 * torch.sum((var_ratio + t1 - 1 - var_ratio.log()))
def kl_divergence(self):
prec0 = self.defaults['prior_prec']
prec = self.state['Precision']
mu = self.state['mu']
sigma = 1. / torch.sqrt(prec)
mu0 = 0.
sigma0 = 1. / math.sqrt(prec0)
kl = self._kl_gaussian(p_mu=mu, p_sigma=sigma, q_mu=mu0, q_sigma=sigma0)
        return kl
|
the-stack_0_11493 | from typing import List
SELFID = "0" * 32
def maybe_and(sql, a):
if a:
return sql + " AND "
else:
return sql
def maybe_or(sql, a):
if a:
return sql + " OR "
else:
return sql
# TODO counts
def get_property_value(agent_memory, mem, prop):
# order of precedence:
# 1: main memory table
# 2: table corresponding to the nodes .TABLE
# 3: triple with the nodes memid as subject and prop as predicate
# is it in the main memory table?
cols = [c[1] for c in agent_memory._db_read("PRAGMA table_info(Memories)")]
if prop in cols:
cmd = "SELECT " + prop + " FROM Memories WHERE uuid=?"
r = agent_memory._db_read(cmd, mem.memid)
return r[0][0]
# is it in the mem.TABLE?
T = mem.TABLE
cols = [c[1] for c in agent_memory._db_read("PRAGMA table_info({})".format(T))]
if prop in cols:
cmd = "SELECT " + prop + " FROM " + T + " WHERE uuid=?"
r = agent_memory._db_read(cmd, mem.memid)
return r[0][0]
# is it a triple?
triples = agent_memory.get_triples(subj=mem.memid, pred_text=prop, return_obj_text="always")
if len(triples) > 0:
return triples[0][2]
return None
class MemorySearcher:
def __init__(self, self_memid=SELFID, search_data=None):
self.self_memid = self_memid
self.search_data = search_data
def search(self, memory, search_data=None) -> List["ReferenceObjectNode"]: # noqa T484
raise NotImplementedError
class ReferenceObjectSearcher(MemorySearcher):
def __init__(self, self_memid=SELFID, search_data=None):
        super().__init__(self_memid=self_memid, search_data=search_data)
def is_filter_empty(self, filter_dict):
r = filter_dict.get("special")
if r and len(r) > 0:
return False
r = filter_dict.get("ref_obj_range")
if r and len(r) > 0:
return False
r = filter_dict.get("ref_obj_exact")
if r and len(r) > 0:
return False
r = filter_dict.get("memories_range")
if r and len(r) > 0:
return False
r = filter_dict.get("memories_exact")
if r and len(r) > 0:
return False
t = filter_dict.get("triples")
if t and len(t) > 0:
return False
return True
def range_queries(self, r, table, a=False):
""" this does x, y, z, pitch, yaw, etc.
input format for generates is
{"xmin": float, xmax: float, ... , yawmin: float, yawmax: float}
"""
sql = ""
vals = []
for k, v in r.items():
if "min" in k:
sql = maybe_and(sql, len(vals) > 0)
sql += table + "." + k.replace("min", "") + ">? "
vals.append(v)
if "max" in k:
sql = maybe_and(sql, len(vals) > 0)
sql += table + "." + k.replace("max", "") + "<? "
vals.append(v)
return sql, vals
def exact_matches(self, m, table, a=False):
sql = ""
vals = []
for k, v in m.items():
sql = maybe_and(sql, len(vals) > 0)
sql += table + "." + k + "=? "
vals.append(v)
return sql, vals
def triples(self, triples, a=False):
# currently does an "and": the memory needs to satisfy all triples
vals = []
if not triples:
return "", vals
sql = "ReferenceObjects.uuid IN (SELECT subj FROM Triples WHERE "
for t in triples:
sql = maybe_or(sql, len(vals) > 0)
vals.append(t["pred_text"])
if t.get("obj_text"):
sql += "(pred_text, obj_text)=(?, ?)"
vals.append(t["obj_text"])
else:
sql += "(pred_text, obj)=(?, ?)"
vals.append(t["obj"])
sql += " GROUP BY subj HAVING COUNT(subj)=? )"
vals.append(len(triples))
return sql, vals
def get_query(self, filter_dict, ignore_self=True):
if self.is_filter_empty(filter_dict):
query = "SELECT uuid FROM ReferenceObjects"
if ignore_self:
query += " WHERE uuid !=?"
return query, [self.self_memid]
else:
return query, []
query = (
"SELECT ReferenceObjects.uuid FROM ReferenceObjects"
" INNER JOIN Memories as M on M.uuid=ReferenceObjects.uuid"
" WHERE "
)
args = []
fragment, vals = self.range_queries(
filter_dict.get("ref_obj_range", {}), "ReferenceObjects"
)
query = maybe_and(query, len(args) > 0)
args.extend(vals)
query += fragment
fragment, vals = self.exact_matches(
filter_dict.get("ref_obj_exact", {}), "ReferenceObjects"
)
query = maybe_and(query, len(args) > 0 and len(vals) > 0)
args.extend(vals)
query += fragment
fragment, vals = self.range_queries(filter_dict.get("memories_range", {}), "M")
query = maybe_and(query, len(args) > 0 and len(vals) > 0)
args.extend(vals)
query += fragment
fragment, vals = self.exact_matches(filter_dict.get("memories_exact", {}), "M")
query = maybe_and(query, len(args) > 0 and len(vals) > 0)
args.extend(vals)
query += fragment
fragment, vals = self.triples(filter_dict.get("triples", []))
query = maybe_and(query, len(args) > 0 and len(vals) > 0)
args.extend(vals)
query += fragment
if ignore_self:
query += " AND ReferenceObjects.uuid !=?"
args.append(self.self_memid)
return query, args
# flag (default) so that it makes a copy of speaker_look etc so that if the searcher is called
# later so it doesn't return the new position of the agent/speaker/speakerlook
# how to parse this distinction?
def handle_special(self, memory, search_data):
d = search_data.get("special")
if not d:
return []
if d.get("SPEAKER"):
return [memory.get_player_by_eid(d["SPEAKER"])]
if d.get("SPEAKER_LOOK"):
memids = memory._db_read_one(
'SELECT uuid FROM ReferenceObjects WHERE ref_type="attention" AND type_name=?',
d["SPEAKER_LOOK"],
)
if memids:
memid = memids[0]
mem = memory.get_location_by_id(memid)
return [mem]
if d.get("AGENT"):
return [memory.get_player_by_eid(d["AGENT"])]
if d.get("DUMMY"):
return [d["DUMMY"]]
return []
def search(self, memory, search_data=None) -> List["ReferenceObjectNode"]: # noqa T484
"""Find ref_objs matching the given filters
filter_dict has children:
"ref_obj_range", dict, with keys "min<column_name>" or "max<column_name>",
(that is the string "min" prepended to the column name)
and float values vmin and vmax respectively.
<column_name> is any column in the ReferenceObjects table that
is a numerical value. filters on rows satisfying the inequality
<column_entry> > vmin or <column_entry> < vmax
"ref_obj_exact", dict, with keys "<column_name>"
<column_name> is any column in the ReferenceObjects table
checks exact matches to the value
"memories_range" and "memories_exact" are the same, but columns in the Memories table
"triples" list [t0, t1, ...,, tm]. each t in the list is a dict
with form t = {"pred_text": <pred>, "obj_text": <obj>}
or t = {"pred_text": <pred>, "obj": <obj_memid>}
currently returns memories with all triples matched
"""
if not search_data:
search_data = self.search_data
assert search_data
if search_data.get("special"):
return self.handle_special(memory, search_data)
query, args = self.get_query(search_data)
self.search_data = search_data
memids = [m[0] for m in memory._db_read(query, *args)]
return [memory.get_mem_by_id(memid) for memid in memids]
if __name__ == "__main__":
filter_dict = {
"ref_obj_range": {"minx": 3},
"memories_exact": {"create_time": 1},
"triples": [
{"pred_text": "has_tag", "obj_text": "cow"},
{"pred_text": "has_name", "obj_text": "eddie"},
],
}
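    # A hedged usage sketch (assumes an `agent_memory` object backed by the
    # Memories / ReferenceObjects / Triples tables described above):
    #
    #     searcher = ReferenceObjectSearcher()
    #     matches = searcher.search(agent_memory, search_data=filter_dict)
    #
    # `matches` is then the list of ReferenceObjectNode memories satisfying all
    # range, exact-match and triple constraints in filter_dict.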
|
the-stack_0_11494 | # Time: O(n * k), n is the number of coins, k is the amount of money
# Space: O(k)
class Solution(object):
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
INF = 0x7fffffff # Using float("inf") would be slower.
dp = [INF] * (amount + 1)
dp[0] = 0
for i in range(amount + 1):
if dp[i] != INF:
for coin in coins:
if i + coin <= amount:
dp[i + coin] = min(dp[i + coin], dp[i] + 1)
return dp[amount] if dp[amount] != INF else -1
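# Example: Solution().coinChange([1, 2, 5], 11) returns 3 (11 = 5 + 5 + 1), and
# Solution().coinChange([2], 3) returns -1 because 3 cannot be formed from 2s.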
|
the-stack_0_11497 | from django.core.management.base import BaseCommand
from django.core.cache import cache
from redis.exceptions import ResponseError
class Command(BaseCommand):
def handle(self, *args, **kwargs):
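        # cache.clear() may raise a redis ResponseError on the first attempt;
        # when that happens the command simply retries the clear once.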
try:
cache.clear()
except ResponseError:
cache.clear()
self.stdout.write('Cleared cache\n')
|
the-stack_0_11498 | import unittest
import six
from pynetbox.core.endpoint import Endpoint
if six.PY3:
from unittest.mock import patch, Mock
else:
from mock import patch, Mock
class EndPointTestCase(unittest.TestCase):
def test_filter(self):
with patch(
"pynetbox.core.query.Request.get", return_value=Mock()
) as mock:
api = Mock(base_url="http://localhost:8000/api")
app = Mock(name="test")
mock.return_value = [{'id': 123}, {'id': 321}]
test_obj = Endpoint(api, app, "test")
test = test_obj.filter(test="test")
self.assertEqual(len(test), 2)
def test_filter_empty_kwargs(self):
api = Mock(base_url="http://localhost:8000/api")
app = Mock(name="test")
test_obj = Endpoint(api, app, "test")
with self.assertRaises(ValueError) as _:
test_obj.filter()
def test_filter_reserved_kwargs(self):
api = Mock(base_url="http://localhost:8000/api")
app = Mock(name="test")
test_obj = Endpoint(api, app, "test")
with self.assertRaises(ValueError) as _:
test_obj.filter(id=1)
|
the-stack_0_11499 | import sys
import re
import argparse
parser = argparse.ArgumentParser(description='Filter morpho-analyzed data from stdin.')
parser.add_argument('--max', type=int, default=500000, help="How many unique words to include. Default %(default)d.")
parser.add_argument('FILE', nargs='?', default='extension/dict-purelist/fi_FI', action='store', help="FILE.dic and FILE.aff will be created. Default %(default)s.")
args = parser.parse_args()
word_re=re.compile(r"^[a-zA-ZåäöÅÄÖ]([a-zA-ZåäöÅÄÖ-]*[a-zA-ZåäöÅÄÖ])?$")
words_s=set()
words_l=[]
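# Each stdin line is expected to be a "word<TAB>analysis" pair produced by a
# morphological analyzer; analyses ending in "+?" mark unrecognized words.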
for line in sys.stdin:
line=line.rstrip()
if not line: #empty
continue
wrd,analysis=line.split("\t",1)
if analysis.endswith("+?"): #unrecognized
continue
if wrd in words_s: #already done
continue
if wrd[0]!=analysis[0] and wrd[0].lower()==analysis[0]: #capitalized version of lowercased lemmas
wrd=wrd.lower()
if wrd in words_s: #already done
continue
if word_re.match(wrd): #is word
words_s.add(wrd)
words_l.append(wrd)
else:
continue
if len(words_l)>=args.max:
break
if len(words_l)%10000==0:
print("at word",len(words_l))
with open(args.FILE+".dic","w") as f:
print(len(words_l),file=f)
for w in words_l:
print(w,file=f)
with open(args.FILE+".aff","w") as f:
print("SET UTF-8",file=f)
print("TRY abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZäöåÄÖÅ",file=f)
|
the-stack_0_11500 | #!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# Import ArcPy site-package and os modules
#
import arcpy
import os
import sys, traceback
import time
from datetime import datetime
from datetime import date
## ======================================================
## Read in parameters
## ======================================================
featClass = arcpy.GetParameterAsText(0)
## ======================================================
## Read geoname file and insert feature
## ======================================================
try:
arcpy.AddMessage("Checking input feature class " + featClass + "...")
hasError = 0
# ======================================================
# Get shape type and check if point
# ======================================================
desc = arcpy.Describe(featClass)
arcpy.AddMessage("Geometry Check: Make sure input feature class geometry is point...")
if desc.shapeType.upper() != "POINT":
arcpy.AddError("Error: Input feature class does not have geometry type of point")
hasError = hasError + 1
# ======================================================
# Get list of fields and check if required fields exists
# ======================================================
fields = arcpy.ListFields(featClass)
arcpy.AddMessage("Field Check: Make sure input feature class has correct geonames fields...")
geonameFields = ["RC", "UFI", "UNI", "LAT", "LONG", "DMS_LAT", "DMS_LONG", "MGRS", "JOG", "FC", \
"DSG", "PC", "CC1", "ADM1", "POP", "ELEV", "CC2", "NT", "LC", "SHORT_FORM", \
"GENERIC", "SORT_NAME_RO", "FULL_NAME_RO", "FULL_NAME_ND_RO", "SORT_NAME_RG", \
"FULL_NAME_RG", "FULL_NAME_ND_RG", "NOTE", "MODIFY_DATE", "COUNTRYCODE1", \
"COUNTRYNAME1", "ADM1CODE", "ADM1NAMEALL", "ADM1NAME", "ADM1CLASSALL", \
"ADM1CLASS", "PLACENAME", "DSGNAME", "USER_FLD", \
"DISPLAY", "NAME_RANK", "NAME_LINK", "TRANSL_CD", "NM_MODIFY_DATE", \
"POINT_X", "POINT_Y", "F_EFCTV_DT", "F_TERM_DT"]
numMissing = 0
for geonameField in geonameFields:
found = 0
for field in fields:
if geonameField.upper() == field.name.upper():
found = 1
break
if found == 0:
numMissing = numMissing + 1
arcpy.AddError("Error: Input feature class is missing field: " + geonameField)
if numMissing > 0:
hasError = hasError + 1
# ======================================================
# Check if input has any features
# ======================================================
if sys.version_info[0] > 2:
numCount = int(arcpy.GetCount_management(featClass).getOutput(0))
else:
numCount = long(arcpy.GetCount_management(featClass).getOutput(0))
arcpy.AddMessage("Feature Count Check: Make sure input feature class does not have any features...")
if numCount > 0:
arcpy.AddError("Error: Input feature class has " + str(numCount) + " features.")
hasError = hasError + 1
# ======================================================
# Check if input coordinate system is WGS1984
# ======================================================
SR = desc.spatialReference
arcpy.AddMessage("Spatial Reference Check: Make sure input feature class is 'GCS_WGS_1984'...")
if SR.name.upper() != "GCS_WGS_1984":
arcpy.AddError("Error: Spatial Reference is " + SR.name)
hasError = hasError + 1
if hasError > 0:
result = "FALSE"
else:
result = "TRUE"
# Set Output parameter (required so that script
# tool output can be connected to other model tools)
arcpy.SetParameter(1, result)
except:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
|
the-stack_0_11502 | print('A frese é palíndromo?')
frase = str(input('Escreva uma frase sem pontuação e acentos: ')).strip().upper()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
for letra in range(len(junto) - 1, -1, -1):
inverso += junto[letra]
print('O inverso de {} é {}'.format(junto, inverso))
if junto == inverso:
print('A frase é PALÍNDROMO')
else:
print('A frase NÃO É PALÍNDROMO')
|
the-stack_0_11506 | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import string
import re
import threading
from newrelic.packages import requests
from newrelic.core.internal_metrics import internal_count_metric
try:
import queue
except ImportError:
import Queue as queue
_logger = logging.getLogger(__name__)
VALID_CHARS_RE = re.compile(r'[0-9a-zA-Z_ ./-]')
class CommonUtilization(object):
METADATA_URL = ''
HEADERS = None
EXPECTED_KEYS = ()
VENDOR_NAME = ''
TIMEOUT = 0.4
@classmethod
def record_error(cls, resource, data):
# As per spec
internal_count_metric(
'Supportability/utilization/%s/error' % cls.VENDOR_NAME, 1)
_logger.warning('Invalid %r data (%r): %r',
cls.VENDOR_NAME, resource, data)
@classmethod
def _fetch(cls, q):
# Create own requests session and disable all environment variables,
# so that we can bypass any proxy set via env var for this request.
session = requests.Session()
session.trust_env = False
try:
resp = session.get(
cls.METADATA_URL,
timeout=cls.TIMEOUT,
headers=cls.HEADERS)
resp.raise_for_status()
except Exception as e:
resp = None
_logger.debug('Unable to fetch %s data from %r: %r',
cls.VENDOR_NAME, cls.METADATA_URL, e)
q.put(resp)
@classmethod
def fetch(cls):
q = queue.Queue()
t = threading.Thread(
target=cls._fetch,
name="UtilizationDetect/{}".format(cls.VENDOR_NAME),
args=(q,),
)
t.daemon = True
t.start()
try:
return q.get(timeout=cls.TIMEOUT + 0.1)
except queue.Empty:
_logger.debug('Timeout waiting to fetch %s data from %r',
cls.VENDOR_NAME, cls.METADATA_URL)
@classmethod
def get_values(cls, response):
if response is None:
return
try:
j = response.json()
except ValueError:
_logger.debug('Invalid %s data (%r): %r',
cls.VENDOR_NAME, cls.METADATA_URL, response.text)
return
return j
@classmethod
def valid_chars(cls, data):
if data is None:
return False
for c in data:
if not VALID_CHARS_RE.match(c) and ord(c) < 0x80:
return False
return True
@classmethod
def valid_length(cls, data):
if data is None:
return False
b = data.encode('utf-8')
valid = len(b) <= 255
if valid:
return True
return False
@classmethod
def normalize(cls, key, data):
if data is None:
return
try:
stripped = data.strip()
if (stripped and cls.valid_length(stripped) and
cls.valid_chars(stripped)):
return stripped
except:
pass
@classmethod
def sanitize(cls, values):
if values is None:
return
out = {}
for key in cls.EXPECTED_KEYS:
metadata = values.get(key, None)
if not metadata:
cls.record_error(key, metadata)
return
normalized = cls.normalize(key, metadata)
if not normalized:
cls.record_error(key, metadata)
return
out[key] = normalized
return out
@classmethod
def detect(cls):
response = cls.fetch()
values = cls.get_values(response)
return cls.sanitize(values)
class AWSUtilization(CommonUtilization):
EXPECTED_KEYS = ('availabilityZone', 'instanceId', 'instanceType')
METADATA_URL = '%s/2016-09-02/dynamic/instance-identity/document' % (
'http://169.254.169.254'
)
VENDOR_NAME = 'aws'
class AzureUtilization(CommonUtilization):
METADATA_URL = ('http://169.254.169.254'
'/metadata/instance/compute?api-version=2017-03-01')
EXPECTED_KEYS = ('location', 'name', 'vmId', 'vmSize')
HEADERS = {'Metadata': 'true'}
VENDOR_NAME = 'azure'
class GCPUtilization(CommonUtilization):
EXPECTED_KEYS = ('id', 'machineType', 'name', 'zone')
HEADERS = {'Metadata-Flavor': 'Google'}
METADATA_URL = 'http://%s/computeMetadata/v1/instance/?recursive=true' % (
'metadata.google.internal')
VENDOR_NAME = 'gcp'
@classmethod
def normalize(cls, key, data):
if data is None:
return
if key in ('machineType', 'zone'):
formatted = data.strip().split('/')[-1]
elif key == 'id':
formatted = str(data)
else:
formatted = data
return super(GCPUtilization, cls).normalize(key, formatted)
class PCFUtilization(CommonUtilization):
EXPECTED_KEYS = ('cf_instance_guid', 'cf_instance_ip', 'memory_limit')
VENDOR_NAME = 'pcf'
@staticmethod
def fetch():
cf_instance_guid = os.environ.get('CF_INSTANCE_GUID')
cf_instance_ip = os.environ.get('CF_INSTANCE_IP')
memory_limit = os.environ.get('MEMORY_LIMIT')
pcf_vars = (cf_instance_guid, cf_instance_ip, memory_limit)
if all(pcf_vars):
return pcf_vars
@classmethod
def get_values(cls, response):
if response is None or len(response) != 3:
return
values = {}
for k, v in zip(cls.EXPECTED_KEYS, response):
if hasattr(v, 'decode'):
v = v.decode('utf-8')
values[k] = v
return values
class DockerUtilization(CommonUtilization):
VENDOR_NAME = 'docker'
EXPECTED_KEYS = ('id',)
METADATA_FILE = '/proc/self/cgroup'
DOCKER_RE = re.compile(r'([0-9a-f]{64,})')
@classmethod
def fetch(cls):
try:
with open(cls.METADATA_FILE, 'rb') as f:
for line in f:
stripped = line.decode('utf-8').strip()
cgroup = stripped.split(':')
if len(cgroup) != 3:
continue
subsystems = cgroup[1].split(',')
if 'cpu' in subsystems:
return cgroup[2]
except:
# There are all sorts of exceptions that can occur here
# (i.e. permissions, non-existent file, etc)
pass
@classmethod
def get_values(cls, contents):
if contents is None:
return
value = contents.split('/')[-1]
match = cls.DOCKER_RE.search(value)
if match:
value = match.group(0)
return {'id': value}
@classmethod
def valid_chars(cls, data):
if data is None:
return False
hex_digits = set(string.hexdigits)
valid = all((c in hex_digits for c in data))
if valid:
return True
return False
@classmethod
def valid_length(cls, data):
if data is None:
return False
# Must be exactly 64 characters
valid = len(data) == 64
if valid:
return True
return False
class KubernetesUtilization(CommonUtilization):
EXPECTED_KEYS = ('kubernetes_service_host', )
VENDOR_NAME = 'kubernetes'
@staticmethod
def fetch():
kubernetes_service_host = os.environ.get('KUBERNETES_SERVICE_HOST')
if kubernetes_service_host:
return kubernetes_service_host
@classmethod
def get_values(cls, v):
if v is None:
return
if hasattr(v, 'decode'):
v = v.decode('utf-8')
return {'kubernetes_service_host': v}
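# Illustrative sketch, not part of the original module: exercising the
# fetch -> get_values -> sanitize pipeline that detect() wraps for a few of
# the vendor classes above. The environment/file based detectors are cheap;
# the cloud metadata ones issue HTTP requests and simply return None when
# their link-local endpoint is unreachable.
if __name__ == '__main__':
    print('kubernetes:', KubernetesUtilization.detect())
    print('pcf:', PCFUtilization.detect())
    print('docker:', DockerUtilization.detect())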
|
the-stack_0_11508 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" Implements routines for converting FPGA interchange capnp files to models.
The models are implemented in python in fpga_interchange.logical_netlist and
fpga_interchange.physical_netlist.
LogicalNetlistBuilder - Internal helper class for constructing logical netlist
format. Recommend use is to first construct logical
netlist using classes from logical_netlist module
and calling Interchange.output_logical_netlist.
output_logical_netlist - Implements conversion of classes from logical_netlist
module to FPGA interchange logical netlist format.
This function requires LogicalNetlist schema loaded,
recommend to use Interchange class to load schemas
from interchange schema directory, and then invoke
Interchange.output_logical_netlist.
PhysicalNetlistBuilder - Internal helper class for constructing physical
netlist format.
output_physical_netlist - Implements conversion of classes from physical_netlist
module to FPGA interchange physical netlist format.
This function requires PhysicalNetlist schema loaded,
recommend to use Interchange class to load schemas
from interchange schema directory, and then invoke
Interchange.output_interchange.
Interchange - Class that handles loading capnp schemas.
"""
import capnp
import capnp.lib.capnp
capnp.remove_import_hook()
import enum
import gzip
import os.path
from .logical_netlist import check_logical_netlist, LogicalNetlist, Cell, \
CellInstance, Library, Direction
from .physical_netlist import PhysicalNetlist, PhysicalCellType, \
PhysicalNetType, PhysicalBelPin, PhysicalSitePin, PhysicalSitePip, \
PhysicalPip, PhysicalNet, Placement
from .device_resources import DeviceResources
# Flag indicating use of Packed Cap'n Proto Serialization
IS_PACKED = False
class CompressionFormat(enum.Enum):
UNCOMPRESSED = 0
GZIP = 1
# Flag indicating that files are gziped on output
DEFAULT_COMPRESSION_TYPE = CompressionFormat.GZIP
# Set traversal limit to maximum to effectively disable.
NO_TRAVERSAL_LIMIT = 2**63 - 1
NESTING_LIMIT = 1024
# Level 6 is much faster than level 9, but still has a reasonable compression
# level.
#
# From man page:
# The default compression level is -6 (that is, biased towards high
# compression at expense of speed).
#
DEFAULT_COMPRESSION = 6
def read_capnp_file(capnp_schema,
f_in,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
""" Read file to a capnp object.
is_gzipped - bool
Is output GZIP'd?
is_packed - bool
Is capnp file in packed or unpacked in its encoding?
"""
if compression_format == CompressionFormat.GZIP:
f_comp = gzip.GzipFile(fileobj=f_in, mode='rb')
if is_packed:
return capnp_schema.from_bytes_packed(
f_comp.read(),
traversal_limit_in_words=NO_TRAVERSAL_LIMIT,
nesting_limit=NESTING_LIMIT)
else:
return capnp_schema.from_bytes(
f_comp.read(),
traversal_limit_in_words=NO_TRAVERSAL_LIMIT,
nesting_limit=NESTING_LIMIT)
else:
assert compression_format == CompressionFormat.UNCOMPRESSED
if is_packed:
return capnp_schema.read_packed(
f_in, traversal_limit_in_words=NO_TRAVERSAL_LIMIT)
else:
return capnp_schema.read(
f_in, traversal_limit_in_words=NO_TRAVERSAL_LIMIT)
def write_capnp_file(capnp_obj,
f_out,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
""" Write capnp object to file.
is_gzipped - bool
Is output GZIP'd?
is_packed - bool
Should output capnp file be packed or unpacked in its encoding?
"""
if compression_format == CompressionFormat.GZIP:
with gzip.GzipFile(
fileobj=f_out, mode='wb',
compresslevel=DEFAULT_COMPRESSION) as f:
if is_packed:
f.write(capnp_obj.to_bytes_packed())
else:
f.write(capnp_obj.to_bytes())
else:
assert compression_format == CompressionFormat.UNCOMPRESSED
if is_packed:
capnp_obj.write_packed(f_out)
else:
capnp_obj.write(f_out)
class LogicalNetlistBuilder():
""" Builder class for LogicalNetlist capnp format.
The total number of cells, ports, cell instances should be known prior to
calling the constructor for LogicalNetlistBuilder.
logical_netlist_schema - Loaded logical netlist schema.
name (str) - Name of logical netlist.
cell_count (int) - Total number of cells in all libraries for this file.
port_count (int) - Total number of cell ports in all cells in all
libraries for this file.
cell_instance_count (int) - Total number of cell instances in all cells
in all libraries for this file.
property_map (dict) - Root level property map for the netlist.
indexed_strings (list of str, optional) - If provided, this string list
is used to store strings, instead of LogicalNetlist.strList.
This is useful when embedding LogicalNetlist in other schemas.
"""
def __init__(self,
logical_netlist_schema,
name,
cell_count,
port_count,
cell_instance_count,
property_map,
indexed_strings=None):
self.logical_netlist_schema = logical_netlist_schema
self.logical_netlist = self.logical_netlist_schema.Netlist.new_message(
)
self.logical_netlist.name = name
if indexed_strings is None:
self.own_string_list = True
self.string_map = {}
self.string_list = []
else:
# An external string list is being provided. Use that list (and
# update it), and initialize the string_map with that initial list.
self.own_string_list = False
self.string_list = indexed_strings
self.string_map = {}
for idx, s in enumerate(self.string_list):
self.string_map[s] = idx
self.cell_idx = 0
self.cell_count = cell_count
self.cell_decls = self.logical_netlist.init("cellDecls", cell_count)
self.cells = self.logical_netlist.init("cellList", cell_count)
self.port_idx = 0
self.port_count = port_count
self.logical_netlist.init("portList", port_count)
self.ports = self.logical_netlist.portList
self.cell_instance_idx = 0
self.cell_instance_count = cell_instance_count
self.logical_netlist.init("instList", cell_instance_count)
self.cell_instances = self.logical_netlist.instList
self.create_property_map(self.logical_netlist.propMap, property_map)
def next_cell(self):
""" Return next logical_netlist.Cell pycapnp object and it's index. """
assert self.cell_idx < self.cell_count
cell_decl = self.cell_decls[self.cell_idx]
cell = self.cells[self.cell_idx]
cell_idx = self.cell_idx
cell.index = cell_idx
self.cell_idx += 1
return cell_idx, cell, cell_decl
def get_cell(self, cell_idx):
""" Get logical_netlist.Cell pycapnp object at given index. """
return self.logical_netlist.cellList[cell_idx]
def next_port(self):
""" Return next logical_netlist.Port pycapnp object and it's index. """
assert self.port_idx < self.port_count
port = self.ports[self.port_idx]
port_idx = self.port_idx
self.port_idx += 1
return port_idx, port
def next_cell_instance(self):
""" Return next logical_netlist.CellInstance pycapnp object and it's index. """
assert self.cell_instance_idx < self.cell_instance_count
cell_instance = self.cell_instances[self.cell_instance_idx]
cell_instance_idx = self.cell_instance_idx
self.cell_instance_idx += 1
return cell_instance_idx, cell_instance
def string_id(self, s):
""" Intern string into file, and return its StringIdx. """
assert isinstance(s, str)
if s not in self.string_map:
self.string_map[s] = len(self.string_list)
self.string_list.append(s)
return self.string_map[s]
def finish_encode(self):
""" Completes the encoding of the logical netlist and returns root pycapnp object.
Invoke after all cells, ports and cell instances have been populated
with data.
Returns completed logical_netlist.Netlist pycapnp object.
"""
if self.own_string_list:
self.logical_netlist.init('strList', len(self.string_list))
for idx, s in enumerate(self.string_list):
self.logical_netlist.strList[idx] = s
return self.logical_netlist
def create_property_map(self, property_map, d):
""" Create a property_map from a python dictionary for this LogicalNetlist file.
property_map (logical_netlist.PropertyMap pycapnp object) - Pycapnp
object to write property map.
d (dict-like) - Dictionary to convert to property map.
Keys must be strings. Values can be strings, ints or
bools.
"""
entries = property_map.init('entries', len(d))
for entry, (k, v) in zip(entries, d.items()):
assert isinstance(k, str)
entry.key = self.string_id(k)
if isinstance(v, str):
if v[0] == '"' and v[-1] == '"':
v = v[1:-1]
entry.textValue = self.string_id(v)
elif isinstance(v, bool):
entry.boolValue = v
elif isinstance(v, int):
entry.intValue = v
else:
assert False, "Unknown type of value {}, type = {}".format(
repr(v), type(v))
def get_top_cell_instance(self):
""" Return the top cell instance from the LogicalNetlist. """
return self.logical_netlist.topInst
def output_logical_netlist(logical_netlist_schema,
libraries,
name,
top_instance_name,
top_instance,
view="netlist",
property_map={},
indexed_strings=None):
""" Convert logical_netlist.Library python classes to a FPGA interchange LogicalNetlist capnp.
logical_netlist_schema - logical_netlist schema.
libraries (dict) - Dict of str to logical_netlist.Library python classes.
top_level_cell (str) - Name of Cell to instance at top level
top_level_name (str) - Name of top level cell instance
view (str) - EDIF internal constant.
property_map - PropertyMap for top level cell instance
"""
# Sanity that the netlist libraries are complete and consistent, also
# output master cell list.
master_cell_list = check_logical_netlist(libraries)
# Make sure top level cell is in the master cell list.
assert top_instance is None or top_instance.cell_name in master_cell_list
# Count cell, port and cell instance counts to enable pre-allocation of
# capnp arrays.
cell_count = 0
port_count = 0
cell_instance_count = 0
for lib in libraries.values():
cell_count += len(lib.cells)
for cell in lib.cells.values():
port_count += len(cell.ports)
cell_instance_count += len(cell.cell_instances)
logical_netlist = LogicalNetlistBuilder(
logical_netlist_schema=logical_netlist_schema,
name=name,
cell_count=cell_count,
port_count=port_count,
cell_instance_count=cell_instance_count,
property_map=property_map,
indexed_strings=indexed_strings)
# Assign each python Cell objects in libraries to capnp
# logical_netlist.Cell objects, and record the cell index for use with
# cell instances later.
#
# Ports can also be converted now, do that too. Build a map of cell name
# and port name to port objects for use on constructing cell nets.
cell_name_to_idx = {}
ports = {}
for library, lib in libraries.items():
library_id = logical_netlist.string_id(library)
for cell in lib.cells.values():
assert cell.name not in cell_name_to_idx
cell_idx, cell_obj, cell_decl = logical_netlist.next_cell()
cell_decl.name = logical_netlist.string_id(cell.name)
cell_decl.view = logical_netlist.string_id(cell.view)
cell_decl.lib = library_id
cell_name_to_idx[cell.name] = cell_idx
logical_netlist.create_property_map(cell_decl.propMap,
cell.property_map)
cell_decl.init('ports', len(cell.ports))
for idx, (port_name, port) in enumerate(cell.ports.items()):
port_idx, port_obj = logical_netlist.next_port()
ports[cell.name, port_name] = (port_idx, port)
cell_decl.ports[idx] = port_idx
port_obj.dir = logical_netlist_schema.Netlist.Direction.__dict__[
port.direction.name.lower()]
logical_netlist.create_property_map(port_obj.propMap,
port.property_map)
if port.bus is not None:
port_obj.name = logical_netlist.string_id(port_name)
bus = port_obj.init('bus')
bus.busStart = port.bus.start
bus.busEnd = port.bus.end
else:
port_obj.name = logical_netlist.string_id(port_name)
port_obj.bit = None
# Now that each cell type has been assigned a cell index, add cell
# instances to cells as needed.
for lib in libraries.values():
for cell in lib.cells.values():
cell_obj = logical_netlist.get_cell(cell_name_to_idx[cell.name])
# Save mapping of cell instance name to cell instance index for
# cell net construction
cell_instances = {}
cell_obj.init('insts', len(cell.cell_instances))
for idx, (cell_instance_name,
cell_instance) in enumerate(cell.cell_instances.items()):
cell_instance_idx, cell_instance_obj = logical_netlist.next_cell_instance(
)
cell_instances[cell_instance_name] = cell_instance_idx
cell_instance_obj.name = logical_netlist.string_id(
cell_instance_name)
logical_netlist.create_property_map(cell_instance_obj.propMap,
cell_instance.property_map)
cell_instance_obj.view = logical_netlist.string_id(
cell_instance.view)
cell_instance_obj.cell = cell_name_to_idx[cell_instance.
cell_name]
cell_obj.insts[idx] = cell_instance_idx
cell_obj.init('nets', len(cell.nets))
for net_obj, (netname, net) in zip(cell_obj.nets,
cell.nets.items()):
net_obj.name = logical_netlist.string_id(netname)
logical_netlist.create_property_map(net_obj.propMap,
net.property_map)
net_obj.init('portInsts', len(net.ports))
for port_obj, port in zip(net_obj.portInsts, net.ports):
if port.instance_name is not None:
# If port.instance_name is not None, then this is a
# cell instance port connection.
instance_cell_name = cell.cell_instances[
port.instance_name].cell_name
port_obj.inst = cell_instances[port.instance_name]
port_obj.port, port_pyobj = ports[instance_cell_name,
port.name]
else:
# If port.instance_name is None, then this is a cell
# port connection
port_obj.extPort = None
port_obj.port, port_pyobj = ports[cell.name, port.name]
# Handle bussed port annotations
if port.idx is not None:
port_obj.busIdx.idx = port_pyobj.encode_index(port.idx)
else:
port_obj.busIdx.singleBit = None
if top_instance is not None:
top_level_cell_instance = logical_netlist.get_top_cell_instance()
# Convert the top level cell now that the libraries have been converted.
top_level_cell_instance.name = logical_netlist.string_id(
top_instance_name)
top_level_cell_instance.cell = cell_name_to_idx[top_instance.cell_name]
top_level_cell_instance.view = logical_netlist.string_id(
top_instance.view)
logical_netlist.create_property_map(top_level_cell_instance.propMap,
top_instance.property_map)
return logical_netlist.finish_encode()
class PhysicalNetlistBuilder():
""" Builder class for PhysicalNetlist capnp format.
physical_netlist_schema - Loaded physical netlist schema.
"""
def __init__(self, physical_netlist_schema):
self.physical_netlist_schema = physical_netlist_schema
def init_string_map(self):
self.string_map = {}
self.string_list = []
def string_id(self, s):
""" Intern string into file, and return its StringIdx. """
assert isinstance(s, str)
if s not in self.string_map:
self.string_map[s] = len(self.string_list)
self.string_list.append(s)
return self.string_map[s]
def encode(self, phys_netlist):
""" Completes the encoding of the physical netlist and returns root pycapnp object.
Invoke after all placements, physical cells and physical nets have
been added.
Returns completed physical_netlist.PhysNetlist pycapnp object.
"""
self.init_string_map()
physical_netlist = self.physical_netlist_schema.PhysNetlist.new_message(
)
physical_netlist.part = phys_netlist.part
physical_netlist.init('placements', len(phys_netlist.placements))
placements = physical_netlist.placements
for idx, placement in enumerate(phys_netlist.placements):
placement_obj = placements[idx]
placement_obj.cellName = self.string_id(placement.cell_name)
placement_obj.type = self.string_id(placement.cell_type)
placement_obj.site = self.string_id(placement.site)
placement_obj.bel = self.string_id(placement.bel)
placement_obj.isSiteFixed = True
placement_obj.isBelFixed = True
if placement.other_bels:
placement_obj.init('otherBels', len(placement.other_bels))
other_bels_obj = placement_obj.otherBels
for idx, s in enumerate(placement.other_bels):
other_bels_obj[idx] = self.string_id(s)
placement_obj.init('pinMap', len(placement.pins))
pin_map = placement_obj.pinMap
for idx, pin in enumerate(placement.pins):
pin_map[idx].cellPin = self.string_id(pin.cell_pin)
pin_map[idx].belPin = self.string_id(pin.bel_pin)
if pin.bel is None:
pin_map[idx].bel = placement_obj.bel
else:
pin_map[idx].bel = self.string_id(pin.bel)
pin_map[idx].isFixed = True
if pin.other_cell_type:
assert pin.other_cell_name is not None
pin.otherCell.multiCell = self.string_id(
pin.other_cell_name)
pin.otherCell.multiType = self.string_id(
pin.other_cell_type)
physical_netlist.init('physNets', len(phys_netlist.nets))
nets = physical_netlist.physNets
for idx, net in enumerate(phys_netlist.nets):
net_obj = nets[idx]
net_obj.name = self.string_id(net.name)
net_obj.init('sources', len(net.sources))
for root_obj, root in zip(net_obj.sources, net.sources):
root.output_interchange(root_obj, self.string_id)
net_obj.init('stubs', len(net.stubs))
for stub_obj, stub in zip(net_obj.stubs, net.stubs):
stub.output_interchange(stub_obj, self.string_id)
net_obj.type = self.physical_netlist_schema.PhysNetlist.NetType.__dict__[
net.type.name.lower()]
physical_netlist.init('physCells', len(phys_netlist.physical_cells))
physical_cells = physical_netlist.physCells
for idx, (cell_name,
cell_type) in enumerate(phys_netlist.physical_cells.items()):
physical_cell = physical_cells[idx]
physical_cell.cellName = self.string_id(cell_name)
physical_cell.physType = self.physical_netlist_schema.PhysNetlist.PhysCellType.__dict__[
cell_type.name.lower()]
physical_netlist.init('properties', len(phys_netlist.properties))
properties = physical_netlist.properties
for idx, (k, v) in enumerate(phys_netlist.properties.items()):
properties[idx].key = self.string_id(k)
properties[idx].value = self.string_id(v)
physical_netlist.init('siteInsts', len(phys_netlist.site_instances))
site_instances = physical_netlist.siteInsts
for idx, (k, v) in enumerate(phys_netlist.site_instances.items()):
site_instances[idx].site = self.string_id(k)
site_instances[idx].type = self.string_id(v)
physical_netlist.init('strList', len(self.string_list))
for idx, s in enumerate(self.string_list):
physical_netlist.strList[idx] = s
return physical_netlist
def output_physical_netlist(physical_netlist, physical_netlist_schema):
builder = PhysicalNetlistBuilder(physical_netlist_schema)
return builder.encode(physical_netlist)
def first_upper(s):
return s[0].upper() + s[1:]
def to_logical_netlist(netlist_capnp, strs=None):
# name @0 : Text;
# propMap @1 : PropertyMap;
# topInst @2 : CellInstance;
# strList @3 : List(Text);
# cellList @4 : List(Cell);
# portList @5 : List(Port);
# instList @6 : List(CellInstance);
if strs is None:
strs = [s for s in netlist_capnp.strList]
libraries = {}
def convert_property_map(prop_map):
out = {}
for prop in prop_map.entries:
key = strs[prop.key]
if prop.which() == 'textValue':
value = strs[prop.textValue]
elif prop.which() == 'intValue':
value = prop.intValue
else:
assert prop.which() == 'boolValue'
value = prop.boolValue
out[key] = value
return out
def convert_cell_instance(cell_instance_capnp):
prop_map = convert_property_map(cell_instance_capnp.propMap)
name = strs[cell_instance_capnp.name]
return name, CellInstance(
view=strs[cell_instance_capnp.view],
cell_name=strs[netlist_capnp.cellDecls[cell_instance_capnp.cell].
name],
property_map=prop_map,
capnp_name=cell_instance_capnp.cell)
for cell_capnp in netlist_capnp.cellList:
cell_decl = netlist_capnp.cellDecls[cell_capnp.index]
cell = Cell(
name=strs[cell_decl.name],
capnp_index=cell_capnp.index,
property_map=convert_property_map(cell_decl.propMap),
)
cell.view = strs[cell_decl.view]
for inst in cell_capnp.insts:
cell_instance_name, cell_instance = convert_cell_instance(
netlist_capnp.instList[inst])
cell.cell_instances[cell_instance_name] = cell_instance
for port_idx in cell_decl.ports:
port = netlist_capnp.portList[port_idx]
port_name = strs[port.name]
direction = Direction[first_upper(str(port.dir))]
prop_map = convert_property_map(port.propMap)
if port.which() == 'bit':
cell.add_port(
name=port_name, direction=direction, property_map=prop_map)
else:
assert port.which() == 'bus'
cell.add_bus_port(
name=port_name,
direction=direction,
property_map=prop_map,
start=port.bus.busStart,
end=port.bus.busEnd)
for net in cell_capnp.nets:
net_name = strs[net.name]
cell.add_net(
name=net_name,
property_map=convert_property_map(net.propMap),
)
for port_inst in net.portInsts:
port_capnp = netlist_capnp.portList[port_inst.port]
port_name = strs[port_capnp.name]
if port_inst.busIdx.which() == 'singleBit':
idx = None
else:
assert port_inst.busIdx.which() == 'idx'
assert port_capnp.which() == 'bus'
bus = port_capnp.bus
if bus.busStart <= bus.busEnd:
idx = port_inst.busIdx.idx + bus.busStart
else:
idx = bus.busStart - port_inst.busIdx.idx
if port_inst.which() == 'extPort':
cell.connect_net_to_cell_port(
net_name=net_name, port=port_name, idx=idx)
else:
assert port_inst.which() == 'inst'
instance_name = strs[netlist_capnp.instList[port_inst.
inst].name]
cell.connect_net_to_instance(
net_name=net_name,
instance_name=instance_name,
port=port_name,
idx=idx)
library = strs[cell_decl.lib]
if library not in libraries:
libraries[library] = Library(name=library)
libraries[library].add_cell(cell)
top_instance_name, top_instance = convert_cell_instance(
netlist_capnp.topInst)
return LogicalNetlist(
name=netlist_capnp.name,
property_map=convert_property_map(netlist_capnp.propMap),
top_instance_name=top_instance_name,
top_instance=top_instance,
libraries=libraries)
def to_physical_netlist(phys_netlist_capnp):
strs = [s for s in phys_netlist_capnp.strList]
properties = {}
for prop in phys_netlist_capnp.properties:
properties[strs[prop.key]] = strs[prop.value]
phys_netlist = PhysicalNetlist(phys_netlist_capnp.part, properties)
for site_instance in phys_netlist_capnp.siteInsts:
phys_netlist.add_site_instance(strs[site_instance.site],
strs[site_instance.type])
for physical_cell in phys_netlist_capnp.physCells:
phys_netlist.add_physical_cell(
strs[physical_cell.cellName], PhysicalCellType[first_upper(
str(physical_cell.physType))])
def convert_route_segment(route_segment_capnp):
which = route_segment_capnp.which()
if which == 'belPin':
bel_pin = route_segment_capnp.belPin
return PhysicalBelPin(
site=strs[bel_pin.site],
bel=strs[bel_pin.bel],
pin=strs[bel_pin.pin])
elif which == 'sitePin':
site_pin = route_segment_capnp.sitePin
return PhysicalSitePin(
site=strs[site_pin.site], pin=strs[site_pin.pin])
elif which == 'pip':
            # TODO: Shouldn't discard the isFixed field
pip = route_segment_capnp.pip
site = strs[pip.site] if pip.which() == 'site' else None
return PhysicalPip(
tile=strs[pip.tile],
wire0=strs[pip.wire0],
wire1=strs[pip.wire1],
forward=pip.forward,
site=site)
else:
assert which == 'sitePIP'
            # TODO: Shouldn't discard the isFixed, inverts and isInverting
            # fields
site_pip = route_segment_capnp.sitePIP
return PhysicalSitePip(
site=strs[site_pip.site],
bel=strs[site_pip.bel],
pin=strs[site_pip.pin],
is_inverting=site_pip.isInverting)
def convert_route_branch(route_branch_capnp):
obj = convert_route_segment(route_branch_capnp.routeSegment)
for branch in route_branch_capnp.branches:
obj.branches.append(convert_route_branch(branch))
return obj
def convert_net(net_capnp):
sources = []
for source_capnp in net_capnp.sources:
sources.append(convert_route_branch(source_capnp))
stubs = []
for stub_capnp in net_capnp.stubs:
stubs.append(convert_route_branch(stub_capnp))
return PhysicalNet(
name=strs[net_capnp.name],
type=PhysicalNetType[first_upper(str(net_capnp.type))],
sources=sources,
stubs=stubs)
null_net = convert_net(phys_netlist_capnp.nullNet)
assert len(null_net.sources) == 0
phys_netlist.set_null_net(null_net.stubs)
for physical_net in phys_netlist_capnp.physNets:
net = convert_net(physical_net)
phys_netlist.add_physical_net(
net_name=net.name,
sources=net.sources,
stubs=net.stubs,
net_type=net.type)
for placement_capnp in phys_netlist_capnp.placements:
# TODO: Shouldn't be discarding isBelFixed/isSiteFixed/altSiteType
placement = Placement(
cell_type=strs[placement_capnp.type],
cell_name=strs[placement_capnp.cellName],
site=strs[placement_capnp.site],
bel=strs[placement_capnp.bel],
)
for pin_map in placement_capnp.pinMap:
# TODO: Shouldn't be discarding isFixed
other_cell_name = None
other_cell_type = None
if pin_map.which() == 'otherCell':
other_cell = pin_map.otherCell
other_cell_name = strs[other_cell.multiCell]
other_cell_type = strs[other_cell.multiType]
placement.add_bel_pin_to_cell_pin(
bel=strs[pin_map.bel],
bel_pin=strs[pin_map.belPin],
cell_pin=strs[pin_map.cellPin],
other_cell_type=other_cell_type,
other_cell_name=other_cell_name)
for other_bel in placement_capnp.otherBels:
placement.other_bels.add(strs[other_bel])
phys_netlist.add_placement(placement)
return phys_netlist
class Interchange():
def __init__(self, schema_directory):
search_path = [os.path.dirname(os.path.dirname(capnp.__file__))]
if 'CONDA_PREFIX' in os.environ:
search_path.append(
os.path.join(os.environ['CONDA_PREFIX'], 'include'))
if 'CAPNP_PATH' in os.environ:
search_path.append(os.environ['CAPNP_PATH'])
for path in ['/usr/local/include', '/usr/include']:
if os.path.exists(path):
search_path.append(path)
self.references_schema = capnp.load(
os.path.join(schema_directory, 'References.capnp'),
imports=search_path)
self.logical_netlist_schema = capnp.load(
os.path.join(schema_directory, 'LogicalNetlist.capnp'),
imports=search_path)
self.physical_netlist_schema = capnp.load(
os.path.join(schema_directory, 'PhysicalNetlist.capnp'),
imports=search_path)
self.device_resources_schema = capnp.load(
os.path.join(schema_directory, 'DeviceResources.capnp'),
imports=search_path)
def output_logical_netlist(self, *args, **kwargs):
return output_logical_netlist(
logical_netlist_schema=self.logical_netlist_schema,
*args,
**kwargs)
def output_physical_netlist(self, *args, **kwargs):
return output_physical_netlist(
physical_netlist_schema=self.physical_netlist_schema,
*args,
**kwargs)
def read_logical_netlist_raw(self,
f,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
return read_capnp_file(self.logical_netlist_schema.Netlist, f,
compression_format, is_packed)
def read_logical_netlist(self,
f,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
return to_logical_netlist(
read_capnp_file(self.logical_netlist_schema.Netlist, f,
compression_format, is_packed))
def read_physical_netlist(self,
f,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
return to_physical_netlist(
read_capnp_file(self.physical_netlist_schema.PhysNetlist, f,
compression_format, is_packed))
def read_physical_netlist_raw(self,
f,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
return read_capnp_file(self.physical_netlist_schema.PhysNetlist, f,
compression_format, is_packed)
def read_device_resources_raw(self,
f,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
return read_capnp_file(self.device_resources_schema.Device, f,
compression_format, is_packed)
def read_device_resources(self,
f,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
return DeviceResources(
read_capnp_file(self.device_resources_schema.Device, f,
compression_format, is_packed))
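# Minimal usage sketch added for illustration (not in the original file).
# The schema directory and netlist path are command line arguments here and
# must point at a real FPGA interchange schema checkout and a logical netlist
# written in this format.
if __name__ == "__main__":
    import sys
    schema_dir, netlist_file = sys.argv[1], sys.argv[2]
    interchange = Interchange(schema_dir)
    with open(netlist_file, 'rb') as f:
        netlist = interchange.read_logical_netlist(f)
    print(netlist.name, 'with', len(netlist.libraries), 'libraries')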
|
the-stack_0_11511 | # ------------------------------------------------------------------------------
# Functions to save and restore different data types.
# ------------------------------------------------------------------------------
import os
# PICKLE
import pickle
def pkl_dump(obj, name, path='obj'):
r"""Saves an object in pickle format."""
if '.p' not in name:
name = name + '.pkl'
path = os.path.join(path, name)
pickle.dump(obj, open(path, 'wb'))
def pkl_load(name, path='obj'):
r"""Restores an object from a pickle file."""
if '.p' not in name:
name = name + '.pkl'
path = os.path.join(path, name)
try:
obj = pickle.load(open(path, 'rb'))
except FileNotFoundError:
obj = None
return obj
# NUMPY
from numpy import save, load
def np_dump(obj, name, path='obj'):
r"""Saves an object in npy format."""
if '.npy' not in name:
name = name + '.npy'
path = os.path.join(path, name)
save(path, obj)
def np_load(name, path='obj'):
r"""Restores an object from a npy file."""
if '.npy' not in name:
name = name + '.npy'
path = os.path.join(path, name)
try:
obj = load(path)
except FileNotFoundError:
obj = None
return obj
# JSON
import json
def save_json(dict_obj, path, name):
r"""Saves a dictionary in json format."""
if '.json' not in name:
name += '.json'
with open(os.path.join(path, name), 'w') as json_file:
json.dump(dict_obj, json_file)
def load_json(path, name):
r"""Restores a dictionary from a json file."""
if '.json' not in name:
name += '.json'
with open(os.path.join(path, name), 'r') as json_file:
return json.load(json_file)
# NIFTY
import numpy as np
import SimpleITK as sitk
def nifty_dump(x, name, path):
r"""Save a tensor of numpy array in nifty format."""
if 'torch.Tensor' in str(type(x)):
x = x.detach().cpu().numpy()
if '.nii' not in name:
name = name + '.nii.gz'
# Remove channels dimension and rotate axis so depth first
if len(x.shape) == 4:
x = np.moveaxis(x[0], -1, 0)
assert len(x.shape) == 3
path = os.path.join(path, name)
sitk.WriteImage(sitk.GetImageFromArray(x), path)
# OTHERS
import functools
def join_path(list):
r"""From a list of chained directories, forms a path"""
return functools.reduce(os.path.join, list)
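# Small usage sketch (illustrative, not part of the original helpers):
# round-trip a dictionary through the pickle and json helpers above. The
# current directory is used instead of the default 'obj' folder so the
# example does not depend on that folder existing.
if __name__ == '__main__':
    example = {'epoch': 3, 'loss': 0.25}
    pkl_dump(example, 'example', path='.')
    print(pkl_load('example', path='.'))
    save_json(example, '.', 'example')
    print(load_json('.', 'example'))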
|
the-stack_0_11516 | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from landlab import RasterModelGrid
from landlab.layers import EventLayers
def test_EventLayersMixIn():
grid = RasterModelGrid((4, 4))
assert hasattr(grid, "event_layers")
assert grid.event_layers.number_of_layers == 0
assert grid.event_layers.number_of_stacks == 4
def test_setitem_with_scalar():
layers = EventLayers(5)
layers.add(1.0, age=3.0)
layers.add(2.0, age=4.0)
truth = np.array([[3.0, 3.0, 3.0, 3.0, 3.0], [4.0, 4.0, 4.0, 4.0, 4.0]])
assert_array_equal(layers["age"], truth)
layers["age"] = 2.0
truth = np.array([[2.0, 2.0, 2.0, 2.0, 2.0], [2.0, 2.0, 2.0, 2.0, 2.0]])
assert_array_equal(layers["age"], truth)
def test_set_item_with_1d():
layers = EventLayers(5)
layers.add(1.0, age=3.0)
layers.add(2.0, age=4.0)
truth = np.array([[3.0, 3.0, 3.0, 3.0, 3.0], [4.0, 4.0, 4.0, 4.0, 4.0]])
assert_array_equal(layers["age"], truth)
layers["age"] = [4.0, 7.0]
truth = np.array([[4.0, 4.0, 4.0, 4.0, 4.0], [7.0, 7.0, 7.0, 7.0, 7.0]])
assert_array_equal(layers["age"], truth)
def test_set_item_with_2d():
layers = EventLayers(5)
layers.add(1.0, age=3.0)
layers.add(2.0, age=4.0)
truth = np.array([[3.0, 3.0, 3.0, 3.0, 3.0], [4.0, 4.0, 4.0, 4.0, 4.0]])
assert_array_equal(layers["age"], truth)
layers["age"] = [[4.0, 4.0, 4.0, 4.0, 4.0], [7.0, 7.0, 7.0, 7.0, 7.0]]
truth = np.array([[4.0, 4.0, 4.0, 4.0, 4.0], [7.0, 7.0, 7.0, 7.0, 7.0]])
assert_array_equal(layers["age"], truth)
def test__str__():
layers = EventLayers(5)
layers.add(1.0, age=3.0)
vals = str(layers)
assert vals.splitlines() == [
"number_of_layers: 1",
"number_of_stacks: 5",
"tracking: age",
]
def test__repr__():
layers = EventLayers(5)
layers.add(1.0, age=3.0)
vals = repr(layers)
assert vals == "EventLayers(5)"
def test_adding_untracked_layer():
layers = EventLayers(3)
layers.add(1.0, type=3.0, size="sand")
layers.add([0.0, 0.0, 1.0], type=3.0, size="sand")
with pytest.raises(ValueError):
layers.add([1.0], type=3.0, size="sand", spam="eggs")
|
the-stack_0_11518 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert references to JSON file."""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import with_statement
import re
import os
import sys
import glob
import logging
import json
from nltk.stem.snowball import SnowballStemmer as Stemmer
logging.basicConfig(level=logging.INFO)
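# Added note (inferred from the sys.argv usage below, not from original docs):
# expected invocation is roughly
#   python <this_script>.py <dataset_dir> <output.json> <stem|no-stem> <author|reader|combined>
# where <dataset_dir> contains numbered document folders with .kwd files and
# KEY/*.key reader-assigned keyphrase files.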
references = {}
readers_references = {}
author_references = {}
for input_dir in glob.glob(sys.argv[1]+'/[0-9]*'):
file_id = input_dir.split('/')[-1].split('.')[0]
logging.info("loading author-assigned references from {}".format(file_id))
author_references[file_id] = []
try:
with open(input_dir+"/"+file_id+".kwd", 'r', errors='replace') as f:
text = f.read()
text = text.replace(u"\uFFFD", "\n")
text = re.sub(r'\n+', '\n', text).strip()
lines = text.split("\n")
keyphrases = []
for line in lines:
words = line.strip().split()
if sys.argv[3] == "stem":
stems = [Stemmer('porter').stem(w.lower()) for w in words]
keyphrases.append(' '.join(stems))
else:
keyphrases.append(' '.join([w.lower() for w in words]))
author_references[file_id] = keyphrases
except IOError:
logging.info("No author-assigned references for {}".format(file_id))
readers_references[file_id] = []
for reader_file in glob.glob(input_dir+'/KEY/*.key'):
logging.info("loading reader-assigned references from {}".format(reader_file))
with open(reader_file, 'r', errors='replace') as f:
text = f.read()
text = text.replace(u"\uFFFD", "\n")
text = re.sub(r'\n+', '\n', text).strip()
lines = text.split("\n")
keyphrases = []
for line in lines:
words = line.strip().split()
if sys.argv[3] == "stem":
stems = [Stemmer('porter').stem(w.lower()) for w in words]
keyphrases.append(' '.join(stems))
else:
keyphrases.append(' '.join([w.lower() for w in words]))
for keyphrase in keyphrases:
readers_references[file_id].append(keyphrase)
if sys.argv[4] == "author":
for doc_id in author_references:
references[doc_id] = [[u] for u in set(author_references[doc_id])]
elif sys.argv[4] == "reader":
for doc_id in readers_references:
references[doc_id] = [[u] for u in set(readers_references[doc_id])]
else:
for doc_id in readers_references:
references[doc_id] = [[u] for u in set(readers_references[doc_id])| set(author_references[doc_id])]
with open(sys.argv[2], 'w') as o:
json.dump(references, o, sort_keys = True, indent = 4)
|
the-stack_0_11521 | from setuptools import setup
import os
VERSION = "0.6"
def get_long_description():
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"),
encoding="utf8",
) as fp:
return fp.read()
setup(
name="airtable-export",
description="Export Airtable data to files on disk",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Simon Willison",
url="https://github.com/simonw/airtable-export",
project_urls={
"Issues": "https://github.com/simonw/airtable-export/issues",
"CI": "https://github.com/simonw/airtable-export/actions",
"Changelog": "https://github.com/simonw/airtable-export/releases",
},
license="Apache License, Version 2.0",
version=VERSION,
packages=["airtable_export"],
entry_points="""
[console_scripts]
airtable-export=airtable_export.cli:cli
""",
install_requires=["click", "PyYAML", "httpx", "sqlite-utils"],
extras_require={"test": ["pytest", "pytest-mock"]},
tests_require=["airtable-export[test]"],
)
|
the-stack_0_11523 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .tfvc_branch_ref import TfvcBranchRef
class TfvcBranch(TfvcBranchRef):
"""TfvcBranch.
:param path: Path for the branch.
:type path: str
:param _links: A collection of REST reference links.
:type _links: :class:`ReferenceLinks <tfvc.v4_1.models.ReferenceLinks>`
:param created_date: Creation date of the branch.
:type created_date: datetime
:param description: Description of the branch.
:type description: str
:param is_deleted: Is the branch deleted?
:type is_deleted: bool
:param owner: Alias or display name of user
:type owner: :class:`IdentityRef <tfvc.v4_1.models.IdentityRef>`
:param url: URL to retrieve the item.
:type url: str
:param children: List of children for the branch.
:type children: list of :class:`TfvcBranch <tfvc.v4_1.models.TfvcBranch>`
:param mappings: List of branch mappings.
:type mappings: list of :class:`TfvcBranchMapping <tfvc.v4_1.models.TfvcBranchMapping>`
:param parent: Path of the branch's parent.
:type parent: :class:`TfvcShallowBranchRef <tfvc.v4_1.models.TfvcShallowBranchRef>`
:param related_branches: List of paths of the related branches.
:type related_branches: list of :class:`TfvcShallowBranchRef <tfvc.v4_1.models.TfvcShallowBranchRef>`
"""
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'description': {'key': 'description', 'type': 'str'},
'is_deleted': {'key': 'isDeleted', 'type': 'bool'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'url': {'key': 'url', 'type': 'str'},
'children': {'key': 'children', 'type': '[TfvcBranch]'},
'mappings': {'key': 'mappings', 'type': '[TfvcBranchMapping]'},
'parent': {'key': 'parent', 'type': 'TfvcShallowBranchRef'},
'related_branches': {'key': 'relatedBranches', 'type': '[TfvcShallowBranchRef]'}
}
def __init__(self, path=None, _links=None, created_date=None, description=None, is_deleted=None, owner=None, url=None, children=None, mappings=None, parent=None, related_branches=None):
super(TfvcBranch, self).__init__(path=path, _links=_links, created_date=created_date, description=description, is_deleted=is_deleted, owner=owner, url=url)
self.children = children
self.mappings = mappings
self.parent = parent
self.related_branches = related_branches
|
the-stack_0_11524 | import warnings
from collections import defaultdict
import copy
from coffea.nanoevents.schemas.base import BaseSchema, zip_forms
from coffea.nanoevents.util import quote
class PHYSLITESchema(BaseSchema):
"""PHYSLITE schema builder - work in progress.
This is a schema for the `ATLAS DAOD_PHYSLITE derivation
<https://gitlab.cern.ch/atlas/athena/-/blob/release/21.2.108.0/PhysicsAnalysis/DerivationFramework/DerivationFrameworkPhys/share/PHYSLITE.py>`_.
Closely following `schemas.nanoaod.NanoAODSchema`, it is mainly build from
naming patterns where the "Analysis" prefix has been removed, so the
collections will be named Electrons, Muons, instead of AnalysisElectrons,
AnalysisMunos, etc. The collection fields correspond to the "Aux" and
"AuxDyn" columns.
Collections are assigned mixin types according to the `mixins` mapping.
All collections are then zipped into one `base.NanoEvents` record and returned.
    Cross references are built from ElementLink columns. Global indices are
created dynamically, using an ``_eventindex`` field that is attached to
each collection.
"""
truth_collections = [
"TruthPhotons",
"TruthMuons",
"TruthNeutrinos",
"TruthTaus",
"TruthElectrons",
"TruthBoson",
"TruthBottom",
"TruthTop",
]
"""TRUTH3 collection names.
TruthParticle behavior is assigned to all of them and global index forms
for parent/children relations are created for all combinations.
"""
mixins = {
"Electrons": "Electron",
"Muons": "Muon",
"Jets": "Particle",
"TauJets": "Particle",
"CombinedMuonTrackParticles": "TrackParticle",
"ExtrapolatedMuonTrackParticles": "TrackParticle",
"GSFTrackParticles": "TrackParticle",
"InDetTrackParticles": "TrackParticle",
"MuonSpectrometerTrackParticles": "TrackParticle",
}
"""Default configuration for mixin types, based on the collection name.
The types are implemented in the `coffea.nanoevents.methods.physlite` module.
"""
for _k in truth_collections:
mixins[_k] = "TruthParticle"
def __init__(self, base_form):
super().__init__(base_form)
self._form["contents"] = self._build_collections(self._form["contents"])
def _build_collections(self, branch_forms):
zip_groups = defaultdict(list)
has_eventindex = defaultdict(bool)
for key, ak_form in branch_forms.items():
# Normal fields
key_fields = key.split("/")[-1].split(".")
top_key = key_fields[0]
sub_key = ".".join(key_fields[1:])
objname = top_key.replace("Analysis", "").replace("AuxDyn", "")
zip_groups[objname].append(((key, sub_key), ak_form))
# add eventindex form, based on the first single-jagged list column
if (
not has_eventindex[objname]
and "List" in ak_form["class"]
and "List" not in ak_form["content"]["class"]
):
zip_groups[objname].append(
((key, "_eventindex"), self._create_eventindex_form(ak_form, key))
)
has_eventindex[objname] = True
# zip the forms
contents = {}
for objname, keys_and_form in zip_groups.items():
try:
contents[objname] = zip_forms(
{sub_key: form for (key, sub_key), form in keys_and_form},
objname,
self.mixins.get(objname, None),
bypass=True,
)
content = contents[objname]["content"]
content["parameters"] = dict(
content.get("parameters", {}), collection_name=objname
)
except NotImplementedError:
warnings.warn(f"Can't zip collection {objname}")
return contents
@staticmethod
def _create_eventindex_form(base_form, key):
form = copy.deepcopy(base_form)
form["content"] = {
"class": "NumpyArray",
"parameters": {},
"form_key": quote(f"{key},!load,!eventindex,!content"),
"itemsize": 8,
"primitive": "int64",
}
return form
@property
def behavior(self):
"""Behaviors necessary to implement this schema"""
from coffea.nanoevents.methods import physlite
return physlite.behavior
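# Hedged usage sketch (not part of the original file): a PHYSLITE file would
# typically be opened through coffea's NanoEventsFactory with this schema.
# The input file name and tree name below are placeholders for illustration.
if __name__ == "__main__":
    from coffea.nanoevents import NanoEventsFactory
    events = NanoEventsFactory.from_root(
        "DAOD_PHYSLITE.example.root",  # placeholder input file
        treepath="CollectionTree",  # assumed PHYSLITE event tree name
        schemaclass=PHYSLITESchema,
    ).events()
    # Collections such as Electrons exist if the corresponding Analysis*
    # branches are present in the input file.
    print(events.Electrons.fields)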
|
the-stack_0_11527 | from const import GAME_COUNT
from game import game, TACTIC_LIST, game_process
from itertools import combinations
if __name__ == "__main__":
t = TACTIC_LIST
s = {i:0 for i in t.keys()}
for i, j in combinations(t.keys(), r=2):
x, y = game_process(t[i], t[j], GAME_COUNT)
print(f'{i} vs {j}: +{x}, +{y}')
s[i] += x
s[j] += y
print("----------result----------")
print(sorted(s.items(), key=lambda x:(-x[1])))
|
the-stack_0_11528 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 15:07:20 2017
@author: spxrxs
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import astropy.io.fits as fits
import matplotlib.cm as cm
import os
import matplotlib.ticker as ticker
from astropy.wcs import WCS
import matplotlib.colors
from reproject import reproject_interp
from astroscrappy import detect_cosmics
#getting list of every file in the directory
files = os.listdir('/home/user/spxrxs/SupernovaImaging/lcogtdata-20170802-80')
print(files)
#loading the data for plotting
data = np.loadtxt('sn2017ahndata.csv', delimiter = ',', dtype = object)
#opening the FITS file which all images are aligned to
hdu1 = fits.open('/home/user/spxrxs/SupernovaImaging/lcogtdata-20170802-80/' + 'coj0m403-kb98-20170302-0140-e91.fits.fz')[1]
#loops through every file in the folder
for i in range(len(files)):
#opening the file for plotting
hdu2 = fits.open('/home/user/spxrxs/SupernovaImaging/lcogtdata-20170802-80/' + files[i])[1]
thing, hdu2.data = detect_cosmics(hdu2.data, readnoise=20., gain=1.4, sigclip=5., sigfrac=.5, objlim=6.)
times = np.zeros(np.shape(data)[0])
mags = np.zeros(np.shape(data)[0])
dmags = np.zeros(np.shape(data)[0])
k = 0
for j in range(np.shape(data)[0]):
if hdu2.header['FILTER'] == data[j,1]:
if hdu2.header['MJD-OBS'] >= float(data[j,0]):
times[k] = float(data[j,0])
mags[k] = float(data[j,4])
dmags[k] = float(data[j,3])
k +=1
j +=1
times = times[:k]
mags = mags[:k]
dmags = dmags[:k]
array, footprint = reproject_interp(hdu2, hdu1.header)
plt.figure()
ax1 = plt.subplot2grid((3,3), (0,0), rowspan = 2, colspan = 3)
#ax1 = plt.subplot(2,1,1, projection = WCS(hdu1.header))
normalised = np.clip(array, np.nanpercentile(array, 50), np.nanpercentile(array, 99.5)) / np.nanpercentile(array, 40)
# normalised =array /np.nanpercentile(array, 25)
# sigma = np.sqrt(np.var(normalised))
# final_data = np.clip(normalised - np.nanpercentile(normalised, 25), 1,4)
ax1.imshow(np.log(normalised)[200:800,400:1200], norm =matplotlib.colors.Normalize() , cmap = cm.bone )
ax1.spines['right'].set_color('none')
ax1.spines['left'].set_color('none')
ax1.yaxis.set_major_locator(ticker.NullLocator())
ax1.xaxis.set_major_locator(ticker.NullLocator())
#ax1.coords.grid()
#ax1.coords['ra'].set_axislabel('Right Ascension')
#ax1.coords['dec'].set_axislabel('Declination')
#ax1.set_title(hdu2.header['FILTER']+ ' ' + str(hdu2.header['MJD-OBS']))
ax2 = plt.subplot2grid((3,3), (2,0), rowspan = 1, colspan = 3)
plt.errorbar(times -57790, mags, yerr = dmags, fmt = 'o', color = 'red')
plt.gca().invert_yaxis()
plt.ylim([21,12])
plt.xlim([0, 100])
plt.xlabel('Time (Days)')
plt.ylabel('Magnitude')
plt.tight_layout()
#plt.show()
i +=1
print(i)
    plt.savefig(hdu2.header['FILTER'] + str(hdu2.header['MJD-OBS']) + 'final' + '.png')
|
the-stack_0_11530 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import struct
from datetime import datetime
from Session import Session
from IoTDBConstants import *
from SessionDataSet import SessionDataSet
from thrift.protocol import TBinaryProtocol, TCompactProtocol
from thrift.transport import TSocket, TTransport
import csv
class Importer:
def __init__(self, session: Session):
self.session = session
def align_all_series(self, file, time_format='%Y-%m-%dT%H:%M:%S.%f%z', sg=None):
self.session.open(False)
try:
csvFile = open(file, "r")
reader = csv.reader(csvFile)
deviceID_lst = []
measurement_lst = []
for line in reader:
num_of_series = len(line) - 1
if reader.line_num == 1:
for item in line:
if item != 'Time':
deviceID_lst.append('.'.join(item.split('.')[:-1]))
measurement_lst.append(item.split('.')[-1])
else:
time = self.__time_to_timestamp(line[0], time_format)
for i in range(num_of_series):
if line[i + 1] not in ('', ' ', None, "null", "Null"):
if sg:
deviceID = sg + "." + deviceID_lst[i]
else:
deviceID = deviceID_lst[i]
self.session.insert_str_record(deviceID, time,
[measurement_lst[i]],
[line[i + 1]])
csvFile.close()
except Exception:
print("the csv format is incorrect")
self.session.close()
def align_by_device(self, file, time_format='%Y-%m-%dT%H:%M:%S.%f%z', sg=None):
self.session.open(False)
try:
csvFile = open(file, "r")
reader = csv.reader(csvFile)
measurement_lst = []
for line in reader:
num_of_series = len(line) - 2
if reader.line_num == 1:
for item in line:
if item != 'Time' and item != 'Device':
measurement_lst.append(item)
else:
time = self.__time_to_timestamp(line[0], time_format)
if sg:
deviceID = sg + "." + line[1]
else:
deviceID = line[1]
for i in range(num_of_series):
if line[i + 2] not in ('', ' ', None, "null", "Null"):
self.session.insert_str_record(deviceID, time,
[measurement_lst[i]],
[line[i + 2]])
csvFile.close()
except Exception:
print("the csv format is incorrect")
self.session.close()
@staticmethod
def __time_to_timestamp(str_time: str, time_format: str):
"""str_time: the string representation of date and time with timezone
at the end.
e.g. '2012-11-01T04:16:13-04:00'
time_format: the time format written with format tokens and included
the time zone at the end
e.g. '%Y-%m-%dT%H:%M:%S%z'
"""
try:
return int(str_time)
        except (TypeError, ValueError):  # non-numeric strings raise ValueError
time = datetime.strptime(''.join(str_time.rsplit(':', 1)), time_format)
timestamp = int(datetime.timestamp(time))
return timestamp
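# Illustrative usage sketch (not part of the original file). Host, port,
# credentials, csv path and storage group are placeholders; the align_*
# methods above open and close the session themselves.
if __name__ == '__main__':
    session = Session('127.0.0.1', 6667, 'root', 'root')
    importer = Importer(session)
    importer.align_all_series('data.csv', sg='root.sg1')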
|
the-stack_0_11531 | import json
import re
from rasa_nlu.model import Interpreter
# Custom Components
class SemesterExtractor:
@staticmethod
def process(text):
words = text.split(" ")
ordinal_values = {"first": 1, "second": 2, "third": 3, "fourth": 4, "fifth": 5, "sixth": 6, "seventh": 7, "eigth": 8}
semester = None
for word in words:
pattern = re.compile(r"\d+(st|nd|rd|th)")
if pattern.search(word):
semester = int(word[:-2])
for word in words:
word = word.lower()
pattern = re.compile(r"(first|second|third|fourth|fifth|sixth|seventh|eigth)")
if pattern.search(word):
semester = ordinal_values[word]
if semester != None:
data = [{'entity': 'semester', 'value': semester}]
return data
return semester
# End of Custom Components
class Data:
def __init__(self, text, data):
self.text = text
self.data = data
def __repr__(self):
return str(self.data)
def get_intent(self):
if self.text == "Get Started":
return "start"
else:
return self.data['intent']['name']
def get_confidence(self):
return self.data['intent']['confidence']
def get_entities(self):
semester_data = SemesterExtractor.process(self.text)
if semester_data != None:
self.data['entities'] += semester_data
return dict(map((lambda x : (x['entity'], x['value'])), self.data['entities']))
class Engine:
def __init__(self, models_path = "./models/vardhamanbot/nlu"):
self.interpreter = Interpreter.load(models_path)
def parse(self, message):
message = message.strip(" \n\t\r.")
return Data(message, self.interpreter.parse(message)) |
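# Illustrative usage (not part of the original file): requires a trained Rasa
# NLU model at the default models_path for Engine() to load.
if __name__ == "__main__":
    engine = Engine()
    parsed = engine.parse("What are the 3rd semester subjects?")
    print(parsed.get_intent(), parsed.get_confidence(), parsed.get_entities())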
the-stack_0_11532 | from test import support
import time
import unittest
import locale
import sysconfig
import sys
import platform
try:
import threading
except ImportError:
threading = None
# Max year is only limited by the size of C int.
SIZEOF_INT = sysconfig.get_config_var('SIZEOF_INT') or 4
TIME_MAXYEAR = (1 << 8 * SIZEOF_INT - 1) - 1
TIME_MINYEAR = -TIME_MAXYEAR - 1
class TimeTestCase(unittest.TestCase):
def setUp(self):
self.t = time.time()
def test_data_attributes(self):
time.altzone
time.daylight
time.timezone
time.tzname
def test_time(self):
time.time()
info = time.get_clock_info('time')
self.assertFalse(info.monotonic)
self.assertTrue(info.adjustable)
def test_clock(self):
time.clock()
info = time.get_clock_info('clock')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
def test_clock_realtime(self):
time.clock_gettime(time.CLOCK_REALTIME)
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
@unittest.skipUnless(hasattr(time, 'CLOCK_MONOTONIC'),
'need time.CLOCK_MONOTONIC')
def test_clock_monotonic(self):
a = time.clock_gettime(time.CLOCK_MONOTONIC)
b = time.clock_gettime(time.CLOCK_MONOTONIC)
self.assertLessEqual(a, b)
@unittest.skipUnless(hasattr(time, 'clock_getres'),
'need time.clock_getres()')
def test_clock_getres(self):
res = time.clock_getres(time.CLOCK_REALTIME)
self.assertGreater(res, 0.0)
self.assertLessEqual(res, 1.0)
@unittest.skipUnless(hasattr(time, 'clock_settime'),
'need time.clock_settime()')
def test_clock_settime(self):
t = time.clock_gettime(time.CLOCK_REALTIME)
try:
time.clock_settime(time.CLOCK_REALTIME, t)
except PermissionError:
pass
if hasattr(time, 'CLOCK_MONOTONIC'):
self.assertRaises(OSError,
time.clock_settime, time.CLOCK_MONOTONIC, 0)
def test_conversions(self):
self.assertEqual(time.ctime(self.t),
time.asctime(time.localtime(self.t)))
self.assertEqual(int(time.mktime(time.localtime(self.t))),
int(self.t))
def test_sleep(self):
self.assertRaises(ValueError, time.sleep, -2)
self.assertRaises(ValueError, time.sleep, -1)
time.sleep(1.2)
def test_strftime(self):
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = ' %' + directive
try:
time.strftime(format, tt)
except ValueError:
self.fail('conversion specifier: %r failed.' % format)
        # Issue #10762: Guard against invalid/non-supported format string
        # so that Python doesn't crash (Windows crashes when the format string
        # input to [w]strftime is not kosher).
if sys.platform.startswith('win'):
with self.assertRaises(ValueError):
time.strftime('%f')
def _bounds_checking(self, func):
# Make sure that strftime() checks the bounds of the various parts
# of the time tuple (0 is valid for *all* values).
# The year field is tested by other test cases above
# Check month [1, 12] + zero support
func((1900, 0, 1, 0, 0, 0, 0, 1, -1))
func((1900, 12, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, -1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 13, 1, 0, 0, 0, 0, 1, -1))
# Check day of month [1, 31] + zero support
func((1900, 1, 0, 0, 0, 0, 0, 1, -1))
func((1900, 1, 31, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, -1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 32, 0, 0, 0, 0, 1, -1))
# Check hour [0, 23]
func((1900, 1, 1, 23, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, -1, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 24, 0, 0, 0, 1, -1))
# Check minute [0, 59]
func((1900, 1, 1, 0, 59, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, -1, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 60, 0, 0, 1, -1))
# Check second [0, 61]
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, -1, 0, 1, -1))
# C99 only requires allowing for one leap second, but Python's docs say
# allow two leap seconds (0..61)
func((1900, 1, 1, 0, 0, 60, 0, 1, -1))
func((1900, 1, 1, 0, 0, 61, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 62, 0, 1, -1))
# No check for upper-bound day of week;
# value forced into range by a ``% 7`` calculation.
# Start check at -2 since gettmarg() increments value before taking
# modulo.
self.assertEqual(func((1900, 1, 1, 0, 0, 0, -1, 1, -1)),
func((1900, 1, 1, 0, 0, 0, +6, 1, -1)))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, -2, 1, -1))
# Check day of the year [1, 366] + zero support
func((1900, 1, 1, 0, 0, 0, 0, 0, -1))
func((1900, 1, 1, 0, 0, 0, 0, 366, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, -1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, 367, -1))
def test_strftime_bounding_check(self):
self._bounds_checking(lambda tup: time.strftime('', tup))
def test_default_values_for_zero(self):
# Make sure that using all zeros uses the proper default
# values. No test for daylight savings since strftime() does
# not change output based on its value and no test for year
# because systems vary in their support for year 0.
expected = "2000 01 01 00 00 00 1 001"
with support.check_warnings():
result = time.strftime("%Y %m %d %H %M %S %w %j", (2000,)+(0,)*8)
self.assertEqual(expected, result)
def test_strptime(self):
# Should be able to go round-trip from strftime to strptime without
# raising an exception.
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = '%' + directive
strf_output = time.strftime(format, tt)
try:
time.strptime(strf_output, format)
except ValueError:
self.fail("conversion specifier %r failed with '%s' input." %
(format, strf_output))
def test_strptime_bytes(self):
# Make sure only strings are accepted as arguments to strptime.
self.assertRaises(TypeError, time.strptime, b'2009', "%Y")
self.assertRaises(TypeError, time.strptime, '2009', b'%Y')
def test_strptime_exception_context(self):
# check that this doesn't chain exceptions needlessly (see #17572)
with self.assertRaises(ValueError) as e:
time.strptime('', '%D')
self.assertIs(e.exception.__suppress_context__, True)
# additional check for IndexError branch (issue #19545)
with self.assertRaises(ValueError) as e:
time.strptime('19', '%Y %')
self.assertIs(e.exception.__suppress_context__, True)
def test_asctime(self):
time.asctime(time.gmtime(self.t))
# Max year is only limited by the size of C int.
for bigyear in TIME_MAXYEAR, TIME_MINYEAR:
asc = time.asctime((bigyear, 6, 1) + (0,) * 6)
self.assertEqual(asc[-len(str(bigyear)):], str(bigyear))
self.assertRaises(OverflowError, time.asctime,
(TIME_MAXYEAR + 1,) + (0,) * 8)
self.assertRaises(OverflowError, time.asctime,
(TIME_MINYEAR - 1,) + (0,) * 8)
self.assertRaises(TypeError, time.asctime, 0)
self.assertRaises(TypeError, time.asctime, ())
self.assertRaises(TypeError, time.asctime, (0,) * 10)
def test_asctime_bounding_check(self):
self._bounds_checking(time.asctime)
def test_ctime(self):
t = time.mktime((1973, 9, 16, 1, 3, 52, 0, 0, -1))
self.assertEqual(time.ctime(t), 'Sun Sep 16 01:03:52 1973')
t = time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, -1))
self.assertEqual(time.ctime(t), 'Sat Jan 1 00:00:00 2000')
for year in [-100, 100, 1000, 2000, 10000]:
try:
testval = time.mktime((year, 1, 10) + (0,)*6)
except (ValueError, OverflowError):
# If mktime fails, ctime will fail too. This may happen
# on some platforms.
pass
else:
self.assertEqual(time.ctime(testval)[20:], str(year))
@unittest.skipUnless(hasattr(time, "tzset"),
"time module has no attribute tzset")
def test_tzset(self):
from os import environ
# Epoch time of midnight Dec 25th 2002. Never DST in northern
# hemisphere.
xmas2002 = 1040774400.0
# These formats are correct for 2002, and possibly future years
# This format is the 'standard' as documented at:
# http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html
# They are also documented in the tzset(3) man page on most Unix
# systems.
eastern = 'EST+05EDT,M4.1.0,M10.5.0'
victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
utc='UTC+0'
org_TZ = environ.get('TZ',None)
try:
# Make sure we can switch to UTC time and results are correct
# Note that unknown timezones default to UTC.
# Note that altzone is undefined in UTC, as there is no DST
environ['TZ'] = eastern
time.tzset()
environ['TZ'] = utc
time.tzset()
self.assertEqual(
time.gmtime(xmas2002), time.localtime(xmas2002)
)
self.assertEqual(time.daylight, 0)
self.assertEqual(time.timezone, 0)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
# Make sure we can switch to US/Eastern
environ['TZ'] = eastern
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertEqual(time.tzname, ('EST', 'EDT'))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, 18000)
self.assertEqual(time.altzone, 14400)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
self.assertEqual(len(time.tzname), 2)
# Now go to the southern hemisphere.
environ['TZ'] = victoria
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
# Issue #11886: Australian Eastern Standard Time (UTC+10) is called
# "EST" (as Eastern Standard Time, UTC-5) instead of "AEST"
# (non-DST timezone), and "EDT" instead of "AEDT" (DST timezone),
# on some operating systems (e.g. FreeBSD), which is wrong. See for
# example this bug:
# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=93810
            self.assertIn(time.tzname[0], ('AEST', 'EST'), time.tzname[0])
self.assertTrue(time.tzname[1] in ('AEDT', 'EDT'), str(time.tzname[1]))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, -36000)
self.assertEqual(time.altzone, -39600)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 1)
finally:
# Repair TZ environment variable in case any other tests
# rely on it.
if org_TZ is not None:
environ['TZ'] = org_TZ
elif 'TZ' in environ:
del environ['TZ']
time.tzset()
def test_insane_timestamps(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for func in time.ctime, time.gmtime, time.localtime:
for unreasonable in -1e200, 1e200:
self.assertRaises(OverflowError, func, unreasonable)
def test_ctime_without_arg(self):
# Not sure how to check the values, since the clock could tick
# at any time. Make sure these are at least accepted and
# don't raise errors.
time.ctime()
time.ctime(None)
def test_gmtime_without_arg(self):
gt0 = time.gmtime()
gt1 = time.gmtime(None)
t0 = time.mktime(gt0)
t1 = time.mktime(gt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_localtime_without_arg(self):
lt0 = time.localtime()
lt1 = time.localtime(None)
t0 = time.mktime(lt0)
t1 = time.mktime(lt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_mktime(self):
# Issue #1726687
for t in (-2, -1, 0, 1):
try:
tt = time.localtime(t)
except (OverflowError, OSError):
pass
else:
self.assertEqual(time.mktime(tt), t)
# Issue #13309: passing extreme values to mktime() or localtime()
# borks the glibc's internal timezone data.
@unittest.skipUnless(platform.libc_ver()[0] != 'glibc',
"disabled because of a bug in glibc. Issue #13309")
def test_mktime_error(self):
# It may not be possible to reliably make mktime return error
        # on all platforms. This will make sure that no exception other
        # than OverflowError is raised for an extreme value.
tt = time.gmtime(self.t)
tzname = time.strftime('%Z', tt)
self.assertNotEqual(tzname, 'LMT')
try:
time.mktime((-1, 1, 1, 0, 0, 0, -1, -1, -1))
except OverflowError:
pass
self.assertEqual(time.strftime('%Z', tt), tzname)
@unittest.skipUnless(hasattr(time, 'monotonic'),
'need time.monotonic')
def test_monotonic(self):
t1 = time.monotonic()
time.sleep(0.5)
t2 = time.monotonic()
dt = t2 - t1
self.assertGreater(t2, t1)
# Issue #20101: On some Windows machines, dt may be slightly low
self.assertTrue(0.45 <= dt <= 1.0, dt)
info = time.get_clock_info('monotonic')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
def test_perf_counter(self):
time.perf_counter()
def test_process_time(self):
        # process_time() should not include time spent during a sleep
start = time.process_time()
time.sleep(0.100)
stop = time.process_time()
# use 20 ms because process_time() has usually a resolution of 15 ms
# on Windows
self.assertLess(stop - start, 0.020)
info = time.get_clock_info('process_time')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
@unittest.skipUnless(hasattr(time, 'monotonic'),
'need time.monotonic')
@unittest.skipUnless(hasattr(time, 'clock_settime'),
'need time.clock_settime')
def test_monotonic_settime(self):
t1 = time.monotonic()
realtime = time.clock_gettime(time.CLOCK_REALTIME)
# jump backward with an offset of 1 hour
try:
time.clock_settime(time.CLOCK_REALTIME, realtime - 3600)
except PermissionError as err:
self.skipTest(err)
t2 = time.monotonic()
time.clock_settime(time.CLOCK_REALTIME, realtime)
# monotonic must not be affected by system clock updates
self.assertGreaterEqual(t2, t1)
def test_localtime_failure(self):
# Issue #13847: check for localtime() failure
invalid_time_t = None
for time_t in (-1, 2**30, 2**33, 2**60):
try:
time.localtime(time_t)
except OverflowError:
self.skipTest("need 64-bit time_t")
except OSError:
invalid_time_t = time_t
break
if invalid_time_t is None:
self.skipTest("unable to find an invalid time_t value")
self.assertRaises(OSError, time.localtime, invalid_time_t)
self.assertRaises(OSError, time.ctime, invalid_time_t)
def test_get_clock_info(self):
clocks = ['clock', 'perf_counter', 'process_time', 'time']
if hasattr(time, 'monotonic'):
clocks.append('monotonic')
for name in clocks:
info = time.get_clock_info(name)
#self.assertIsInstance(info, dict)
self.assertIsInstance(info.implementation, str)
self.assertNotEqual(info.implementation, '')
self.assertIsInstance(info.monotonic, bool)
self.assertIsInstance(info.resolution, float)
# 0.0 < resolution <= 1.0
self.assertGreater(info.resolution, 0.0)
self.assertLessEqual(info.resolution, 1.0)
self.assertIsInstance(info.adjustable, bool)
self.assertRaises(ValueError, time.get_clock_info, 'xxx')
class TestLocale(unittest.TestCase):
def setUp(self):
self.oldloc = locale.setlocale(locale.LC_ALL)
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.oldloc)
def test_bug_3061(self):
try:
tmp = locale.setlocale(locale.LC_ALL, "fr_FR")
except locale.Error:
self.skipTest('could not set locale.LC_ALL to fr_FR')
# This should not cause an exception
time.strftime("%B", (2009,2,1,0,0,0,0,0,0))
class _TestAsctimeYear:
_format = '%d'
def yearstr(self, y):
return time.asctime((y,) + (0,) * 8).split()[-1]
def test_large_year(self):
# Check that it doesn't crash for year > 9999
self.assertEqual(self.yearstr(12345), '12345')
self.assertEqual(self.yearstr(123456789), '123456789')
class _TestStrftimeYear:
# Issue 13305: For years < 1000, the value is not always
# padded to 4 digits across platforms. The C standard
# assumes year >= 1900, so it does not specify the number
# of digits.
if time.strftime('%Y', (1,) + (0,) * 8) == '0001':
_format = '%04d'
else:
_format = '%d'
def yearstr(self, y):
return time.strftime('%Y', (y,) + (0,) * 8)
def test_4dyear(self):
# Check that we can return the zero padded value.
if self._format == '%04d':
self.test_year('%04d')
else:
def year4d(y):
return time.strftime('%4Y', (y,) + (0,) * 8)
self.test_year('%04d', func=year4d)
def skip_if_not_supported(y):
msg = "strftime() is limited to [1; 9999] with Visual Studio"
# Check that it doesn't crash for year > 9999
try:
time.strftime('%Y', (y,) + (0,) * 8)
except ValueError:
cond = False
else:
cond = True
return unittest.skipUnless(cond, msg)
@skip_if_not_supported(10000)
def test_large_year(self):
return super().test_large_year()
@skip_if_not_supported(0)
def test_negative(self):
return super().test_negative()
del skip_if_not_supported
class _Test4dYear:
_format = '%d'
def test_year(self, fmt=None, func=None):
fmt = fmt or self._format
func = func or self.yearstr
self.assertEqual(func(1), fmt % 1)
self.assertEqual(func(68), fmt % 68)
self.assertEqual(func(69), fmt % 69)
self.assertEqual(func(99), fmt % 99)
self.assertEqual(func(999), fmt % 999)
self.assertEqual(func(9999), fmt % 9999)
def test_large_year(self):
self.assertEqual(self.yearstr(12345), '12345')
self.assertEqual(self.yearstr(123456789), '123456789')
self.assertEqual(self.yearstr(TIME_MAXYEAR), str(TIME_MAXYEAR))
self.assertRaises(OverflowError, self.yearstr, TIME_MAXYEAR + 1)
def test_negative(self):
self.assertEqual(self.yearstr(-1), self._format % -1)
self.assertEqual(self.yearstr(-1234), '-1234')
self.assertEqual(self.yearstr(-123456), '-123456')
self.assertEqual(self.yearstr(-123456789), str(-123456789))
self.assertEqual(self.yearstr(-1234567890), str(-1234567890))
self.assertEqual(self.yearstr(TIME_MINYEAR + 1900), str(TIME_MINYEAR + 1900))
# Issue #13312: it may return wrong value for year < TIME_MINYEAR + 1900
# Skip the value test, but check that no error is raised
self.yearstr(TIME_MINYEAR)
# self.assertEqual(self.yearstr(TIME_MINYEAR), str(TIME_MINYEAR))
self.assertRaises(OverflowError, self.yearstr, TIME_MINYEAR - 1)
class TestAsctime4dyear(_TestAsctimeYear, _Test4dYear, unittest.TestCase):
pass
class TestStrftime4dyear(_TestStrftimeYear, _Test4dYear, unittest.TestCase):
pass
class TestPytime(unittest.TestCase):
def setUp(self):
self.invalid_values = (
-(2 ** 100), 2 ** 100,
-(2.0 ** 100.0), 2.0 ** 100.0,
)
@support.cpython_only
def test_time_t(self):
from _testcapi import pytime_object_to_time_t
for obj, time_t in (
(0, 0),
(-1, -1),
(-1.0, -1),
(-1.9, -1),
(1.0, 1),
(1.9, 1),
):
self.assertEqual(pytime_object_to_time_t(obj), time_t)
for invalid in self.invalid_values:
self.assertRaises(OverflowError, pytime_object_to_time_t, invalid)
@support.cpython_only
def test_timeval(self):
from _testcapi import pytime_object_to_timeval
for obj, timeval in (
(0, (0, 0)),
(-1, (-1, 0)),
(-1.0, (-1, 0)),
(1e-6, (0, 1)),
(-1e-6, (-1, 999999)),
(-1.2, (-2, 800000)),
(1.1234560, (1, 123456)),
(1.1234569, (1, 123456)),
(-1.1234560, (-2, 876544)),
(-1.1234561, (-2, 876543)),
):
self.assertEqual(pytime_object_to_timeval(obj), timeval)
for invalid in self.invalid_values:
self.assertRaises(OverflowError, pytime_object_to_timeval, invalid)
@support.cpython_only
def test_timespec(self):
from _testcapi import pytime_object_to_timespec
for obj, timespec in (
(0, (0, 0)),
(-1, (-1, 0)),
(-1.0, (-1, 0)),
(1e-9, (0, 1)),
(-1e-9, (-1, 999999999)),
(-1.2, (-2, 800000000)),
(1.1234567890, (1, 123456789)),
(1.1234567899, (1, 123456789)),
(-1.1234567890, (-2, 876543211)),
(-1.1234567891, (-2, 876543210)),
):
self.assertEqual(pytime_object_to_timespec(obj), timespec)
for invalid in self.invalid_values:
self.assertRaises(OverflowError, pytime_object_to_timespec, invalid)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_localtime_timezone(self):
# Get the localtime and examine it for the offset and zone.
lt = time.localtime()
self.assertTrue(hasattr(lt, "tm_gmtoff"))
self.assertTrue(hasattr(lt, "tm_zone"))
# See if the offset and zone are similar to the module
# attributes.
if lt.tm_gmtoff is None:
self.assertTrue(not hasattr(time, "timezone"))
else:
self.assertEqual(lt.tm_gmtoff, -[time.timezone, time.altzone][lt.tm_isdst])
if lt.tm_zone is None:
self.assertTrue(not hasattr(time, "tzname"))
else:
self.assertEqual(lt.tm_zone, time.tzname[lt.tm_isdst])
# Try and make UNIX times from the localtime and a 9-tuple
# created from the localtime. Test to see that the times are
# the same.
t = time.mktime(lt); t9 = time.mktime(lt[:9])
self.assertEqual(t, t9)
# Make localtimes from the UNIX times and compare them to
# the original localtime, thus making a round trip.
new_lt = time.localtime(t); new_lt9 = time.localtime(t9)
self.assertEqual(new_lt, lt)
self.assertEqual(new_lt.tm_gmtoff, lt.tm_gmtoff)
self.assertEqual(new_lt.tm_zone, lt.tm_zone)
self.assertEqual(new_lt9, lt)
self.assertEqual(new_lt.tm_gmtoff, lt.tm_gmtoff)
self.assertEqual(new_lt9.tm_zone, lt.tm_zone)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_strptime_timezone(self):
t = time.strptime("UTC", "%Z")
self.assertEqual(t.tm_zone, 'UTC')
t = time.strptime("+0500", "%z")
self.assertEqual(t.tm_gmtoff, 5 * 3600)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_short_times(self):
import pickle
# Load a short time structure using pickle.
st = b"ctime\nstruct_time\np0\n((I2007\nI8\nI11\nI1\nI24\nI49\nI5\nI223\nI1\ntp1\n(dp2\ntp3\nRp4\n."
lt = pickle.loads(st)
self.assertIs(lt.tm_gmtoff, None)
self.assertIs(lt.tm_zone, None)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_11533 | """
Low-level serial communication for Trinamic TMCM-140-42-SE controller
(used internally for the Thorlabs MFC1)
"""
import serial, struct, time, collections
try:
# this is nicer because it provides deadlock debugging information
from acq4.util.Mutex import RecursiveMutex as RLock
except ImportError:
from threading import RLock
try:
from ..SerialDevice import SerialDevice, TimeoutError, DataError
except ValueError:
## relative imports not allowed when running from command prompt, so
## we adjust sys.path when running the script for testing
if __name__ == '__main__':
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from SerialDevice import SerialDevice, TimeoutError, DataError
def threadsafe(method):
# decorator for automatic mutex lock/unlock
def lockMutex(self, *args, **kwds):
with self.lock:
return method(self, *args, **kwds)
return lockMutex
COMMANDS = {
'rol': 2,
'ror': 1,
'mvp': 4,
'mst': 3,
'rfs': 13,
'sco': 30,
'cco': 32,
'gco': 31,
'sap': 5,
'gap': 6,
'stap': 7,
'rsap': 8,
'sgp': 9,
'ggp': 10,
'stgp': 11,
'rsgp': 12,
'sio': 14,
'gio': 15,
'calc': 19,
'comp': 20,
'jc': 21,
'ja': 22,
'csub': 23,
'rsub': 24,
'wait': 27,
'stop': 28,
'calcx': 33,
'aap': 34,
'agp': 35,
'aco': 39,
'sac': 29,
'stop_application': 128,
'run_application': 129,
'step_application': 130,
'reset_application': 131,
'start_download': 132,
'stop_download': 133,
'get_application_status': 135,
'get_firmware_version': 136,
'restore_factory_settings': 137,
}
PARAMETERS = { # negative values indicate read-only parameters
'target_position': 0,
'actual_position': 1,
'target_speed': 2,
'actual_speed': 3,
'maximum_speed': 4,
'maximum_acceleration': 5,
'maximum_current': 6,
'standby_current': 7,
'target_pos_reached': 8,
'ref_switch_status': 9,
'right_limit_switch_status': 10,
'left_limit_switch_status': 11,
'right_limit_switch_disable': 12,
'left_limit_switch_disable': 13,
'minimum_speed': -130,
'acceleration': -135,
'ramp_mode': 138,
'microstep_resolution': 140,
'soft_stop_flag': 149,
'ramp_divisor': 153,
'pulse_divisor': 154,
'referencing_mode': 193,
'referencing_search_speed': 194,
'referencing_switch_speed': 195,
'distance_end_switches': 196,
'mixed_decay_threshold': 203,
'freewheeling': 204,
'stall_detection_threshold': 205,
'actual_load_value': 206,
'driver_error_flags': -208,
'encoder_position': 209,
'encoder_prescaler': 210,
'fullstep_threshold': 211,
'maximum_encoder_deviation': 212,
'power_down_delay': 214,
'absolute_encoder_value': -215,
}
GLOBAL_PARAMETERS = {
'eeprom_magic': 64,
'baud_rate': 65,
'serial_address': 66,
'ascii_mode': 67,
'eeprom_lock': 73,
'auto_start_mode': 77,
'tmcl_code_protection': 81,
'coordinate_storage': 84,
'tmcl_application_status': 128,
'download_mode': 129,
'tmcl_program_counter': 130,
'tick_timer': 132,
'random_number': -133,
}
OPERATORS = {
'add': 0,
'sub': 1,
'mul': 2,
'div': 3,
'mod': 4,
'and': 5,
'or': 6,
'xor': 7,
'not': 8,
'load': 9,
'swap': 10,
}
CONDITIONS = {
'ze': 0,
'nz': 1,
'eq': 2,
'ne': 3,
'gt': 4,
'ge': 5,
'lt': 6,
'le': 7,
'eto': 8,
'eal': 9,
'esd': 12,
}
STATUS = {
1: "Wrong checksum",
2: "Invalid command",
3: "Wrong type",
4: "Invalid value",
5: "Configuration EEPROM locked",
6: "Command not available",
}
class TMCMError(Exception):
def __init__(self, status):
self.status = status
msg = STATUS[status]
        Exception.__init__(self, msg)
class TMCM140(SerialDevice):
def __init__(self, port, baudrate=9600, module_addr=1):
"""
port: serial COM port (eg. COM3 or /dev/ttyACM0)
baudrate: 9600 by default
module_addr: 1 by default
"""
self.lock = RLock(debug=True)
self.port = port
assert isinstance(module_addr, int)
assert module_addr > 0
self.module_addr = module_addr
self.module_str = chr(module_addr+64)
self._waiting_for_reply = False
SerialDevice.__init__(self, port=self.port, baudrate=baudrate)
@threadsafe
def command(self, cmd, type, motor, value):
"""Send a command to the controller and return the reply.
If an error is returned from the controller then raise an exception.
"""
self._send_cmd(cmd, type, motor, value)
return self._get_reply()
def rotate(self, velocity):
"""Begin rotating motor.
velocity: -2047 to +2047
negative values turn left; positive values turn right.
"""
assert isinstance(velocity, int)
assert -2047 <= velocity <= 2047
if velocity < 0:
direction = 'l'
velocity = -velocity
else:
direction = 'r'
self.command('ro'+direction, 0, 0, velocity)
def stop(self):
"""Stop the motor.
Note: does not stop currently running programs.
"""
self.command('mst', 0, 0, 0)
def move(self, pos, relative=False, velocity=None):
"""Rotate until reaching *pos*.
pos: The target position
relative: If True, then *pos* is interpreted as relative to the current
position
velocity: Optionally set the target velocity before moving
"""
assert isinstance(pos, int)
assert -2**32 <= pos < 2**32
if velocity is not None:
assert isinstance(velocity, int)
assert 0 <= velocity < 2048
raise NotImplementedError()
type = 1 if relative else 0
self.command('mvp', type, 0, pos)
def get_param(self, param):
pnum = abs(PARAMETERS[param])
return self.command('gap', pnum, 0, 0)[4]
def __getitem__(self, param):
return self.get_param(param)
def set_param(self, param, value, **kwds):
"""Set a parameter value.
        If value is 'accum' then the parameter is set from the accumulator
register.
"""
pnum = PARAMETERS[param]
if pnum < 0:
raise TypeError("Parameter %s is read-only." % param)
if pnum in (PARAMETERS['maximum_current'], PARAMETERS['standby_current']) and value > 100:
if kwds.get('force', False) is not True:
raise Exception("Refusing to set current > 100 (this can damage the motor). "
"To override, use force=True.")
if value == 'accum':
self.command('aap', pnum, 0, 0)
else:
self.command('sap', pnum, 0, value)
@threadsafe
def set_params(self, **kwds):
"""Set multiple parameters.
The driver is thread-locked until all parameters are set.
"""
for param, value in kwds.items():
self.set_param(param, value)
def __setitem__(self, param, value):
return self.set_param(param, value)
def get_global(self, param):
"""Return a global parameter or copy global to accumulator.
Use param='gpX' to refer to general-purpose variables.
"""
if param.startswith('gp'):
pnum = int(param[2:])
bank = 2
else:
pnum = abs(GLOBAL_PARAMETERS[param])
bank = 0
return self.command('ggp', pnum, bank, 0)[4]
def set_global(self, param, value):
if param.startswith('gp'):
pnum = int(param[2:])
bank = 2
else:
pnum = GLOBAL_PARAMETERS[param]
bank = 0
if pnum < 0:
raise TypeError("Parameter %s is read-only." % param)
if value == 'accum':
self.command('agp', pnum, bank, 0)
else:
self.command('sgp', pnum, bank, value)
def stop_program(self):
"""Stop the currently running TMCL program.
"""
self.command('stop_application', 0, 0, 0)
def start_program(self, address=None):
"""Start running TMCL program code from the given address (in bytes?),
or from the current address if None.
"""
if address is None:
self.command('run_application', 0, 0, 0)
else:
self.command('run_application', 1, 0, address)
def start_download(self, address=0):
"""Begin loading TMCL commands into EEPROM .
"""
self.command('start_download', 0, 0, address)
def stop_download(self):
"""Finish loading TMCL commands into EEPROM.
"""
self.command('stop_download', 0, 0, 0)
def write_program(self, address=0):
return ProgramManager(self, address)
def program_status(self):
"""Return current program status:
0=stop, 1=run, 2=step, 3=reset
"""
return self.command('get_application_status', 0, 0, 0)[4]
def calc(self, op, value):
opnum = OPERATORS[op]
if opnum > 9:
raise TypeError("Operator %s invalid for calc" % op)
self.command('calc', opnum, 0, value)
def calcx(self, op):
opnum = OPERATORS[op]
self.command('calcx', opnum, 0, 0)
def comp(self, val):
self.command('comp', 0, 0, val)
def jump(self, *args):
"""Program jump to *addr* (instruction index).
Usage:
jump(address)
jump(cond, address)
Where *cond* may be ze, nz, eq, ne, gt, ge, lt, le, eto, eal, or esd.
"""
if len(args) == 1:
assert isinstance(args[0], int)
self.command('ja', 0, 0, args[0])
else:
cnum = CONDITIONS[args[0]]
self.command('jc', cnum, 0, args[1])
def _send_cmd(self, cmd, type, motor, value):
"""Send a command to the controller.
"""
if self._waiting_for_reply:
raise Exception("Cannot send command; previous reply has not been "
"received yet.")
cmd_num = COMMANDS[cmd]
assert isinstance(type, int)
assert isinstance(motor, int)
# Try packing the value first as unsigned, then signed. (the overlapping
# integer ranges have identical bit representation, so there is no
# ambiguity)
try:
cmd = struct.pack('>BBBBI', self.module_addr, cmd_num, type, motor, value)
except struct.error:
cmd = struct.pack('>BBBBi', self.module_addr, cmd_num, type, motor, value)
chksum = sum(bytearray(cmd)) % 256
out = cmd + struct.pack('B', chksum)
self.write(out)
self._waiting_for_reply = True
def _get_reply(self):
"""Read and parse a reply from the controller.
Raise an exception if an error was reported.
"""
if not self._waiting_for_reply:
raise Exception("No reply expected.")
try:
d = self.read(9)
finally:
self._waiting_for_reply = False
d2 = self.readAll()
if len(d2) > 0:
raise Exception("Error: extra data while reading reply.")
parts = struct.unpack('>BBBBiB', d)
reply_addr, module_addr, status, cmd_num, value, chksum = parts
if chksum != sum(bytearray(d[:-1])) % 256:
raise Exception("Invalid checksum reading from controller.")
if status < 100:
raise TMCMError(status)
return parts
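# Wire-format notes (derived from _send_cmd/_get_reply above): every request is
# a 9-byte frame
#     [module_addr][command][type][motor][value: 4 bytes, big-endian][checksum]
# and every reply is
#     [reply_addr][module_addr][status][command][value: 4 bytes][checksum]
# where the checksum is the sum of the preceding 8 bytes modulo 256; any status
# below 100 is treated as an error and raises TMCMError.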
class ProgramManager(object):
def __init__(self, mcm, start=0):
self.mcm = mcm
self.start = start
self.count = 0
def __enter__(self):
self.mcm.lock.acquire()
self.mcm.start_download(self.start)
return self
def __exit__(self, *args):
# insert an extra stop to ensure the program can't leak
# into previously written code.
self.mcm.command('stop', 0, 0, 0)
self.mcm.stop_download()
self.mcm.lock.release()
def __getattr__(self, name):
self.count += 1
return getattr(self.mcm, name)
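# Example usage sketch (untested; the port name below is a placeholder --
# substitute the serial port your TMCM-140 is attached to):
#
#     dev = TMCM140('/dev/ttyUSB0', baudrate=9600, module_addr=1)
#     dev['maximum_speed'] = 800            # set_param() via __setitem__
#     dev.rotate(500)                       # spin right at velocity 500
#     time.sleep(1.0)
#     dev.stop()
#     # store a tiny TMCL program in EEPROM starting at address 0, then run it
#     with dev.write_program(0) as prog:    # ProgramManager wraps start/stop_download
#         prog.move(1000, relative=True)
#         prog.stop()
#     dev.start_program(0)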
|
the-stack_0_11534 | # Python 3 - Checks website availability
# checks connectivity against a test URL that is always online and logs the results to a text file.
from urllib.request import Request, urlopen
from urllib.error import URLError
from datetime import datetime
import time
class Url(object):
def __init__(self, url, nome):
self.url = url
self.sucesso = 0
self.erro = 0
self.nome = nome
        self.teste = False  # whether the last check passed
tempo = 120  # how often to run the checks, in seconds.
url0 = Url('http://www.google.com', 'teste')
url1 = Url('http://uol.com.br', 'Site 1')
url2 = Url('http://baixaki.com.br', 'Site 2')
urls = [url0, url1, url2]  # which URLs to test
while True:
for url in urls:
try:
response = urlopen(url.url)
if response.info():
url.teste = True
url.sucesso += 1
else:
url.teste = False
url.erro += 1
except URLError:
url.teste = False
url.erro += 1
#print(url.nome + ' - ' + url.teste)
        if url.nome == 'teste' and not url.teste:  # if the test does not pass, break
texto = '\nSem conexao local com a internet.'
arq = open('log-status.txt', 'a')
arq.write(texto)
arq.close()
print(texto)
break
        elif url.nome != 'teste':  # if it is not the test link, write the log
texto = url.nome + ' - Sucessos: '+ str(url.sucesso) + \
' - Erros: '+ str(url.erro) + ' - ' + str(datetime.now())+'\n'
arq = open('log-status.txt', 'a')
arq.write(texto)
arq.close()
print(texto)
time.sleep(1)
time.sleep(tempo)
|
the-stack_0_11536 | # coding=utf-8
from __future__ import print_function, unicode_literals
import numpy as np
import pandas as pd
import json
import requests
import argparse
data = pd.read_csv('/home/purvar/Downloads/location/t_sup_complaint.csv',
names=np.arange(27))
# text = data.iloc[:, 11]
NER_URL = 'http://api.bosonnlp.com/ner/analysis' # BosonNLP
parser = argparse.ArgumentParser()
parser.add_argument('--index',
type=int,
default=100,
help='Please input an index')
FLAGS, unparsed = parser.parse_known_args()
s = [ data.iloc[FLAGS.index, 11] ]  # the complaint text is in column 12 (index 11)
print(s)
# s = ['硚口区汉西三路香江家居对面常青国际小区,光头卤店铺24小时抽烟机噪音扰民,\
# 油烟扰民,区局已派第三方检查公司进行检测,投诉人等待测试结果的回复。多次来电,请重点处理。']
data = json.dumps(s)
headers = {'X-Token': 'LkwQR-rW.21981.qz7z9JKCk9A9'}
resp = requests.post(NER_URL, headers=headers, data=data.encode('utf-8'))
for item in resp.json():
for entity in item['entity']:
if entity[2] in ['location', 'org_name', 'company_name']:
print(''.join(item['word'][entity[0]:entity[1]]), entity[2])
# print(resp.text)
|
the-stack_0_11537 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Assemble function for converting a list of circuits into a qobj"""
import uuid
import copy
import logging
import warnings
from time import time
from typing import Union, List, Dict, Optional
from qiskit.circuit import QuantumCircuit, Qubit, Parameter
from qiskit.exceptions import QiskitError
from qiskit.pulse import LoConfig, Instruction
from qiskit.assembler.run_config import RunConfig
from qiskit.assembler import assemble_circuits, assemble_schedules
from qiskit.qobj import QobjHeader, Qobj
from qiskit.qobj.utils import MeasLevel, MeasReturnType
from qiskit.validation.jsonschema import SchemaValidationError
from qiskit.providers import BaseBackend
from qiskit.providers.backend import Backend
from qiskit.pulse.channels import PulseChannel
from qiskit.pulse import Schedule
logger = logging.getLogger(__name__)
def _log_assembly_time(start_time, end_time):
log_msg = "Total Assembly Time - %.5f (ms)" % ((end_time - start_time) * 1000)
logger.info(log_msg)
# TODO: parallelize over the experiments (serialize each separately, then add global header/config)
def assemble(experiments: Union[QuantumCircuit, List[QuantumCircuit], Schedule, List[Schedule]],
backend: Optional[Union[Backend, BaseBackend]] = None,
qobj_id: Optional[str] = None,
qobj_header: Optional[Union[QobjHeader, Dict]] = None,
shots: Optional[int] = None, memory: Optional[bool] = False,
max_credits: Optional[int] = None,
seed_simulator: Optional[int] = None,
qubit_lo_freq: Optional[List[int]] = None,
meas_lo_freq: Optional[List[int]] = None,
qubit_lo_range: Optional[List[int]] = None,
meas_lo_range: Optional[List[int]] = None,
schedule_los: Optional[Union[List[Union[Dict[PulseChannel, float], LoConfig]],
Union[Dict[PulseChannel, float], LoConfig]]] = None,
meas_level: Union[int, MeasLevel] = MeasLevel.CLASSIFIED,
meas_return: Union[str, MeasReturnType] = MeasReturnType.AVERAGE,
meas_map: Optional[List[List[Qubit]]] = None,
memory_slot_size: int = 100,
rep_time: Optional[int] = None,
rep_delay: Optional[float] = None,
parameter_binds: Optional[List[Dict[Parameter, float]]] = None,
parametric_pulses: Optional[List[str]] = None,
init_qubits: bool = True,
**run_config: Dict) -> Qobj:
"""Assemble a list of circuits or pulse schedules into a ``Qobj``.
This function serializes the payloads, which could be either circuits or schedules,
to create ``Qobj`` "experiments". It further annotates the experiment payload with
header and configurations.
Args:
experiments: Circuit(s) or pulse schedule(s) to execute
backend: If set, some runtime options are automatically grabbed from
``backend.configuration()`` and ``backend.defaults()``.
If any other option is explicitly set (e.g., ``rep_time``), it
will override the backend's.
If any other options is set in the run_config, it will
also override the backend's.
qobj_id: String identifier to annotate the ``Qobj``
qobj_header: User input that will be inserted in ``Qobj`` header, and will also be
copied to the corresponding Result header. Headers do not affect the run.
shots: Number of repetitions of each circuit, for sampling. Default: 1024
or ``max_shots`` from the backend configuration, whichever is smaller
memory: If ``True``, per-shot measurement bitstrings are returned as well
(provided the backend supports it). For OpenPulse jobs, only
measurement level 2 supports this option.
max_credits: Maximum credits to spend on job. Default: 10
seed_simulator: Random seed to control sampling, for when backend is a simulator
qubit_lo_freq: List of default qubit LO frequencies in Hz. Will be overridden by
``schedule_los`` if set.
meas_lo_freq: List of default measurement LO frequencies in Hz. Will be overridden
by ``schedule_los`` if set.
qubit_lo_range: List of drive LO ranges each of form ``[range_min, range_max]`` in Hz.
Used to validate the supplied qubit frequencies.
meas_lo_range: List of measurement LO ranges each of form ``[range_min, range_max]`` in Hz.
Used to validate the supplied qubit frequencies.
schedule_los: Experiment LO configurations, frequencies are given in Hz.
meas_level: Set the appropriate level of the measurement output for pulse experiments.
meas_return: Level of measurement data for the backend to return.
For ``meas_level`` 0 and 1:
* ``single`` returns information from every shot.
* ``avg`` returns average measurement output (averaged over number of shots).
meas_map: List of lists, containing qubits that must be measured together.
memory_slot_size: Size of each memory slot if the output is Level 0.
rep_time (int): Time per program execution in seconds. Must be from the list provided
by the backend (``backend.configuration().rep_times``). Defaults to the first entry.
rep_delay (float): Delay between programs in seconds. Only supported on certain
backends (if ``backend.configuration().dynamic_reprate_enabled=True``). If supported,
``rep_delay`` will be used instead of ``rep_time`` and must be from the range supplied
by the backend (``backend.configuration().rep_delay_range``). Default is given by
``backend.configuration().default_rep_delay``.
parameter_binds: List of Parameter bindings over which the set of experiments will be
executed. Each list element (bind) should be of the form
{Parameter1: value1, Parameter2: value2, ...}. All binds will be
executed across all experiments; e.g., if parameter_binds is a
length-n list, and there are m experiments, a total of m x n
experiments will be run (one for each experiment/bind pair).
parametric_pulses: A list of pulse shapes which are supported internally on the backend.
Example::
['gaussian', 'constant']
init_qubits: Whether to reset the qubits to the ground state for each shot.
Default: ``True``.
**run_config: Extra arguments used to configure the run (e.g., for Aer configurable
backends). Refer to the backend documentation for details on these
arguments.
Returns:
A ``Qobj`` that can be run on a backend. Depending on the type of input,
this will be either a ``QasmQobj`` or a ``PulseQobj``.
Raises:
QiskitError: if the input cannot be interpreted as either circuits or schedules
"""
start_time = time()
experiments = experiments if isinstance(experiments, list) else [experiments]
qobj_id, qobj_header, run_config_common_dict = _parse_common_args(backend, qobj_id, qobj_header,
shots, memory, max_credits,
seed_simulator, init_qubits,
rep_delay, **run_config)
# assemble either circuits or schedules
if all(isinstance(exp, QuantumCircuit) for exp in experiments):
run_config = _parse_circuit_args(parameter_binds, backend, meas_level,
meas_return, parametric_pulses,
**run_config_common_dict)
# If circuits are parameterized, bind parameters and remove from run_config
bound_experiments, run_config = _expand_parameters(circuits=experiments,
run_config=run_config)
end_time = time()
_log_assembly_time(start_time, end_time)
return assemble_circuits(circuits=bound_experiments, qobj_id=qobj_id,
qobj_header=qobj_header, run_config=run_config)
elif all(isinstance(exp, (Schedule, Instruction)) for exp in experiments):
run_config = _parse_pulse_args(backend, qubit_lo_freq, meas_lo_freq,
qubit_lo_range, meas_lo_range,
schedule_los, meas_level, meas_return,
meas_map, memory_slot_size,
rep_time, parametric_pulses,
**run_config_common_dict)
end_time = time()
_log_assembly_time(start_time, end_time)
return assemble_schedules(schedules=experiments, qobj_id=qobj_id,
qobj_header=qobj_header, run_config=run_config)
else:
raise QiskitError("bad input to assemble() function; "
"must be either circuits or schedules")
# TODO: rework to return a list of RunConfigs (one for each experiments), and a global one
def _parse_common_args(backend, qobj_id, qobj_header, shots,
memory, max_credits, seed_simulator,
init_qubits, rep_delay, **run_config):
"""Resolve the various types of args allowed to the assemble() function through
duck typing, overriding args, etc. Refer to the assemble() docstring for details on
what types of inputs are allowed.
Here the args are resolved by converting them to standard instances, and prioritizing
them in case a run option is passed through multiple args (explicitly setting an arg
has more priority than the arg set by backend)
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
QiskitError: if the memory arg is True and the backend does not support
memory. Also if shots exceeds max_shots for the configured backend. Also if
the type of shots is not int.
"""
# grab relevant info from backend if it exists
backend_config = None
if backend:
backend_config = backend.configuration()
# check for memory flag applied to backend that does not support memory
if memory and not backend_config.memory:
raise QiskitError("memory not supported by backend {}"
.format(backend_config.backend_name))
# an identifier for the Qobj
qobj_id = qobj_id or str(uuid.uuid4())
# The header that goes at the top of the Qobj (and later Result)
# we process it as dict, then write entries that are not None to a QobjHeader object
qobj_header = qobj_header or {}
if isinstance(qobj_header, QobjHeader):
qobj_header = qobj_header.to_dict()
backend_name = getattr(backend_config, 'backend_name', None)
backend_version = getattr(backend_config, 'backend_version', None)
qobj_header = {**dict(backend_name=backend_name, backend_version=backend_version),
**qobj_header}
qobj_header = QobjHeader(**{k: v for k, v in qobj_header.items() if v is not None})
max_shots = getattr(backend_config, 'max_shots', None)
if shots is None:
if max_shots:
shots = min(1024, max_shots)
else:
shots = 1024
elif not isinstance(shots, int):
raise QiskitError(
"Argument 'shots' should be of type 'int'")
elif max_shots and max_shots < shots:
raise QiskitError(
'Number of shots specified: %s exceeds max_shots property of the '
'backend: %s.' % (shots, max_shots))
dynamic_reprate_enabled = getattr(backend_config, 'dynamic_reprate_enabled', False)
if dynamic_reprate_enabled:
default_rep_delay = getattr(backend_config, "default_rep_delay", None)
rep_delay_range = getattr(backend_config, "rep_delay_range", None)
rep_delay = _parse_rep_delay(rep_delay, default_rep_delay, rep_delay_range)
else:
if rep_delay is not None:
rep_delay = None
warnings.warn(
"Dynamic rep rates not supported on this backend, cannot use rep_delay.",
RuntimeWarning,
)
# create run configuration and populate
run_config_dict = dict(shots=shots,
memory=memory,
max_credits=max_credits,
seed_simulator=seed_simulator,
init_qubits=init_qubits,
rep_delay=rep_delay,
**run_config)
return qobj_id, qobj_header, run_config_dict
def _parse_pulse_args(backend, qubit_lo_freq, meas_lo_freq, qubit_lo_range,
meas_lo_range, schedule_los, meas_level,
meas_return, meas_map,
memory_slot_size,
rep_time, parametric_pulses,
**run_config):
"""Build a pulse RunConfig replacing unset arguments with defaults derived from the `backend`.
See `assemble` for more information on the required arguments.
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
SchemaValidationError: If the given meas_level is not allowed for the given `backend`.
"""
# grab relevant info from backend if it exists
backend_config = None
backend_default = None
if backend:
backend_default = backend.defaults()
backend_config = backend.configuration()
if meas_level not in getattr(backend_config, 'meas_levels', [MeasLevel.CLASSIFIED]):
raise SchemaValidationError(
('meas_level = {} not supported for backend {}, only {} is supported'
).format(meas_level, backend_config.backend_name, backend_config.meas_levels)
)
meas_map = meas_map or getattr(backend_config, 'meas_map', None)
schedule_los = schedule_los or []
if isinstance(schedule_los, (LoConfig, dict)):
schedule_los = [schedule_los]
# Convert to LoConfig if LO configuration supplied as dictionary
schedule_los = [lo_config if isinstance(lo_config, LoConfig) else LoConfig(lo_config)
for lo_config in schedule_los]
if not qubit_lo_freq and hasattr(backend_default, 'qubit_freq_est'):
qubit_lo_freq = backend_default.qubit_freq_est
if not meas_lo_freq and hasattr(backend_default, 'meas_freq_est'):
meas_lo_freq = backend_default.meas_freq_est
qubit_lo_range = qubit_lo_range or getattr(backend_config, 'qubit_lo_range', None)
meas_lo_range = meas_lo_range or getattr(backend_config, 'meas_lo_range', None)
dynamic_reprate_enabled = getattr(backend_config, 'dynamic_reprate_enabled', False)
rep_time = rep_time or getattr(backend_config, 'rep_times', None)
if rep_time:
if dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates are supported on this backend. 'rep_delay' will be "
"used instead of 'rep_time'.", RuntimeWarning)
if isinstance(rep_time, list):
rep_time = rep_time[0]
rep_time = int(rep_time * 1e6) # convert sec to μs
parametric_pulses = parametric_pulses or getattr(backend_config, 'parametric_pulses', [])
# create run configuration and populate
run_config_dict = dict(qubit_lo_freq=qubit_lo_freq,
meas_lo_freq=meas_lo_freq,
qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range,
schedule_los=schedule_los,
meas_level=meas_level,
meas_return=meas_return,
meas_map=meas_map,
memory_slot_size=memory_slot_size,
rep_time=rep_time,
parametric_pulses=parametric_pulses,
**run_config)
run_config = RunConfig(**{k: v for k, v in run_config_dict.items() if v is not None})
return run_config
def _parse_circuit_args(parameter_binds, backend, meas_level, meas_return,
parametric_pulses, **run_config):
"""Build a circuit RunConfig replacing unset arguments with defaults derived from the `backend`.
See `assemble` for more information on the required arguments.
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
"""
parameter_binds = parameter_binds or []
# create run configuration and populate
run_config_dict = dict(parameter_binds=parameter_binds, **run_config)
if backend:
run_config_dict['parametric_pulses'] = getattr(backend.configuration(), 'parametric_pulses',
[])
if parametric_pulses:
run_config_dict['parametric_pulses'] = parametric_pulses
if meas_level:
run_config_dict['meas_level'] = meas_level
# only enable `meas_return` if `meas_level` isn't classified
if meas_level != MeasLevel.CLASSIFIED:
run_config_dict['meas_return'] = meas_return
run_config = RunConfig(
**{k: v
for k, v in run_config_dict.items() if v is not None})
return run_config
def _parse_rep_delay(rep_delay: float,
default_rep_delay: float,
rep_delay_range: List[float]) -> float:
"""Parse and set ``rep_delay`` parameter in runtime config.
Args:
rep_delay: Initial rep delay.
default_rep_delay: Backend default rep delay.
rep_delay_range: Backend list defining allowable range of rep delays.
Raises:
SchemaValidationError: If rep_delay is not in the backend rep_delay_range.
Returns:
float: Modified rep delay after parsing.
"""
if rep_delay is None:
rep_delay = default_rep_delay
if rep_delay is not None:
# check that rep_delay is in rep_delay_range
if rep_delay_range is not None and isinstance(rep_delay_range, list):
if len(rep_delay_range) != 2:
raise SchemaValidationError(
"Backend rep_delay_range {} must be a list with two entries.".format(
rep_delay_range
)
)
if not rep_delay_range[0] <= rep_delay <= rep_delay_range[1]:
raise SchemaValidationError(
"Supplied rep delay {} not in the supported "
"backend range {}".format(rep_delay, rep_delay_range)
)
rep_delay = rep_delay * 1e6 # convert sec to μs
return rep_delay
def _expand_parameters(circuits, run_config):
"""Verifies that there is a single common set of parameters shared between
all circuits and all parameter binds in the run_config. Returns an expanded
list of circuits (if parameterized) with all parameters bound, and a copy of
the run_config with parameter_binds cleared.
If neither the circuits nor the run_config specify parameters, the two are
returned unmodified.
Raises:
QiskitError: if run_config parameters are not compatible with circuit parameters
Returns:
Tuple(List[QuantumCircuit], RunConfig):
- List of input circuits expanded and with parameters bound
- RunConfig with parameter_binds removed
"""
parameter_binds = run_config.parameter_binds
if parameter_binds or \
any(circuit.parameters for circuit in circuits):
# Unroll params here in order to handle ParamVects
all_bind_parameters = [QuantumCircuit()._unroll_param_dict(bind).keys()
for bind in parameter_binds]
all_circuit_parameters = [circuit.parameters for circuit in circuits]
# Collect set of all unique parameters across all circuits and binds
unique_parameters = {param
for param_list in all_bind_parameters + all_circuit_parameters
for param in param_list}
# Check that all parameters are common to all circuits and binds
if not all_bind_parameters \
or not all_circuit_parameters \
or any(unique_parameters != bind_params for bind_params in all_bind_parameters) \
or any(unique_parameters != parameters for parameters in all_circuit_parameters):
raise QiskitError(
('Mismatch between run_config.parameter_binds and all circuit parameters. ' +
'Parameter binds: {} ' +
'Circuit parameters: {}').format(all_bind_parameters, all_circuit_parameters))
circuits = [circuit.bind_parameters(binds)
for circuit in circuits
for binds in parameter_binds]
# All parameters have been expanded and bound, so remove from run_config
run_config = copy.deepcopy(run_config)
run_config.parameter_binds = []
return circuits, run_config
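# Minimal usage sketch (illustrative only; assumes qiskit is installed and uses
# a simple Bell-state circuit -- the backend argument is optional when the run
# options are supplied explicitly):
#
#     from qiskit import QuantumCircuit
#     qc = QuantumCircuit(2, 2)
#     qc.h(0)
#     qc.cx(0, 1)
#     qc.measure([0, 1], [0, 1])
#     qobj = assemble(qc, shots=2048)          # returns a QasmQobj
#
# Parameterized circuits can instead be expanded through ``parameter_binds``,
# e.g. assemble(qc, parameter_binds=[{theta: 0.1}, {theta: 0.2}]), producing one
# experiment per circuit/bind pair as described in _expand_parameters above.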
|
the-stack_0_11538 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.common import utils
from rally.plugins.openstack.cleanup import base
from rally.plugins.openstack.cleanup import manager
from tests.unit import test
BASE = "rally.plugins.openstack.cleanup.manager"
class SeekAndDestroyTestCase(test.TestCase):
def setUp(self):
super(SeekAndDestroyTestCase, self).setUp()
# clear out the client cache
manager.SeekAndDestroy.cache = {}
def test__get_cached_client(self):
api_versions = {"cinder": {"version": "1", "service_type": "volume"}}
destroyer = manager.SeekAndDestroy(None, None, None,
api_versions=api_versions)
cred = mock.Mock()
user = {"credential": cred}
clients = destroyer._get_cached_client(user)
self.assertIs(cred.clients.return_value, clients)
cred.clients.assert_called_once_with(api_info=api_versions)
self.assertIsNone(destroyer._get_cached_client(None))
@mock.patch("%s.LOG" % BASE)
def test__delete_single_resource(self, mock_log):
mock_resource = mock.MagicMock(_max_attempts=3, _timeout=10,
_interval=0.01)
mock_resource.delete.side_effect = [Exception, Exception, True]
mock_resource.is_deleted.side_effect = [False, False, True]
manager.SeekAndDestroy(None, None, None)._delete_single_resource(
mock_resource)
mock_resource.delete.assert_has_calls([mock.call()] * 3)
self.assertEqual(3, mock_resource.delete.call_count)
mock_resource.is_deleted.assert_has_calls([mock.call()] * 3)
self.assertEqual(3, mock_resource.is_deleted.call_count)
# NOTE(boris-42): No logs and no exceptions means no bugs!
self.assertEqual(0, mock_log.call_count)
@mock.patch("%s.LOG" % BASE)
def test__delete_single_resource_timeout(self, mock_log):
mock_resource = mock.MagicMock(_max_attempts=1, _timeout=0.02,
_interval=0.025)
mock_resource.delete.return_value = True
mock_resource.is_deleted.side_effect = [False, False, True]
manager.SeekAndDestroy(None, None, None)._delete_single_resource(
mock_resource)
mock_resource.delete.assert_called_once_with()
mock_resource.is_deleted.assert_called_once_with()
self.assertEqual(1, mock_log.warning.call_count)
@mock.patch("%s.LOG" % BASE)
    def test__delete_single_resource_exception_in_is_deleted(self, mock_log):
mock_resource = mock.MagicMock(_max_attempts=3, _timeout=10,
_interval=0)
mock_resource.delete.return_value = True
mock_resource.is_deleted.side_effect = [Exception] * 4
manager.SeekAndDestroy(None, None, None)._delete_single_resource(
mock_resource)
mock_resource.delete.assert_called_once_with()
self.assertEqual(4, mock_resource.is_deleted.call_count)
self.assertEqual(5, mock_log.warning.call_count)
self.assertEqual(4, mock_log.exception.call_count)
def _manager(self, list_side_effect, **kw):
mock_mgr = mock.MagicMock()
mock_mgr().list.side_effect = list_side_effect
mock_mgr.reset_mock()
for k, v in kw.items():
setattr(mock_mgr, k, v)
return mock_mgr
@mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
def test__publisher_admin(self, mock__get_cached_client):
mock_mgr = self._manager([Exception, Exception, [1, 2, 3]],
_perform_for_admin_only=False)
admin = mock.MagicMock()
publish = manager.SeekAndDestroy(mock_mgr, admin, None)._publisher
queue = []
publish(queue)
mock__get_cached_client.assert_called_once_with(admin)
mock_mgr.assert_called_once_with(
admin=mock__get_cached_client.return_value)
self.assertEqual(queue, [(admin, None, x) for x in range(1, 4)])
@mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
def test__publisher_admin_only(self, mock__get_cached_client):
mock_mgr = self._manager([Exception, Exception, [1, 2, 3]],
_perform_for_admin_only=True)
admin = mock.MagicMock()
publish = manager.SeekAndDestroy(
mock_mgr, admin, ["u1", "u2"])._publisher
queue = []
publish(queue)
mock__get_cached_client.assert_called_once_with(admin)
mock_mgr.assert_called_once_with(
admin=mock__get_cached_client.return_value)
self.assertEqual(queue, [(admin, None, x) for x in range(1, 4)])
@mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
def test__publisher_user_resource(self, mock__get_cached_client):
mock_mgr = self._manager([Exception, Exception, [1, 2, 3],
Exception, Exception, [4, 5]],
_perform_for_admin_only=False,
_tenant_resource=True)
admin = mock.MagicMock()
users = [{"tenant_id": 1, "id": 1}, {"tenant_id": 2, "id": 2}]
publish = manager.SeekAndDestroy(mock_mgr, admin, users)._publisher
queue = []
publish(queue)
mock_client = mock__get_cached_client.return_value
mock_mgr.assert_has_calls([
mock.call(admin=mock_client, user=mock_client,
tenant_uuid=users[0]["tenant_id"]),
mock.call().list(),
mock.call().list(),
mock.call().list(),
mock.call(admin=mock_client, user=mock_client,
tenant_uuid=users[1]["tenant_id"]),
mock.call().list(),
mock.call().list()
])
mock__get_cached_client.assert_has_calls([
mock.call(admin),
mock.call(users[0]),
mock.call(users[1])
])
expected_queue = [(admin, users[0], x) for x in range(1, 4)]
expected_queue += [(admin, users[1], x) for x in range(4, 6)]
self.assertEqual(expected_queue, queue)
@mock.patch("%s.LOG" % BASE)
@mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
def test__gen_publisher_tenant_resource(self, mock__get_cached_client,
mock_log):
mock_mgr = self._manager([Exception, [1, 2, 3],
Exception, Exception, Exception,
["this shouldn't be in results"]],
_perform_for_admin_only=False,
_tenant_resource=True)
users = [{"tenant_id": 1, "id": 1},
{"tenant_id": 1, "id": 2},
{"tenant_id": 2, "id": 3}]
publish = manager.SeekAndDestroy(
mock_mgr, None, users)._publisher
queue = []
publish(queue)
mock_client = mock__get_cached_client.return_value
mock_mgr.assert_has_calls([
mock.call(admin=mock_client, user=mock_client,
tenant_uuid=users[0]["tenant_id"]),
mock.call().list(),
mock.call().list(),
mock.call(admin=mock_client, user=mock_client,
tenant_uuid=users[2]["tenant_id"]),
mock.call().list(),
mock.call().list(),
mock.call().list()
])
mock__get_cached_client.assert_has_calls([
mock.call(None),
mock.call(users[0]),
mock.call(users[2])
])
self.assertEqual(queue, [(None, users[0], x) for x in range(1, 4)])
        self.assertTrue(mock_log.warning.called)
        self.assertTrue(mock_log.exception.called)
@mock.patch("rally.common.utils.name_matches_object")
@mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
@mock.patch("%s.SeekAndDestroy._delete_single_resource" % BASE)
def test__consumer(self, mock__delete_single_resource,
mock__get_cached_client,
mock_name_matches_object):
mock_mgr = mock.MagicMock(__name__="Test")
resource_classes = [mock.Mock()]
task_id = "task_id"
mock_name_matches_object.return_value = True
consumer = manager.SeekAndDestroy(
mock_mgr, None, None,
resource_classes=resource_classes,
task_id=task_id)._consumer
admin = mock.MagicMock()
user1 = {"id": "a", "tenant_id": "uuid1"}
cache = {}
consumer(cache, (admin, user1, "res"))
mock_mgr.assert_called_once_with(
resource="res",
admin=mock__get_cached_client.return_value,
user=mock__get_cached_client.return_value,
tenant_uuid=user1["tenant_id"])
mock__get_cached_client.assert_has_calls([
mock.call(admin),
mock.call(user1)
])
mock__delete_single_resource.assert_called_once_with(
mock_mgr.return_value)
mock_mgr.reset_mock()
mock__get_cached_client.reset_mock()
mock__delete_single_resource.reset_mock()
mock_name_matches_object.reset_mock()
consumer(cache, (admin, None, "res2"))
mock_mgr.assert_called_once_with(
resource="res2",
admin=mock__get_cached_client.return_value,
user=mock__get_cached_client.return_value,
tenant_uuid=None)
mock__get_cached_client.assert_has_calls([
mock.call(admin),
mock.call(None)
])
mock__delete_single_resource.assert_called_once_with(
mock_mgr.return_value)
@mock.patch("rally.common.utils.name_matches_object")
@mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
@mock.patch("%s.SeekAndDestroy._delete_single_resource" % BASE)
def test__consumer_with_noname_resource(self, mock__delete_single_resource,
mock__get_cached_client,
mock_name_matches_object):
mock_mgr = mock.MagicMock(__name__="Test")
mock_mgr.return_value.name.return_value = True
task_id = "task_id"
mock_name_matches_object.return_value = False
consumer = manager.SeekAndDestroy(mock_mgr, None, None,
task_id=task_id)._consumer
consumer(None, (None, None, "res"))
self.assertFalse(mock__delete_single_resource.called)
mock_mgr.return_value.name.return_value = base.NoName("foo")
consumer(None, (None, None, "res"))
mock__delete_single_resource.assert_called_once_with(
mock_mgr.return_value)
@mock.patch("%s.broker.run" % BASE)
def test_exterminate(self, mock_broker_run):
manager_cls = mock.MagicMock(_threads=5)
cleaner = manager.SeekAndDestroy(manager_cls, None, None)
cleaner._publisher = mock.Mock()
cleaner._consumer = mock.Mock()
cleaner.exterminate()
mock_broker_run.assert_called_once_with(cleaner._publisher,
cleaner._consumer,
consumers_count=5)
class ResourceManagerTestCase(test.TestCase):
def _get_res_mock(self, **kw):
_mock = mock.MagicMock()
for k, v in kw.items():
setattr(_mock, k, v)
return _mock
def _list_res_names_helper(self, names, admin_required, mock_iter):
self.assertEqual(set(names),
manager.list_resource_names(admin_required))
mock_iter.assert_called_once_with(base.ResourceManager)
mock_iter.reset_mock()
@mock.patch("%s.discover.itersubclasses" % BASE)
def test_list_resource_names(self, mock_itersubclasses):
mock_itersubclasses.return_value = [
self._get_res_mock(_service="fake", _resource="1",
_admin_required=True),
self._get_res_mock(_service="fake", _resource="2",
_admin_required=False),
self._get_res_mock(_service="other", _resource="2",
_admin_required=False)
]
self._list_res_names_helper(
["fake", "other", "fake.1", "fake.2", "other.2"],
None, mock_itersubclasses)
self._list_res_names_helper(
["fake", "fake.1"],
True, mock_itersubclasses)
self._list_res_names_helper(
["fake", "other", "fake.2", "other.2"],
False, mock_itersubclasses)
@mock.patch("%s.discover.itersubclasses" % BASE)
def test_find_resource_managers(self, mock_itersubclasses):
mock_itersubclasses.return_value = [
self._get_res_mock(_service="fake", _resource="1", _order=1,
_admin_required=True),
self._get_res_mock(_service="fake", _resource="2", _order=3,
_admin_required=False),
self._get_res_mock(_service="other", _resource="2", _order=2,
_admin_required=False)
]
self.assertEqual(mock_itersubclasses.return_value[0:2],
manager.find_resource_managers(names=["fake"]))
self.assertEqual(mock_itersubclasses.return_value[0:1],
manager.find_resource_managers(names=["fake.1"]))
self.assertEqual(
[mock_itersubclasses.return_value[0],
mock_itersubclasses.return_value[2],
mock_itersubclasses.return_value[1]],
manager.find_resource_managers(names=["fake", "other"]))
self.assertEqual(mock_itersubclasses.return_value[0:1],
manager.find_resource_managers(names=["fake"],
admin_required=True))
self.assertEqual(mock_itersubclasses.return_value[1:2],
manager.find_resource_managers(names=["fake"],
admin_required=False))
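        # Note: the expected ordering above (managers with _order 1, then 2, then 3)
        # reflects that find_resource_managers() evidently sorts the matching
        # managers by their _order attribute before returning them.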
@mock.patch("rally.common.plugin.discover.itersubclasses")
@mock.patch("%s.SeekAndDestroy" % BASE)
@mock.patch("%s.find_resource_managers" % BASE,
return_value=[mock.MagicMock(), mock.MagicMock()])
def test_cleanup(self, mock_find_resource_managers, mock_seek_and_destroy,
mock_itersubclasses):
class A(utils.RandomNameGeneratorMixin):
pass
class B(object):
pass
mock_itersubclasses.return_value = [A, B]
manager.cleanup(names=["a", "b"], admin_required=True,
admin="admin", users=["user"],
superclass=A,
task_id="task_id")
mock_find_resource_managers.assert_called_once_with(["a", "b"], True)
mock_seek_and_destroy.assert_has_calls([
mock.call(mock_find_resource_managers.return_value[0], "admin",
["user"], api_versions=None,
resource_classes=[A], task_id="task_id"),
mock.call().exterminate(),
mock.call(mock_find_resource_managers.return_value[1], "admin",
["user"], api_versions=None,
resource_classes=[A], task_id="task_id"),
mock.call().exterminate()
])
@mock.patch("rally.common.plugin.discover.itersubclasses")
@mock.patch("%s.SeekAndDestroy" % BASE)
@mock.patch("%s.find_resource_managers" % BASE,
return_value=[mock.MagicMock(), mock.MagicMock()])
def test_cleanup_with_api_versions(self,
mock_find_resource_managers,
mock_seek_and_destroy,
mock_itersubclasses):
class A(utils.RandomNameGeneratorMixin):
pass
class B(object):
pass
mock_itersubclasses.return_value = [A, B]
api_versions = {"cinder": {"version": "1", "service_type": "volume"}}
manager.cleanup(names=["a", "b"], admin_required=True,
admin="admin", users=["user"],
api_versions=api_versions,
superclass=utils.RandomNameGeneratorMixin,
task_id="task_id")
mock_find_resource_managers.assert_called_once_with(["a", "b"], True)
mock_seek_and_destroy.assert_has_calls([
mock.call(mock_find_resource_managers.return_value[0], "admin",
["user"], api_versions=api_versions,
resource_classes=[A], task_id="task_id"),
mock.call().exterminate(),
mock.call(mock_find_resource_managers.return_value[1], "admin",
["user"], api_versions=api_versions,
resource_classes=[A], task_id="task_id"),
mock.call().exterminate()
])
|
the-stack_0_11540 | from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from dataactcore.models.domainModels import CountyCode
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs40_detached_award_financial_assistance_1'
def test_column_headers(database):
expected_subset = {"row_number", "place_of_performance_code"}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" PrimaryPlaceOfPerformanceCode last three digits must be a valid county code when format is XX**###. """
county_code = CountyCode(county_number="123", state_code="NY")
det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="NY*****")
det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00FO333")
det_award_3 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="NY**123")
det_award_4 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="Ny**123")
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, county_code])
assert errors == 0
def test_failure(database):
""" Test failure for PrimaryPlaceOfPerformanceCode last three digits must be a valid county code when
format is XX**###. """
county_code = CountyCode(county_number="123", state_code="NY")
det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00**333")
det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00**33")
det_award_3 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="Ny**124")
det_award_4 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="NA**123")
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, county_code])
assert errors == 4
|
the-stack_0_11541 | #!/usr/bin/env python
import argparse
import numpy as np
import math
from numpy.linalg import inv
from numpy import linalg as LA
from os.path import basename, expanduser, isfile, join, splitext
import socket
from matplotlib import pyplot as plt
import time
from skimage import measure
import rospy
from sensor_msgs.msg import CompressedImage
from geometry_msgs.msg import PoseArray, Point, Pose, Quaternion
from duckietown_utils import d8_compressed_image_from_cv_image, logger, rgb_from_ros, yaml_load, get_duckiefleet_root
from duckietown_utils import get_base_name, load_camera_intrinsics, load_homography, load_map, rectify
class Avoider():
'''class to avoid detected obstacles'''
def __init__(self, robot_name=''):
# Robot name
self.robot_name = robot_name
# Parameter definitions
self.lWidthRobot = 140 # mm
self.lWidthLane = 250 # mm
# Control parameters
self.yAvoidanceMargin = 20 # mm
def avoid(self, obstacle_poses_on_track, d_current, theta):
print('AvoiderFct')
self.d_target = 0
self.d_current = d_current
self.theta = theta
emergency_stop = 0
if len(obstacle_poses_on_track.poses) == 1:
# self.d_robot = self.d_current
# self.theta = self.theta_current
x_obstacle = obstacle_poses_on_track.poses[0].position.x * 1000 # mm
y_obstacle = obstacle_poses_on_track.poses[0].position.y * 1000 # mm
r_obstacle = obstacle_poses_on_track.poses[0].position.z * 1000 # mm
# print('x_obstacle = ', x_obstacle)
# print('y_obstacle = ', y_obstacle)
# print('r_obstacle = ', r_obstacle)
global_pos_vec = self.coordinatetransform(x_obstacle, y_obstacle, self.theta, self.d_current)
# x_global = global_pos_vec[0]
y_global = global_pos_vec[1]
# print('y_global = ', y_global)
# print('abs(y_global) = ', abs(y_global))
# print('lanew = ', self.lWidthLane)
# print('robiwidth = ', self.lWidthRobot)
# print('margin =', self.yAvoidanceMargin)
# print('theta=', self.theta)
# print('d_current= ',self.d_current)
# Stop if there is no space
if (abs(y_global) + self.lWidthLane/2 - abs(r_obstacle)) < (self.lWidthRobot + self.yAvoidanceMargin):
print('Emergency Stop')
emergency_stop = 1
# React if possible
self.d_target = (y_global - (np.sign(y_global) * (self.lWidthRobot / 2 + self.yAvoidanceMargin + abs(r_obstacle))))/1000 # convert to m
# print('d_target = ', self.d_target)
elif len(obstacle_poses_on_track.poses) > 1:
print('Number of obstacles reaching avoid function too high')
emergency_stop = 1
targets = [self.d_target, emergency_stop]
return targets
def coordinatetransform(self, x_obstacle, y_obstacle, theta, d_current):
self.theta = theta
self.d_current = d_current
self.x_obstacle = x_obstacle
self.y_obstacle = y_obstacle
vector_local = [self.x_obstacle, self.y_obstacle]
rot_matrix = [[math.cos(self.theta), -math.sin(self.theta)],
[math.sin(self.theta), math.cos(self.theta)]]
vector_global = np.dot(rot_matrix, vector_local) + np.array([0, self.d_current])
x_global = vector_global[0]
y_global = vector_global[1]
return np.array([x_global, y_global])
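# --- Hedged usage sketch (not part of the original node) ---------------------
# A standalone illustration of the geometry above: an obstacle detected in the
# robot frame is rotated by the heading theta and shifted by the lateral offset
# d_current to obtain its lane-frame position, which avoid() then compares
# against the lane and robot widths. The robot name and numbers are made up.
if __name__ == '__main__':
    demo_avoider = Avoider(robot_name='example_bot')
    # Obstacle 200 mm ahead and 30 mm to the left, robot 10 mm off-centre,
    # heading 0.1 rad; coordinatetransform() works in millimetres.
    demo_x, demo_y = demo_avoider.coordinatetransform(
        x_obstacle=200.0, y_obstacle=30.0, theta=0.1, d_current=10.0)
    print('obstacle in lane frame: x = %.1f mm, y = %.1f mm' % (demo_x, demo_y))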
|
the-stack_0_11542 | """This module contains the ``SeleniumMiddleware`` scrapy middleware"""
from importlib import import_module
from scrapy import signals
from scrapy.exceptions import NotConfigured
from scrapy.http import HtmlResponse
from selenium.webdriver.support.ui import WebDriverWait
from .http import SeleniumRequest
class SeleniumMiddleware:
"""Scrapy middleware handling the requests using selenium"""
def __init__(self, driver_name, driver_executable_path, driver_arguments,
browser_executable_path):
"""Initialize the selenium webdriver
Parameters
----------
driver_name: str
The selenium ``WebDriver`` to use
driver_executable_path: str
The path of the executable binary of the driver
driver_arguments: list
A list of arguments to initialize the driver
browser_executable_path: str
The path of the executable binary of the browser
"""
webdriver_base_path = f'selenium.webdriver.{driver_name}'
driver_klass_module = import_module(f'{webdriver_base_path}.webdriver')
driver_klass = getattr(driver_klass_module, 'WebDriver')
driver_options_module = import_module(f'{webdriver_base_path}.options')
driver_options_klass = getattr(driver_options_module, 'Options')
driver_options = driver_options_klass()
if browser_executable_path:
driver_options.binary_location = browser_executable_path
for argument in driver_arguments:
driver_options.add_argument(argument)
driver_kwargs = {
'executable_path': driver_executable_path,
f'{driver_name}_options': driver_options
}
self.driver = driver_klass(**driver_kwargs)
@classmethod
def from_crawler(cls, crawler):
"""Initialize the middleware with the crawler settings"""
driver_name = crawler.settings.get('SELENIUM_DRIVER_NAME')
driver_executable_path = crawler.settings.get('SELENIUM_DRIVER_EXECUTABLE_PATH')
browser_executable_path = crawler.settings.get('SELENIUM_BROWSER_EXECUTABLE_PATH')
driver_arguments = crawler.settings.get('SELENIUM_DRIVER_ARGUMENTS')
if not driver_name or not driver_executable_path:
raise NotConfigured(
'SELENIUM_DRIVER_NAME and SELENIUM_DRIVER_EXECUTABLE_PATH must be set'
)
middleware = cls(
driver_name=driver_name,
driver_executable_path=driver_executable_path,
driver_arguments=driver_arguments,
browser_executable_path=browser_executable_path
)
crawler.signals.connect(middleware.spider_closed, signals.spider_closed)
return middleware
def process_request(self, request, spider):
"""Process a request using the selenium driver if applicable"""
if not isinstance(request, SeleniumRequest):
return None
self.driver.get(request.url)
for cookie_name, cookie_value in request.cookies.items():
self.driver.add_cookie(
{
'name': cookie_name,
'value': cookie_value
}
)
if request.wait_until:
WebDriverWait(self.driver, request.wait_time).until(
request.wait_until
)
if request.screenshot:
request.meta['screenshot'] = self.driver.get_screenshot_as_png()
body = str.encode(self.driver.page_source)
# Expose the driver via the "meta" attribute
request.meta.update({'driver': self.driver})
return HtmlResponse(
self.driver.current_url,
body=body,
encoding='utf-8',
request=request
)
def spider_closed(self):
"""Shutdown the driver when spider is closed"""
self.driver.quit()
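# --- Hedged usage sketch ------------------------------------------------------
# Illustrative Scrapy settings for wiring up this middleware; the dotted module
# path, driver choice and binary location are assumptions to adapt, but only
# setting names read in from_crawler() above are involved.
#
#   SELENIUM_DRIVER_NAME = 'firefox'
#   SELENIUM_DRIVER_EXECUTABLE_PATH = '/usr/local/bin/geckodriver'
#   SELENIUM_DRIVER_ARGUMENTS = ['-headless']
#   DOWNLOADER_MIDDLEWARES = {'scrapy_selenium.SeleniumMiddleware': 800}
#
# A spider then yields SeleniumRequest instead of scrapy.Request, for example:
#   yield SeleniumRequest(url=url, callback=self.parse_result,
#                         wait_time=10, screenshot=True)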
|
the-stack_0_11544 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import sys,argparse
from fnmatch import fnmatch
from openvino.tools.benchmark.utils.utils import show_available_devices
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def check_positive(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError(f"{value} is an invalid positive int value")
return ivalue
class print_help(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
show_available_devices()
sys.exit()
def parse_args():
parser = argparse.ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action=print_help, nargs='?', default=argparse.SUPPRESS,
help='Show this help message and exit.')
args.add_argument('-i', '--paths_to_input', action='append', nargs='+', type=str, required=False,
help='Optional. '
'Path to a folder with images and/or binaries or to specific image or binary file.')
args.add_argument('-m', '--path_to_model', type=str, required=True,
help='Required. Path to an .xml/.onnx/.prototxt file with a trained model or '
'to a .blob file with a trained compiled model.')
args.add_argument('-d', '--target_device', type=str, required=False, default='CPU',
help='Optional. Specify a target device to infer on (the list of available devices is shown below). '
'Default value is CPU. Use \'-d HETERO:<comma separated devices list>\' format to specify HETERO plugin. '
'Use \'-d MULTI:<comma separated devices list>\' format to specify MULTI plugin. '
'The application looks for a suitable plugin for the specified device.')
args.add_argument('-l', '--path_to_extension', type=str, required=False, default=None,
help='Optional. Required for CPU custom layers. '
'Absolute path to a shared library with the kernels implementations.')
args.add_argument('-c', '--path_to_cldnn_config', type=str, required=False,
help='Optional. Required for GPU custom kernels. Absolute path to an .xml file with the '
'kernels description.')
args.add_argument('-api', '--api_type', type=str, required=False, default='async', choices=['sync', 'async'],
help='Optional. Enable using sync/async API. Default value is async.')
args.add_argument('-niter', '--number_iterations', type=check_positive, required=False, default=None,
help='Optional. Number of iterations. '
'If not specified, the number of iterations is calculated depending on a device.')
args.add_argument('-nireq', '--number_infer_requests', type=check_positive, required=False, default=None,
help='Optional. Number of infer requests. Default value is determined automatically for device.')
args.add_argument('-b', '--batch_size', type=int, required=False, default=0,
help='Optional. ' +
'Batch size value. ' +
'If not specified, the batch size value is determined from Intermediate Representation')
args.add_argument('-stream_output', type=str2bool, required=False, default=False, nargs='?', const=True,
help='Optional. '
                      'Print progress as plain text. '
'When specified, an interactive progress bar is replaced with a multi-line output.')
args.add_argument('-t', '--time', type=int, required=False, default=None,
help='Optional. Time in seconds to execute topology.')
args.add_argument('-progress', type=str2bool, required=False, default=False, nargs='?', const=True,
help='Optional. '
                      'Show progress bar (can affect performance measurement). Default value is \'False\'.')
args.add_argument('-shape', type=str, required=False, default='',
help='Optional. '
'Set shape for input. For example, "input1[1,3,224,224],input2[1,4]" or "[1,3,224,224]" in case of one input size.')
args.add_argument('-layout', type=str, required=False, default='',
help='Optional. '
'Prompts how network layouts should be treated by application. '
'For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input size.')
args.add_argument('-nstreams', '--number_streams', type=str, required=False, default=None,
help='Optional. Number of streams to use for inference on the CPU/GPU/MYRIAD '
'(for HETERO and MULTI device cases use format <device1>:<nstreams1>,<device2>:<nstreams2> '
'or just <nstreams>). '
'Default value is determined automatically for a device. Please note that although the automatic selection '
                      'usually provides reasonable performance, it still may be non-optimal for some cases, especially for very small networks. '
                      'Also, using nstreams>1 is an inherently throughput-oriented option, while for the best-latency '
'estimations the number of streams should be set to 1. '
'See samples README for more details.')
args.add_argument('-enforcebf16', '--enforce_bfloat16', type=str2bool, required=False, default=False, nargs='?', const=True, choices=[True, False],
help='Optional. By default floating point operations execution in bfloat16 precision are enforced if supported by platform. '
'\'true\' - enable bfloat16 regardless of platform support. '
'\'false\' - disable bfloat16 regardless of platform support.')
args.add_argument('-nthreads', '--number_threads', type=int, required=False, default=None,
help='Number of threads to use for inference on the CPU, GNA '
'(including HETERO and MULTI cases).')
args.add_argument('-pin', '--infer_threads_pinning', type=str, required=False, default='YES', choices=['YES', 'NO', 'NUMA'],
help='Optional. Enable threads->cores (\'YES\' is default value), threads->(NUMA)nodes (\'NUMA\') or completely disable (\'NO\')'
'CPU threads pinning for CPU-involved inference.')
args.add_argument('-exec_graph_path', '--exec_graph_path', type=str, required=False,
help='Optional. Path to a file where to store executable graph information serialized.')
args.add_argument('-pc', '--perf_counts', type=str2bool, required=False, default=False, nargs='?', const=True,
help='Optional. Report performance counters.', )
args.add_argument('-report_type', '--report_type', type=str, required=False,
choices=['no_counters', 'average_counters', 'detailed_counters'],
help="Optional. Enable collecting statistics report. \"no_counters\" report contains "
"configuration options specified, resulting FPS and latency. \"average_counters\" "
"report extends \"no_counters\" report and additionally includes average PM "
"counters values for each layer from the network. \"detailed_counters\" report "
"extends \"average_counters\" report and additionally includes per-layer PM "
"counters and latency for each executed infer request.")
args.add_argument('-report_folder', '--report_folder', type=str, required=False, default='',
help="Optional. Path to a folder where statistics report is stored.")
args.add_argument('-dump_config', type=str, required=False, default='',
help="Optional. Path to JSON file to dump IE parameters, which were set by application.")
args.add_argument('-load_config', type=str, required=False, default='',
help="Optional. Path to JSON file to load custom IE parameters."
" Please note, command line parameters have higher priority then parameters from configuration file.")
args.add_argument('-qb', '--quantization_bits', type=int, required=False, default=None, choices=[8, 16],
help="Optional. Weight bits for quantization: 8 (I8) or 16 (I16) ")
args.add_argument('-ip', '--input_precision', type=str, required=False, default='U8', choices=['U8', 'FP16', 'FP32'],
help='Optional. Specifies precision for all input layers of the network.')
args.add_argument('-op', '--output_precision', type=str, required=False, default='FP32', choices=['U8', 'FP16', 'FP32'],
help='Optional. Specifies precision for all output layers of the network.')
args.add_argument('-iop', '--input_output_precision', type=str, required=False,
help='Optional. Specifies precision for input and output layers by name. Example: -iop "input:FP16, output:FP16". Notice that quotes are required. Overwrites precision from ip and op options for specified layers.')
parsed_args = parser.parse_args()
return parsed_args
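# --- Hedged usage sketch ------------------------------------------------------
# Example invocation using only flags defined above; the entry-point name and
# the model/input paths are placeholders, not part of this module:
#
#   python3 benchmark_app.py -m model.xml -i images/ -d CPU \
#       -api async -niter 100 -b 1 -nstreams 4 -report_type average_counters
#
# Programmatically, the parsed namespace is simply:
#   args = parse_args()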
|
the-stack_0_11545 | """
This module provides fundamental solar physical constants.
"""
import io
from astropy.table import Table
from astropy.time import Time
from sunpy.sun import _constants as _con
__all__ = [
'get', 'find', 'print_all', 'spectral_classification', 'au', 'mass', 'equatorial_radius',
'volume', 'surface_area', 'average_density', 'equatorial_surface_gravity',
'effective_temperature', 'luminosity', 'mass_conversion_rate', 'escape_velocity', 'sfu',
'average_angular_size', 'sidereal_rotation_rate', 'first_carrington_rotation',
'mean_synodic_period'
]
constants = _con.physical_constants
def get(key):
"""
Retrieve a constant by key. This is just a short cut into a dictionary.
Parameters
----------
key : `str`
Key in dictionary in ``constants``.
Returns
-------
constant : `~astropy.constants.Constant`
See Also
--------
`~sunpy.sun.constants` :
Contains the description of ``constants``, which, as a dictionary literal object, does not
itself possess a docstring.
Examples
--------
>>> from sunpy.sun import constants
>>> constants.get('mass')
<<class 'astropy.constants.iau2015.IAU2015'> name='Solar mass' value=1.9884754153381438e+30 uncertainty=9.236140093538353e+25 unit='kg' reference='IAU 2015 Resolution B 3 + CODATA 2014'>
"""
ret = constants[key]
ret.__doc__ = ret.name
return ret
def find(sub=None):
"""
Return list of constants keys containing a given string.
Parameters
----------
sub : `str`, optional
Sub-string to search keys for. By default set to `None` and returns all keys.
Returns
-------
`None`, `list`
The matching keys.
See Also
--------
`~sunpy.sun.constants` :
Contains the description of ``constants``, which, as a dictionary literal object, does not itself possess a docstring.
"""
if sub is None:
result = list(constants.keys())
else:
result = [key for key in constants if sub.lower() in key.lower()]
result.sort()
return result
def print_all():
"""
Provides a table of the complete list of constants.
Returns
-------
`astropy.table.Table`
"""
data_rows = []
for key, this_constant in constants.items():
data_rows.append([
key, this_constant.name, this_constant.value, this_constant.uncertainty,
str(this_constant.unit), this_constant.reference
])
t = Table(rows=data_rows, names=('key', 'name', 'value', 'uncertainty', 'unit', 'Reference'))
return t
def _build_docstring():
"""Build docstring containing RST-formatted table of constants."""
lines = ['The following constants are available:\n']
rows = []
for key, const in constants.items():
rows.append([key, const.value, const._unit_string, const.name])
table = Table(rows=rows, names=('Name', 'Value', 'Unit', 'Description'))
table['Value'].info.format = '14.9g'
f = io.StringIO()
table.write(f, format='ascii.rst')
lines.append(f.getvalue())
return '\n'.join(lines)
# Add a table of constants to the docs
if __doc__ is not None:
__doc__ += _build_docstring()
# Spectral class is not included in physical constants since it is not a number
#: Spectral classification
spectral_classification = 'G2V'
au = astronomical_unit = get('mean distance')
# The following variables from _gets are brought out by making them
# accessible through a call such as sun.volume
mass = get('mass')
equatorial_radius = radius = get('radius')
volume = get('volume')
surface_area = get('surface area')
average_density = density = get('average density')
equatorial_surface_gravity = surface_gravity = get('surface gravity')
effective_temperature = get('effective temperature')
luminosity = get('luminosity')
mass_conversion_rate = get('mass conversion rate')
escape_velocity = get('escape velocity')
sfu = get('solar flux unit')
# Observable parameters
average_angular_size = get('average angular size')
sidereal_rotation_rate = get('sidereal rotation rate')
#: Time of the start of the first Carrington rotation
first_carrington_rotation = Time(get('first Carrington rotation (JD TT)'), format='jd', scale='tt')
mean_synodic_period = get('mean synodic period')
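# --- Hedged usage sketch (not part of the original module) --------------------
# Indicative examples of the helpers defined above; exact return values depend
# on the packaged constants table.
#
#   >>> from sunpy.sun import constants
#   >>> constants.find('rotation')        # keys containing 'rotation'
#   ['first Carrington rotation (JD TT)', 'sidereal rotation rate']
#   >>> constants.get('mass').unit
#   Unit("kg")
#   >>> constants.equatorial_radius is constants.get('radius')   # shortcuts reuse get()
#   True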
|
the-stack_0_11546 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test Cases to be run for the import module"""
IMPLEMENTED_OPERATORS_TEST = [
'test_random_uniform',
'test_random_normal',
'test_add',
'test_sub',
'test_mul',
'test_div',
'test_neg',
'test_abs',
'test_sum',
'test_tanh',
'test_ceil',
'test_floor',
'test_concat',
'test_sigmoid',
'test_relu',
'test_constant_pad',
'test_edge_pad',
'test_reflect_pad',
'test_reduce_min',
'test_reduce_max',
'test_reduce_mean',
'test_reduce_prod',
'test_squeeze',
'test_unsqueeze',
'test_softmax_example',
'test_softmax_large_number',
'test_softmax_axis_2',
'test_transpose',
'test_globalmaxpool',
'test_globalaveragepool',
'test_slice_cpu',
'test_slice_neg',
'test_squeeze_',
'test_reciprocal',
'test_sqrt',
'test_pow',
'test_exp',
'test_argmax',
'test_argmin',
'test_min',
'test_logical_and',
'test_logical_xor',
'test_logical_not',
'test_logical_or',
'test_clip',
'test_softsign',
'test_reduce_l2',
'test_reduce_log_sum',
'test_reduce_log_sum_exp',
    'test_reduce_sum_square',
    # pytorch operator tests
'test_operator_exp',
'test_operator_maxpool',
'test_operator_params',
'test_operator_permute2'
]
BASIC_MODEL_TESTS = [
'test_AvgPool2D',
'test_BatchNorm',
    'test_ConstantPad2d',
'test_Conv2d',
'test_ELU',
'test_LeakyReLU',
'test_MaxPool',
'test_PReLU',
'test_ReLU',
'test_Sigmoid',
'test_Softmax',
'test_softmax_functional',
'test_softmax_lastdim',
'test_Tanh'
]
STANDARD_MODEL = [
'test_bvlc_alexnet',
'test_densenet121',
#'test_inception_v1',
#'test_inception_v2',
'test_resnet50',
#'test_shufflenet',
'test_squeezenet',
'test_zfnet512',
'test_vgg19'
]
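# --- Hedged helper sketch (hypothetical; not part of the original test suite) --
# The groups above are plain lists of ONNX/pytorch test names. A selector like
# the one below can pick a matching subset, e.g. select_tests('test_reduce_*');
# the helper name and default grouping are assumptions, not existing project API.
from fnmatch import fnmatch
def select_tests(pattern, groups=None):
    groups = groups if groups is not None else (
        IMPLEMENTED_OPERATORS_TEST, BASIC_MODEL_TESTS, STANDARD_MODEL)
    return [name for group in groups for name in group if fnmatch(name, pattern)]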
|
the-stack_0_11547 | import unittest
from find_max_indices import find_max_indices
from drop_first import drop_first
class TestFindMaxIndices(unittest.TestCase):
def test_find_max_indices(self):
Ms = [[[1, 2, 3], [9, 8, 7, 6], [4, 5]]]
expecteds = [(1, (0, 9))]
for M, expected in zip(Ms, expecteds):
self.assertEqual(expected, find_max_indices(M))
class TestDropFirst(unittest.TestCase):
def test_drop_first(self):
iterables = [[1, 2, 3]]
expecteds = [[2, 3]]
for iterable, expected in zip(iterables, expecteds):
self.assertEqual(expected, list(drop_first(iterable)))
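# --- Hedged reference sketch (hypothetical; the real implementations live in
# find_max_indices.py and drop_first.py) ---------------------------------------
# Minimal versions that encode the behaviour the tests above expect:
# find_max_indices returns (row_index, (col_index, max_value)) over a list of
# lists, e.g. [[1, 2, 3], [9, 8, 7, 6], [4, 5]] -> (1, (0, 9)), and drop_first
# lazily skips the first element of an iterable, e.g. [1, 2, 3] -> 2, 3.
def _find_max_indices_sketch(M):
    best = None
    for i, row in enumerate(M):
        for j, value in enumerate(row):
            if best is None or value > best[1][1]:
                best = (i, (j, value))
    return best
def _drop_first_sketch(iterable):
    it = iter(iterable)
    next(it, None)  # discard the first element if present
    for item in it:
        yield item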
if __name__ == "__main__":
unittest.main()
|
the-stack_0_11548 | from filebeat import BaseTest
from beat.beat import INTEGRATION_TESTS
import os
import unittest
import glob
import subprocess
from elasticsearch import Elasticsearch
import json
import logging
class Test(BaseTest):
def init(self):
self.elasticsearch_url = self.get_elasticsearch_url()
print("Using elasticsearch: {}".format(self.elasticsearch_url))
self.es = Elasticsearch([self.elasticsearch_url])
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("elasticsearch").setLevel(logging.ERROR)
self.modules_path = os.path.abspath(self.working_dir +
"/../../../../module")
self.filebeat = os.path.abspath(self.working_dir +
"/../../../../filebeat.test")
self.index_name = "test-filebeat-modules"
@unittest.skipIf(not INTEGRATION_TESTS or
os.getenv("TESTING_ENVIRONMENT") == "2x",
"integration test not available on 2.x")
def test_modules(self):
self.init()
modules = os.getenv("TESTING_FILEBEAT_MODULES")
if modules:
modules = modules.split(",")
else:
modules = os.listdir(self.modules_path)
# generate a minimal configuration
cfgfile = os.path.join(self.working_dir, "filebeat.yml")
self.render_config_template(
template="filebeat_modules.yml.j2",
output=cfgfile,
index_name=self.index_name,
elasticsearch_url=self.elasticsearch_url)
for module in modules:
path = os.path.join(self.modules_path, module)
filesets = [name for name in os.listdir(path) if
os.path.isfile(os.path.join(path, name,
"manifest.yml"))]
for fileset in filesets:
test_files = glob.glob(os.path.join(self.modules_path, module,
fileset, "test", "*.log"))
for test_file in test_files:
self.run_on_file(
module=module,
fileset=fileset,
test_file=test_file,
cfgfile=cfgfile)
def run_on_file(self, module, fileset, test_file, cfgfile):
print("Testing {}/{} on {}".format(module, fileset, test_file))
try:
self.es.indices.delete(index=self.index_name)
except:
pass
cmd = [
self.filebeat, "-systemTest",
"-e", "-d", "*", "-once",
"-c", cfgfile,
"-modules={}".format(module),
"-M", "{module}.*.enabled=false".format(module=module),
"-M", "{module}.{fileset}.enabled=true".format(module=module, fileset=fileset),
"-M", "{module}.{fileset}.var.paths=[{test_file}]".format(
module=module, fileset=fileset, test_file=test_file),
"-M", "*.*.prospector.close_eof=true",
]
output = open(os.path.join(self.working_dir, "output.log"), "ab")
output.write(" ".join(cmd) + "\n")
subprocess.Popen(cmd,
stdin=None,
stdout=output,
stderr=subprocess.STDOUT,
bufsize=0).wait()
# Make sure index exists
self.wait_until(lambda: self.es.indices.exists(self.index_name))
self.es.indices.refresh(index=self.index_name)
res = self.es.search(index=self.index_name,
body={"query": {"match_all": {}}})
objects = [o["_source"] for o in res["hits"]["hits"]]
assert len(objects) > 0
for obj in objects:
assert obj["fileset"]["module"] == module, "expected fileset.module={} but got {}".format(
module, obj["fileset"]["module"])
if not (module == "mysql" and fileset == "slowlog"):
# TODO: There are errors parsing the test logs from these modules.
assert "error" not in obj, "not error expected but got: {}".format(obj)
if module != "auditd" and fileset != "log":
# There are dynamic fields in audit logs that are not documented.
self.assert_fields_are_documented(obj)
if os.path.exists(test_file + "-expected.json"):
with open(test_file + "-expected.json", "r") as f:
expected = json.load(f)
assert len(expected) == len(objects), "expected {} but got {}".format(len(expected), len(objects))
for ev in expected:
found = False
for obj in objects:
if ev["_source"][module] == obj[module]:
found = True
break
if not found:
raise Exception("The following expected object was" +
" not found: {}".format(obj))
@unittest.skipIf(not INTEGRATION_TESTS or
os.getenv("TESTING_ENVIRONMENT") == "2x",
"integration test not available on 2.x")
def test_prospector_pipeline_config(self):
"""
Tests that the pipeline configured in the prospector overwrites
the one from the output.
"""
self.init()
index_name = "filebeat-test-prospector"
try:
self.es.indices.delete(index=index_name)
except:
pass
self.wait_until(lambda: not self.es.indices.exists(index_name))
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
elasticsearch=dict(
host=self.elasticsearch_url,
pipeline="estest",
index=index_name),
pipeline="test",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
with open(testfile, 'a') as file:
file.write("Hello World1\n")
# put pipeline
self.es.transport.perform_request("PUT", "/_ingest/pipeline/test",
body={
"processors": [{
"set": {
"field": "x-pipeline",
"value": "test-pipeline",
}
}]})
filebeat = self.start_beat()
# Wait until the event is in ES
self.wait_until(lambda: self.es.indices.exists(index_name))
def search_objects():
try:
self.es.indices.refresh(index=index_name)
res = self.es.search(index=index_name,
body={"query": {"match_all": {}}})
return [o["_source"] for o in res["hits"]["hits"]]
except:
return []
self.wait_until(lambda: len(search_objects()) > 0, max_timeout=20)
filebeat.check_kill_and_wait()
objects = search_objects()
assert len(objects) == 1
o = objects[0]
assert o["x-pipeline"] == "test-pipeline"
|
the-stack_0_11550 | from __future__ import annotations
import itertools
from typing import (
TYPE_CHECKING,
cast,
)
import numpy as np
import pandas._libs.reshape as libreshape
from pandas._libs.sparse import IntIndex
from pandas._typing import Dtype
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int,
is_1d_only_ea_dtype,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import notna
import pandas.core.algorithms as algos
from pandas.core.arrays import SparseArray
from pandas.core.arrays.categorical import factorize_from_iterable
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import (
Index,
MultiIndex,
)
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
get_compressed_ids,
get_group_index,
get_group_index_sorter,
)
if TYPE_CHECKING:
from pandas.core.arrays import ExtensionArray
class _Unstacker:
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
index : MultiIndex
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
fill_value : scalar, optional
Default value to fill in missing values if subgroups do not have the
same set of labels. By default, missing values will be replaced with
the default fill value for that data type, NaN for float, NaT for
datetimelike, etc. For integer types, by default data will converted to
float and missing values will be set to NaN.
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
response. If None, DataFrame will be used.
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, index: MultiIndex, level=-1, constructor=None):
if constructor is None:
constructor = DataFrame
self.constructor = constructor
self.index = index.remove_unused_levels()
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.codes[self.level] else 0
# Note: the "pop" below alters these in-place.
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
# Bug fix GH 20601
# If the data frame is too big, the number of unique index combination
# will cause int32 overflow on windows environments.
# We want to check and raise an error before this happens
num_rows = np.max([index_level.size for index_level in self.new_index_levels])
num_columns = self.removed_level.size
# GH20601: This forces an overflow if the number of cells is too high.
num_cells = np.multiply(num_rows, num_columns, dtype=np.int32)
if num_rows > 0 and num_columns > 0 and num_cells <= 0:
raise ValueError("Unstacked DataFrame is too big, causing int32 overflow")
self._make_selectors()
@cache_readonly
def _indexer_and_to_sort(self):
v = self.level
codes = list(self.index.codes)
levs = list(self.index.levels)
to_sort = codes[:v] + codes[v + 1 :] + [codes[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1 :] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = get_group_index_sorter(comp_index, ngroups)
indexer = ensure_platform_int(indexer)
return indexer, to_sort
@cache_readonly
def sorted_labels(self):
indexer, to_sort = self._indexer_and_to_sort
return [line.take(indexer) for line in to_sort]
def _make_sorted_values(self, values: np.ndarray) -> np.ndarray:
indexer, _ = self._indexer_and_to_sort
sorted_values = algos.take_nd(values, indexer, axis=0)
return sorted_values
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
# error: Argument 1 to "zeros" has incompatible type "number"; expected
# "Union[int, Sequence[int]]"
mask = np.zeros(np.prod(self.full_shape), dtype=bool) # type: ignore[arg-type]
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError("Index contains duplicate entries, cannot reshape")
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self, values, value_columns, fill_value):
if values.ndim == 1:
values = values[:, np.newaxis]
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError("must pass column labels for multi-column data")
values, _ = self.get_new_values(values, fill_value)
columns = self.get_new_columns(value_columns)
index = self.new_index
return self.constructor(values, index=index, columns=columns)
def get_new_values(self, values, fill_value=None):
if values.ndim == 1:
values = values[:, np.newaxis]
sorted_values = self._make_sorted_values(values)
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
# TODO: Under what circumstances can we rely on sorted_values
# matching values? When that holds, we can slice instead
# of take (in particular for EAs)
new_values = (
sorted_values.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values.dtype):
sorted_values = sorted_values.view("i8")
new_values = new_values.view("i8")
elif is_bool_dtype(values.dtype):
sorted_values = sorted_values.astype("object")
new_values = new_values.astype("object")
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
libreshape.unstack(
sorted_values,
mask.view("u1"),
stride,
length,
width,
new_values,
new_mask.view("u1"),
)
# reconstruct dtype if needed
if needs_i8_conversion(values.dtype):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self, value_columns):
if value_columns is None:
if self.lift == 0:
return self.removed_level._rename(name=self.removed_name)
lev = self.removed_level.insert(0, item=self.removed_level._na_value)
return lev.rename(self.removed_name)
stride = len(self.removed_level) + self.lift
width = len(value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(value_columns, MultiIndex):
new_levels = value_columns.levels + (self.removed_level_full,)
new_names = value_columns.names + (self.removed_name,)
new_codes = [lab.take(propagator) for lab in value_columns.codes]
else:
new_levels = [value_columns, self.removed_level_full]
new_names = [value_columns.name, self.removed_name]
new_codes = [propagator]
# The two indices differ only if the unstacked level had unused items:
if len(self.removed_level_full) != len(self.removed_level):
# In this case, we remap the new codes to the original level:
repeater = self.removed_level_full.get_indexer(self.removed_level)
if self.lift:
repeater = np.insert(repeater, 0, -1)
else:
# Otherwise, we just use each level item exactly once:
repeater = np.arange(stride) - self.lift
# The entire level is then just a repetition of the single chunk:
new_codes.append(np.tile(repeater, width))
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
@cache_readonly
def new_index(self):
# Does not depend on values or value_columns
result_codes = [lab.take(self.compressor) for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
level, level_codes = self.new_index_levels[0], result_codes[0]
if (level_codes == -1).any():
level = level.insert(len(level), level._na_value)
return level.take(level_codes).rename(self.new_index_names[0])
return MultiIndex(
levels=self.new_index_levels,
codes=result_codes,
names=self.new_index_names,
verify_integrity=False,
)
def _unstack_multiple(data, clocs, fill_value=None):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
# GH 19966 Make sure if MultiIndexed index has tuple name, they will be
# recognised as a whole
if clocs in index.names:
clocs = [clocs]
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
ccodes = [index.codes[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rcodes = [index.codes[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(ccodes, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes, xnull=False)
if not rlocs:
# Everything is in clocs, so the dummy df has a regular index
dummy_index = Index(obs_ids, name="__placeholder__")
else:
dummy_index = MultiIndex(
levels=rlevels + [obs_ids],
codes=rcodes + [comp_ids],
names=rnames + ["__placeholder__"],
verify_integrity=False,
)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack("__placeholder__", fill_value=fill_value)
new_levels = clevels
new_names = cnames
new_codes = recons_codes
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val, fill_value=fill_value)
clocs = [v if v < val else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack("__placeholder__", fill_value=fill_value)
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
assert isinstance(unstcols, MultiIndex) # for mypy
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_codes = [unstcols.codes[0]]
for rec in recons_codes:
new_codes.append(rec.take(unstcols.codes[-1]))
new_columns = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
if len(level) != 1:
# _unstack_multiple only handles MultiIndexes,
# and isn't needed for a single level
return _unstack_multiple(obj, level, fill_value=fill_value)
else:
level = level[0]
# Prioritize integer interpretation (GH #21677):
if not is_integer(level) and not level == "__placeholder__":
level = obj.index._get_level_number(level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
elif not isinstance(obj.index, MultiIndex):
# GH 36113
# Give nicer error messages when unstack a Series whose
# Index is not a MultiIndex.
raise ValueError(
f"index must be a MultiIndex to unstack, {type(obj.index)} was passed"
)
else:
if is_1d_only_ea_dtype(obj.dtype):
return _unstack_extension_series(obj, level, fill_value)
unstacker = _Unstacker(
obj.index, level=level, constructor=obj._constructor_expanddim
)
return unstacker.get_result(
obj._values, value_columns=None, fill_value=fill_value
)
def _unstack_frame(obj, level, fill_value=None):
if not obj._can_fast_transpose:
unstacker = _Unstacker(obj.index, level=level)
mgr = obj._mgr.unstack(unstacker, fill_value=fill_value)
return obj._constructor(mgr)
else:
unstacker = _Unstacker(obj.index, level=level, constructor=obj._constructor)
return unstacker.get_result(
obj._values, value_columns=obj.columns, fill_value=fill_value
)
def _unstack_extension_series(series, level, fill_value):
"""
Unstack an ExtensionArray-backed Series.
The ExtensionDtype is preserved.
Parameters
----------
series : Series
A Series with an ExtensionArray for values
level : Any
The level name or number.
fill_value : Any
The user-level (not physical storage) fill value to use for
missing values introduced by the reshape. Passed to
``series.values.take``.
Returns
-------
DataFrame
Each column of the DataFrame will have the same dtype as
the input Series.
"""
# Defer to the logic in ExtensionBlock._unstack
df = series.to_frame()
result = df.unstack(level=level, fill_value=fill_value)
return result.droplevel(level=0, axis=1)
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_codes = [lab.repeat(K) for lab in frame.index.codes]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_codes.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index, frame.columns)))
codes = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(
levels=levels,
codes=codes,
names=[frame.index.name, frame.columns.name],
verify_integrity=False,
)
if not frame.empty and frame._is_homogeneous_type:
# For homogeneous EAs, frame._values will coerce to object. So
# we concatenate instead.
dtypes = list(frame.dtypes._values)
dtype = dtypes[0]
if is_extension_array_dtype(dtype):
arr = dtype.construct_array_type()
new_values = arr._concat_same_type(
[col._values for _, col in frame.items()]
)
new_values = _reorder_for_extension_array_stack(new_values, N, K)
else:
# homogeneous, non-EA
new_values = frame._values.ravel()
else:
# non-homogeneous
new_values = frame._values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return frame._constructor_sliced(new_values, index=new_index)
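# Hedged worked example (illustrative only): stacking pivots the columns into
# the innermost index level, the inverse of the _Unstacker doctest above, e.g.
#     df = DataFrame({'a': [1, 3], 'b': [2, 4]}, index=['one', 'two'])
#     df.stack()
#     one  a    1
#          b    2
#     two  a    3
#          b    4
#     dtype: int64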
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError(
"level should contain all level names or all level "
"numbers, not a mixture of the two."
)
return result
def _stack_multi_column_index(columns: MultiIndex) -> MultiIndex:
"""Creates a MultiIndex from the first N-1 levels of this MultiIndex."""
if len(columns.levels) <= 2:
return columns.levels[0]._rename(name=columns.names[0])
levs = [
[lev[c] if c >= 0 else None for c in codes]
for lev, codes in zip(columns.levels[:-1], columns.codes[:-1])
]
# Remove duplicate tuples in the MultiIndex.
tuples = zip(*levs)
unique_tuples = (key for key, _ in itertools.groupby(tuples))
new_levs = zip(*unique_tuples)
# The dtype of each level must be explicitly set to avoid inferring the wrong type.
# See GH-36991.
return MultiIndex.from_arrays(
[
# Not all indices can accept None values.
Index(new_lev, dtype=lev.dtype) if None not in new_lev else new_lev
for new_lev, lev in zip(new_levs, columns.levels)
],
names=columns.names[:-1],
)
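# Hedged illustration (comment only): for columns such as
#     MultiIndex.from_tuples([('A', 'x', 1), ('A', 'y', 2)], names=['l0', 'l1', 'l2'])
# the helper above keeps the first two levels and drops duplicate tuples, giving
#     MultiIndex.from_tuples([('A', 'x'), ('A', 'y')], names=['l0', 'l1'])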
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel.
If `level_num` matches a column name return the name from
position `level_num`, otherwise return `level_num`.
"""
if level_num in columns.names:
return columns.names[level_num]
return level_num
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns._is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sort_index(level=level_to_sort, axis=1)
new_columns = _stack_multi_column_index(this.columns)
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_codes = sorted(set(this.columns.codes[-1]))
level_vals_nan = level_vals.insert(len(level_vals), None)
level_vals_used = np.take(level_vals_nan, level_codes)
levsize = len(level_codes)
drop_cols = []
for key in new_columns:
try:
loc = this.columns.get_loc(key)
except KeyError:
drop_cols.append(key)
continue
# can make more efficient?
# we almost always return a slice
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals_nan.take(chunk.columns.codes[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_homogeneous_type and is_extension_array_dtype(
frame.dtypes.iloc[0]
):
dtype = this[this.columns[loc]].dtypes.iloc[0]
subset = this[this.columns[loc]]
value_slice = dtype.construct_array_type()._concat_same_type(
[x._values for _, x in subset.items()]
)
N, K = this.shape
idx = np.arange(N * K).reshape(K, N).T.ravel()
value_slice = value_slice.take(idx)
elif frame._is_mixed_type:
value_slice = this[this.columns[loc]].values
else:
value_slice = this.values[:, loc]
if value_slice.ndim > 1:
# i.e. not extension
value_slice = value_slice.ravel()
new_data[key] = value_slice
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_codes = [lab.repeat(levsize) for lab in this.index.codes]
else:
old_codes, old_levels = factorize_from_iterable(this.index)
new_levels = [old_levels]
new_codes = [old_codes.repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_codes.append(np.tile(level_codes, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
result = frame._constructor(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how="all")
return result
def get_dummies(
data,
prefix=None,
prefix_sep="_",
dummy_na: bool = False,
columns=None,
sparse: bool = False,
drop_first: bool = False,
dtype: Dtype | None = None,
) -> DataFrame:
"""
Convert categorical variable into dummy/indicator variables.
Parameters
----------
data : array-like, Series, or DataFrame
Data of which to get dummy indicators.
prefix : str, list of str, or dict of str, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
Returns
-------
DataFrame
Dummy-coded data.
See Also
--------
Series.str.get_dummies : Convert Series to dummy codes.
Examples
--------
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas.core.reshape.concat import concat
dtypes_to_encode = ["object", "category"]
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
data_to_encode = data.select_dtypes(include=dtypes_to_encode)
elif not is_list_like(columns):
raise TypeError("Input must be a list-like for parameter `columns`")
else:
data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
if is_list_like(item):
                if len(item) != data_to_encode.shape[1]:
len_msg = (
f"Length of '{name}' ({len(item)}) did not match the "
"length of the columns being encoded "
f"({data_to_encode.shape[1]})."
)
raise ValueError(len_msg)
check_len(prefix, "prefix")
check_len(prefix_sep, "prefix_sep")
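        # a scalar prefix/prefix_sep is cycled so the zip below pairs it with
        # every encoded column; a dict is expanded to a list in column order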
if isinstance(prefix, str):
prefix = itertools.cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, str):
prefix_sep = itertools.cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
with_dummies: list[DataFrame]
if data_to_encode.shape == data.shape:
# Encoding the entire df, do not prepend any dropped columns
with_dummies = []
elif columns is not None:
# Encoding only cols specified in columns. Get all cols not in
# columns to prepend to result.
with_dummies = [data.drop(columns, axis=1)]
else:
# Encoding only object and category dtype columns. Get remaining
# columns to prepend to result.
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
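        # with_dummies collects the pass-through (non-encoded) columns first,
        # then one dummy frame per encoded column; everything is concatenated
        # column-wise below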
for (col, pre, sep) in zip(data_to_encode.items(), prefix, prefix_sep):
# col is (column_name, column), use just column data here
dummy = _get_dummies_1d(
col[1],
prefix=pre,
prefix_sep=sep,
dummy_na=dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(
data,
prefix,
prefix_sep,
dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
return result
def _get_dummies_1d(
data,
prefix,
prefix_sep="_",
dummy_na: bool = False,
sparse: bool = False,
drop_first: bool = False,
dtype: Dtype | None = None,
) -> DataFrame:
from pandas.core.reshape.concat import concat
# Series avoids inconsistent NaN handling
codes, levels = factorize_from_iterable(Series(data))
if dtype is None:
dtype = np.uint8
# error: Argument 1 to "dtype" has incompatible type "Union[ExtensionDtype, str,
# dtype[Any], Type[object]]"; expected "Type[Any]"
dtype = np.dtype(dtype) # type: ignore[arg-type]
if is_object_dtype(dtype):
raise ValueError("dtype=object is not a valid dtype for get_dummies")
def get_empty_frame(data) -> DataFrame:
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
return DataFrame(index=index)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_frame(data)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_frame(data)
number_of_cols = len(levels)
if prefix is None:
dummy_cols = levels
else:
dummy_cols = Index([f"{prefix}{prefix_sep}{level}" for level in levels])
index: Index | None
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
fill_value: bool | float | int
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
sparse_series = []
N = len(data)
sp_indices: list[list] = [[] for _ in range(len(dummy_cols))]
mask = codes != -1
codes = codes[mask]
n_idx = np.arange(N)[mask]
for ndx, code in zip(n_idx, codes):
sp_indices[code].append(ndx)
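        # sp_indices[k] now holds the row positions at which level k occurs;
        # each list becomes the integer sparse index of one dummy column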
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(
np.ones(len(ixs), dtype=dtype),
sparse_index=IntIndex(N, ixs),
fill_value=fill_value,
dtype=dtype,
)
sparse_series.append(Series(data=sarr, index=index, name=col))
out = concat(sparse_series, axis=1, copy=False)
# TODO: overload concat with Literal for axis
out = cast(DataFrame, out)
return out
else:
# take on axis=1 + transpose to ensure ndarray layout is column-major
dummy_mat = np.eye(number_of_cols, dtype=dtype).take(codes, axis=1).T
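        # each row of dummy_mat one-hot encodes one observation (a 1 in the
        # column of its factorized level); rows for missing values (code -1)
        # are zeroed out just below when dummy_na is False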
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
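
# Illustrative sketch (added for exposition, not part of the original module):
# the dense branch above is equivalent to indexing an identity matrix by the
# factorized codes. For list("abca") the codes are [0, 1, 2, 0], so:
#
#   np.eye(3, dtype=np.uint8).take([0, 1, 2, 0], axis=1).T
#   # -> [[1, 0, 0],
#   #     [0, 1, 0],
#   #     [0, 0, 1],
#   #     [1, 0, 0]]
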
def _reorder_for_extension_array_stack(
arr: ExtensionArray, n_rows: int, n_columns: int
) -> ExtensionArray:
"""
Re-orders the values when stacking multiple extension-arrays.
The indirect stacking method used for EAs requires a followup
take to get the order correct.
Parameters
----------
arr : ExtensionArray
n_rows, n_columns : int
The number of rows and columns in the original DataFrame.
Returns
-------
taken : ExtensionArray
The original `arr` with elements re-ordered appropriately
Examples
--------
>>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])
>>> _reorder_for_extension_array_stack(arr, 2, 3)
array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')
>>> _reorder_for_extension_array_stack(arr, 3, 2)
array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
"""
# final take to get the order correct.
# idx is an indexer like
# [c0r0, c1r0, c2r0, ...,
# c0r1, c1r1, c2r1, ...]
idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()
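    # e.g. n_rows=2, n_columns=3 gives idx = [0, 2, 4, 1, 3, 5], matching the
    # docstring example above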
return arr.take(idx)