ext | sha | content
---|---|---
py | 1a4d2cd640db67c3bfe9649f1141fa13f338e5e4 | # Generated by Django 2.2.8 on 2019-12-12 22:13
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ARContents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('image_contents', models.ImageField(upload_to='contents')),
('image_target', models.ImageField(help_text='Please use JPG files only.', upload_to='targets')),
('target_id', models.CharField(blank=True, max_length=255)),
('name', models.CharField(max_length=64)),
('description', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'verbose_name': 'AR Content',
'db_table': 'ar_contents',
},
),
]
|
py | 1a4d2dca7c7541427c8e5488d878fb690d74492f | from django.shortcuts import render
from . import forms
# Create your views here.
def index(request):
return render(request, 'basicapp/index.html')
def form_name_view(request):
form = forms.FormName()
if request.method == 'POST':
form = forms.FormName(request.POST)
if form.is_valid():
print(form.cleaned_data)
return render(request, 'basicapp/form_page.html', {'form':form})
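# The FormName class referenced above lives in basicapp/forms.py, which is
# not included here. A minimal sketch of what it could look like (the field
# names are hypothetical, inferred only from the usage above):
#
#     from django import forms
#
#     class FormName(forms.Form):
#         name = forms.CharField()
#         email = forms.EmailField()
#         text = forms.CharField(widget=forms.Textarea)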
|
py | 1a4d2dd1893a0a5d15a6360ee04d38dd86d937de | #!/usr/bin/python
'''
Copyright 2011 Daniel Arndt
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
@author: Daniel Arndt <[email protected]>
dependencies:
pip install netifaces
apt-get install python-libpcap
'''
import sys, traceback
import argparse
import logging
import time
import binascii as ba
import socket
import struct
import string
import pcap
from flow import Flow
#Set up default logging system.
log = logging.getLogger()
log.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s;%(levelname)s:: "
"%(message)s :: %(filename)s:%(lineno)s",
"%H:%M:%S")
ch.setFormatter(formatter)
log.addHandler(ch)
def sort_by_IP(t):
'''
Re-arrange a flow tuple to have lowest IP first, for lookup
'''
return (t[2], t[3], t[0], t[1], t[4]) if t[2] < t[0] else t
def dumphex(s):
    '''Log a hex dump of s, 16 bytes per line.'''
    bytes = ['%.2x' % ord(c) for c in s]
    for i in xrange(0, len(bytes), 16):
        log.error('    %s' % ' '.join(bytes[i:i+16]))
class Flowtbag:
'''
classdocs
'''
def __init__(self, packets):
try:
self.count = 0
self.flow_count = 0
self.active_flows = {}
for pkt in packets: self.callback(*pkt)
except KeyboardInterrupt:
exit(0)
def __repr__(self):
raise NotImplementedError()
def __str__(self):
return "I am a Flowtbag of size %s" % (len(self.active_flows))
def exportAll(self):
for flow in self.active_flows.values():
flow.export()
def create_flow(self, pkt, flow_tuple):
self.flow_count += 1
flow = Flow(pkt, self.flow_count)
self.active_flows[flow_tuple] = flow
def cleanup_active(self, time):
count = 0
for flow_tuple in self.active_flows.keys():
flow = self.active_flows[flow_tuple]
if flow.checkidle(time):
#flow.export()
del self.active_flows[flow_tuple]
count += 1
log.info("Cleaned up %d idle flows" % count)
def decode_IP_layer(self, data, pkt):
pkt['version'] = (ord(data[0]) & 0xf0) >> 4
pkt['iphlen'] = (ord(data[0]) & 0x0f) * 4
pkt['dscp'] = ord(data[1]) >> 2
pkt['len'] = socket.ntohs(struct.unpack('H',data[2:4])[0])
pkt['proto'] = ord(data[9])
pkt['srcip'] = pcap.ntoa(struct.unpack('i',data[12:16])[0])
pkt['dstip'] = pcap.ntoa(struct.unpack('i',data[16:20])[0])
pkt['data'] = data[pkt['iphlen']:]
def decode_TCP_layer(self, data, pkt):
pkt['srcport'] = socket.ntohs(struct.unpack('H', data[0:2])[0])
pkt['dstport'] = socket.ntohs(struct.unpack('H', data[2:4])[0])
pkt['prhlen'] = ((ord(data[12]) & 0xf0) >> 4) * 4
pkt['flags'] = ord(data[13]) & 0x3f
def decode_UDP_layer(self, data, pkt):
pkt['srcport'] = socket.ntohs(struct.unpack('H', data[0:2])[0])
pkt['dstport'] = socket.ntohs(struct.unpack('H', data[2:4])[0])
pkt['prhlen'] = socket.ntohs(struct.unpack('H', data[4:6])[0])
def callback(self, pktlen, data, ts):
'''
The callback function to be used to process each packet
This function is applied to each individual packet in the capture via a
loop function in the construction of the Flowtbag.
Args:
pktlen -- The length of the packet
data -- The packet payload
ts -- The timestamp of the packet
'''
self.count += 1
if not data:
# I don't know when this happens, so I wanna know.
raise Exception("Packet %d has no data" % self.count)
pkt={}
# Check if the packet is an IP packet
if not data[12:14] == '\x08\x00':
#log.debug('Ignoring non-IP packet')
return
pkt['num'] = self.count
if len(data) < 34:
#Hmm, IP header seems to be too short
raise Exception("Packet %d too short for Ethernet + IP headers" % pkt['num'])
self.decode_IP_layer(data[14:], pkt)
if pkt['version'] != 4:
#Ignore non-IPv4
return
if pkt['proto'] == 6:
if len(pkt['data']) < 20:
log.info("Ignoring malformed TCP header on packet %d" %
(pkt['num']))
return
try:
self.decode_TCP_layer(pkt['data'], pkt)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
log.error("Error reading TCP header on packet %d" %
(pkt['num']))
log.error("Size: %d iphlen: %d" %
(len(data), pkt['iphlen']))
log.error("TCP header size: %d" % len(pkt['data']))
dumphex(data)
log.error(repr(traceback.format_exception(exc_type,
exc_value,
exc_traceback)))
raise e
elif pkt['proto'] == 17:
if len(pkt['data']) < 8:
log.info("Ignoring malformed UDP header on packet %d" %
(pkt['num']))
return
try:
self.decode_UDP_layer(pkt['data'], pkt)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
log.error("Error reading UDP header on packet %d" %
(pkt['num']))
dumphex(data)
log.error(repr(traceback.format_exception(exc_type,
exc_value,
exc_traceback)))
raise e
else:
#log.debug('Ignoring non-TCP/UDP packet')
return
# We're really going ahead with this packet! Let's get 'er done.
pkt['time'] = int(ts * 1000000)
flow_tuple = (pkt['srcip'],
pkt['srcport'],
pkt['dstip'],
pkt['dstport'],
pkt['proto'])
flow_tuple = sort_by_IP(flow_tuple)
# Find if a flow already exists for this tuple
if flow_tuple not in self.active_flows:
# A flow of this tuple does not exist yet, create it.
self.create_flow(pkt, flow_tuple)
else:
# A flow of this tuple already exists, add to it.
flow = self.active_flows[flow_tuple]
return_val = flow.add(pkt)
if return_val == 0:
return
elif return_val == 1:
#This packet ended the TCP connection. Export it.
#flow.export()
del self.active_flows[flow_tuple]
elif return_val == 2:
# This packet has been added to the wrong flow. This means the
# previous flow has ended. We export the old flow, remove it,
# and create a new flow.
#flow.export()
del self.active_flows[flow_tuple]
self.create_flow(pkt, flow_tuple)
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(description='Converts a network capture '\
'file into a comma separated value list of integers representing ' \
'a list of flow statistics.')
arg_parser.add_argument('capture_file',
help='The capture file to be converted')
arg_parser.add_argument('--debug',
dest='debug',
action='store_true',
default=False,
help='display debugging information')
arg_parser.add_argument('-r',
dest='report',
type=int,
default=5000000,
help='interval (in packets) at which stats are reported')
args = arg_parser.parse_args()
if args.report:
REPORT_INTERVAL = args.report
if args.debug:
log.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
log.debug("Flowtbag begin")
# Flowtbag's constructor expects an iterable of (pktlen, data, ts) tuples.
# Open the capture with pylibpcap and feed packets in; pcapObject.next() is
# assumed to return such a tuple, or None at end of file.
p = pcap.pcapObject()
p.open_offline(args.capture_file)
Flowtbag(iter(p.next, None))
log.debug("Flowtbag end")
|
py | 1a4d2f5ebf1d5f2c1d36640f619b9a603905ae78 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetDiskResult',
'AwaitableGetDiskResult',
'get_disk',
]
@pulumi.output_type
class GetDiskResult:
"""
A Disk.
"""
def __init__(__self__, created_date=None, disk_blob_name=None, disk_size_gi_b=None, disk_type=None, disk_uri=None, host_caching=None, id=None, leased_by_lab_vm_id=None, location=None, managed_disk_id=None, name=None, provisioning_state=None, tags=None, type=None, unique_identifier=None):
if created_date and not isinstance(created_date, str):
raise TypeError("Expected argument 'created_date' to be a str")
pulumi.set(__self__, "created_date", created_date)
if disk_blob_name and not isinstance(disk_blob_name, str):
raise TypeError("Expected argument 'disk_blob_name' to be a str")
pulumi.set(__self__, "disk_blob_name", disk_blob_name)
if disk_size_gi_b and not isinstance(disk_size_gi_b, int):
raise TypeError("Expected argument 'disk_size_gi_b' to be a int")
pulumi.set(__self__, "disk_size_gi_b", disk_size_gi_b)
if disk_type and not isinstance(disk_type, str):
raise TypeError("Expected argument 'disk_type' to be a str")
pulumi.set(__self__, "disk_type", disk_type)
if disk_uri and not isinstance(disk_uri, str):
raise TypeError("Expected argument 'disk_uri' to be a str")
pulumi.set(__self__, "disk_uri", disk_uri)
if host_caching and not isinstance(host_caching, str):
raise TypeError("Expected argument 'host_caching' to be a str")
pulumi.set(__self__, "host_caching", host_caching)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if leased_by_lab_vm_id and not isinstance(leased_by_lab_vm_id, str):
raise TypeError("Expected argument 'leased_by_lab_vm_id' to be a str")
pulumi.set(__self__, "leased_by_lab_vm_id", leased_by_lab_vm_id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if managed_disk_id and not isinstance(managed_disk_id, str):
raise TypeError("Expected argument 'managed_disk_id' to be a str")
pulumi.set(__self__, "managed_disk_id", managed_disk_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_identifier and not isinstance(unique_identifier, str):
raise TypeError("Expected argument 'unique_identifier' to be a str")
pulumi.set(__self__, "unique_identifier", unique_identifier)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> str:
"""
The creation date of the disk.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter(name="diskBlobName")
def disk_blob_name(self) -> Optional[str]:
"""
When backed by a blob, the name of the VHD blob without extension.
"""
return pulumi.get(self, "disk_blob_name")
@property
@pulumi.getter(name="diskSizeGiB")
def disk_size_gi_b(self) -> Optional[int]:
"""
The size of the disk in Gibibytes.
"""
return pulumi.get(self, "disk_size_gi_b")
@property
@pulumi.getter(name="diskType")
def disk_type(self) -> Optional[str]:
"""
The storage type for the disk (i.e. Standard, Premium).
"""
return pulumi.get(self, "disk_type")
@property
@pulumi.getter(name="diskUri")
def disk_uri(self) -> Optional[str]:
"""
When backed by a blob, the URI of underlying blob.
"""
return pulumi.get(self, "disk_uri")
@property
@pulumi.getter(name="hostCaching")
def host_caching(self) -> Optional[str]:
"""
The host caching policy of the disk (i.e. None, ReadOnly, ReadWrite).
"""
return pulumi.get(self, "host_caching")
@property
@pulumi.getter
def id(self) -> str:
"""
The identifier of the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="leasedByLabVmId")
def leased_by_lab_vm_id(self) -> Optional[str]:
"""
The resource ID of the VM to which this disk is leased.
"""
return pulumi.get(self, "leased_by_lab_vm_id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedDiskId")
def managed_disk_id(self) -> Optional[str]:
"""
When backed by managed disk, this is the ID of the compute disk resource.
"""
return pulumi.get(self, "managed_disk_id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> Optional[str]:
"""
The unique immutable identifier of a resource (Guid).
"""
return pulumi.get(self, "unique_identifier")
class AwaitableGetDiskResult(GetDiskResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDiskResult(
created_date=self.created_date,
disk_blob_name=self.disk_blob_name,
disk_size_gi_b=self.disk_size_gi_b,
disk_type=self.disk_type,
disk_uri=self.disk_uri,
host_caching=self.host_caching,
id=self.id,
leased_by_lab_vm_id=self.leased_by_lab_vm_id,
location=self.location,
managed_disk_id=self.managed_disk_id,
name=self.name,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type,
unique_identifier=self.unique_identifier)
def get_disk(expand: Optional[str] = None,
lab_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
user_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDiskResult:
"""
A Disk.
:param str expand: Specify the $expand query. Example: 'properties($select=diskType)'
:param str lab_name: The name of the lab.
:param str name: The name of the disk.
:param str resource_group_name: The name of the resource group.
:param str user_name: The name of the user profile.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['labName'] = lab_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['userName'] = user_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:devtestlab/v20160515:getDisk', __args__, opts=opts, typ=GetDiskResult).value
return AwaitableGetDiskResult(
created_date=__ret__.created_date,
disk_blob_name=__ret__.disk_blob_name,
disk_size_gi_b=__ret__.disk_size_gi_b,
disk_type=__ret__.disk_type,
disk_uri=__ret__.disk_uri,
host_caching=__ret__.host_caching,
id=__ret__.id,
leased_by_lab_vm_id=__ret__.leased_by_lab_vm_id,
location=__ret__.location,
managed_disk_id=__ret__.managed_disk_id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type,
unique_identifier=__ret__.unique_identifier)
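# Example use inside a Pulumi program (a sketch; the lab, disk, resource
# group, and user names below are placeholders, not values from this repo):
#
#     disk = get_disk(lab_name='my-lab',
#                     name='my-disk',
#                     resource_group_name='my-rg',
#                     user_name='my-user')
#     pulumi.export('diskUri', disk.disk_uri)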
|
py | 1a4d31a8296e325cae8bec2fe27cca170eb211b2 | import cv2
import numpy as np
def showImage():
    filename = "Images/lena.jpg"
    img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise IOError("Could not read %s" % filename)
    cv2.imshow('image', img)
    # Histogram equalization redistributes intensities to improve contrast.
    equ_img = cv2.equalizeHist(img)
    cv2.imshow('equalized image', equ_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
showImage()
|
py | 1a4d359d27ca15f0b180c086f7dac8cb96cafc71 | # -*- coding: windows-1252 -*-
'''
Record Order in BIFF8
Workbook Globals Substream
BOF Type = workbook globals
Interface Header
MMS
Interface End
WRITEACCESS
CODEPAGE
DSF
TABID
FNGROUPCOUNT
Workbook Protection Block
WINDOWPROTECT
PROTECT
PASSWORD
PROT4REV
PROT4REVPASS
BACKUP
HIDEOBJ
WINDOW1
DATEMODE
PRECISION
REFRESHALL
BOOKBOOL
FONT +
FORMAT *
XF +
STYLE +
? PALETTE
USESELFS
BOUNDSHEET +
COUNTRY
? Link Table
SST
ExtSST
EOF
'''
import BIFFRecords
import Style
class Workbook(object):
#################################################################
## Constructor
#################################################################
def __init__(self, encoding='ascii', style_compression=0):
self.encoding = encoding
self.__owner = 'None'
self.__country_code = None # 0x07 is Russia :-)
self.__wnd_protect = 0
self.__obj_protect = 0
self.__protect = 0
self.__backup_on_save = 0
# for WINDOW1 record
self.__hpos_twips = 0x01E0
self.__vpos_twips = 0x005A
self.__width_twips = 0x3FCF
self.__height_twips = 0x2A4E
self.__active_sheet = 0
self.__first_tab_index = 0
self.__selected_tabs = 0x01
self.__tab_width_twips = 0x0258
self.__wnd_hidden = 0
self.__wnd_mini = 0
self.__hscroll_visible = 1
self.__vscroll_visible = 1
self.__tabs_visible = 1
self.__styles = Style.StyleCollection(style_compression)
self.__dates_1904 = 0
self.__use_cell_values = 1
self.__sst = BIFFRecords.SharedStringTable(self.encoding)
self.__worksheets = []
self.__worksheet_idx_from_name = {}
self.__sheet_refs = {}
self._supbook_xref = {}
self._xcall_xref = {}
self._ownbook_supbookx = None
self._ownbook_supbook_ref = None
self._xcall_supbookx = None
self._xcall_supbook_ref = None
#################################################################
## Properties, "getters", "setters"
#################################################################
def get_style_stats(self):
return self.__styles.stats[:]
def set_owner(self, value):
self.__owner = value
def get_owner(self):
return self.__owner
owner = property(get_owner, set_owner)
#################################################################
def set_country_code(self, value):
self.__country_code = value
def get_country_code(self):
return self.__country_code
country_code = property(get_country_code, set_country_code)
#################################################################
def set_wnd_protect(self, value):
self.__wnd_protect = int(value)
def get_wnd_protect(self):
return bool(self.__wnd_protect)
wnd_protect = property(get_wnd_protect, set_wnd_protect)
#################################################################
def set_obj_protect(self, value):
self.__obj_protect = int(value)
def get_obj_protect(self):
return bool(self.__obj_protect)
obj_protect = property(get_obj_protect, set_obj_protect)
#################################################################
def set_protect(self, value):
self.__protect = int(value)
def get_protect(self):
return bool(self.__protect)
protect = property(get_protect, set_protect)
#################################################################
def set_backup_on_save(self, value):
self.__backup_on_save = int(value)
def get_backup_on_save(self):
return bool(self.__backup_on_save)
backup_on_save = property(get_backup_on_save, set_backup_on_save)
#################################################################
def set_hpos(self, value):
self.__hpos_twips = value & 0xFFFF
def get_hpos(self):
return self.__hpos_twips
hpos = property(get_hpos, set_hpos)
#################################################################
def set_vpos(self, value):
self.__vpos_twips = value & 0xFFFF
def get_vpos(self):
return self.__vpos_twips
vpos = property(get_vpos, set_vpos)
#################################################################
def set_width(self, value):
self.__width_twips = value & 0xFFFF
def get_width(self):
return self.__width_twips
width = property(get_width, set_width)
#################################################################
def set_height(self, value):
self.__height_twips = value & 0xFFFF
def get_height(self):
return self.__height_twips
height = property(get_height, set_height)
#################################################################
def set_active_sheet(self, value):
self.__active_sheet = value & 0xFFFF
self.__first_tab_index = self.__active_sheet
def get_active_sheet(self):
return self.__active_sheet
active_sheet = property(get_active_sheet, set_active_sheet)
#################################################################
def set_tab_width(self, value):
self.__tab_width_twips = value & 0xFFFF
def get_tab_width(self):
return self.__tab_width_twips
tab_width = property(get_tab_width, set_tab_width)
#################################################################
def set_wnd_visible(self, value):
self.__wnd_hidden = int(not value)
def get_wnd_visible(self):
return not bool(self.__wnd_hidden)
wnd_visible = property(get_wnd_visible, set_wnd_visible)
#################################################################
def set_wnd_mini(self, value):
self.__wnd_mini = int(value)
def get_wnd_mini(self):
return bool(self.__wnd_mini)
wnd_mini = property(get_wnd_mini, set_wnd_mini)
#################################################################
def set_hscroll_visible(self, value):
self.__hscroll_visible = int(value)
def get_hscroll_visible(self):
return bool(self.__hscroll_visible)
hscroll_visible = property(get_hscroll_visible, set_hscroll_visible)
#################################################################
def set_vscroll_visible(self, value):
self.__vscroll_visible = int(value)
def get_vscroll_visible(self):
return bool(self.__vscroll_visible)
vscroll_visible = property(get_vscroll_visible, set_vscroll_visible)
#################################################################
def set_tabs_visible(self, value):
self.__tabs_visible = int(value)
def get_tabs_visible(self):
return bool(self.__tabs_visible)
tabs_visible = property(get_tabs_visible, set_tabs_visible)
#################################################################
def set_dates_1904(self, value):
self.__dates_1904 = int(value)
def get_dates_1904(self):
return bool(self.__dates_1904)
dates_1904 = property(get_dates_1904, set_dates_1904)
#################################################################
def set_use_cell_values(self, value):
self.__use_cell_values = int(value)
def get_use_cell_values(self):
return bool(self.__use_cell_values)
use_cell_values = property(get_use_cell_values, set_use_cell_values)
#################################################################
def get_default_style(self):
return self.__styles.default_style
default_style = property(get_default_style)
##################################################################
## Methods
##################################################################
def add_style(self, style):
return self.__styles.add(style)
def add_font(self, font):
return self.__styles.add_font(font)
def add_str(self, s):
return self.__sst.add_str(s)
def del_str(self, sst_idx):
self.__sst.del_str(sst_idx)
def str_index(self, s):
return self.__sst.str_index(s)
def add_rt(self, rt):
return self.__sst.add_rt(rt)
def rt_index(self, rt):
return self.__sst.rt_index(rt)
def add_sheet(self, sheetname, cell_overwrite_ok=False):
import Worksheet, Utils
if not isinstance(sheetname, unicode):
sheetname = sheetname.decode(self.encoding)
if not Utils.valid_sheet_name(sheetname):
raise Exception("invalid worksheet name %r" % sheetname)
lower_name = sheetname.lower()
if lower_name in self.__worksheet_idx_from_name:
raise Exception("duplicate worksheet name %r" % sheetname)
self.__worksheet_idx_from_name[lower_name] = len(self.__worksheets)
self.__worksheets.append(Worksheet.Worksheet(sheetname, self, cell_overwrite_ok))
return self.__worksheets[-1]
def get_sheet(self, sheetnum):
return self.__worksheets[sheetnum]
def raise_bad_sheetname(self, sheetname):
raise Exception("Formula: unknown sheet name %s" % sheetname)
def convert_sheetindex(self, strg_ref, n_sheets):
idx = int(strg_ref)
if 0 <= idx < n_sheets:
return idx
msg = "Formula: sheet index (%s) >= number of sheets (%d)" % (strg_ref, n_sheets)
raise Exception(msg)
def _get_supbook_index(self, tag):
if tag in self._supbook_xref:
return self._supbook_xref[tag]
self._supbook_xref[tag] = idx = len(self._supbook_xref)
return idx
def setup_ownbook(self):
self._ownbook_supbookx = self._get_supbook_index(('ownbook', 0))
self._ownbook_supbook_ref = None
reference = (self._ownbook_supbookx, 0xFFFE, 0xFFFE)
if reference in self.__sheet_refs:
raise Exception("can't happen")
self.__sheet_refs[reference] = self._ownbook_supbook_ref = len(self.__sheet_refs)
def setup_xcall(self):
self._xcall_supbookx = self._get_supbook_index(('xcall', 0))
self._xcall_supbook_ref = None
reference = (self._xcall_supbookx, 0xFFFE, 0xFFFE)
if reference in self.__sheet_refs:
raise Exception("can't happen")
self.__sheet_refs[reference] = self._xcall_supbook_ref = len(self.__sheet_refs)
def add_sheet_reference(self, formula):
patches = []
n_sheets = len(self.__worksheets)
sheet_refs, xcall_refs = formula.get_references()
for ref0, ref1, offset in sheet_refs:
if not ref0.isdigit():
try:
ref0n = self.__worksheet_idx_from_name[ref0.lower()]
except KeyError:
self.raise_bad_sheetname(ref0)
else:
ref0n = self.convert_sheetindex(ref0, n_sheets)
if ref1 == ref0:
ref1n = ref0n
elif not ref1.isdigit():
try:
ref1n = self.__worksheet_idx_from_name[ref1.lower()]
except KeyError:
self.raise_bad_sheetname(ref1)
else:
ref1n = self.convert_sheetindex(ref1, n_sheets)
if ref1n < ref0n:
msg = "Formula: sheets out of order; %r:%r -> (%d, %d)" \
% (ref0, ref1, ref0n, ref1n)
raise Exception(msg)
if self._ownbook_supbookx is None:
self.setup_ownbook()
reference = (self._ownbook_supbookx, ref0n, ref1n)
if reference in self.__sheet_refs:
patches.append((offset, self.__sheet_refs[reference]))
else:
nrefs = len(self.__sheet_refs)
if nrefs > 65535:
raise Exception('More than 65536 inter-sheet references')
self.__sheet_refs[reference] = nrefs
patches.append((offset, nrefs))
for funcname, offset in xcall_refs:
if self._ownbook_supbookx is None:
self.setup_ownbook()
if self._xcall_supbookx is None:
self.setup_xcall()
# print funcname, self._supbook_xref
patches.append((offset, self._xcall_supbook_ref))
if not isinstance(funcname, unicode):
funcname = funcname.decode(self.encoding)
if funcname in self._xcall_xref:
idx = self._xcall_xref[funcname]
else:
self._xcall_xref[funcname] = idx = len(self._xcall_xref)
patches.append((offset + 2, idx + 1))
formula.patch_references(patches)
##################################################################
## BIFF records generation
##################################################################
def __bof_rec(self):
return BIFFRecords.Biff8BOFRecord(BIFFRecords.Biff8BOFRecord.BOOK_GLOBAL).get()
def __eof_rec(self):
return BIFFRecords.EOFRecord().get()
def __intf_hdr_rec(self):
return BIFFRecords.InteraceHdrRecord().get()
def __intf_end_rec(self):
return BIFFRecords.InteraceEndRecord().get()
def __intf_mms_rec(self):
return BIFFRecords.MMSRecord().get()
def __write_access_rec(self):
return BIFFRecords.WriteAccessRecord(self.__owner).get()
def __wnd_protect_rec(self):
return BIFFRecords.WindowProtectRecord(self.__wnd_protect).get()
def __obj_protect_rec(self):
return BIFFRecords.ObjectProtectRecord(self.__obj_protect).get()
def __protect_rec(self):
return BIFFRecords.ProtectRecord(self.__protect).get()
def __password_rec(self):
return BIFFRecords.PasswordRecord().get()
def __prot4rev_rec(self):
return BIFFRecords.Prot4RevRecord().get()
def __prot4rev_pass_rec(self):
return BIFFRecords.Prot4RevPassRecord().get()
def __backup_rec(self):
return BIFFRecords.BackupRecord(self.__backup_on_save).get()
def __hide_obj_rec(self):
return BIFFRecords.HideObjRecord().get()
def __window1_rec(self):
flags = 0
flags |= (self.__wnd_hidden) << 0
flags |= (self.__wnd_mini) << 1
flags |= (self.__hscroll_visible) << 3
flags |= (self.__vscroll_visible) << 4
flags |= (self.__tabs_visible) << 5
return BIFFRecords.Window1Record(self.__hpos_twips, self.__vpos_twips,
self.__width_twips, self.__height_twips,
flags,
self.__active_sheet, self.__first_tab_index,
self.__selected_tabs, self.__tab_width_twips).get()
def __codepage_rec(self):
return BIFFRecords.CodepageBiff8Record().get()
def __country_rec(self):
if not self.__country_code:
return ''
return BIFFRecords.CountryRecord(self.__country_code, self.__country_code).get()
def __dsf_rec(self):
return BIFFRecords.DSFRecord().get()
def __tabid_rec(self):
return BIFFRecords.TabIDRecord(len(self.__worksheets)).get()
def __fngroupcount_rec(self):
return BIFFRecords.FnGroupCountRecord().get()
def __datemode_rec(self):
return BIFFRecords.DateModeRecord(self.__dates_1904).get()
def __precision_rec(self):
return BIFFRecords.PrecisionRecord(self.__use_cell_values).get()
def __refresh_all_rec(self):
return BIFFRecords.RefreshAllRecord().get()
def __bookbool_rec(self):
return BIFFRecords.BookBoolRecord().get()
def __all_fonts_num_formats_xf_styles_rec(self):
return self.__styles.get_biff_data()
def __palette_rec(self):
result = ''
return result
def __useselfs_rec(self):
return BIFFRecords.UseSelfsRecord().get()
def __boundsheets_rec(self, data_len_before, data_len_after, sheet_biff_lens):
# .................................
# BOUNDSHEET0
# BOUNDSHEET1
# BOUNDSHEET2
# ..................................
# WORKSHEET0
# WORKSHEET1
# WORKSHEET2
boundsheets_len = 0
for sheet in self.__worksheets:
boundsheets_len += len(BIFFRecords.BoundSheetRecord(
0x00L, sheet.visibility, sheet.name, self.encoding
).get())
start = data_len_before + boundsheets_len + data_len_after
result = ''
for sheet_biff_len, sheet in zip(sheet_biff_lens, self.__worksheets):
result += BIFFRecords.BoundSheetRecord(
start, sheet.visibility, sheet.name, self.encoding
).get()
start += sheet_biff_len
return result
def __all_links_rec(self):
pieces = []
temp = [(idx, tag) for tag, idx in self._supbook_xref.items()]
temp.sort()
for idx, tag in temp:
stype, snum = tag
if stype == 'ownbook':
rec = BIFFRecords.InternalReferenceSupBookRecord(len(self.__worksheets)).get()
pieces.append(rec)
elif stype == 'xcall':
rec = BIFFRecords.XcallSupBookRecord().get()
pieces.append(rec)
temp = [(idx, name) for name, idx in self._xcall_xref.items()]
temp.sort()
for idx, name in temp:
rec = BIFFRecords.ExternnameRecord(
options=0, index=0, name=name, fmla='\x02\x00\x1c\x17').get()
pieces.append(rec)
else:
raise Exception('unknown supbook stype %r' % stype)
if len(self.__sheet_refs) > 0:
# get references in index order
temp = [(idx, ref) for ref, idx in self.__sheet_refs.items()]
temp.sort()
temp = [ref for idx, ref in temp]
externsheet_record = BIFFRecords.ExternSheetRecord(temp).get()
pieces.append(externsheet_record)
return ''.join(pieces)
def __sst_rec(self):
return self.__sst.get_biff_record()
def __ext_sst_rec(self, abs_stream_pos):
return ''
#return BIFFRecords.ExtSSTRecord(abs_stream_pos, self.sst_record.str_placement,
#self.sst_record.portions_len).get()
def get_biff_data(self):
before = ''
before += self.__bof_rec()
before += self.__intf_hdr_rec()
before += self.__intf_mms_rec()
before += self.__intf_end_rec()
before += self.__write_access_rec()
before += self.__codepage_rec()
before += self.__dsf_rec()
before += self.__tabid_rec()
before += self.__fngroupcount_rec()
before += self.__wnd_protect_rec()
before += self.__protect_rec()
before += self.__obj_protect_rec()
before += self.__password_rec()
before += self.__prot4rev_rec()
before += self.__prot4rev_pass_rec()
before += self.__backup_rec()
before += self.__hide_obj_rec()
before += self.__window1_rec()
before += self.__datemode_rec()
before += self.__precision_rec()
before += self.__refresh_all_rec()
before += self.__bookbool_rec()
before += self.__all_fonts_num_formats_xf_styles_rec()
before += self.__palette_rec()
before += self.__useselfs_rec()
country = self.__country_rec()
all_links = self.__all_links_rec()
shared_str_table = self.__sst_rec()
after = country + all_links + shared_str_table
ext_sst = self.__ext_sst_rec(0) # need fake cause we need calc stream pos
eof = self.__eof_rec()
self.__worksheets[self.__active_sheet].selected = True
sheets = ''
sheet_biff_lens = []
for sheet in self.__worksheets:
data = sheet.get_biff_data()
sheets += data
sheet_biff_lens.append(len(data))
bundlesheets = self.__boundsheets_rec(len(before), len(after)+len(ext_sst)+len(eof), sheet_biff_lens)
sst_stream_pos = len(before) + len(bundlesheets) + len(country) + len(all_links)
ext_sst = self.__ext_sst_rec(sst_stream_pos)
return before + bundlesheets + after + ext_sst + eof + sheets
def save(self, filename):
import CompoundDoc
doc = CompoundDoc.XlsDoc()
doc.save(filename, self.get_biff_data())
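if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module; assumes the
    # surrounding xlwt package, including Worksheet.write, is importable):
    wb = Workbook()
    ws = wb.add_sheet('Sheet1')
    ws.write(0, 0, 'hello')
    wb.save('demo.xls')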
|
py | 1a4d35bb29543cd87d9e0b2839eeb8545e6e72bf | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=0.25, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha, (float, int)): self.alpha = torch.Tensor([alpha, 1 - alpha])
if isinstance(alpha, list): self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim() > 2:
input = input.view(input.size(0), input.size(1), -1) # N,C,H,W => N,C,H*W
input = input.transpose(1, 2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1, 1)
logpt = F.log_softmax(input, dim=1)  # softmax over the class dimension
logpt = logpt.gather(1, target)
logpt = logpt.view(-1)
pt = Variable(logpt.data.exp())
if self.alpha is not None:
if self.alpha.type() != input.data.type():
self.alpha = self.alpha.type_as(input.data)
at = self.alpha.gather(0, target.data.view(-1))
logpt = logpt * Variable(at)
loss = -1 * (1 - pt) ** self.gamma * logpt
if self.size_average:
return loss.mean()
else:
return loss.sum()
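if __name__ == '__main__':
    # Quick self-check (a sketch, not part of the original file). Note that
    # the default alpha=[alpha, 1 - alpha] weighting assumes binary targets,
    # so this example uses two classes.
    loss_fn = FocalLoss(gamma=2, alpha=0.25)
    logits = torch.randn(8, 2, requires_grad=True)
    targets = torch.randint(0, 2, (8,))
    loss = loss_fn(logits, targets)
    loss.backward()
    print(loss.item())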
|
py | 1a4d35f6fc62ffaec05a80e0cdbcad63a242db33 | from django.conf import settings
from django.http import Http404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from rest_framework.views import APIView
from netbox.api.authentication import TokenAuthentication
from sidekick import utils
from sidekick.models import (
NetworkServiceGroup,
)
from tenancy.models import Tenant
class NetworkUsageListGroupsView(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
renderer_classes = [JSONRenderer]
def get(self, request):
result = []
for g in NetworkServiceGroup.objects.all():
result.append({
"id": g.id,
"name": g.name,
})
return Response(result)
class NetworkUsageListMembersView(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
renderer_classes = [JSONRenderer]
def get(self, request):
result = []
for t in Tenant.objects.filter(group__name='Members'):
result.append({
"id": t.id,
"name": t.name,
})
return Response(result)
class NetworkUsageGroupView(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
renderer_classes = [JSONRenderer]
def get(self, request, group_id=None):
graphite_render_host = settings.PLUGINS_CONFIG['sidekick'].get('graphite_render_host', None)
if graphite_render_host is None:
return Response({})
group_id = self.kwargs.get('group_id', None)
if group_id is None:
raise Http404
try:
service_group = NetworkServiceGroup.objects.get(id=group_id)
except NetworkServiceGroup.DoesNotExist:
raise Http404
period = utils.get_period(request)
services_by_member = {}
accounting_by_member = {}
for network_service in service_group.network_services.all():
member = network_service.member
if member.name not in services_by_member.keys():
services_by_member[member.name] = []
if member.name not in accounting_by_member.keys():
accounting_by_member[member.name] = []
services_by_member[member.name].append(network_service)
accounting_by_member[member.name] = utils.get_accounting_sources(member)
services_in = []
services_out = []
accounting_in = []
accounting_out = []
remaining_in = []
remaining_out = []
for member_name in services_by_member.keys():
services = services_by_member[member_name]
accounting = accounting_by_member[member_name]
(_in, _out) = utils.format_graphite_service_query(services)
services_in.append(_in)
services_out.append(_out)
(_in, _out) = utils.format_graphite_accounting_query(accounting)
accounting_in.append(_in)
accounting_out.append(_out)
(_in, _out) = utils.format_graphite_remaining_query(services, accounting)
remaining_in.append(_in)
remaining_out.append(_out)
service_data = utils.get_graphite_data(graphite_render_host, services_in, services_out, period)
accounting_data = utils.get_graphite_data(graphite_render_host, accounting_in, accounting_out, period)
remaining_data = utils.get_graphite_data(graphite_render_host, remaining_in, remaining_out, period)
graph_data = {
'service_data': service_data['data'],
'remaining_data': [service_data['data'][0], [0], [0]],
'accounting_data': [service_data['data'][0], [0], [0]],
}
queries = {
'service_data': service_data['query'],
'remaining_data': remaining_data['query'],
'accounting_data': accounting_data['query'],
}
if accounting_data is not None and 'data' in accounting_data:
graph_data['accounting_data'] = accounting_data['data']
if remaining_data is not None and 'data' in remaining_data:
graph_data['remaining_data'] = remaining_data['data']
return Response({
'graph_data': graph_data,
'queries': queries,
})
class NetworkUsageMemberView(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
renderer_classes = [JSONRenderer]
def get(self, request, member_id=None):
graphite_render_host = settings.PLUGINS_CONFIG['sidekick'].get('graphite_render_host', None)
if graphite_render_host is None:
return Response({})
member_id = self.kwargs.get('member_id', None)
if member_id is None:
raise Http404
try:
member = Tenant.objects.get(id=member_id)
except Tenant.DoesNotExist:
raise Http404
services = utils.get_services(member)
period = utils.get_period(request)
(services_in, services_out) = utils.format_graphite_service_query(services)
service_data = utils.get_graphite_data(graphite_render_host, [services_in], [services_out], period)
accounting_data = None
accounting_sources = utils.get_accounting_sources(member)
if len(accounting_sources) > 0:
(accounting_in, accounting_out) = utils.format_graphite_accounting_query(accounting_sources)
accounting_data = utils.get_graphite_data(graphite_render_host, [accounting_in], [accounting_out], period)
(remaining_in, remaining_out) = utils.format_graphite_remaining_query(services, accounting_sources)
remaining_data = utils.get_graphite_data(graphite_render_host, [remaining_in], [remaining_out], period)
graph_data = {
'service_data': service_data['data'],
'remaining_data': [service_data['data'][0], [0], [0]],
'accounting_data': [service_data['data'][0], [0], [0]],
}
queries = {
'service_data': service_data['query'],
}
if accounting_data is not None and 'data' in accounting_data:
graph_data['accounting_data'] = accounting_data['data']
queries['accounting_data'] = accounting_data['query']
if remaining_data is not None and 'data' in remaining_data:
graph_data['remaining_data'] = remaining_data['data']
queries['remaining_data'] = remaining_data['query']
return Response({
'graph_data': graph_data,
'queries': queries,
})
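# Example request against one of these endpoints (a sketch; the exact URL
# prefix depends on how the plugin's urls.py mounts these views, which is
# not shown here):
#
#     curl -H "Authorization: Token $NETBOX_TOKEN" \
#          https://netbox.example.org/api/plugins/sidekick/groups/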
|
py | 1a4d3705e4e85a2468ad48f4cae8d580ac64965b | """
The tool to check the availability or syntax of domains, IPv4, IPv6 or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
This file is part of the PyFunceble project.
Provides the package.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
MIT License
Copyright (c) 2019, 2020 PyFunceble
Copyright (c) 2017, 2018, 2019, 2020 Nissar Chababy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
VERSION = "1.6.0"
|
py | 1a4d370ad4bc582ed1f069031c57516aeb751341 | import numpy as np
import pandas
output_data = []
train_csv = pandas.read_csv('data/train.csv', index_col=0)
test_csv = pandas.read_csv('data/test_2.csv', index_col=0)
train_X = train_csv.drop(train_csv.columns[range(146, 210)], axis=1).values
for i in range(62): # t=121 to 180, and D+1, D+2
if i == 60:
name_of_column = 'Ret_PlusOne'
name_of_weight = 'Weight_Daily'
elif i == 61:
name_of_column = 'Ret_PlusTwo'
name_of_weight = 'Weight_Daily'
else:
name_of_column = 'Ret_' + str(i + 120)
name_of_weight = 'Weight_Intraday'
train_y = train_csv[name_of_column].values
train_weights = train_csv[name_of_weight].values
test_X = test_csv.values
# training and prediction logic goes here, e.g.:
# model.fit(train_X, train_y, sample_weight=train_weights)
# pred = model.predict(test_X)
# Placeholder predictions (hypothetical) so the script runs end-to-end
# until a real model is plugged in:
pred = np.zeros(len(test_X))
for stock_id, val in enumerate(pred):
output_data.append(
{'Id': str(stock_id + 1) + '_' + str(i), 'Predicted': val})
output = pandas.DataFrame(data=output_data)
output.sort_values(by='Id', inplace=True)
# print(output.head())
output.to_csv(path_or_buf='data/output.csv', index=False)
|
py | 1a4d3826fe05523d57fcaa16a40fcae86b60e476 | # -*- coding: utf-8 -*-
"""
numcolorpy.py
Created Saturday April 22 2017
@author: del
[email protected]
[email protected]
import numcolorpy as ncp
"""
import time
import numpy as np
from PIL import Image as IP
from PIL import ImageColor as IC
import colorsys
def range_norm(Z, lo=0.0, hi=1.0):
""" normaize input matrix Z within a lo - hi range
"""
I = graphic_norm(Z)
hi = max(min(hi, 1.0), 0.0)
lo = min(max(lo, 0.0), 1.0)
low_fence = min(hi, lo)
hi_fence = max(hi, lo)
if low_fence == hi_fence:
return I
v_span = hi_fence - low_fence
I = I * v_span + low_fence
return I
def etg_norm(Z0, Z, ET):
""" Zd, Zr, ETn = etg_norm(Z0, Z, ET); Graphically usable matrices from escape time algorithm result
"""
ETn = mat2graphic(ET)
Zv = Z - Z0
Zd = mat2graphic(Zv)
Zr = mat2graphic(np.arctan2(np.imag(Zv), np.real(Zv)))
return Zd, Zr, ETn
def mat2graphic(Z):
""" M, nClrs = mat2graphic(Z)
Use all the transformation tricks to prepare input matrix Z
for conversion to a viewable image.
Args:
Z: real or complex (rows x cols x 1) matrix
Returns:
M: real (rows x cols x 1) matrix (0 <= M <= 1)
"""
M, nClrs = flat_index(np.abs(Z))
return graphic_norm(M)
def graphic_norm(Z):
""" rescale matrix z to distance (float) s.t.
0 <= z <= 1 (will include 0,1 if it has more than 1 value)
Args:
Z: is a real or complex two dimensional matrix
Returns:
Z: same size, real valued matrix with smallest member = 0, largest = 1
"""
EPSILON = 1e-15
I = np.abs(Z)
I = I - I.min()
return I / max(EPSILON, I.max())
def flat_index(float_mat):
""" convert the input matrix to integers from 0 to number of unique values.
Args:
float_mat: two dimensional matrix.
Return:
float_mat: re-enumerated so that the matrix values are all sequential ints.
n_colors: number of unique values in the input / output matrix
"""
rows = float_mat.shape[0]
cols = float_mat.shape[1]
float_mat = np.reshape(float_mat, (1, float_mat.size))
ixA = np.argsort(float_mat)[0]
current_value = float_mat[0, ixA[0]]
enumeration_value = 0
for ix in ixA:
if float_mat[0,ix] != current_value:
current_value = float_mat[0,ix]
enumeration_value += 1
float_mat[0,ix] = enumeration_value
float_mat = np.array(np.reshape(float_mat, (rows, cols)))
float_mat = np.int_(float_mat)
n_colors = enumeration_value + 1
return float_mat, n_colors
def gray_mat(V):
n_rows = V.shape[0]
n_cols = V.shape[1]
V = V * 255
I = IP.new('RGB', (n_cols, n_rows))
for row in range(0, I.height):
for col in range(0, I.width):
P = tuple(np.int_([V[row, col], V[row, col], V[row, col]]))
I.putpixel((col, row), P)
return I
def rgb_2_hsv_mat(H, S, V):
n_rows = H.shape[0]
n_cols = H.shape[1]
I = IP.new('RGB', (n_cols, n_rows))
for row in range(0, I.height):
for col in range(0, I.width):
red, green, blue = colorsys.hsv_to_rgb(H[row, col], S[row, col], V[row, col])
red = int(np.round( red * 255 ))
green = int(np.round( green * 255 ))
blue = int(np.round( blue * 255 ))
P = (red, green, blue)
I.putpixel((col, row), P)
return I
def mat_to_gray(V, max_v=255, min_v=0):
R_max = max(min(max_v, 255), 0)
R_floor = min(max(min_v, 0), R_max)
G_max = max(min(max_v, 255), 0)
G_floor = min(max(min_v, 0), G_max)
B_max = max(min(max_v, 255), 0)
B_floor = min(max(min_v, 0), B_max)
return mat_to_Shade(V, R_max, G_max, B_max, R_floor, G_floor, B_floor)
def mat_to_red(V):
R_max = 255
R_floor = 180
G_max = 250
G_floor = 30
B_max = 250
B_floor = 30
return mat_to_Shade(V, R_max, G_max, B_max, R_floor, G_floor, B_floor)
def mat_to_green(V):
R_max = 250
R_floor = 30
G_max = 255
G_floor = 130
B_max = 250
B_floor = 30
return mat_to_Shade(V, R_max, G_max, B_max, R_floor, G_floor, B_floor)
def mat_to_blue(V):
R_max = 250
R_floor = 30
G_max = 250
G_floor = 30
B_max = 255
B_floor = 130
return mat_to_Shade(V, R_max, G_max, B_max, R_floor, G_floor, B_floor)
def mat_to_Shade(V, R_max, G_max, B_max, R_floor=0, G_floor=0, B_floor=0):
""" I = mat_to_gray(V)
matrix of values V, converted to a gray scale image
Args:
V: rows x cols x 1 numerical matrix
Returns:
I: rows x cols x 3 grayscale image
"""
R = R_max - R_floor
G = G_max - G_floor
B = B_max - B_floor
V = graphic_norm(V)
n_rows = V.shape[0]
n_cols = V.shape[1]
I = IP.new('RGB', (n_cols, n_rows))
for row in range(0, I.height):
for col in range(0, I.width):
P = tuple(np.int_(
[R_floor + V[row, col] * R, G_floor + V[row, col] * G, B_floor + V[row, col] * B]))
I.putpixel((col, row), P)
return I
def resize_color_map(mp0, n_colors):
""" givin a RGB colormap input return the same color order with n_colors number of colors
"""
mp = np.zeros((n_colors,3))
n_colors0 = mp0.shape[0]
if n_colors0 != n_colors:
tc = n_colors0 * n_colors
x = np.linspace(1,tc, n_colors0)
xq = np.linspace(1,tc, n_colors)
mp[:,0] = np.interp(xq, x, mp0[:,0])
mp[:,1] = np.interp(xq, x, mp0[:,1])
mp[:,2] = np.interp(xq, x, mp0[:,2])
return mp
def normat_hsv_intrgb(H, S, V, H_max=1.0, H_min=0.0, S_max=1.0, S_min=0.0, V_max=1.0, V_min=0.0):
""" I = normat_hsv_intrgb(H, S, V, H_max=1.0, H_min=0.0, S_max=1.0, S_min=0.0, V_max=1.0, V_min=0.0)
Three normalized matrices combined as an HSV image and converted to RGB
'normalized' means 0 <= M <= 1 where M is H, S, or V
Args:
H: rows x cols x 1 normalized matrix
S: rows x cols x 1 normalized matrix
V: rows x cols x 1 normalized matrix
Returns:
I: rows x cols x 3 hue-saturation-values image
"""
H_mul = H_max - H_min
S_mul = S_max - S_min
V_mul = V_max - V_min
n_rows = H.shape[0]
n_cols = H.shape[1]
I = IP.new('RGB', (n_cols, n_rows))
for row in range(0, I.height):
for col in range(0, I.width):
red, green, blue = colorsys.hsv_to_rgb(
H_min + H_mul * H[row, col],
S_min + S_mul * S[row, col],
V_min + V_mul * V[row, col])
red = int(np.round( red * 255 ))
green = int(np.round( green * 255 ))
blue = int(np.round( blue * 255 ))
P = (red, green, blue)
I.putpixel((col, row), P)
return I
def mat_to_mapped(A, mp):
n_rows = A.shape[0]
n_cols = A.shape[1]
A, nClrs = flat_index(A)
mp = resize_color_map(mp, nClrs)*255
I = IP.new('RGB', (n_cols, n_rows))
for r in range(0, n_rows):
for c in range(0, n_cols):
I.putpixel((c,r), tuple(np.uint8(mp[A[r,c], :])))
return I
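if __name__ == '__main__':
    # Minimal usage sketch (not in the original module): render a random
    # matrix as grayscale and red-shaded PIL images and save them.
    Z = np.random.rand(64, 64)
    mat_to_gray(Z).save('gray_demo.png')
    mat_to_red(Z).save('red_demo.png')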
|
py | 1a4d38689c325ebd313519c359881d0cd05faa35 | #!/usr/bin/env python2
# Copyright (c) 2014 The oaccoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework import oaccoinTestFramework
from util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class WalletBackupTest(oaccoinTestFramework):
def setup_chain(self):
logging.info("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
# This mirrors how the network was setup in the bash test
def setup_network(self, split=False):
# nodes 1, 2,3 are spenders, let's give them a keypool=100
extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
self.nodes = start_nodes(4, self.options.tmpdir, extra_args)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.is_network_split=False
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].setgenerate(True, 1)
# As above, this mirrors the original bash test.
def start_three(self):
self.nodes[0] = start_node(0, self.options.tmpdir)
self.nodes[1] = start_node(1, self.options.tmpdir)
self.nodes[2] = start_node(2, self.options.tmpdir)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 1)
stop_node(self.nodes[2], 2)
def erase_three(self):
os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")
def run_test(self):
logging.info("Generating initial blockchain")
self.nodes[0].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[1].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[2].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[3].setgenerate(True, 100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
logging.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
logging.info("Backing up")
tmpdir = self.options.tmpdir
self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
logging.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].setgenerate(True, 101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
logging.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
# Restore wallets from backup
shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
logging.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
logging.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
WalletBackupTest().main()
|
py | 1a4d3879f484ba2c5051811b5f0bd88592f85801 | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
"130.211.129.106", "178.63.107.226",
"83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
"54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
"54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
"54.94.195.96", "54.94.200.247"
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Satoshi:0.13.(0|1|2|99)/|/PubgcoinCore:0.13.(0|1|2|99)/|/PubgcoinCore:0.14.(0|1|2|99)/|/PubgcoinCore:0.15.(0|1|2|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if int(sline[1]) == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
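# Illustrative sketch of one seeder dump line as parsed above (the values are
# made up, not real seeder output); fields are whitespace-separated:
#   address      good lastSuccess %(2h) %(8h) %(1d) %(7d) %(30d) blocks svcs     version "agent"
#   1.2.3.4:8333 1    1500000000  100%  100%  100%  99%   97%    400000 0000000d 70015   "/PubgcoinCore:0.15.1/"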
def filtermultiport(ips):
    '''Filter out hosts that have more than one node per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
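# e.g. if both 1.2.3.4:8333 and 1.2.3.4:9999 appear in the dump they share a
# sortkey, so filtermultiport drops both entries.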
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
            # Team Cymru's DNS service maps a reversed IPv4 address to its
            # origin ASN via a TXT record (e.g. 4.3.2.1.origin.asn.cymru.com).
            query = '.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com'
            answer = dns.resolver.query(query, 'TXT').response.answer[0]
            asn = int(answer.to_text().split('"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
|
py | 1a4d3a0991e110b0e675e016140a5ebbb90b6888 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: alexnet-dorefa.py
# Author: Yuxin Wu, Yuheng Zou ({wyx,zyh}@megvii.com)
import cv2
import tensorflow as tf
import argparse
import numpy as np
import os
import sys
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import prediction_incorrect
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
from tensorpack.tfutils.varreplace import remap_variables
from tensorpack.dataflow import dataset
from tensorpack.utils.gpu import get_nr_gpu
from imagenet_utils import get_imagenet_dataflow, fbresnet_augmentor
from dorefa import get_dorefa, ternarize
"""
This is a tensorpack script for the ImageNet results in paper:
DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients
http://arxiv.org/abs/1606.06160
The original experiements are performed on a proprietary framework.
This is our attempt to reproduce it on tensorpack & TensorFlow.
Accuracy:
Trained with 4 GPUs and (W,A,G)=(1,2,6), it can reach top-1 single-crop validation error of 47.6%,
after 70 epochs. This number is better than what's in the paper
due to more sophisticated augmentations.
With (W,A,G)=(32,32,32) -- full precision baseline, 41.4% error.
With (W,A,G)=(t,32,32) -- TTQ, 42.3% error
With (W,A,G)=(1,32,32) -- BWN, 44.3% error
With (W,A,G)=(1,1,32) -- BNN, 53.4% error
With (W,A,G)=(1,2,6), 47.6% error
With (W,A,G)=(1,2,4), 58.4% error
Training with 2 or 8 GPUs is supported but the result may get slightly
different, due to limited per-GPU batch size.
You may want to adjust total batch size and learning rate accordingly.
Speed:
About 11 iteration/s on 4 P100s. (Each epoch is set to 10000 iterations)
Note that this code was written early without using NCHW format. You
should expect a speed up if the code is ported to NCHW format.
To Train, for example:
./alexnet-dorefa.py --dorefa 1,2,6 --data PATH --gpu 0,1
PATH should look like:
PATH/
train/
n02134418/
n02134418_198.JPEG
...
...
val/
ILSVRC2012_val_00000001.JPEG
...
And you'll need the following to be able to fetch data efficiently
Fast disk random access (Not necessarily SSD. I used a RAID of HDD, but not sure if plain HDD is enough)
More than 20 CPU cores (for data processing)
More than 10G of free memory
To run pretrained model:
./alexnet-dorefa.py --load alexnet-126.npz --run a.jpg --dorefa 1,2,6
"""
BITW = 1
BITA = 2
BITG = 6
TOTAL_BATCH_SIZE = 128
BATCH_SIZE = None
class Model(ModelDesc):
def inputs(self):
return [tf.placeholder(tf.float32, [None, 224, 224, 3], 'input'),
tf.placeholder(tf.int32, [None], 'label')]
def build_graph(self, image, label):
image = image / 255.0
if BITW == 't':
fw, fa, fg = get_dorefa(32, 32, 32)
fw = ternarize
else:
fw, fa, fg = get_dorefa(BITW, BITA, BITG)
# monkey-patch tf.get_variable to apply fw
def new_get_variable(v):
name = v.op.name
# don't binarize first and last layer
if not name.endswith('W') or 'conv0' in name or 'fct' in name:
return v
else:
logger.info("Quantizing weight {}".format(v.op.name))
return fw(v)
def nonlin(x):
if BITA == 32:
return tf.nn.relu(x) # still use relu for 32bit cases
return tf.clip_by_value(x, 0.0, 1.0)
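        # DoReFa quantizes activations on [0, 1], so the low-bit path clips to
        # that range instead of using an unbounded ReLU; fa() then snaps the
        # clipped values to BITA-bit levels.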
def activate(x):
return fa(nonlin(x))
with remap_variables(new_get_variable), \
argscope(BatchNorm, momentum=0.9, epsilon=1e-4), \
argscope(Conv2D, use_bias=False):
logits = (LinearWrap(image)
.Conv2D('conv0', 96, 12, strides=4, padding='VALID')
.apply(activate)
.Conv2D('conv1', 256, 5, padding='SAME', split=2)
.apply(fg)
.BatchNorm('bn1')
.MaxPooling('pool1', 3, 2, padding='SAME')
.apply(activate)
.Conv2D('conv2', 384, 3)
.apply(fg)
.BatchNorm('bn2')
.MaxPooling('pool2', 3, 2, padding='SAME')
.apply(activate)
.Conv2D('conv3', 384, 3, split=2)
.apply(fg)
.BatchNorm('bn3')
.apply(activate)
.Conv2D('conv4', 256, 3, split=2)
.apply(fg)
.BatchNorm('bn4')
.MaxPooling('pool4', 3, 2, padding='VALID')
.apply(activate)
.FullyConnected('fc0', 4096)
.apply(fg)
.BatchNorm('bnfc0')
.apply(activate)
.FullyConnected('fc1', 4096, use_bias=False)
.apply(fg)
.BatchNorm('bnfc1')
.apply(nonlin)
.FullyConnected('fct', 1000, use_bias=True)())
tf.nn.softmax(logits, name='output')
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))
wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
# weight decay on all W of fc layers
wd_cost = regularize_cost('fc.*/W', l2_regularizer(5e-6), name='regularize_cost')
add_param_summary(('.*/W', ['histogram', 'rms']))
total_cost = tf.add_n([cost, wd_cost], name='cost')
add_moving_summary(cost, wd_cost, total_cost)
return total_cost
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
return tf.train.AdamOptimizer(lr, epsilon=1e-5)
def get_data(dataset_name):
isTrain = dataset_name == 'train'
augmentors = fbresnet_augmentor(isTrain)
return get_imagenet_dataflow(
args.data, dataset_name, BATCH_SIZE, augmentors)
def get_config():
data_train = get_data('train')
data_test = get_data('val')
return TrainConfig(
dataflow=data_train,
callbacks=[
ModelSaver(),
# HumanHyperParamSetter('learning_rate'),
ScheduledHyperParamSetter(
'learning_rate', [(56, 2e-5), (64, 4e-6)]),
InferenceRunner(data_test,
[ScalarStats('cost'),
ClassificationError('wrong-top1', 'val-error-top1'),
ClassificationError('wrong-top5', 'val-error-top5')])
],
model=Model(),
steps_per_epoch=10000,
max_epoch=100,
)
def run_image(model, sess_init, inputs):
pred_config = PredictConfig(
model=model,
session_init=sess_init,
input_names=['input'],
output_names=['output']
)
predictor = OfflinePredictor(pred_config)
meta = dataset.ILSVRCMeta()
pp_mean = meta.get_per_pixel_mean()
pp_mean_224 = pp_mean[16:-16, 16:-16, :]
words = meta.get_synset_words_1000()
def resize_func(im):
h, w = im.shape[:2]
scale = 256.0 / min(h, w)
desSize = map(int, (max(224, min(w, scale * w)),
max(224, min(h, scale * h))))
im = cv2.resize(im, tuple(desSize), interpolation=cv2.INTER_CUBIC)
return im
transformers = imgaug.AugmentorList([
imgaug.MapImage(resize_func),
imgaug.CenterCrop((224, 224)),
imgaug.MapImage(lambda x: x - pp_mean_224),
])
for f in inputs:
assert os.path.isfile(f)
img = cv2.imread(f).astype('float32')
assert img is not None
img = transformers.augment(img)[np.newaxis, :, :, :]
outputs = predictor(img)[0]
prob = outputs[0]
ret = prob.argsort()[-10:][::-1]
names = [words[i] for i in ret]
print(f + ":")
print(list(zip(names, prob[ret])))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='the physical ids of GPUs to use')
parser.add_argument('--load', help='load a checkpoint, or a npz (given as the pretrained model)')
parser.add_argument('--data', help='ILSVRC dataset dir')
parser.add_argument('--dorefa', required=True,
help='number of bits for W,A,G, separated by comma. W="t" means TTQ')
parser.add_argument('--run', help='run on a list of images with the pretrained model', nargs='*')
args = parser.parse_args()
dorefa = args.dorefa.split(',')
if dorefa[0] == 't':
assert dorefa[1] == '32' and dorefa[2] == '32'
BITW, BITA, BITG = 't', 32, 32
else:
BITW, BITA, BITG = map(int, dorefa)
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.run:
assert args.load.endswith('.npz')
run_image(Model(), DictRestore(dict(np.load(args.load))), args.run)
sys.exit()
nr_tower = max(get_nr_gpu(), 1)
BATCH_SIZE = TOTAL_BATCH_SIZE // nr_tower
logger.set_logger_dir(os.path.join(
'train_log', 'alexnet-dorefa-{}'.format(args.dorefa)))
logger.info("Batch per tower: {}".format(BATCH_SIZE))
config = get_config()
if args.load:
config.session_init = SaverRestore(args.load)
launch_train_with_config(config, SyncMultiGPUTrainer(nr_tower))
|
py | 1a4d3a9b4c3142fa5772cf23dda2ae92c4544760 | import csv
import os
import re
import subprocess
from threading import Thread
from enum import Enum
JAVACLASSES = {}
DEPENDENCIES = []
MATCHES = {}
## Support for multithreading with return value
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs)
self._return = None
def run(self):
if self._target is not None:
self._return = self._target(*self._args,
**self._kwargs)
def join(self):
Thread.join(self)
return self._return
## Analyses the source code files and imports the HUSACCT dependencies
class Setup(object):
def __init__(self, filepath_repository, filepath_dependencies, namespace, matchfilter):
        self.regex_p1 = re.compile(r'\.[a-z].*\.[A-Z].*$|^[a-z]')
        self.regex_p2 = re.compile(r'\.[A-Z]')
self.ignore = [namespace + ".R"]
self.getJavaClasses(filepath_repository)
self.getDependencies(filepath_dependencies)
self.getMatches(matchfilter)
# Create a dictionary of all Java classes
# This is used for getting a code block using a HUSACCT dependency
def getJavaClasses(self, filepath):
global JAVACLASSES
for root, dirs, files in os.walk(filepath):
for file in files:
if file.endswith(".java"):
JAVACLASSES[file[:-5]] = os.path.join(root, file)
    # Detect inner classes in HUSACCT dependencies and add the relation to JAVACLASSES.
    # This only works for true inner classes; two classes at the same level won't work.
def detectInnerclass(self, s, namespace):
if namespace in s and not(s in self.ignore):
s = s[12:]
# Format string to [(Object.)*Class]
x1 = self.regex_p1.search(s)
if x1 != None:
s = s[x1.span()[0]+1:]
x2 = self.regex_p2.search(s)
if x2 != None:
s = s[x2.span()[0]+1:]
# Split string
values = s.split('.')
path = JAVACLASSES[values[0]]
for i in reversed(range(1, len(values))):
JAVACLASSES[values[i]] = path
# Parse the XML file of HUSACCT dependencies
def getDependencies(self, filepath):
with open(filepath, newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
self.detectInnerclass(row[0], namespace)
self.detectInnerclass(row[1], namespace)
DEPENDENCIES.append(row)
# Generate an array of matches based on file and linenumber
def getMatches(self, dependency):
for i in range(0, len(DEPENDENCIES)):
row = DEPENDENCIES[i]
if (dependency in row[1] and row[2] != "Import"):
t1 = ThreadWithReturnValue(target=self.searchDependencies, args=("Search up ", row[0], i, -1))
t2 = ThreadWithReturnValue(target=self.searchDependencies, args=("Search down", row[0], i, 1))
t1.start()
t2.start()
results = t1.join() + t2.join()
if len(results) == 0:
continue
try:
old = MATCHES[row[0]]
new = [row[4], row[2], results]
old.append(new)
MATCHES[row[0]] = old
except:
MATCHES[row[0]] = [[row[4], row[2], results]]
# Search for matching dependencies on file and linenumber
def searchDependencies(self, threadName, file, startlinenr, searchDirection):
i = startlinenr + searchDirection
results = []
while i >= 0 and i < len(DEPENDENCIES) - 1 and DEPENDENCIES[i][0] == file:
if (int(DEPENDENCIES[i][4]) == int(DEPENDENCIES[startlinenr][4])):
results.append(i)
i += searchDirection
return results
## Static class that prints blocks of code
class Lines():
# Get lines using filename
def getLines(file, linenr, offset):
try:
filepath = JAVACLASSES[file]
except:
file = Tools.convertNot(file)
filepath = JAVACLASSES[file]
cmd = f'cat {filepath} | head -{linenr + offset} | tail -{1 + 2 * offset}'
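        # head/tail window the file to lines [linenr-offset, linenr+offset],
        # e.g. linenr=50, offset=2 -> `head -52 | tail -5` -> lines 48..52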
return subprocess.check_output(cmd, shell=True).decode('UTF-8')
# Get lines by filename and linenumber
def getLinesByRange(file, start_linenr, end_linenr, offset):
try:
filepath = JAVACLASSES[file]
except:
file = Tools.convertNot(file)
filepath = JAVACLASSES[file]
cmd = f'cat {filepath} | head -{end_linenr + offset} | tail -{end_linenr - start_linenr + 2*offset + 1}'
return subprocess.check_output(cmd, shell=True).decode('UTF-8')
# Get lines using dependency
def printDependencyWithCodeLines(dependency, offset):
print(Lines.getLines(dependency[0], int(dependency[4]), offset))
# Get lines of code where matches start in target
def getCodeLinesStartingFromTarget(component, offset):
keys = list(MATCHES.keys())
for key in keys:
if component != None and Tools.convertNot(key) != component:
continue
# Preprocessing of matches
matches = MATCHES[key]
# Group by line number distance of max 3
grouped_matches = Lines.groupLinesByLinenumber(matches, 3)
for group in grouped_matches:
# [match1, match2] but sorted by linenr
linenr_start = group[0][0]
linenr_end = group[len(group) - 1][0]
for match in group:
print(f"{Tools.convertNot(key)}:{match[0]}| {match[1]} \
--> {[Tools.convertNot(DEPENDENCIES[x][1]) for x in match[2]]}")
print(Lines.getLinesByRange(key, int(linenr_start), int(linenr_end), offset))
# Get lines of code where matches end in target
def getCodeLinesEndingAtTarget(component, offset):
keys = list(MATCHES.keys())
for key in keys:
# Preprocessing of matches
matches = MATCHES[key]
# Group by line number distance of max 3
grouped_matches = Lines.groupLinesByLinenumber(matches, 3)
for group in grouped_matches:
# [match1, match2] but sorted by linenr
linenr_start = group[0][0]
linenr_end = group[len(group) - 1][0]
shouldPrint = False
for match in group:
if component in [Tools.convertNot(DEPENDENCIES[x][1]) for x in match[2]]:
shouldPrint = True
if not(shouldPrint):
continue
for match in group:
print(f"{Tools.convertNot(key)}:{match[0]}| {match[1]} \
--> {[Tools.convertNot(DEPENDENCIES[x][1]) for x in match[2]]}")
print(Lines.getLinesByRange(key, int(linenr_start), int(linenr_end), offset))
# Groups matches by distance of line numbers --> [[match1, match2], [match3]]
def groupLinesByLinenumber(matches, separation):
matches.sort(key=lambda x: int(x[0]))
linenrs = [int(item[0]) for item in matches]
grouped = []
current_group = []
prev_linenr = -1
for i in range(0, len(linenrs)):
# New matches --> Fill first group
if (prev_linenr == -1):
current_group.append(matches[i])
prev_linenr = linenrs[i]
continue
# Already worked with matches
current_linenr = linenrs[i]
            if (current_linenr - prev_linenr < separation):
                current_group.append(matches[i])
            else:
                # Flush the finished group and start a new one with this
                # match, so the first match of each new group is kept.
                if len(current_group) > 0:
                    grouped.append(current_group)
                current_group = [matches[i]]
            prev_linenr = linenrs[i]
# Still have groups left at the end
if len(current_group) > 0:
grouped.append(current_group)
current_group = []
return grouped
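    # Illustrative example: with separation=3, matches on lines 10, 11 and 20
    # come back grouped as [[match@10, match@11], [match@20]].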
# Print lines of code of components that occur in the matches
def getCodeLines(componentfilter, offset, bothDirections = True):
for target in componentfilter:
Lines.getCodeLinesStartingFromTarget(target, offset)
print("_______________\n")
# If target is None, all matches are shown. This should not be displayed twice
if bothDirections and target is not None:
Lines.getCodeLinesEndingAtTarget(target, offset)
print("_______________\n")
print("_______________\n")
class Tools(object):
    # Convert notation (HUSACCT: x.x.x.y -> y)
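    # e.g. "it.feio.android.omninotes.models.Note" -> "Note"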
def convertNot(s):
        regex_p1 = re.compile(r'\.[a-z].*\.[A-Z].*$|^[a-z]')
        regex_p2 = re.compile(r'\.[A-Z]')
x1 = regex_p1.search(s)
if x1 != None:
s = s[x1.span()[0]+1:]
x2 = regex_p2.search(s)
if x2 != None:
s = s[x2.span()[0]+1:]
if "." in s:
return s.split(".")[0]
return s
# Search the dependencies using criteria
def searchDependencies(component, relationship):
result = []
for dep in DEPENDENCIES:
if (component in dep[1] and dep[2] == relationship):
result.append(dep)
return result
# Find a list of context-declared broadcast receivers
def findContextDeclaredBroadcastReceivers():
result = Tools.searchDependencies("xLibraries.android.content.BroadcastReceiver", "Inheritance")
if len(result) == 0:
print("No BroadcastReceivers found")
return
for r in result:
print(f"{Tools.convertNot(r[0])} \t\t <-- {r[0]}")
result += Tools.searchDependencies(r[0], "Inheritance")
# Find list of third party dependencies
def findingThirdPartyDependencies(namespaces):
namespaces += ["android.support", "butterknife", "xLibraries.android", "xLibraries.com.bumptech",
"xLibraries.com.google", "xLibraries.com.squareup", "xLibraries.java", "xLibraries.org.apache",
"xLibraries.org.json", "xLibraries.org.jsoup", "xLibraries.org.junit", "xLibraries.org.powermock",
"xLibraries.rx", "xLibraries.timber"]
thirdParties = set()
for dep in DEPENDENCIES:
dep_0 = False
dep_1 = False
for namespace in namespaces:
if namespace in dep[0]:
dep_0 = True
if namespace in dep[1]:
dep_1 = True
if dep_0 == False:
thirdParties.add(dep[0])
if dep_1 == False:
thirdParties.add(dep[1])
thirdParties = list(thirdParties)
thirdParties.sort()
for thirdParty in thirdParties:
print(thirdParty)
class MatchFilters(Enum):
## Match filters ##
INTENT_URI = "xLibraries.android.content.Intent"
PENDING_INTENT_URI = "xLibraries.android.app.PendingIntent"
# Content resolver does not work if context.getContentResolver() is used (and context is too broad)
CONTENT_RESOLVER_URI = "Loader"
# Detecting binders
BINDER_URI = "xLibraries.android.os.Binder"
# Used for finding all references from/to a component
NONE_URI = ""
# Detecting context-declared broadcast receivers
LOCAL_RECEIVERS = "BroadcastReceiver"
CUSTOM_COMPONENT = "Any component"
filepath_repository = "/home/yorick/Repositories/Omni-Notes"
filepath_dependencies = "/home/yorick/Repositories/OZP/dependencies_OmniNotes.csv"
namespace = "it.feio.android.omninotes"
matchFilter = MatchFilters.INTENT_URI
# Stupid exceptions when class files contain two separate classes :<
JAVACLASSES["ImageAndTextViewHolder"] = "/home/yorick/Repositories/Omni-Notes/omniNotes/src/main/java/it/feio/android/omninotes/models/adapters/ImageAndTextAdapter.java"
JAVACLASSES["NoteDrawerAdapterViewHolder"] = "/home/yorick/Repositories/Omni-Notes/omniNotes/src/main/java/it/feio/android/omninotes/models/adapters/NavDrawerAdapter.java"
JAVACLASSES["NoteDrawerCategoryAdapterViewHolder"] = "/home/yorick/Repositories/Omni-Notes/omniNotes/src/main/java/it/feio/android/omninotes/models/adapters/NavDrawerCategoryAdapter.java"
# Run setup; find java files and matches
Setup(filepath_repository, filepath_dependencies, namespace, matchFilter.value)
## Component filters ##
componentFilter = [None] # Show all matches
componentFilter = ["CategoriesUpdatedEvent","DynamicNavigationReadyEvent","NavigationUpdatedEvent",
"NavigationUpdatedNavDrawerClosedEvent", "NotesLoadedEvent",
"NotesMergeEvent", "NotesUpdatedEvent", "NotificationRemovedEvent", "PasswordRemovedEvent",
"PushbulletReplyEvent", "SwitchFragmentEvent"]
Lines.getCodeLines(componentFilter, 1, True)
# Tools.findContextDeclaredBroadcastReceivers()
# Tools.findingThirdPartyDependencies([namespace]) |
py | 1a4d3bc4d6f4efa8b4f31e72dc76c816dfa24783 | import re
from pathlib import Path
from unittest import mock
import pytest
from django_capture_on_commit_callbacks import capture_on_commit_callbacks
from grandchallenge.cases.models import RawImageUploadSession
from grandchallenge.reader_studies.models import Answer, Question
from tests.factories import ImageFactory, UserFactory
from tests.reader_studies_tests.factories import (
AnswerFactory,
CategoricalOptionFactory,
QuestionFactory,
ReaderStudyFactory,
)
from tests.reader_studies_tests.utils import TwoReaderStudies
from tests.uploads_tests.factories import (
create_completed_upload,
create_upload_from_file,
)
from tests.utils import get_view_for_user
@pytest.mark.django_db
def test_api_list_is_filtered(client):
rs1, rs2 = ReaderStudyFactory(), ReaderStudyFactory()
rs1_editor = UserFactory()
rs1.add_editor(rs1_editor)
q1, q2 = (
QuestionFactory(reader_study=rs1),
QuestionFactory(reader_study=rs2),
)
a1, _ = (
AnswerFactory(question=q1, answer=True),
AnswerFactory(question=q2, answer=False),
)
response = get_view_for_user(
viewname="api:reader-study-list", user=rs1_editor, client=client
)
assert response.status_code == 200
assert response.json()["count"] == 1
response = get_view_for_user(
viewname="api:reader-study-detail",
reverse_kwargs={"pk": rs1.pk},
user=rs1_editor,
client=client,
)
assert response.status_code == 200
assert len(response.json()["questions"]) == 1
response = get_view_for_user(
viewname="api:reader-studies-question-list",
user=rs1_editor,
client=client,
)
assert response.status_code == 200
assert response.json()["count"] == 1
assert response.json()["results"][0]["pk"] == str(q1.pk)
response = get_view_for_user(
viewname="api:reader-studies-answer-list",
user=rs1_editor,
client=client,
)
assert response.status_code == 200
assert response.json()["count"] == 1
assert response.json()["results"][0]["pk"] == str(a1.pk)
@pytest.mark.django_db
def test_answer_create(client):
im = ImageFactory()
rs = ReaderStudyFactory()
rs.images.add(im)
rs.save()
reader = UserFactory()
rs.add_reader(reader)
q = QuestionFactory(reader_study=rs, answer_type=Question.AnswerType.BOOL)
response = get_view_for_user(
viewname="api:reader-studies-answer-list",
user=reader,
client=client,
method=client.post,
data={"answer": True, "images": [im.api_url], "question": q.api_url},
content_type="application/json",
)
assert response.status_code == 201
answer = Answer.objects.get(pk=response.data.get("pk"))
assert answer.creator == reader
assert answer.images.count() == 1
assert answer.images.all()[0] == im
assert answer.question == q
assert answer.answer is True
@pytest.mark.django_db
def test_answer_update(client):
im1, im2 = ImageFactory(), ImageFactory()
rs = ReaderStudyFactory()
rs.images.add(im1, im2)
rs.save()
reader = UserFactory()
rs.add_reader(reader)
editor = UserFactory()
rs.add_editor(editor)
q = QuestionFactory(reader_study=rs, answer_type=Question.AnswerType.BOOL)
response = get_view_for_user(
viewname="api:reader-studies-answer-list",
user=reader,
client=client,
method=client.post,
data={"answer": True, "images": [im1.api_url], "question": q.api_url},
content_type="application/json",
)
assert response.status_code == 201
answer = Answer.objects.get(pk=response.data.get("pk"))
assert answer.answer is True
assert answer.images.first() == im1
assert answer.history.count() == 1
response = get_view_for_user(
viewname="api:reader-studies-answer-detail",
reverse_kwargs={"pk": answer.pk},
user=reader,
client=client,
method=client.patch,
data={"answer": False, "images": [im2.api_url]},
content_type="application/json",
)
assert response.status_code == 400
answer.refresh_from_db()
assert response.json() == {
"non_field_errors": [
"This reader study does not allow answer modification."
]
}
assert answer.answer is True
assert answer.images.first() == im1
assert answer.history.count() == 1
rs.allow_answer_modification = True
rs.save()
response = get_view_for_user(
viewname="api:reader-studies-answer-detail",
reverse_kwargs={"pk": answer.pk},
user=reader,
client=client,
method=client.patch,
data={"answer": False, "images": [im2.api_url]},
content_type="application/json",
)
assert response.status_code == 400
answer.refresh_from_db()
assert response.json() == {
"non_field_errors": ["Only the answer field can be modified."]
}
assert answer.answer is True
assert answer.images.first() == im1
assert answer.history.count() == 1
response = get_view_for_user(
viewname="api:reader-studies-answer-detail",
reverse_kwargs={"pk": answer.pk},
user=reader,
client=client,
method=client.patch,
data={"answer": False},
content_type="application/json",
)
assert response.status_code == 200
answer.refresh_from_db()
assert answer.answer is False
assert answer.images.first() == im1
assert answer.history.count() == 2
response = get_view_for_user(
viewname="api:reader-studies-answer-detail",
reverse_kwargs={"pk": answer.pk},
user=editor,
client=client,
method=client.patch,
data={"answer": False},
content_type="application/json",
)
assert response.status_code == 403
answer.refresh_from_db()
assert answer.answer is False
assert answer.history.count() == 2
@pytest.mark.django_db
def test_answer_creator_is_reader(client):
rs_set = TwoReaderStudies()
im = ImageFactory()
rs_set.rs1.images.add(im)
q = QuestionFactory(
reader_study=rs_set.rs1, answer_type=Question.AnswerType.BOOL
)
tests = (
(rs_set.editor1, 201),
(rs_set.reader1, 201),
(rs_set.editor2, 400),
(rs_set.reader2, 400),
(rs_set.u, 400),
)
for test in tests:
response = get_view_for_user(
viewname="api:reader-studies-answer-list",
user=test[0],
client=client,
method=client.post,
data={
"answer": True,
"images": [im.api_url],
"question": q.api_url,
},
content_type="application/json",
)
assert response.status_code == test[1]
@pytest.mark.django_db
@pytest.mark.parametrize(
"answer_type,answer,expected",
(
(Question.AnswerType.BOOL, True, 201),
(Question.AnswerType.BOOL, "True", 400),
(Question.AnswerType.BOOL, 12, 400),
(Question.AnswerType.NUMBER, 12, 201),
(Question.AnswerType.NUMBER, "12", 400),
(Question.AnswerType.NUMBER, True, 400),
(Question.AnswerType.SINGLE_LINE_TEXT, "dgfsgfds", 201),
(Question.AnswerType.SINGLE_LINE_TEXT, True, 400),
(Question.AnswerType.SINGLE_LINE_TEXT, 12, 400),
(Question.AnswerType.MULTI_LINE_TEXT, "dgfsgfds", 201),
(Question.AnswerType.MULTI_LINE_TEXT, True, 400),
(Question.AnswerType.MULTI_LINE_TEXT, 12, 400),
(Question.AnswerType.HEADING, True, 400),
(Question.AnswerType.HEADING, "null", 400),
(Question.AnswerType.HEADING, None, 400),
(Question.AnswerType.BOUNDING_BOX_2D, "", 400),
(Question.AnswerType.BOUNDING_BOX_2D, True, 400),
(Question.AnswerType.BOUNDING_BOX_2D, False, 400),
(Question.AnswerType.BOUNDING_BOX_2D, 134, 400),
(Question.AnswerType.BOUNDING_BOX_2D, "dsfuag", 400),
(Question.AnswerType.BOUNDING_BOX_2D, {}, 400),
(
Question.AnswerType.BOUNDING_BOX_2D,
{
"version": {"major": 1, "minor": 0},
"type": "2D bounding box",
"name": "test_name",
"corners": [[0, 0, 0], [10, 0, 0], [10, 10, 0], [0, 0, 0]],
},
201,
),
(
Question.AnswerType.BOUNDING_BOX_2D,
{
"type": "2D bounding box",
"name": "test_name",
"corners": [[0, 0, 0], [10, 0, 0], [10, 10, 0], [0, 0, 0]],
},
400,
),
(
Question.AnswerType.BOUNDING_BOX_2D,
{
"version": {"major": 1, "minor": 0},
"name": "test_name",
"corners": [[0, 0, 0], [10, 0, 0], [10, 10, 0], [0, 0, 0]],
},
400,
),
(
Question.AnswerType.BOUNDING_BOX_2D,
'{"version": {"major": 1, "minor": 0}, "type": "2D bounding box", "name": "test_name", "corners": [[0, 0, 0], [10, 0, 0], [10, 10, 0], [0, 0, 0]]}',
400,
), # Valid json, but a string
(Question.AnswerType.MULTIPLE_2D_BOUNDING_BOXES, "", 400),
(Question.AnswerType.MULTIPLE_2D_BOUNDING_BOXES, True, 400),
(Question.AnswerType.MULTIPLE_2D_BOUNDING_BOXES, False, 400),
(Question.AnswerType.MULTIPLE_2D_BOUNDING_BOXES, 134, 400),
(Question.AnswerType.MULTIPLE_2D_BOUNDING_BOXES, "dsfuag", 400),
(Question.AnswerType.MULTIPLE_2D_BOUNDING_BOXES, {}, 400),
(
Question.AnswerType.MULTIPLE_2D_BOUNDING_BOXES,
{
"version": {"major": 1, "minor": 0},
"type": "Multiple 2D bounding boxes",
"name": "test_name",
"boxes": [
{
"corners": [
[0, 0, 0],
[10, 0, 0],
[10, 10, 0],
[0, 0, 0],
]
}
],
},
201,
),
(
Question.AnswerType.MULTIPLE_2D_BOUNDING_BOXES,
{
"type": "2D bounding box",
"name": "test_name",
"boxes": [[0, 0, 0], [10, 0, 0], [10, 10, 0], [0, 0, 0]],
},
400,
),
(
Question.AnswerType.MULTIPLE_2D_BOUNDING_BOXES,
{
"version": {"major": 1, "minor": 0},
"name": "test_name",
"boxes": [
{
"corners": [
[0, 0, 0],
[10, 0, 0],
[10, 10, 0],
[0, 0, 0],
]
}
],
},
400,
),
(
Question.AnswerType.MULTIPLE_2D_BOUNDING_BOXES,
{
"version": {"major": 1, "minor": 0},
"type": "Multiple 2D bounding boxes",
"name": "test_name",
"boxes": [
{
"corners": [
[0, 0, 0],
[10, 0, 0],
[10, 10, 0],
[0, 0, 0],
]
},
{
"corners": [
[0, 0, 0],
[10, 0, 0],
[10, 10, 0],
[0, 0, 0],
]
},
],
},
201,
),
(
Question.AnswerType.DISTANCE_MEASUREMENT,
{
"version": {"major": 1, "minor": 0},
"type": "Distance measurement",
"name": "test",
"start": (1, 2, 3),
"end": (4, 5, 6),
},
201,
),
(
Question.AnswerType.DISTANCE_MEASUREMENT,
{
"version": {"major": 1, "minor": 0},
"type": "Distance measurement",
"name": "test",
"end": (4, 5, 6),
},
400,
),
(
Question.AnswerType.MULTIPLE_DISTANCE_MEASUREMENTS,
{
"version": {"major": 1, "minor": 0},
"type": "Multiple distance measurements",
"lines": [{"start": (1, 2, 3), "end": (4, 5, 6)}],
},
201,
),
(
Question.AnswerType.MULTIPLE_DISTANCE_MEASUREMENTS,
{
"version": {"major": 1, "minor": 0},
"type": "Multiple distance measurements",
"name": "test",
"lines": [
{"start": (1, 2, 3), "end": (4, 5, 6)},
{"start": (1, 2, 3), "end": (4, 5, 6)},
],
},
201,
),
(
Question.AnswerType.MULTIPLE_DISTANCE_MEASUREMENTS,
{
"version": {"major": 1, "minor": 0},
"type": "Multiple distance measurements",
"name": "test",
"lines": [{"start": (1, 2, 3), "end": (4, 5, 6)}],
},
201,
),
(
Question.AnswerType.MULTIPLE_DISTANCE_MEASUREMENTS,
{
"version": {"major": 1, "minor": 0},
"type": "Distance measurements",
"name": "test",
"lines": [{"start": (1, 2, 3), "end": (4, 5, 6)}],
},
400,
),
(
Question.AnswerType.MULTIPLE_DISTANCE_MEASUREMENTS,
{
"version": {"major": 1, "minor": 0},
"type": "Multiple distance measurements",
"name": "test",
"lines": [{"start": (1, 2, 3)}],
},
400,
),
(
Question.AnswerType.MULTIPLE_DISTANCE_MEASUREMENTS,
{
"version": {"major": 1, "minor": 0},
"type": "Multiple distance measurements",
"name": "test",
"lines": [
{"start": (1, 2, 3)},
{"start": (1, 2, 3), "end": (4, 5, 6)},
],
},
400,
),
(
Question.AnswerType.MULTIPLE_DISTANCE_MEASUREMENTS,
{
"type": "Multiple distance measurements",
"lines": [{"start": (1, 2, 3), "end": (4, 5, 6)}],
},
400,
),
(
Question.AnswerType.POINT,
{
"version": {"major": 1, "minor": 0},
"type": "Point",
"name": "test",
"point": (1, 2, 3),
},
201,
),
(
Question.AnswerType.POINT,
{
"version": {"major": 1, "minor": 0},
"type": "Point",
"name": "test",
"point": (1, 2),
},
400,
),
(
Question.AnswerType.MULTIPLE_POINTS,
{
"version": {"major": 1, "minor": 0},
"type": "Multiple points",
"name": "test",
"points": [{"point": (1, 2, 3)}, {"point": (4, 5, 6)}],
},
201,
),
(
Question.AnswerType.MULTIPLE_POINTS,
{
"version": {"major": 1, "minor": 0},
"type": "Multiple points",
"name": "test",
"points": [{"point": (1, 2)}, {"point": (4, 5, 6)}],
},
400,
),
(
Question.AnswerType.POLYGON,
{
"version": {"major": 1, "minor": 0},
"type": "Polygon",
"name": "test",
"seed_point": (1, 2, 3),
"path_points": [(1, 2, 3), (4, 5, 6)],
"sub_type": "poly",
"groups": ["a", "b"],
},
201,
),
(
Question.AnswerType.POLYGON,
{
"version": {"major": 1, "minor": 0},
"type": "Polygon",
"name": "test",
"path_points": [(1, 2, 3), (4, 5, 6)],
"sub_type": "poly",
"groups": ["a", "b"],
},
400,
),
(
Question.AnswerType.POLYGON,
{
"version": {"major": 1, "minor": 0},
"type": "Polygon",
"name": "test",
"seed_point": (1, 2, 3),
"sub_type": "poly",
"groups": ["a", "b"],
},
400,
),
(
Question.AnswerType.POLYGON,
{
"version": {"major": 1, "minor": 0},
"type": "Polygon",
"name": "test",
"seed_point": (1, 2, 3),
"path_points": [(1, 2, 3), (4, 5, 6)],
"sub_type": "poly",
},
400,
),
(
Question.AnswerType.POLYGON,
{
"version": {"major": 1, "minor": 0},
"type": "Polygon",
"name": "test",
"seed_point": (1, 2, 3),
"path_points": [(1, 2, 3), (4, 5, 6)],
"groups": ["a", "b"],
},
400,
),
(
Question.AnswerType.MULTIPLE_POLYGONS,
{
"version": {"major": 1, "minor": 0},
"type": "Multiple polygons",
"name": "test",
"polygons": [
{
"name": "test",
"seed_point": (1, 2, 3),
"path_points": [(1, 2, 3), (4, 5, 6)],
"sub_type": "poly",
"groups": ["a", "b"],
}
],
},
201,
),
(
Question.AnswerType.MULTIPLE_POLYGONS,
{
"version": {"major": 1, "minor": 0},
"type": "Multiple polygons",
"name": "test",
"polygons": [
{
"seed_point": (1, 2, 3),
"path_points": [(1, 2, 3), (4, 5, 6)],
"sub_type": "poly",
"groups": ["a", "b"],
}
],
},
201,
),
(Question.AnswerType.SINGLE_LINE_TEXT, None, 400),
(Question.AnswerType.MULTI_LINE_TEXT, None, 400),
(Question.AnswerType.BOOL, None, 400),
(Question.AnswerType.NUMBER, None, 400),
(Question.AnswerType.HEADING, None, 400),
(Question.AnswerType.BOUNDING_BOX_2D, None, 201),
(Question.AnswerType.MULTIPLE_2D_BOUNDING_BOXES, None, 201),
(Question.AnswerType.DISTANCE_MEASUREMENT, None, 201),
(Question.AnswerType.MULTIPLE_DISTANCE_MEASUREMENTS, None, 201),
(Question.AnswerType.POINT, None, 201),
(Question.AnswerType.MULTIPLE_POINTS, None, 201),
(Question.AnswerType.POLYGON, None, 201),
(Question.AnswerType.MULTIPLE_POLYGONS, None, 201),
(Question.AnswerType.CHOICE, None, 400),
(Question.AnswerType.MULTIPLE_CHOICE, None, 400),
(Question.AnswerType.MULTIPLE_CHOICE_DROPDOWN, None, 400),
),
)
def test_answer_is_correct_type(client, answer_type, answer, expected):
im = ImageFactory()
rs = ReaderStudyFactory()
rs.images.add(im)
rs.save()
reader = UserFactory()
rs.add_reader(reader)
q = QuestionFactory(reader_study=rs, answer_type=answer_type)
response = get_view_for_user(
viewname="api:reader-studies-answer-list",
user=reader,
client=client,
method=client.post,
data={"answer": answer, "images": [im.api_url], "question": q.api_url},
content_type="application/json",
)
assert response.status_code == expected
@pytest.mark.django_db
@pytest.mark.parametrize(
"answer_type", (Question.AnswerType.CHOICE, Question.AnswerType.NUMBER)
)
def test_only_non_required_can_be_null(client, answer_type):
im = ImageFactory()
rs = ReaderStudyFactory()
rs.images.add(im)
rs.save()
reader = UserFactory()
rs.add_reader(reader)
q = QuestionFactory(
reader_study=rs, answer_type=answer_type, required=True
)
response = get_view_for_user(
viewname="api:reader-studies-answer-list",
user=reader,
client=client,
method=client.post,
data={"answer": None, "images": [im.api_url], "question": q.api_url},
content_type="application/json",
)
assert response.status_code == 400
q = QuestionFactory(
reader_study=rs, answer_type=answer_type, required=False
)
response = get_view_for_user(
viewname="api:reader-studies-answer-list",
user=reader,
client=client,
method=client.post,
data={"answer": None, "images": [im.api_url], "question": q.api_url},
content_type="application/json",
)
assert response.status_code == 201
@pytest.mark.django_db
def test_mine(client):
im1, im2 = ImageFactory(), ImageFactory()
rs1, rs2 = ReaderStudyFactory(), ReaderStudyFactory()
rs1.images.add(im1)
rs2.images.add(im2)
reader = UserFactory()
rs1.add_reader(reader)
rs2.add_reader(reader)
q1 = QuestionFactory(
reader_study=rs1, answer_type=Question.AnswerType.BOOL
)
q2 = QuestionFactory(
reader_study=rs2, answer_type=Question.AnswerType.BOOL
)
a1 = AnswerFactory(question=q1, creator=reader, answer=True)
a1.images.add(im1)
a2 = AnswerFactory(question=q2, creator=reader, answer=True)
a2.images.add(im2)
response = get_view_for_user(
viewname="api:reader-studies-answer-mine",
user=reader,
client=client,
method=client.get,
content_type="application/json",
)
response = response.json()
assert response["count"] == 2
response = get_view_for_user(
viewname="api:reader-studies-answer-mine",
user=reader,
client=client,
method=client.get,
data={"question__reader_study": rs1.pk},
content_type="application/json",
)
response = response.json()
assert response["count"] == 1
assert response["results"][0]["pk"] == str(a1.pk)
response = get_view_for_user(
viewname="api:reader-studies-answer-mine",
user=reader,
client=client,
method=client.get,
data={"question__reader_study": rs2.pk},
content_type="application/json",
)
response = response.json()
assert response["count"] == 1
assert response["results"][0]["pk"] == str(a2.pk)
@pytest.mark.django_db
def test_ground_truth_is_excluded(client):
im = ImageFactory()
rs = ReaderStudyFactory()
rs.images.add(im)
editor = UserFactory()
rs.add_editor(editor)
rs.add_reader(editor)
q = QuestionFactory(reader_study=rs, answer_type=Question.AnswerType.BOOL)
a1 = AnswerFactory(
question=q, creator=editor, answer=True, is_ground_truth=True
)
a1.images.add(im)
a2 = AnswerFactory(
question=q, creator=editor, answer=True, is_ground_truth=False
)
a2.images.add(im)
response = get_view_for_user(
viewname="api:reader-studies-answer-mine",
user=editor,
client=client,
method=client.get,
content_type="application/json",
)
results = response.json()["results"]
assert len(results) == 1
assert results[0]["pk"] == str(a2.pk)
@pytest.mark.django_db
@pytest.mark.parametrize(
"answer_type,answer",
(
(Question.AnswerType.BOOL, True),
(Question.AnswerType.NUMBER, 12),
(Question.AnswerType.SINGLE_LINE_TEXT, "dgfsgfds"),
(Question.AnswerType.MULTI_LINE_TEXT, "dgfsgfds\ndgfsgfds"),
(
Question.AnswerType.BOUNDING_BOX_2D,
{
"version": {"major": 1, "minor": 0},
"type": "2D bounding box",
"name": "test_name",
"corners": [[0, 0, 0], [10, 0, 0], [10, 10, 0], [0, 0, 0]],
},
),
(
Question.AnswerType.DISTANCE_MEASUREMENT,
{
"version": {"major": 1, "minor": 0},
"type": "Distance measurement",
"name": "test",
"start": (1, 2, 3),
"end": (4, 5, 6),
},
),
(
Question.AnswerType.MULTIPLE_DISTANCE_MEASUREMENTS,
{
"version": {"major": 1, "minor": 0},
"type": "Multiple distance measurements",
"name": "test",
"lines": [
{"start": (1, 2, 3), "end": (4, 5, 6)},
{"start": (1, 2, 3), "end": (4, 5, 6)},
],
},
),
),
)
def test_csv_export(client, answer_type, answer):
im = ImageFactory()
rs = ReaderStudyFactory()
rs.images.add(im)
rs.save()
editor = UserFactory()
rs.add_editor(editor)
reader = UserFactory()
rs.add_reader(reader)
q = QuestionFactory(
question_text="foo", reader_study=rs, answer_type=answer_type
)
a = AnswerFactory(question=q, answer=answer)
a.images.add(im)
a.save()
response = get_view_for_user(
viewname="api:reader-studies-answer-list",
params={"question__reader_study": str(rs.pk)},
user=editor,
client=client,
method=client.get,
HTTP_ACCEPT="text/csv",
)
headers = str(response.serialize_headers())
content = str(response.content)
assert response.status_code == 200
assert "Content-Type: text/csv" in headers
if isinstance(answer, dict):
for key in answer:
assert key in content
else:
assert re.sub(r"\n", r"\\n", str(a.answer)) in content
assert a.creator.username in content
response = get_view_for_user(
viewname="api:reader-studies-question-list",
params={"reader_study": str(rs.pk)},
user=editor,
client=client,
method=client.get,
HTTP_ACCEPT="text/csv",
)
headers = str(response.serialize_headers())
content = str(response.content)
assert response.status_code == 200
assert "Content-Type: text/csv" in headers
assert a.question.question_text in content
assert a.question.get_answer_type_display() in content
assert str(a.question.required) in content
assert a.question.get_image_port_display() in content
response = get_view_for_user(
viewname="api:image-list",
params={"readerstudies": str(rs.pk)},
user=editor,
client=client,
method=client.get,
HTTP_ACCEPT="text/csv",
)
headers = str(response.serialize_headers())
content = str(response.content)
assert response.status_code == 200
assert "Content-Type: text/csv" in headers
assert im.name in content
@pytest.mark.django_db
@mock.patch(
"grandchallenge.reader_studies.models.ReaderStudy.generate_hanging_list"
)
def test_generate_hanging_list_api_view(generate_hanging_list, client):
rs = ReaderStudyFactory()
editor = UserFactory()
rs.add_editor(editor)
response = get_view_for_user(
viewname="api:reader-study-generate-hanging-list",
reverse_kwargs={"pk": rs.pk},
user=editor,
client=client,
method=client.patch,
follow=True,
)
assert response.status_code == 200
assert "Hanging list generated." in str(response.content)
generate_hanging_list.assert_called_once()
@pytest.mark.django_db
def test_remove_image_api_view(client):
rs = ReaderStudyFactory()
reader, editor = UserFactory(), UserFactory()
rs.add_reader(reader)
rs.add_editor(editor)
response = get_view_for_user(
viewname="api:reader-study-remove-image",
reverse_kwargs={"pk": rs.pk},
user=reader,
client=client,
method=client.patch,
data={"image": 1},
content_type="application/json",
follow=True,
)
assert response.status_code == 403
response = get_view_for_user(
viewname="api:reader-study-remove-image",
reverse_kwargs={"pk": rs.pk},
user=editor,
client=client,
method=client.patch,
data={"image": 1},
content_type="application/json",
follow=True,
)
assert response.status_code == 200
assert "Image could not be removed from reader study." in str(
response.content
)
im = ImageFactory()
rs.images.add(im)
assert im in rs.images.all()
response = get_view_for_user(
viewname="api:reader-study-remove-image",
reverse_kwargs={"pk": rs.pk},
user=editor,
client=client,
method=client.patch,
data={"image": im.pk},
content_type="application/json",
follow=True,
)
assert response.status_code == 200
assert "Image removed from reader study." in str(response.content)
assert im not in rs.images.all()
@pytest.mark.django_db
def test_ground_truth(client):
rs = ReaderStudyFactory(is_educational=True)
reader = UserFactory()
rs.add_reader(reader)
q1 = QuestionFactory(
answer_type=Question.AnswerType.CHOICE, reader_study=rs
)
q2 = QuestionFactory(
answer_type=Question.AnswerType.MULTIPLE_CHOICE, reader_study=rs
)
q3 = QuestionFactory(
answer_type=Question.AnswerType.MULTIPLE_CHOICE_DROPDOWN,
reader_study=rs,
)
op1 = CategoricalOptionFactory(question=q1, title="option1")
op2 = CategoricalOptionFactory(question=q2, title="option1")
op3 = CategoricalOptionFactory(question=q2, title="option1")
op4 = CategoricalOptionFactory(question=q3, title="option1")
op5 = CategoricalOptionFactory(question=q3, title="option1")
im = ImageFactory()
rs.images.add(im)
a1 = AnswerFactory(question=q1, answer=op1.pk, is_ground_truth=True)
a1.images.add(im)
a2 = AnswerFactory(
question=q2, answer=[op2.pk, op3.pk], is_ground_truth=True
)
a2.images.add(im)
a3 = AnswerFactory(
question=q3, answer=[op4.pk, op5.pk], is_ground_truth=True
)
a3.images.add(im)
response = get_view_for_user(
viewname="api:reader-study-ground-truth",
reverse_kwargs={"pk": rs.pk, "case_pk": im.pk},
user=reader,
client=client,
content_type="application/json",
follow=True,
)
assert response.status_code == 200
response = response.json()
assert response[str(q1.pk)] == {
"answer": op1.pk,
"answer_text": op1.title,
"question_text": q1.question_text,
"options": {str(op1.pk): op1.title},
"explanation": "",
}
assert response[str(q2.pk)] == {
"answer": [op2.pk, op3.pk],
"answer_text": f"{op2.title}, {op3.title}",
"question_text": q2.question_text,
"options": {str(op2.pk): op2.title, str(op3.pk): op3.title},
"explanation": "",
}
assert response[str(q3.pk)] == {
"answer": [op4.pk, op5.pk],
"answer_text": f"{op4.title}, {op5.title}",
"question_text": q3.question_text,
"options": {str(op4.pk): op4.title, str(op5.pk): op5.title},
"explanation": "",
}
@pytest.mark.django_db
@pytest.mark.parametrize("answer_type", ("MASK",))
def test_assign_answer_image(client, settings, answer_type):
settings.task_eager_propagates = (True,)
settings.task_always_eager = (True,)
rs = ReaderStudyFactory()
im = ImageFactory()
editor, reader = UserFactory(), UserFactory()
rs.images.add(im)
rs.add_editor(editor)
rs.add_reader(reader)
question = QuestionFactory(reader_study=rs, answer_type=answer_type)
# First post/patch the answer (ReaderStudyAnswersAPI in gcapi)
response = get_view_for_user(
viewname="api:reader-studies-answer-list",
user=reader,
client=client,
method=client.post,
data={
"answer": None, # Answer must be None to image assignment
"images": [im.api_url],
"question": question.api_url,
},
content_type="application/json",
)
assert response.status_code == 201
answer = Answer.objects.get(pk=response.json()["pk"])
# Next upload the image to the answer (upload_cases in gcapi)
upload = create_upload_from_file(
file_path=Path(__file__).parent.parent
/ "cases_tests"
/ "resources"
/ "image10x10x10.mha",
creator=reader,
)
with capture_on_commit_callbacks(execute=True):
response = get_view_for_user(
viewname="api:upload-session-list",
user=reader,
client=client,
method=client.post,
data={"answer": str(answer.pk), "uploads": [upload.api_url]},
content_type="application/json",
)
assert response.status_code == 201
# Validate
answer.refresh_from_db()
image = RawImageUploadSession.objects.get(
pk=response.json()["pk"]
).image_set.first()
assert answer.answer_image == image
assert reader.has_perm("view_image", image)
assert editor.has_perm("view_image", image)
@pytest.mark.django_db
@pytest.mark.parametrize("answer_type", ("MASK",))
def test_upload_session_owned_by_answer_creator(client, settings, answer_type):
settings.task_eager_propagates = (True,)
settings.task_always_eager = (True,)
rs = ReaderStudyFactory()
im = ImageFactory()
editor, reader = UserFactory(), UserFactory()
rs.images.add(im)
rs.add_editor(editor)
rs.add_reader(reader)
question = QuestionFactory(reader_study=rs, answer_type=answer_type)
answer1 = AnswerFactory(creator=reader, question=question, answer=None)
response = get_view_for_user(
viewname="api:upload-session-list",
user=editor,
client=client,
method=client.post,
data={
"answer": str(answer1.pk),
"uploads": [create_completed_upload(user=editor).api_url],
},
content_type="application/json",
)
assert response.status_code == 400
assert "object does not exist" in response.json()["answer"][0]
@pytest.mark.django_db
def test_question_accepts_image_type_answers(client, settings):
settings.task_eager_propagates = (True,)
settings.task_always_eager = (True,)
rs = ReaderStudyFactory()
im = ImageFactory()
reader = UserFactory()
rs.images.add(im)
rs.add_reader(reader)
question = QuestionFactory(
reader_study=rs, answer_type=Question.AnswerType.BOOL
)
answer = AnswerFactory(creator=reader, question=question, answer=None)
response = get_view_for_user(
viewname="api:upload-session-list",
user=reader,
client=client,
method=client.post,
data={
"answer": str(answer.pk),
"uploads": [create_completed_upload(user=reader).api_url],
},
content_type="application/json",
)
assert response.status_code == 400
assert (
b"This question does not accept image type answers"
in response.rendered_content
)
|
py | 1a4d3c80645c1165acbad2f632c7781cfc4df0ac | import pytest
from app.users.forms import UserCreationForm
from app.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_clean_username(self):
# A user with proto_user params does not exist yet.
proto_user = UserFactory.build()
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert form.is_valid()
assert form.clean_username() == proto_user.username
# Creating a user.
form.save()
# The user with proto_user params already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
|
py | 1a4d3cd955a8b155e595a8eeb97772cda5390adc | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Flops Analyser."""
import json
import os
from mindinsight.profiler.analyser.base_analyser import BaseAnalyser
from mindinsight.profiler.common.exceptions.exceptions import ProfilerIOException
from mindinsight.profiler.common.log import logger
from mindinsight.profiler.common.validator.validate_path import validate_and_normalize_path
class FlopsAnalyser(BaseAnalyser):
"""
Analyse flops data from file.
"""
_flops_summary_filename = 'flops_summary_{}.json'
_flops_scope_filename = 'flops_scope_{}.json'
def _load(self):
"""Load data according to the parsed profiling files."""
def _filter(self, filter_condition):
"""
Filter the profiling data according to the filter condition.
Args:
filter_condition (dict): The filter condition.
"""
def get_flops_summary(self):
"""
Get flops summary information for UI display.
Returns:
json, the content of flops summary information.
"""
summary_filename = self._flops_summary_filename.format(self._device_id)
file_path = os.path.join(self._profiling_dir, summary_filename)
file_path = validate_and_normalize_path(
file_path, raise_key='Invalid flops summary path.'
)
flops_summary = {}
if os.path.exists(file_path):
try:
with open(file_path, 'r') as f_obj:
flops_summary = json.load(f_obj)
except (IOError, OSError, json.JSONDecodeError) as err:
logger.error('Error occurred when read flops summary file: %s', err)
raise ProfilerIOException()
else:
logger.warning('No flops summary file. Please check the output path.')
return flops_summary
def get_flops_scope(self):
"""
Get flops information of each scope for UI display.
Returns:
            json, the content of flops scope information.
"""
flops_scope_filename = self._flops_scope_filename.format(self._device_id)
file_path = os.path.join(self._profiling_dir, flops_scope_filename)
file_path = validate_and_normalize_path(
file_path, raise_key='Invalid flops scope path.'
)
flops_scope = {}
if os.path.exists(file_path):
try:
with open(file_path, 'r') as f_obj:
flops_scope = json.load(f_obj)
except (IOError, OSError, json.JSONDecodeError) as err:
logger.error('Error occurred when read flops scope file: %s', err)
raise ProfilerIOException()
else:
logger.warning('No flops scope file. Please check the output path.')
return flops_scope
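# A minimal usage sketch (illustrative, not part of this module): it assumes
# BaseAnalyser's constructor takes the profiling directory and device id, and
# that the profiler has already written flops_summary_<device_id>.json there.
#   analyser = FlopsAnalyser('/path/to/profiler_dir', '0')
#   summary = analyser.get_flops_summary()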
|
py | 1a4d3d1c8dee53c7325bb4ec744c94e999e3bcfb | from .linear import linear_activation_constraint, linear_activation_function
from .smooth import (sigmoid_activation_constraint, sigmoid_activation_function,
softplus_activation_constraint, softplus_activation_function,
tanh_activation_constraint, tanh_activation_function)
from .relu import bigm_relu_activation_constraint, ComplementarityReLUActivation
ACTIVATION_FUNCTION_MAP = {
"linear": linear_activation_function,
# "relu": bigm_relu_activation,
"sigmoid": sigmoid_activation_function,
"softplus": softplus_activation_function,
"tanh": tanh_activation_function
}
|
py | 1a4d3d6017d578e1e1884c858a178d5058b9a94b | # patchbomb.py - sending Mercurial changesets as patch emails
#
# Copyright 2005-2009 Matt Mackall <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to send changesets as (a series of) patch emails
The series is started off with a "[PATCH 0 of N]" introduction, which
describes the series as a whole.
Each patch email has a Subject line of "[PATCH M of N] ...", using the
first line of the changeset description as the subject text. The
message contains two or three body parts:
- The changeset description.
- [Optional] The result of running diffstat on the patch.
- The patch itself, as generated by :hg:`export`.
Each message refers to the first in the series using the In-Reply-To
and References headers, so they will show up as a sequence in threaded
mail and news readers, and in mail archives.
To configure other defaults, add a section like this to your
configuration file::
[email]
from = My Name <my@email>
to = recipient1, recipient2, ...
cc = cc1, cc2, ...
bcc = bcc1, bcc2, ...
reply-to = address1, address2, ...
Use ``[patchbomb]`` as configuration section name if you need to
override global ``[email]`` address settings.
Then you can use the :hg:`email` command to mail a series of
changesets as a patchbomb.
You can also either configure the method option in the email section
to be a sendmail compatible mailer or fill out the [smtp] section so
that the patchbomb extension can automatically send patchbombs
directly from the commandline. See the [email] and [smtp] sections in
hgrc(5) for details.
By default, :hg:`email` will prompt for a ``To`` or ``CC`` header if
you do not supply one via configuration or the command line. You can
override this to never prompt by configuring an empty value::
[email]
cc =
You can control the default inclusion of an introduction message with the
``patchbomb.intro`` configuration option. The configuration is always
overwritten by command line flags like --intro and --desc::
[patchbomb]
intro=auto # include introduction message if more than 1 patch (default)
intro=never # never include an introduction message
intro=always # always include an introduction message
You can specify a template for flags to be added in subject prefixes. Flags
specified by --flag option are exported as ``{flags}`` keyword::
[patchbomb]
flagtemplate = "{separate(' ',
ifeq(branch, 'default', '', branch|upper),
flags)}"
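For example, with that template a changeset on the ``stable`` branch sent
with ``--flag V2`` gets a subject prefix like ``[PATCH STABLE V2]``.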
You can set patchbomb to always ask for confirmation by setting
``patchbomb.confirm`` to true.
'''
import email as emailmod
import errno
import os
import socket
import tempfile
from mercurial.i18n import _
from mercurial import (
cmdutil,
commands,
error,
formatter,
hg,
mail,
node as nodemod,
patch,
registrar,
repair,
scmutil,
templater,
util,
)
stringio = util.stringio
cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
def _addpullheader(seq, ctx):
"""Add a header pointing to a public URL where the changeset is available
"""
repo = ctx.repo()
# experimental config: patchbomb.publicurl
# waiting for some logic that check that the changeset are available on the
# destination before patchbombing anything.
publicurl = repo.ui.config('patchbomb', 'publicurl')
if publicurl:
return ('Available At %s\n'
'# hg pull %s -r %s' % (publicurl, publicurl, ctx))
return None
def uisetup(ui):
cmdutil.extraexport.append('pullurl')
cmdutil.extraexportmap['pullurl'] = _addpullheader
def reposetup(ui, repo):
if not repo.local():
return
repo._wlockfreeprefix.add('last-email.txt')
def prompt(ui, prompt, default=None, rest=':'):
if default:
prompt += ' [%s]' % default
return ui.prompt(prompt + rest, default)
def introwanted(ui, opts, number):
'''is an introductory message apparently wanted?'''
introconfig = ui.config('patchbomb', 'intro', 'auto')
if opts.get('intro') or opts.get('desc'):
intro = True
elif introconfig == 'always':
intro = True
elif introconfig == 'never':
intro = False
elif introconfig == 'auto':
intro = 1 < number
else:
ui.write_err(_('warning: invalid patchbomb.intro value "%s"\n')
% introconfig)
ui.write_err(_('(should be one of always, never, auto)\n'))
intro = 1 < number
return intro
def _formatflags(ui, repo, rev, flags):
"""build flag string optionally by template"""
tmpl = ui.config('patchbomb', 'flagtemplate')
if not tmpl:
return ' '.join(flags)
out = util.stringio()
opts = {'template': templater.unquotestring(tmpl)}
with formatter.templateformatter(ui, out, 'patchbombflag', opts) as fm:
fm.startitem()
fm.context(ctx=repo[rev])
fm.write('flags', '%s', fm.formatlist(flags, name='flag'))
return out.getvalue()
def _formatprefix(ui, repo, rev, flags, idx, total, numbered):
"""build prefix to patch subject"""
flag = _formatflags(ui, repo, rev, flags)
if flag:
flag = ' ' + flag
if not numbered:
return '[PATCH%s]' % flag
else:
tlen = len(str(total))
return '[PATCH %0*d of %d%s]' % (tlen, idx, total, flag)
def makepatch(ui, repo, rev, patchlines, opts, _charsets, idx, total, numbered,
patchname=None):
desc = []
node = None
body = ''
for line in patchlines:
if line.startswith('#'):
if line.startswith('# Node ID'):
node = line.split()[-1]
continue
if line.startswith('diff -r') or line.startswith('diff --git'):
break
desc.append(line)
if not patchname and not node:
raise ValueError
if opts.get('attach') and not opts.get('body'):
body = ('\n'.join(desc[1:]).strip() or
'Patch subject is complete summary.')
body += '\n\n\n'
if opts.get('plain'):
while patchlines and patchlines[0].startswith('# '):
patchlines.pop(0)
if patchlines:
patchlines.pop(0)
while patchlines and not patchlines[0].strip():
patchlines.pop(0)
ds = patch.diffstat(patchlines)
if opts.get('diffstat'):
body += ds + '\n\n'
addattachment = opts.get('attach') or opts.get('inline')
if not addattachment or opts.get('body'):
body += '\n'.join(patchlines)
if addattachment:
msg = emailmod.MIMEMultipart.MIMEMultipart()
if body:
msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
p = mail.mimetextpatch('\n'.join(patchlines), 'x-patch',
opts.get('test'))
binnode = nodemod.bin(node)
# if node is mq patch, it will have the patch file's name as a tag
if not patchname:
patchtags = [t for t in repo.nodetags(binnode)
if t.endswith('.patch') or t.endswith('.diff')]
if patchtags:
patchname = patchtags[0]
elif total > 1:
patchname = cmdutil.makefilename(repo, '%b-%n.patch',
binnode, seqno=idx,
total=total)
else:
patchname = cmdutil.makefilename(repo, '%b.patch', binnode)
disposition = 'inline'
if opts.get('attach'):
disposition = 'attachment'
p['Content-Disposition'] = disposition + '; filename=' + patchname
msg.attach(p)
else:
msg = mail.mimetextpatch(body, display=opts.get('test'))
prefix = _formatprefix(ui, repo, rev, opts.get('flag'), idx, total,
numbered)
subj = desc[0].strip().rstrip('. ')
if not numbered:
subj = ' '.join([prefix, opts.get('subject') or subj])
else:
subj = ' '.join([prefix, subj])
msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
msg['X-Mercurial-Node'] = node
msg['X-Mercurial-Series-Index'] = '%i' % idx
msg['X-Mercurial-Series-Total'] = '%i' % total
return msg, subj, ds
def _getpatches(repo, revs, **opts):
"""return a list of patches for a list of revisions
Each patch in the list is itself a list of lines.
"""
ui = repo.ui
prev = repo['.'].rev()
for r in revs:
if r == prev and (repo[None].files() or repo[None].deleted()):
ui.warn(_('warning: working directory has '
'uncommitted changes\n'))
output = stringio()
cmdutil.export(repo, [r], fp=output,
opts=patch.difffeatureopts(ui, opts, git=True))
yield output.getvalue().split('\n')
def _getbundle(repo, dest, **opts):
"""return a bundle containing changesets missing in "dest"
The `opts` keyword-arguments are the same as the one accepted by the
`bundle` command.
The bundle is a returned as a single in-memory binary blob.
"""
ui = repo.ui
tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
tmpfn = os.path.join(tmpdir, 'bundle')
btype = ui.config('patchbomb', 'bundletype')
if btype:
opts['type'] = btype
try:
commands.bundle(ui, repo, tmpfn, dest, **opts)
return util.readfile(tmpfn)
finally:
try:
os.unlink(tmpfn)
except OSError:
pass
os.rmdir(tmpdir)
def _getdescription(repo, defaultbody, sender, **opts):
"""obtain the body of the introduction message and return it
This is also used for the body of email with an attached bundle.
The body can be obtained either from the command line option or entered by
the user through the editor.
"""
ui = repo.ui
if opts.get('desc'):
body = open(opts.get('desc')).read()
else:
ui.write(_('\nWrite the introductory message for the '
'patch series.\n\n'))
body = ui.edit(defaultbody, sender, repopath=repo.path)
# Save series description in case sendmail fails
msgfile = repo.vfs('last-email.txt', 'wb')
msgfile.write(body)
msgfile.close()
return body
def _getbundlemsgs(repo, sender, bundle, **opts):
"""Get the full email for sending a given bundle
This function returns a list of "email" tuples (subject, content, None).
The list is always one message long in that case.
"""
ui = repo.ui
_charsets = mail._charsets(ui)
subj = (opts.get('subject')
or prompt(ui, 'Subject:', 'A bundle for your repository'))
body = _getdescription(repo, '', sender, **opts)
msg = emailmod.MIMEMultipart.MIMEMultipart()
if body:
msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
datapart = emailmod.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
datapart.set_payload(bundle)
bundlename = '%s.hg' % opts.get('bundlename', 'bundle')
datapart.add_header('Content-Disposition', 'attachment',
filename=bundlename)
emailmod.Encoders.encode_base64(datapart)
msg.attach(datapart)
msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
return [(msg, subj, None)]
def _makeintro(repo, sender, revs, patches, **opts):
"""make an introduction email, asking the user for content if needed
email is returned as (subject, body, cumulative-diffstat)"""
ui = repo.ui
_charsets = mail._charsets(ui)
# use the last revision which is likely to be a bookmarked head
prefix = _formatprefix(ui, repo, revs.last(), opts.get('flag'),
0, len(patches), numbered=True)
subj = (opts.get('subject') or
prompt(ui, '(optional) Subject: ', rest=prefix, default=''))
if not subj:
return None # skip intro if the user doesn't bother
subj = prefix + ' ' + subj
body = ''
if opts.get('diffstat'):
# generate a cumulative diffstat of the whole patch series
diffstat = patch.diffstat(sum(patches, []))
body = '\n' + diffstat
else:
diffstat = None
body = _getdescription(repo, body, sender, **opts)
msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
msg['Subject'] = mail.headencode(ui, subj, _charsets,
opts.get('test'))
return (msg, subj, diffstat)
def _getpatchmsgs(repo, sender, revs, patchnames=None, **opts):
"""return a list of emails from a list of patches
This involves introduction message creation if necessary.
This function returns a list of "email" tuples (subject, content, None).
"""
ui = repo.ui
_charsets = mail._charsets(ui)
patches = list(_getpatches(repo, revs, **opts))
msgs = []
ui.write(_('this patch series consists of %d patches.\n\n')
% len(patches))
# build the intro message, or skip it if the user declines
if introwanted(ui, opts, len(patches)):
msg = _makeintro(repo, sender, revs, patches, **opts)
if msg:
msgs.append(msg)
# are we going to send more than one message?
numbered = len(msgs) + len(patches) > 1
# now generate the actual patch messages
name = None
assert len(revs) == len(patches)
for i, (r, p) in enumerate(zip(revs, patches)):
if patchnames:
name = patchnames[i]
msg = makepatch(ui, repo, r, p, opts, _charsets, i + 1,
len(patches), numbered, name)
msgs.append(msg)
return msgs
def _getoutgoing(repo, dest, revs):
'''Return the revisions present locally but not in dest'''
ui = repo.ui
url = ui.expandpath(dest or 'default-push', dest or 'default')
url = hg.parseurl(url)[0]
ui.status(_('comparing with %s\n') % util.hidepassword(url))
revs = [r for r in revs if r >= 0]
if not revs:
revs = [len(repo) - 1]
revs = repo.revs('outgoing(%s) and ::%ld', dest or '', revs)
if not revs:
ui.status(_("no changes found\n"))
return revs
emailopts = [
('', 'body', None, _('send patches as inline message text (default)')),
('a', 'attach', None, _('send patches as attachments')),
('i', 'inline', None, _('send patches as inline attachments')),
('', 'bcc', [], _('email addresses of blind carbon copy recipients')),
('c', 'cc', [], _('email addresses of copy recipients')),
('', 'confirm', None, _('ask for confirmation before sending')),
('d', 'diffstat', None, _('add diffstat output to messages')),
('', 'date', '', _('use the given date as the sending date')),
('', 'desc', '', _('use the given file as the series description')),
('f', 'from', '', _('email address of sender')),
('n', 'test', None, _('print messages that would be sent')),
('m', 'mbox', '', _('write messages to mbox file instead of sending them')),
('', 'reply-to', [], _('email addresses replies should be sent to')),
('s', 'subject', '', _('subject of first message (intro or single patch)')),
('', 'in-reply-to', '', _('message identifier to reply to')),
('', 'flag', [], _('flags to add in subject prefixes')),
('t', 'to', [], _('email addresses of recipients'))]
@command('email',
[('g', 'git', None, _('use git extended diff format')),
('', 'plain', None, _('omit hg patch header')),
('o', 'outgoing', None,
_('send changes not found in the target repository')),
('b', 'bundle', None, _('send changes not in target as a binary bundle')),
('B', 'bookmark', '', _('send changes only reachable by given bookmark')),
('', 'bundlename', 'bundle',
_('name of the bundle attachment file'), _('NAME')),
('r', 'rev', [], _('a revision to send'), _('REV')),
('', 'force', None, _('run even when remote repository is unrelated '
'(with -b/--bundle)')),
('', 'base', [], _('a base changeset to specify instead of a destination '
'(with -b/--bundle)'), _('REV')),
('', 'intro', None, _('send an introduction email for a single patch')),
] + emailopts + cmdutil.remoteopts,
_('hg email [OPTION]... [DEST]...'))
def email(ui, repo, *revs, **opts):
'''send changesets by email
By default, diffs are sent in the format generated by
:hg:`export`, one per message. The series starts with a "[PATCH 0
of N]" introduction, which describes the series as a whole.
Each patch email has a Subject line of "[PATCH M of N] ...", using
the first line of the changeset description as the subject text.
The message contains two or three parts. First, the changeset
description.
With the -d/--diffstat option, if the diffstat program is
installed, the result of running diffstat on the patch is inserted.
Finally, the patch itself, as generated by :hg:`export`.
With the -d/--diffstat or --confirm options, you will be presented
with a final summary of all messages and asked for confirmation before
the messages are sent.
By default the patch is included as text in the email body for
easy reviewing. Using the -a/--attach option will instead create
an attachment for the patch. With -i/--inline an inline attachment
will be created. You can include a patch both as text in the email
body and as a regular or an inline attachment by combining the
-a/--attach or -i/--inline with the --body option.
With -B/--bookmark changesets reachable by the given bookmark are
selected.
With -o/--outgoing, emails will be generated for patches not found
in the destination repository (or only those which are ancestors
of the specified revisions if any are provided)
With -b/--bundle, changesets are selected as for --outgoing, but a
single email containing a binary Mercurial bundle as an attachment
will be sent. Use the ``patchbomb.bundletype`` config option to
control the bundle type as with :hg:`bundle --type`.
With -m/--mbox, instead of previewing each patchbomb message in a
pager or sending the messages directly, it will create a UNIX
mailbox file with the patch emails. This mailbox file can be
previewed with any mail user agent which supports UNIX mbox
files.
With -n/--test, all steps will run, but mail will not be sent.
You will be prompted for an email recipient address, a subject and
an introductory message describing the patches of your patchbomb.
Then when all is done, patchbomb messages are displayed.
In case email sending fails, you will find a backup of your series
introductory message in ``.hg/last-email.txt``.
The default behavior of this command can be customized through
configuration. (See :hg:`help patchbomb` for details)
Examples::
hg email -r 3000 # send patch 3000 only
hg email -r 3000 -r 3001 # send patches 3000 and 3001
hg email -r 3000:3005 # send patches 3000 through 3005
hg email 3000 # send patch 3000 (deprecated)
hg email -o # send all patches not in default
hg email -o DEST # send all patches not in DEST
hg email -o -r 3000 # send all ancestors of 3000 not in default
hg email -o -r 3000 DEST # send all ancestors of 3000 not in DEST
hg email -B feature # send all ancestors of feature bookmark
hg email -b # send bundle of all patches not in default
hg email -b DEST # send bundle of all patches not in DEST
hg email -b -r 3000 # bundle of all ancestors of 3000 not in default
hg email -b -r 3000 DEST # bundle of all ancestors of 3000 not in DEST
hg email -o -m mbox && # generate an mbox file...
mutt -R -f mbox # ... and view it with mutt
hg email -o -m mbox && # generate an mbox file ...
formail -s sendmail \\ # ... and use formail to send from the mbox
-bm -t < mbox # ... using sendmail
Before using this command, you will need to enable email in your
hgrc. See the [email] section in hgrc(5) for details.
'''
_charsets = mail._charsets(ui)
bundle = opts.get('bundle')
date = opts.get('date')
mbox = opts.get('mbox')
outgoing = opts.get('outgoing')
rev = opts.get('rev')
bookmark = opts.get('bookmark')
if not (opts.get('test') or mbox):
# really sending
mail.validateconfig(ui)
if not (revs or rev or outgoing or bundle or bookmark):
raise error.Abort(_('specify at least one changeset with -B, -r or -o'))
if outgoing and bundle:
raise error.Abort(_("--outgoing mode always on with --bundle;"
" do not re-specify --outgoing"))
if rev and bookmark:
raise error.Abort(_("-r and -B are mutually exclusive"))
if outgoing or bundle:
if len(revs) > 1:
raise error.Abort(_("too many destinations"))
if revs:
dest = revs[0]
else:
dest = None
revs = []
if rev:
if revs:
raise error.Abort(_('use only one form to specify the revision'))
revs = rev
elif bookmark:
if bookmark not in repo._bookmarks:
raise error.Abort(_("bookmark '%s' not found") % bookmark)
revs = repair.stripbmrevset(repo, bookmark)
revs = scmutil.revrange(repo, revs)
if outgoing:
revs = _getoutgoing(repo, dest, revs)
if bundle:
opts['revs'] = [str(r) for r in revs]
# check if revision exist on the public destination
publicurl = repo.ui.config('patchbomb', 'publicurl')
if publicurl:
repo.ui.debug('checking that revision exist in the public repo')
try:
publicpeer = hg.peer(repo, {}, publicurl)
except error.RepoError:
repo.ui.write_err(_('unable to access public repo: %s\n')
% publicurl)
raise
if not publicpeer.capable('known'):
repo.ui.debug('skipping existence checks: public repo too old')
else:
out = [repo[r] for r in revs]
known = publicpeer.known(h.node() for h in out)
missing = []
for idx, h in enumerate(out):
if not known[idx]:
missing.append(h)
if missing:
if 1 < len(missing):
msg = _('public "%s" is missing %s and %i others')
msg %= (publicurl, missing[0], len(missing) - 1)
else:
msg = _('public url %s is missing %s')
msg %= (publicurl, missing[0])
revhint = ' '.join('-r %s' % h
for h in repo.set('heads(%ld)', missing))
hint = _("use 'hg push %s %s'") % (publicurl, revhint)
raise error.Abort(msg, hint=hint)
# start
if date:
start_time = util.parsedate(date)
else:
start_time = util.makedate()
def genmsgid(id):
return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn())
# deprecated config: patchbomb.from
sender = (opts.get('from') or ui.config('email', 'from') or
ui.config('patchbomb', 'from') or
prompt(ui, 'From', ui.username()))
if bundle:
bundledata = _getbundle(repo, dest, **opts)
bundleopts = opts.copy()
bundleopts.pop('bundle', None) # already processed
msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts)
else:
msgs = _getpatchmsgs(repo, sender, revs, **opts)
showaddrs = []
def getaddrs(header, ask=False, default=None):
configkey = header.lower()
opt = header.replace('-', '_').lower()
addrs = opts.get(opt)
if addrs:
showaddrs.append('%s: %s' % (header, ', '.join(addrs)))
return mail.addrlistencode(ui, addrs, _charsets, opts.get('test'))
# not on the command line: fallback to config and then maybe ask
addr = (ui.config('email', configkey) or
ui.config('patchbomb', configkey))
if not addr:
specified = (ui.hasconfig('email', configkey) or
ui.hasconfig('patchbomb', configkey))
if not specified and ask:
addr = prompt(ui, header, default=default)
if addr:
showaddrs.append('%s: %s' % (header, addr))
return mail.addrlistencode(ui, [addr], _charsets, opts.get('test'))
elif default:
return mail.addrlistencode(
ui, [default], _charsets, opts.get('test'))
return []
to = getaddrs('To', ask=True)
if not to:
# we can get here in non-interactive mode
raise error.Abort(_('no recipient addresses provided'))
cc = getaddrs('Cc', ask=True, default='')
bcc = getaddrs('Bcc')
replyto = getaddrs('Reply-To')
confirm = ui.configbool('patchbomb', 'confirm')
confirm |= bool(opts.get('diffstat') or opts.get('confirm'))
if confirm:
ui.write(_('\nFinal summary:\n\n'), label='patchbomb.finalsummary')
ui.write(('From: %s\n' % sender), label='patchbomb.from')
for addr in showaddrs:
ui.write('%s\n' % addr, label='patchbomb.to')
for m, subj, ds in msgs:
ui.write(('Subject: %s\n' % subj), label='patchbomb.subject')
if ds:
ui.write(ds, label='patchbomb.diffstats')
ui.write('\n')
if ui.promptchoice(_('are you sure you want to send (yn)?'
'$$ &Yes $$ &No')):
raise error.Abort(_('patchbomb canceled'))
ui.write('\n')
parent = opts.get('in_reply_to') or None
# angle brackets may be omitted, they're not semantically part of the msg-id
if parent is not None:
if not parent.startswith('<'):
parent = '<' + parent
if not parent.endswith('>'):
parent += '>'
sender_addr = emailmod.Utils.parseaddr(sender)[1]
sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
sendmail = None
firstpatch = None
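    # Thread the series: each message gets a Message-Id derived from its node,
    # and the first patch's id doubles as the X-Mercurial-Series-Id below.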
for i, (m, subj, ds) in enumerate(msgs):
try:
m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
if not firstpatch:
firstpatch = m['Message-Id']
m['X-Mercurial-Series-Id'] = firstpatch
except TypeError:
m['Message-Id'] = genmsgid('patchbomb')
if parent:
m['In-Reply-To'] = parent
m['References'] = parent
if not parent or 'X-Mercurial-Node' not in m:
parent = m['Message-Id']
m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version()
m['Date'] = emailmod.Utils.formatdate(start_time[0], localtime=True)
start_time = (start_time[0] + 1, start_time[1])
m['From'] = sender
m['To'] = ', '.join(to)
if cc:
m['Cc'] = ', '.join(cc)
if bcc:
m['Bcc'] = ', '.join(bcc)
if replyto:
m['Reply-To'] = ', '.join(replyto)
if opts.get('test'):
ui.status(_('displaying '), subj, ' ...\n')
ui.pager('email')
generator = emailmod.Generator.Generator(ui, mangle_from_=False)
try:
generator.flatten(m, 0)
ui.write('\n')
except IOError as inst:
if inst.errno != errno.EPIPE:
raise
else:
if not sendmail:
sendmail = mail.connect(ui, mbox=mbox)
ui.status(_('sending '), subj, ' ...\n')
ui.progress(_('sending'), i, item=subj, total=len(msgs),
unit=_('emails'))
if not mbox:
# Exim does not remove the Bcc field
del m['Bcc']
fp = stringio()
generator = emailmod.Generator.Generator(fp, mangle_from_=False)
generator.flatten(m, 0)
sendmail(sender_addr, to + bcc + cc, fp.getvalue())
ui.progress(_('writing'), None)
ui.progress(_('sending'), None)
|
py | 1a4d3db2e28a719745078093cc2c068a8f66ff98 | from django.db import models
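# Minimal blog-style schema: every Article belongs to exactly one Author,
# reachable in reverse through the ``articles`` related name.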
class Author(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255)
email = models.EmailField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Meta:
        db_table = 'author'
class Article(models.Model):
id = models.AutoField(primary_key=True)
title = models.CharField(max_length=120)
description = models.TextField()
body = models.TextField()
author = models.ForeignKey(
'Author',
related_name='articles',
on_delete=models.CASCADE
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
class Meta:
        db_table = 'article'
|
py | 1a4d3ed51b3598399e47c8f8597a0a3fcb164e41 | import bpy
import math
import bmesh
bonesCount = 0
def write(filepath,
applyMods=False
):
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.transform_apply(location = True, scale = True, rotation = True)
bpy.ops.object.select_all(action='DESELECT')
scene = bpy.context.scene
meshData = MeshData()
animsData = []
bones = []
for obj in bpy.context.visible_objects:
if obj.pose is not None:
for bone in obj.pose.bones:
bones.append(bone)
global bonesCount
bonesCount = len(bones)
for obj in bpy.context.visible_objects:
        if applyMods or obj.type != "MESH":
            try:
                me = obj.to_mesh(scene, True, "PREVIEW")
            except RuntimeError:
                me = None
            is_tmp_mesh = True
        else:
            try:
                me = obj.to_mesh(scene, False, "PREVIEW")
            except RuntimeError:
                me = None
            is_tmp_mesh = True
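        # Bake animations: play back every NLA strip frame by frame and record
        # each bone's position, Euler rotation and scale for that frame.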
if obj.animation_data is not None:
for track in obj.animation_data.nla_tracks:
for strip in track.strips:
action = strip.action
obj.animation_data.action = action
animData = AnimData(action.name)
for i in range(int(action.frame_range[0]), int(action.frame_range[1])):
bpy.context.scene.frame_set(i)
bpy.context.scene.update()
for bone in bones:
animData.addB_Position(bone.head)
bone.rotation_mode = 'XYZ'
animData.addB_Rotation(bone.rotation_euler)
animData.addB_Scale(bone.scale)
animsData.append(animData)
if me is not None:
bm = bmesh.new()
bm.from_mesh(me)
#bmesh.ops.subdivide_edges(bm, edges=bm.edges, use_grid_fill=True, cuts=1)
bmesh.ops.triangulate(bm, faces=bm.faces)
bm.to_mesh(me)
bm.free()
del bm
for vertex in me.vertices:
found = 0
for group in vertex.groups:
i = 0
for bone in bones:
if obj.vertex_groups[group.group].name == bone.name:
found = i
i+=1
meshData.addV_Position(vertex.co)
meshData.addV_Normal(vertex.normal)
meshData.addV_Bone(found)
# empty value as place for material index later
meshData.addV_Material(0)
for polygon in me.polygons:
meshData.addF_Vertex(polygon)
meshData.addF_Material(polygon)
meshData.addF_UVs(polygon, me)
meshData.setF_Count(meshData.fCount + len(me.polygons))
meshData.setV_Count(meshData.vCount + len(me.vertices))
meshData.setM_Count(meshData.mCount + len(obj.material_slots))
for mat_slot in obj.material_slots:
meshData.addM_Color(mat_slot)
if is_tmp_mesh:
bpy.data.meshes.remove(me)
writeToFile(filepath, meshData, animsData)
def writeToFile(filepath, meshData, animsData):
# open target file
file = open(filepath, "w")
i = 0
# write the commons to the file
commons = "".join("SOM (SceneObjectMesh) file created by Blender SOM exporter"
+ "\n" + "project page: https://github.com/JohnsProject/JPGE" + "\n")
file.write(commons)
# write the vertex data to the file
i = 0
file.write("vCount < " + str(meshData.vCount) + " > vCount" + "\n")
file.write("vPosition < ")
for value in meshData.vPosition:
i += 1
if (i < len(meshData.vPosition)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > vPosition" + "\n")
i = 0
file.write("vNormal < ")
for value in meshData.vNormal:
i += 1
if (i < len(meshData.vNormal)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > vNormal" + "\n")
i = 0
file.write("vBone < ")
for value in meshData.vBone:
i += 1
if (i < len(meshData.vBone)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > vBone" + "\n")
i = 0
file.write("vMaterial < ")
for value in meshData.vMaterial:
i += 1
if (i < len(meshData.vMaterial)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > vMaterial" + "\n")
file.write("\n")
# write the face data to the file
i = 0
file.write("fCount < " + str(meshData.fCount) + " > fCount" + "\n")
file.write("fVertex1 < ")
for value in meshData.fVertex1:
i += 1
if (i < len(meshData.fVertex1)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fVertex1" + "\n")
i = 0
file.write("fVertex2 < ")
for value in meshData.fVertex2:
i += 1
if (i < len(meshData.fVertex2)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fVertex2" + "\n")
i = 0
file.write("fVertex3 < ")
for value in meshData.fVertex3:
i += 1
if (i < len(meshData.fVertex3)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fVertex3" + "\n")
i = 0
file.write("fMaterial < ")
for value in meshData.fMaterial:
i += 1
if (i < len(meshData.fMaterial)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fMaterial" + "\n")
i = 0
file.write("fUV1 < ")
for value in meshData.fUV1:
i += 1
if (i < len(meshData.fUV1)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fUV1" + "\n")
i = 0
file.write("fUV2 < ")
for value in meshData.fUV2:
i += 1
if (i < len(meshData.fUV2)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fUV2" + "\n")
i = 0
file.write("fUV3 < ")
for value in meshData.fUV3:
i += 1
if (i < len(meshData.fUV3)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > fUV3" + "\n")
file.write("\n")
# write the material data to the file
i = 0
file.write("mCount < " + str(meshData.mCount) + " > mCount" + "\n")
file.write("mColor < ")
for value in meshData.mColor:
i += 1
if (i < len(meshData.mColor)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > mColor" + "\n")
file.write("\n")
# write the animations to the file
file.write("Animations < " + "\n")
global bonesCount
file.write((" BonesCount <%i" % bonesCount) + "> BonesCount \n")
for animData in animsData:
file.write(" Animation < " + "\n")
file.write(" Name < " + animData.name + "> Name \n")
i = 0
file.write(" bPosition < ")
for value in animData.bPosition:
i += 1
if (i < len(animData.bPosition)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > bPosition" + "\n")
i = 0
file.write(" bRotation < ")
for value in animData.bRotation:
i += 1
if (i < len(animData.bRotation)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > bRotation" + "\n")
i = 0
file.write(" bScale < ")
for value in animData.bScale:
i += 1
if (i < len(animData.bScale)):
file.write("%i," % value)
else:
file.write(("%i" % value))
file.write(" > bScale" + "\n")
file.write(" > Animation " + "\n")
file.write("> Animations" + "\n")
# close file
file.close()
class MeshData:
def __init__(self):
# v = vertex
self.vCount = 0
self.vPosition = []
self.vNormal = []
self.vBone = []
self.vMaterial = []
# f = face
self.fCount = 0
self.fVertex1 = []
self.fVertex2 = []
self.fVertex3 = []
self.fMaterial = []
self.fUV1 = []
self.fUV2 = []
self.fUV3 = []
# m = material
self.mCount = 0
self.mColor = []
def setV_Count(self, value):
self.vCount = value
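    # Coordinates are exported as fixed-point integers: positions/normals are
    # scaled by 1000 and UVs by 128 before writeToFile serializes them
    # with "%i".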
def addV_Position(self, value):
self.vPosition.append(value[0]*1000)
self.vPosition.append(value[1]*1000)
self.vPosition.append(value[2]*1000)
def addV_Normal(self, value):
self.vNormal.append(value[0]*1000)
self.vNormal.append(value[1]*1000)
self.vNormal.append(value[2]*1000)
def addV_Bone(self, value):
self.vBone.append(value)
def addV_Material(self, value):
self.vMaterial.append(value)
def setF_Count(self, value):
self.fCount = value
def addF_Vertex(self, value):
self.fVertex1.append(self.vCount + value.vertices[0])
self.fVertex2.append(self.vCount + value.vertices[1])
self.fVertex3.append(self.vCount + value.vertices[2])
    def addF_Material(self, value):
        self.fMaterial.append(self.mCount + value.material_index)
        # vertex indices are local to the current mesh, so offset them by the
        # running vertex count, exactly as addF_Vertex does
        self.vMaterial[self.vCount + value.vertices[0]] = self.mCount + value.material_index
        self.vMaterial[self.vCount + value.vertices[1]] = self.mCount + value.material_index
        self.vMaterial[self.vCount + value.vertices[2]] = self.mCount + value.material_index
def addF_UVs(self, value, me):
if me.uv_layers.active is not None:
self.fUV1.append(me.uv_layers.active.data[value.loop_indices[0]].uv[0]*128)
self.fUV1.append(me.uv_layers.active.data[value.loop_indices[0]].uv[1]*128)
self.fUV2.append(me.uv_layers.active.data[value.loop_indices[1]].uv[0]*128)
self.fUV2.append(me.uv_layers.active.data[value.loop_indices[1]].uv[1]*128)
self.fUV3.append(me.uv_layers.active.data[value.loop_indices[2]].uv[0]*128)
self.fUV3.append(me.uv_layers.active.data[value.loop_indices[2]].uv[1]*128)
else:
self.fUV1.append(0)
self.fUV1.append(0)
self.fUV2.append(0)
self.fUV2.append(0)
self.fUV3.append(0)
self.fUV3.append(0)
def setM_Count(self, value):
self.mCount = value
def addM_Color(self, value):
if value.material is not None:
self.mColor.append(value.material.diffuse_color[0] * 255)
self.mColor.append(value.material.diffuse_color[1] * 255)
self.mColor.append(value.material.diffuse_color[2] * 255)
self.mColor.append(value.material.alpha * 255)
else:
self.mColor.append(0)
self.mColor.append(0)
self.mColor.append(0)
self.mColor.append(100)
class AnimData:
def __init__(self, name):
self.name = name
self.bPosition = []
self.bRotation = []
self.bScale = []
def addB_Position(self, value):
self.bPosition.append(value[0]*100)
self.bPosition.append(value[1]*100)
self.bPosition.append(value[2]*100)
def addB_Rotation(self, value):
self.bRotation.append(math.degrees(value[0]))
self.bRotation.append(math.degrees(value[1]))
self.bRotation.append(math.degrees(value[2]))
def addB_Scale(self, value):
self.bScale.append(value[0])
self.bScale.append(value[1])
self.bScale.append(value[2])
|
py | 1a4d3eefe9d62bec210e0919109cfa244b337267 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="InstaPython",
version="1.1.1",
author="Micha Birklbauer",
author_email="[email protected]",
description="A set of classes and functions to access Instagram.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/michabirklbauer/instapython",
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
|
py | 1a4d3fb4e86e16bf2588450bbec298a645147216 | import pytest
from dvc.cli import parse_args
from dvc.command.plot import CmdPlotDiff, CmdPlotShow
def test_metrics_diff(mocker):
cli_args = parse_args(
[
"plot",
"diff",
"--file",
"result.extension",
"-t",
"template",
"-d",
"datafile",
"--select",
"column1,column2",
"--no-html",
"--stdout",
"-x",
"x_field",
"-y",
"y_field",
"--title",
"my_title",
"--xlab",
"x_title",
"--ylab",
"y_title",
"HEAD",
"tag1",
"tag2",
]
)
assert cli_args.func == CmdPlotDiff
cmd = cli_args.func(cli_args)
m = mocker.patch.object(cmd.repo, "plot", autospec=True)
mocker.patch("builtins.open")
mocker.patch("os.path.join")
assert cmd.run() == 0
m.assert_called_once_with(
datafile="datafile",
template="template",
revisions=["HEAD", "tag1", "tag2"],
fields={"column1", "column2"},
path=None,
embed=False,
x_field="x_field",
y_field="y_field",
csv_header=True,
title="my_title",
x_title="x_title",
y_title="y_title",
)
def test_metrics_show(mocker):
cli_args = parse_args(
[
"plot",
"show",
"-f",
"result.extension",
"-t",
"template",
"-s",
"$.data",
"--no-html",
"--stdout",
"--no-csv-header",
"datafile",
]
)
assert cli_args.func == CmdPlotShow
cmd = cli_args.func(cli_args)
m = mocker.patch.object(cmd.repo, "plot", autospec=True)
mocker.patch("builtins.open")
mocker.patch("os.path.join")
assert cmd.run() == 0
m.assert_called_once_with(
datafile="datafile",
template="template",
revisions=None,
fields=None,
path="$.data",
embed=False,
x_field=None,
y_field=None,
csv_header=False,
title=None,
x_title=None,
y_title=None,
)
@pytest.mark.parametrize(
"arg_revisions,is_dirty,expected_revisions",
[
([], False, ["workspace"]),
([], True, ["HEAD", "workspace"]),
(["v1", "v2", "workspace"], False, ["v1", "v2", "workspace"]),
(["v1", "v2", "workspace"], True, ["v1", "v2", "workspace"]),
],
)
def test_revisions(mocker, arg_revisions, is_dirty, expected_revisions):
args = mocker.MagicMock()
cmd = CmdPlotDiff(args)
mocker.patch.object(args, "revisions", arg_revisions)
mocker.patch.object(cmd.repo.scm, "is_dirty", return_value=is_dirty)
assert cmd._revisions() == expected_revisions
|
py | 1a4d4007e6dac0b04beadfd3c0d165734f423f3b | import sqlalchemy as sa
from alembic import op
revision = "dddddddddddd"
down_revision = "cccccccccccc"
branch_labels = None
depends_on = None
def upgrade():
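    # A server-side default ensures existing "foo" rows are backfilled
    # with 9 when the column is added.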
op.add_column("foo", sa.Column("bar_id", sa.Integer(), server_default="9"))
def downgrade():
op.drop_column("foo", "bar_id")
|
py | 1a4d4034b1a4809e95f1c51194b2ae1dc8fc7ad5 | """simple_site_requester URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from rest_framework import routers
from ssr_api.views import SiteRequestViewSet
router = routers.DefaultRouter()
router.register(r'requested_sites', SiteRequestViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
|
py | 1a4d4197c922fd28ba43f0e8fb76cff1f3227ddd |
idade = int(input("Enter your age: "))  # 'idade' means age
if idade >= 18:
    print("Access granted, you may enter!")
else:
    print("Access denied")
|
py | 1a4d4198358abe36ceb86bb80e5511ca800d7be9 | #!/usr/bin/env python
# Copyright 2017-present WonderLabs, Inc. <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pexpect
import sys
import time  # used by dongle_start/dongle_restart and the scan delays below
from bluepy.btle import Scanner, DefaultDelegate
import binascii
import copy
import datetime
class ScanDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
class DevScanner(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
# print('Scanner inited')
def dongle_start(self):
self.con = pexpect.spawn('hciconfig hci0 up')
time.sleep(1)
def dongle_restart(self):
print('restart bluetooth dongle')
self.con = pexpect.spawn('hciconfig hci0 down')
time.sleep(3)
self.con = pexpect.spawn('hciconfig hci0 up')
time.sleep(3)
def scan_loop(self):
service_uuid = 'cba20d00-224d-11e6-9fb8-0002a5d5c51b'
company_id = '6909' # actually 0x0969
dev_list = []
bot_list = []
meter_list = []
curtain_list = []
contact_list = []
motion_list = []
param_list = []
pir_tip = ['No movement detected', 'Movement detected']
hall_tip = ['Door closed', 'Door opened', 'Timeout no closed']
light_tip = ['Dark', 'Bright']
self.con = pexpect.spawn('hciconfig')
pnum = self.con.expect(['hci0', pexpect.EOF, pexpect.TIMEOUT])
if pnum == 0:
self.con = pexpect.spawn('hcitool lescan')
# self.con.expect('LE Scan ...', timeout=5)
scanner = Scanner().withDelegate(DevScanner())
devices = scanner.scan(10.0)
print('Scanning...')
else:
            raise RuntimeError('no bluetooth adapter found')
for dev in devices:
mac = 0
param_list[:] = []
for (adtype, desc, value) in dev.getScanData():
# print(adtype, desc, value)
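                # In the 16-bit service data advert, hex chars 4:6 decode to
                # the model byte: 'H' = Bot, 'T' = Meter, 'd' = Contact,
                # 's' = Motion.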
if desc == '16b Service Data':
dev_type = binascii.a2b_hex(value[4:6])
if dev_type == 'H':
param_list.append(binascii.a2b_hex(value[6:8]))
elif dev_type == 'T':
# celsius
tempFra = int(value[11:12].encode('utf-8'), 16) / 10.0
tempInt = int(value[12:14].encode('utf-8'), 16)
if tempInt < 128:
tempInt *= -1
tempFra *= -1
else:
tempInt -= 128
param_list.append(tempInt + tempFra)
param_list.append(
int(value[14:16].encode('utf-8'), 16) % 128)
# print('meter:', param1, param2)
elif dev_type == 'd':
# print(adtype, desc, value)
pirSta = (
int(value[6:7].encode('utf-8'), 16) >> 2) & 0x01
diffSec = (
(int(value[10:11].encode('utf-8'), 16) & 0x04) << 14) \
+ int(value[16:20].encode('utf-8'), 16)
hallSta = (
int(value[11:12].encode('utf-8'), 16) >> 1) & 0x03
lightSta = int(value[11:12].encode('utf-8'), 16) & 0x01
param_list.extend([hallSta, pirSta, lightSta, diffSec])
# print(pirSta, diffSec, hallSta, lightSta, diffSec)
elif dev_type == 's':
# print(adtype, desc, value)
pirSta = (
int(value[6:7].encode('utf-8'), 16) >> 2) & 0x01
lightSta = (int(value[15:16].encode('utf-8'), 16) & 0x03) - 1
# TODO:
diffSec = 0
param_list.extend([pirSta, lightSta, diffSec])
else:
param_list[:] = []
elif desc == 'Local name':
if value == 'WoHand':
mac = dev.addr
dev_type = 'H'
elif value == 'WoMeter':
mac = dev.addr
dev_type = 'T'
elif value == 'WoCurtain':
mac = dev.addr
dev_type = 'c'
elif value == 'WoContact':
mac = dev.addr
dev_type = 'd'
elif value == 'WoMotion':
mac = dev.addr
dev_type = 's'
elif desc == 'Complete 128b Services' and value == service_uuid:
mac = dev.addr
elif desc == 'Manufacturer' and value[0:4] == company_id:
mac = dev.addr
if mac != 0:
dev_list.append([mac, dev_type, copy.deepcopy(param_list)])
# print(dev_list)
for (mac, dev_type, params) in dev_list:
if dev_type == 'H':
if int(binascii.b2a_hex(params[0]), 16) > 127:
bot_list.append([mac, 'Bot', 'Turn On'])
bot_list.append([mac, 'Bot', 'Turn Off'])
bot_list.append([mac, 'Bot', 'Up'])
bot_list.append([mac, 'Bot', 'Down'])
else:
bot_list.append([mac, 'Bot', 'Press'])
elif dev_type == 'T':
meter_list.append([mac, 'Meter', "%.1f'C %d%%" %
(params[0], params[1])])
elif dev_type == 'c':
curtain_list.append([mac, 'Curtain', 'Open'])
curtain_list.append([mac, 'Curtain', 'Close'])
curtain_list.append([mac, 'Curtain', 'Pause'])
elif dev_type == 'd':
timeTrigger = datetime.datetime.now() - datetime.timedelta(0, params[3])
contact_list.append([mac, 'Contact', "%s, %s, %s, Last trigger: %s" %
(hall_tip[params[0]], pir_tip[params[1]], light_tip[params[2]], timeTrigger.strftime("%Y-%m-%d %H:%M"))])
elif dev_type == 's':
motion_list.append([mac, 'Motion', "%s, %s" %
(pir_tip[params[0]], light_tip[params[1]])])
print('Scan timeout.')
return bot_list + meter_list + curtain_list + contact_list + motion_list
pass
def register_cb(self, fn):
self.cb = fn
return
def close(self):
# self.con.sendcontrol('c')
self.con.close(force=True)
def trigger_device(device):
[mac, dev_type, act] = device
# print 'Start to control'
con = pexpect.spawn('gatttool -b ' + mac + ' -t random -I')
con.expect('\[LE\]>')
print('Preparing to connect.')
retry = 3
index = 0
while retry > 0 and 0 == index:
con.sendline('connect')
# To compatible with different Bluez versions
index = con.expect(
['Error', '\[CON\]', 'Connection successful.*\[LE\]>'])
retry -= 1
if 0 == index:
print('Connection error.')
return
print('Connection successful.')
con.sendline('char-desc')
con.expect(['\[CON\]', 'cba20002-224d-11e6-9fb8-0002a5d5c51b'])
cmd_handle = con.before.split('\n')[-1].split()[2].strip(',')
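    # SwitchBot write commands start with the bytes 0x57 0x01; the third byte
    # picks the Bot action: 0x00 press, 0x01 on, 0x02 off, 0x03 down, 0x04 up
    # (see the branches below).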
if dev_type == 'Bot':
if act == 'Turn On':
con.sendline('char-write-cmd ' + cmd_handle + ' 570101')
elif act == 'Turn Off':
con.sendline('char-write-cmd ' + cmd_handle + ' 570102')
elif act == 'Press':
con.sendline('char-write-cmd ' + cmd_handle + ' 570100')
elif act == 'Down':
con.sendline('char-write-cmd ' + cmd_handle + ' 570103')
elif act == 'Up':
con.sendline('char-write-cmd ' + cmd_handle + ' 570104')
elif dev_type == 'Meter':
con.sendline('char-write-cmd ' + cmd_handle + ' 570F31')
con.expect('\[LE\]>')
con.sendline('char-read-uuid cba20003-224d-11e6-9fb8-0002a5d5c51b')
index = con.expect(['value:[0-9a-fA-F ]+', 'Error'])
if index == 0:
data = con.after.split(':')[1].replace(' ', '')
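            # Decode the reading: hex digit 3 is tenths of a degree, bytes 4:6
            # hold the integer part with bit 7 as the sign flag, and the low
            # 7 bits of bytes 6:8 are the humidity percentage.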
tempFra = int(data[3], 16) / 10.0
tempInt = int(data[4:6], 16)
if tempInt < 128:
tempInt *= -1
tempFra *= -1
else:
tempInt -= 128
meterTemp = tempInt + tempFra
meterHumi = int(data[6:8], 16) % 128
print("Meter[%s] %.1f'C %d%%" % (mac, meterTemp, meterHumi))
else:
print('Error!')
elif dev_type == 'Curtain':
if act == 'Open':
con.sendline('char-write-cmd ' + cmd_handle + ' 570F450105FF00')
elif act == 'Close':
con.sendline('char-write-cmd ' + cmd_handle + ' 570F450105FF64')
elif act == 'Pause':
con.sendline('char-write-cmd ' + cmd_handle + ' 570F450100FF')
else:
print('Unsupported operations')
con.expect('\[LE\]>')
con.sendline('quit')
print('Complete')
def main():
# Check bluetooth dongle
print(
'Usage: "sudo python switchbot.py [mac dev_type cmd]" or "sudo python switchbot.py"')
connect = pexpect.spawn('hciconfig')
pnum = connect.expect(["hci0", pexpect.EOF, pexpect.TIMEOUT])
if pnum != 0:
print('No bluetooth hardware, exit now')
sys.exit()
connect = pexpect.spawn('hciconfig hci0 up')
# print(sys.argv, len(sys.argv))
if len(sys.argv) == 4 or len(sys.argv) == 5:
dev = sys.argv[1]
dev_type = sys.argv[2]
act = sys.argv[3] if len(sys.argv) < 5 else ('Turn ' + sys.argv[4])
trigger_device([dev, dev_type, act])
elif len(sys.argv) == 1:
# Start scanning...
scan = DevScanner()
dev_list = scan.scan_loop()
# dev_number = None
if not dev_list:
print("No SwitchBot nearby, exit")
sys.exit()
for idx, val in enumerate(dev_list):
print('%2d' % idx, val)
dev_number = int(input("Input the device number to control:"))
if dev_number >= len(dev_list):
print("Input error, exit")
else:
ble_dev = dev_list[dev_number]
print(ble_dev)
# Trigger the device to work
# If the SwitchBot address is known you can run this command directly without scanning
trigger_device(ble_dev)
else:
print('Wrong cmd!')
print(
'Usage: "sudo python switchbot.py [mac dev_type cmd]" or "sudo python switchbot.py"')
connect = pexpect.spawn('hciconfig')
sys.exit()
if __name__ == "__main__":
main()
|
py | 1a4d41b5ad2f545a99b40f237f5b720d28a35b18 | from __future__ import (absolute_import, division, print_function)
# make plot of ozone concentration data on
# lambert conformal conic map projection, drawing coastlines, state and
# country boundaries, and parallels/meridians.
# the data is interpolated to the native projection grid.
from mpl_toolkits.basemap import Basemap, shiftgrid
import numpy as np
import matplotlib.pyplot as plt
import netCDF4
plt.rcParams['text.usetex'] = False
# read in netCDF4 file. Results from CAMx v6
# test case, converted to netcdf by PseudoNetCDF
# pseudonetcdf.googlecode.com
camx = netCDF4.Dataset('camx.sample.nc')
#alternatively read directly from CAMx uamiv file
#if available
#
# from PseudoNetCDF.camxfiles.Memmaps import uamiv
# camx = uamiv('camx.bin')
# Get Ozone Variable
o3 = camx.variables['O3']
# Get projection space
llcrnrx = camx.XORIG
llcrnry = camx.YORIG
urcrnrx = llcrnrx + (o3[:].shape[-1] + 1) * camx.XCELL
urcrnry = llcrnry + (o3[:].shape[-2] + 1) * camx.YCELL
# Get edge values for pcolor
xedge = np.linspace(0, urcrnrx - llcrnrx, camx.NCOLS + 1)
yedge = np.linspace(0, urcrnry - llcrnry, camx.NROWS + 1)
X, Y = np.meshgrid(xedge, yedge)
# setup of basemap ('lcc' = lambert conformal conic).
# projection parameters from CAMx file
m = Basemap(projection = 'lcc',
lon_0=camx.P_GAM, lat_0 = 40.,
lat_1 = camx.P_ALP, lat_2 = camx.P_BET,
llcrnrx = llcrnrx, llcrnry = llcrnry,
urcrnry = urcrnry, urcrnrx = urcrnrx)
# create the figure.
fig=plt.figure(figsize=(8,8))
# add an axes.
ax = fig.add_axes([0.1,0.1,0.8,0.8])
ax.set_facecolor('lightgrey')
# associate this axes with the Basemap instance.
m.ax = ax
# plot tile plot with pcolor
# Use first time and first layer (i.e., o3[0, 0] (time, layer, row, col))
# Edge cells have precisely 0 value, and are masked
# to avoid an unnecessary color range.
# Each color bin contains precisely 10% of values
# which makes for a pretty plot.
from matplotlib.colors import ListedColormap
WhGrYlBu = ListedColormap(['#ffffff', '#b7f6ff', '#70edff', '#29e4ff', '#00e1fb', '#0fffc6', '#3bffa4', '#68ff82', '#94ff60', '#c0ff3e', '#edff1c', '#fff400', '#ffc700', '#ff9b00', '#ff6e00', '#ff4200', '#ff1500', '#e80000', '#bb0000', '#8f0000'])
#.from_list('WhGrYlBu', ['white', 'white', 'cyan', 'lightblue', 'lightgreen', 'green', 'yellow', 'orange', 'red', 'red'])
toplot = np.ma.masked_values(o3[0, 0], 0.) * 1000.
bounds = np.percentile(toplot.compressed().ravel(), np.linspace(5, 95, 9).tolist())
ptch = m.pcolor(X, Y, toplot, cmap = WhGrYlBu, norm = plt.matplotlib.colors.BoundaryNorm(bounds, 20), vmin = bounds[0], vmax = bounds[-1])
# Add a colorbar using proportional spacing, but
# colors based on 10 distinct bins
cb = m.colorbar(ptch, location='right',pad='10%', boundaries = bounds, spacing = 'proportional', format = '%.3f', extend = 'both') # draw colorbar
# Add units to the colorbar
cb.ax.set_xlabel('%s*1000.' % o3.units.strip())
# plot blue dot on Houston, Baton Rouge, and Atlanta
def add_dot(lon, lat, label):
xpt,ypt = m(lon,lat)
m.plot([xpt],[ypt],'bo')
ax.annotate(label, xy=(xpt, ypt), xytext=(xpt+1e5, ypt+1e5),
bbox=dict(boxstyle="round4", fc="w"),
arrowprops=dict(facecolor='black'),
)
add_dot(-95.361328,29.754505, 'Houston')
add_dot(-91.140320, 30.458283, 'Baton Rouge')
add_dot(-84.387982, 33.748995, 'Atlanta')
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawcountries()
m.drawstates()
# draw parallels and meridians.
# label on left, right and bottom of map.
parallels = np.arange(20.,60,10.)
m.drawparallels(parallels,labels=[1,1,0,1])
meridians = np.arange(-120., 70.,10.)
m.drawmeridians(meridians,labels=[1,1,0,1])
# set title.
ax.set_title('O$_3$ as predicted by the CAMx v6 Test-Case\neach color division has 10% of cells 5-95% and 5% in triagles')
import textwrap
histstr = 'Processing: %s' % '\n'.join(textwrap.wrap(camx.history.strip(), 140))
fig.text(0.01, 0.01, histstr, horizontalalignment = 'left', verticalalignment = 'bottom', size = 8)
plt.draw()
plt.show()
|
py | 1a4d41f2956aacf3d8ffe75272dcc00d06fc0b11 | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy.interface.matrix_utilities
import abc
import numpy as np
class BaseMatrixInterface(metaclass=abc.ABCMeta):
    """
    An interface between constants' internal values
    and the target matrix used internally.
    """
@abc.abstractmethod
def const_to_matrix(self, value, convert_scalars=False):
"""Convert an arbitrary value into a matrix of type self.target_matrix.
Args:
value: The constant to be converted.
convert_scalars: Should scalars be converted?
Returns:
A matrix of type self.target_matrix or a scalar.
"""
raise NotImplementedError()
# Adds a case for scalars to const_to_matrix methods.
@staticmethod
def scalar_const(converter):
def new_converter(self, value, convert_scalars=False):
if not convert_scalars and cvxpy.interface.matrix_utilities.is_scalar(value):
return cvxpy.interface.matrix_utilities.scalar_value(value)
else:
return converter(self, value)
return new_converter
# Return an identity matrix.
@abc.abstractmethod
def identity(self, size):
raise NotImplementedError()
# Return the number of elements of the matrix.
def size(self, matrix):
return np.prod(self.shape(matrix), dtype=int)
# Return the dimensions of the matrix.
@abc.abstractmethod
def shape(self, matrix):
raise NotImplementedError()
# Get the matrix interpreted as a scalar.
@abc.abstractmethod
def scalar_value(self, matrix):
raise NotImplementedError()
# Return a matrix with all 0's.
def zeros(self, shape):
return self.scalar_matrix(0, shape)
# Return a matrix with all 1's.
def ones(self, shape):
return self.scalar_matrix(1, shape)
# A matrix with all entries equal to the given scalar value.
@abc.abstractmethod
def scalar_matrix(self, value, shape):
raise NotImplementedError()
# Return the value at the given index in the matrix.
def index(self, matrix, key):
value = matrix[key]
# Reduce to a scalar if possible.
if cvxpy.interface.matrix_utilities.shape(value) == (1, 1):
return cvxpy.interface.matrix_utilities.scalar_value(value)
else:
return value
# Coerce the matrix into the given shape.
@abc.abstractmethod
def reshape(self, matrix, shape):
raise NotImplementedError()
def block_add(self, matrix, block, vert_offset, horiz_offset, rows, cols,
vert_step: int=1, horiz_step: int=1) -> None:
"""Add the block to a slice of the matrix.
Args:
matrix: The matrix the block will be added to.
block: The matrix/scalar to be added.
vert_offset: The starting row for the matrix slice.
horiz_offset: The starting column for the matrix slice.
rows: The height of the block.
cols: The width of the block.
vert_step: The row step size for the matrix slice.
horiz_step: The column step size for the matrix slice.
"""
block = self._format_block(matrix, block, rows, cols)
matrix[vert_offset:(rows+vert_offset):vert_step,
horiz_offset:(horiz_offset+cols):horiz_step] += block
def _format_block(self, matrix, block, rows, cols):
"""Formats the block for block_add.
Args:
matrix: The matrix the block will be added to.
block: The matrix/scalar to be added.
rows: The height of the block.
cols: The width of the block.
"""
# If the block is a scalar, promote it.
if cvxpy.interface.matrix_utilities.is_scalar(block):
            block = self.scalar_matrix(
                cvxpy.interface.matrix_utilities.scalar_value(block),
                (rows, cols))
# If the block is a vector coerced into a matrix, promote it.
elif cvxpy.interface.matrix_utilities.is_vector(block) and cols > 1:
block = self.reshape(block, (rows, cols))
# If the block is a matrix coerced into a vector, vectorize it.
elif not cvxpy.interface.matrix_utilities.is_vector(block) and cols == 1:
block = self.reshape(block, (rows, cols))
# Ensure the block is the same type as the matrix.
elif type(block) != type(matrix):
block = self.const_to_matrix(block)
return block
|
py | 1a4d445230d291d088fa0853c4454615c8df43f9 | import time
import logging
from sm_plugins.nfvo import Nfvo
from nameko.rpc import rpc
from nameko import config
from osm_client import OsmClient
from osm_error import OsmError
from osm_actions import OsmActions
from sm_agents.responses.response import AgentResponse
logger = logging.getLogger(__name__)
class OSMAgent(Nfvo):
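    """Nameko RPC agent exposing OSM network-service lifecycle operations:
    deployment, deletion and day-2 actions such as moving a CVNF."""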
name = "osm_agent"
def __init__(self):
self.retries = config.get('nr_retries')
        self.retries_interval = config.get('retires_interval')  # config key keeps its historical spelling
@rpc
def deploy_instance(self, ctx, args):
logger.debug('"deploy_instance" ctx: {} | args: {}'.format(ctx, args))
b_time = round(time.time() * 1000)
osm_client = OsmClient(ctx.get('url'),
ctx.get('username'),
ctx.get('password'),
ctx['parameters'].get('project_id'))
# schedule ns instantiation
res = osm_client.schedule_ns_instantiation(args.get('descriptor_name'),
args.get('instance_name'),
args.get('location'),
args.get('instantiation_params'))
logger.debug('Schedule instantiation OSM response: {}'.format(res))
if isinstance(res, OsmError):
msg = 'An error has occurred while instantiating NS.' \
' OSM Client error: {}'.format(res.response.get('message'))
logger.debug(msg)
return AgentResponse(status='error',
message=msg) \
.response
nsr_id = res
# instantiate ns
res = osm_client.instantiate_ns(args.get('descriptor_name'),
args.get('instance_name'),
nsr_id,
args.get('location'))
if isinstance(res, OsmError):
msg = 'An error has occurred while instantiating NS.' \
' OSM Client error: {}'.format(res.response.get('message'))
logger.debug(msg)
return AgentResponse(status='error',
message=msg) \
.response
# wait for ns to be ready
retries = self.retries
while retries > 0:
nsrs = osm_client.get_nsrs(nsr_id=nsr_id)
if not nsrs:
logger.debug(
'$$osm_agent|OSMAgent|deploy_instance||{}$$'.format(str(round(time.time() * 1000) - b_time)))
return AgentResponse(status='error',
message='An error has occurred while instantiating NS. Empty payload.') \
.response
if len(nsrs) != 1:
logger.debug(
'$$osm_agent|OSMAgent|deploy_instance||{}$$'.format(str(round(time.time() * 1000) - b_time)))
return AgentResponse(status='error',
message='Multiple NSRs found.').response
nsr = nsrs[0]
if not nsr.get('operational-status'):
logger.debug(
'$$osm_agent|OSMAgent|deploy_instance||{}$$'.format(str(round(time.time() * 1000) - b_time)))
return AgentResponse(status='error',
message='An error has occurred while instantiating NS.'
' Cannot retrieve operational status.').response
oper_state = nsr.get('operational-status')
if not nsr.get('config-status'):
logger.debug(
'$$osm_agent|OSMAgent|deploy_instance||{}$$'.format(str(round(time.time() * 1000) - b_time)))
return AgentResponse(status='error',
message='An error has occurred while instantiating NS.'
' Cannot retrieve configuration status.').response
config_state = nsr.get('config-status')
logger.debug('NSR "oper_state": {} | "config_state": {}'.format(oper_state, config_state))
if oper_state == 'running' and config_state == 'configured':
logger.debug(
'$$osm_agent|OSMAgent|deploy_instance||{}$$'.format(str(round(time.time() * 1000) - b_time)))
ar = AgentResponse(status='ok', message='deployed')
ar.set_outputs('ns_id', nsr_id)
return ar.response
if oper_state == 'failed' or config_state == 'failed':
logger.debug(
'$$osm_agent|OSMAgent|deploy_instance||{}$$'.format(str(round(time.time() * 1000) - b_time)))
return AgentResponse(status='error',
message='An error has occurred while instantiating NS. Deploy failed') \
.response
            time.sleep(self.retries_interval)
retries = retries - 1
        logger.debug('$$osm_agent|OSMAgent|deploy_instance||{}$$'.format(str(round(time.time() * 1000) - b_time)))
return AgentResponse(status='error', message='An error has occurred while instantiating NS. Unknown error') \
.response
@rpc
def exec_action(self, ctx, args):
osm_client = OsmClient(ctx.get('url'),
ctx.get('username'),
ctx.get('password'),
ctx['parameters'].get('project_id'))
        osm_actions = OsmActions(osm_client, self.retries, self.retries_interval)
if args.get('action') == 'cvnf_move':
osm_actions.move_cvnf(args.get('cvnf_name'),
args.get('cvnf_dc'),
args.get('source_loc'),
args.get('target_loc'),
args.get('params'))
else:
return AgentResponse(status='error', message='Unknown OSM action') \
.response
@rpc
def exec_custom_action(self, ctx, args):
pass
@rpc
def delete_instance(self, ctx, args):
logger.debug('"delete_instance" ctx: {} | args: {}'.format(ctx, args))
b_time = round(time.time() * 1000)
osm_client = OsmClient(ctx.get('url'),
ctx.get('username'),
ctx.get('password'),
ctx['parameters'].get('project_id'))
res = osm_client.delete_instance(nsr_id=args.get('nsr_id'))
if isinstance(res, OsmError):
msg = 'An error has occurred while deleting NS.' \
' OSM Client error: {}'.format(res.response.get('message'))
logger.debug(msg)
return AgentResponse(status='error',
message=msg) \
.response
if res and res.get('code'):
logger.debug('$$osm_agent|OSMAgent|delete_instance||{}$$'.format(str(round(time.time() * 1000) - b_time)))
return AgentResponse(status='error',
message='An error has occurred while deleting NS. Error code: {}'
.format(res.get('code'))).response
retries = self.retries
while retries > 0:
nsrs = osm_client.get_nsrs(nsr_id=args.get('nsr_id'))
if not nsrs:
logger.debug(
'$$osm_agent|OSMAgent|delete_instance||{}$$'.format(str(round(time.time() * 1000) - b_time)))
return AgentResponse(status='ok', message='deleted').response
if len(nsrs) > 1:
logger.debug(
'$$osm_agent|OSMAgent|delete_instance||{}$$'.format(str(round(time.time() * 1000) - b_time)))
return AgentResponse(status='error',
message='Multiple NSRs found.').response
nsr = nsrs[0]
if nsr.get('code') and nsr.get('code') == 'NOT_FOUND':
logger.debug(
'$$osm_agent|OSMAgent|delete_instance||{}$$'.format(str(round(time.time() * 1000) - b_time)))
return AgentResponse(status='ok', message='deleted').response
if nsr.get('code') and nsr.get('code') != 'NOT_FOUND':
logger.debug(
'$$osm_agent|OSMAgent|delete_instance||{}$$'.format(str(round(time.time() * 1000) - b_time)))
return AgentResponse(status='error',
message='An error has occurred while deleting NS. Delete failed') \
.response
time.sleep(self.retires_interval)
            retries -= 1
|
py | 1a4d44de2bd14ff91f9413a568d18316da4f18a0 | from django.urls import re_path
from .views import (
PostListView,
PostDetailView,
PostCreateView,
PostDeleteView,
PostUpdateView,
)
app_name = 'posts'  # Django reads the lowercase app_name attribute for URL namespacing
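# Hypothetical project-level wiring (assumed layout), using the namespace above:
#
#   # project/urls.py
#   from django.urls import include, re_path
#   urlpatterns = [
#       re_path(r'^posts/', include('posts.urls', namespace='posts')),
#   ]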
urlpatterns = [
re_path(r'^list/$', PostListView.as_view(), name='list'),
re_path(r'^create/$', PostCreateView.as_view(), name='create'),
re_path(r'^(?P<id>\d+)/edit/$', PostUpdateView.as_view(), name='edit'),
re_path(r'^(?P<id>\d+)/delete/$', PostDeleteView.as_view(), name='delete'),
re_path(r'^(?P<id>\d+)/$', PostDetailView.as_view(), name='detail'),
] |
py | 1a4d45469235dac16eaa414b8fc6350bbbb040fa | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
At training and testing time, PaddlePaddle programs need to read data. To ease
the work of writing data-reading code, we define the following:
- A *reader* is a function that reads data (from file, network, random number
generator, etc) and yields data items.
- A *reader creator* is a function that returns a reader function.
- A *reader decorator* is a function, which accepts one or more readers, and
returns a reader.
- A *batch reader* is a function that reads data (from *reader*, file, network,
random number generator, etc) and yields a batch of data items.
#####################
Data Reader Interface
#####################
Indeed, a *data reader* doesn't have to be a function that reads and yields data
items. It can be any function with no parameters that creates an iterable
(anything that can be used in :code:`for x in iterable`)\:

.. code-block:: python

    iterable = data_reader()

Each element produced by the iterable should be a **single** entry of data,
**not** a mini batch. That entry of data could be a single item, or a tuple of
items. Each item should be of a supported type (e.g., a numpy array or a
list/tuple of floats or ints).
An example implementation for a single-item data reader creator:

.. code-block:: python

    def reader_creator_random_image(width, height):
        def reader():
            while True:
                yield numpy.random.uniform(-1, 1, size=width*height)
        return reader
An example implementation for a multiple-item data reader creator:

.. code-block:: python

    def reader_creator_random_image_and_label(width, height, label):
        def reader():
            while True:
                yield numpy.random.uniform(-1, 1, size=width*height), label
        return reader
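
An example implementation of a *batch reader* built on top of a plain reader (a
minimal sketch; ready-made helpers live in :code:`paddle.reader.decorator`):

.. code-block:: python

    def batch(reader, batch_size):
        def batch_reader():
            buf = []
            for item in reader():
                buf.append(item)
                if len(buf) == batch_size:
                    yield buf
                    buf = []
            if buf:
                yield buf
        return batch_reader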
"""
import paddle.reader.decorator
from paddle.reader.decorator import *
__all__ = []
|
py | 1a4d456e27de1d8ed9ad6a980c9177d15aec09d7 | import unittest
from jarn.viewdoc.viewdoc import DocumentationViewer
from jarn.viewdoc.testing import JailSetup
from jarn.viewdoc.testing import quiet
class GetOptionsTests(JailSetup):
def test_defaults(self):
self.mkfile('my.cfg', """\
[viewdoc]
""")
dv = DocumentationViewer(['-c', 'my.cfg'])
dv.set_defaults('my.cfg')
dv.parse_options(dv.args)
self.assertEqual(dv.defaults.version, '1.8')
self.assertEqual(dv.defaults.browser, 'default')
self.assertEqual(dv.defaults.known_styles, {})
self.assertEqual(dv.defaults.default_style, '')
self.assertEqual(dv.defaults.styles, '')
self.assertEqual(dv.browser, 'default')
self.assertEqual(dv.styles, '')
@quiet
def test_quiet(self):
print('Should not be visible')
def test_empty_defaults(self):
self.mkfile('my.cfg', """\
[viewdoc]
version =
browser =
style =
[styles]
pypi =
""")
dv = DocumentationViewer(['-c', 'my.cfg'])
dv.set_defaults('my.cfg')
dv.parse_options(dv.args)
self.assertEqual(dv.defaults.version, '1.8')
self.assertEqual(dv.defaults.browser, 'default')
self.assertEqual(dv.defaults.known_styles, {'pypi': ''})
self.assertEqual(dv.defaults.default_style, '')
self.assertEqual(dv.defaults.styles, '')
self.assertEqual(dv.browser, 'default')
self.assertEqual(dv.styles, '')
def test_read_defaults(self):
self.mkfile('my.cfg', """\
[viewdoc]
version = 2.0
browser = safari
style = pypi
[styles]
pypi = <style></style>
""")
dv = DocumentationViewer(['-c', 'my.cfg'])
dv.set_defaults('my.cfg')
dv.parse_options(dv.args)
self.assertEqual(dv.defaults.version, '2.0')
self.assertEqual(dv.defaults.browser, 'safari')
self.assertEqual(dv.defaults.known_styles, {'pypi': '<style></style>'})
self.assertEqual(dv.defaults.default_style, 'pypi')
self.assertEqual(dv.defaults.styles, '<style></style>')
self.assertEqual(dv.browser, 'safari')
self.assertEqual(dv.styles, '<style></style>')
def test_newline_in_defaults(self):
self.mkfile('my.cfg', """\
[viewdoc]
version =
2.0
browser =
safari
style =
pypi
[styles]
pypi =
<style>
</style>
plain =
""")
dv = DocumentationViewer(['-c', 'my.cfg'])
dv.set_defaults('my.cfg')
dv.parse_options(dv.args)
self.assertEqual(dv.defaults.version, '2.0')
self.assertEqual(dv.defaults.browser, 'safari')
self.assertEqual(dv.defaults.known_styles, {'pypi': '<style>\n</style>', 'plain': ''})
self.assertEqual(dv.defaults.default_style, 'pypi')
self.assertEqual(dv.defaults.styles, '<style>\n</style>')
self.assertEqual(dv.browser, 'safari')
self.assertEqual(dv.styles, '<style>\n</style>')
def test_unknown_style(self):
self.mkfile('my.cfg', """\
[viewdoc]
style = foo
[styles]
pypi = <style></style>
""")
dv = DocumentationViewer(['-c', 'my.cfg'])
dv.set_defaults('my.cfg')
dv.parse_options(dv.args)
self.assertEqual(dv.defaults.known_styles, {'pypi': '<style></style>'})
self.assertEqual(dv.defaults.default_style, 'foo')
self.assertEqual(dv.defaults.styles, '')
self.assertEqual(dv.browser, 'default')
self.assertEqual(dv.styles, '')
def test_command_line(self):
self.mkfile('my.cfg', """\
[viewdoc]
browser = safari
style = pypi
[styles]
pypi = <style>1</style>
small = <style>2</style>
""")
dv = DocumentationViewer(['-c', 'my.cfg', '-s', 'small', '-b', 'firefox'])
dv.set_defaults('my.cfg')
dv.parse_options(dv.args)
self.assertEqual(dv.defaults.browser, 'safari')
self.assertEqual(dv.defaults.known_styles, {'pypi': '<style>1</style>', 'small': '<style>2</style>'})
self.assertEqual(dv.defaults.default_style, 'pypi')
self.assertEqual(dv.defaults.styles, '<style>1</style>')
self.assertEqual(dv.browser, 'firefox')
self.assertEqual(dv.styles, '<style>2</style>')
|
py | 1a4d471c4d080967bf14126fb17006441b2376a4 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Travis Yates
"""Builder for region similarity calculators."""
from object_detection.core import region_similarity_calculator
from object_detection.protos import region_similarity_calculator_pb2
def build(region_similarity_calculator_config):
"""Builds region similarity calculator based on the configuration.
Builds one of [IouSimilarity, IoaSimilarity, NegSqDistSimilarity] objects. See
core/region_similarity_calculator.proto for details.
Args:
region_similarity_calculator_config: RegionSimilarityCalculator
configuration proto.
Returns:
region_similarity_calculator: RegionSimilarityCalculator object.
Raises:
ValueError: On unknown region similarity calculator.
"""
if not isinstance(
region_similarity_calculator_config,
region_similarity_calculator_pb2.RegionSimilarityCalculator):
    raise ValueError(
        'region_similarity_calculator_config not of type '
        'region_similarity_calculator_pb2.RegionSimilarityCalculator')
similarity_calculator = region_similarity_calculator_config.WhichOneof(
'region_similarity')
if similarity_calculator == 'iou_similarity':
return region_similarity_calculator.IouSimilarity()
if similarity_calculator == 'ioa_similarity':
return region_similarity_calculator.IoaSimilarity()
if similarity_calculator == 'neg_sq_dist_similarity':
return region_similarity_calculator.NegSqDistSimilarity()
raise ValueError('Unknown region similarity calculator.')
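# Hypothetical usage sketch (the text-format proto below is an assumption):
#
#   from google.protobuf import text_format
#   config = region_similarity_calculator_pb2.RegionSimilarityCalculator()
#   text_format.Merge('iou_similarity {}', config)
#   similarity_calc = build(config)  # -> region_similarity_calculator.IouSimilarity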
|
py | 1a4d47a184208315f409d8b95aaa00f5ee4328f7 | import azure.functions as azf
def main(req: azf.HttpRequest, foo: azf.Out[azf.HttpResponse]):
foo.set(azf.HttpResponse(body='hello', status_code=201))
|
py | 1a4d47dcc15cd4556f44c3a00bf9e6316e25d8f2 | ##* Queue: first-in, first-out (FIFO)
##! Using a deque
##* Appending and popping are faster than with the list type
##* Simpler than the queue library
from collections import deque
##* Use the deque library to implement the queue
queue = deque()
queue.append(5)
queue.append(300)
queue.append(200)
queue.append(400)
queue.popleft()
queue.append(2000)
queue.append(32)
queue.popleft()
##* Printed in insertion order (FIFO)
print(queue)
##* Reverse for the next print
queue.reverse()
##* Print starting from the most recently inserted element
print(queue)
##* The output is a deque object; to convert to a list, use list()
##! list(queue) |
py | 1a4d482b8d3ec2804511d584e6fdd5034e8abaf4 | from typing import List, Dict
import spacy
from rb.core.lang import Lang
from rb.core.text_element import TextElement
from rb.core.text_element_type import TextElementType
from rb.core.word import Word
class Span(TextElement):
def __init__(self, lang: Lang, text: str, words: List[Word], index_in_container: int,
depth: int = TextElementType.SPAN.value):
super().__init__(lang, text, index_in_container, depth, container=words[0].container)
self.components = words
def get_root(self) -> Word:
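        # the span's root is the word whose head lies outside the span (or is its own head)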
return [word for word in self.components
if word.head == word or
word.head.index_in_doc < self.components[0].index_in_doc or
word.head.index_in_doc > self.components[-1].index_in_doc
][0]
@classmethod
    def from_spacy_span(cls, lang: Lang, spacy_span: spacy.tokens.Span, words: Dict[int, Word]) -> "Span":
text = spacy_span.text
our_words = [words[i] for i in range(spacy_span.start, spacy_span.end)]
return Span(lang=lang, text=text, words=our_words, index_in_container=our_words[0].index_in_container)
|
py | 1a4d48631f1a7c2e116ebd476c4aa12b9b456e5c | # *** Create a Channel Type Role with full permissions for Chat ***
# Code based on https://www.twilio.com/docs/chat/rest/roles
# Download Python 3 from https://www.python.org/downloads/
# Download the Twilio helper library from https://www.twilio.com/docs/python/install
import os
from twilio.rest import Client
#from datetime import datetime | not required for this examples
import logging
#write requests & responses from Twilio to log file, useful for debugging:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
filename='/usr/local/twilio/python/python3-twilio-sdkv6-examples/chat/logs/twilio_chat.log',
filemode='a')
# Your Account Sid and Auth Token from twilio.com/console & stored in Mac OS ~/.bash_profile in this example
account_sid = os.environ.get('TWILIO_ACCOUNT_SID')
auth_token = os.environ.get('TWILIO_AUTH_TOKEN')
client = Client(account_sid, auth_token)
# A list of chat role parameters & their permissible values
role = client.chat.services('ISxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx') \
.roles \
.create(
friendly_name='Channel - Full Permissions',
type='channel',
permission=['addMember', 'deleteAnyMessage', 'deleteOwnMessage', 'destroyChannel', 'editAnyMessage', 'editAnyMessageAttributes', 'editOwnMessage', 'editOwnMessageAttributes', 'editChannelName', 'editChannelAttributes', 'inviteMember', 'leaveChannel', 'removeMember', 'sendMessage', 'sendMediaMessage']
        #note: 'editAnyUserInfo' and 'editOwnUserInfo' are not accepted for channel-type roles
)
#print list of all chat role properties to the console, useful for seeing what info is available to work with
print(role.account_sid)
print(role.date_created)
print(role.date_updated)
print(role.friendly_name)
print(role.permissions)
print(role.service_sid)
print(role.sid)
print(role.type)
print(role.url)
#create variable for this record
cdr = (role.sid)
#open *.log file with cdr var as filename...
with open("/usr/local/twilio/python/python3-twilio-sdkv6-examples/chat/logs/" + str(cdr) + ".log", "a") as f:
    #write list of all chat role properties to the above file...
    f.write("Account SID : " + str(role.account_sid) + "\n")
    f.write("Date Created : " + str(role.date_created) + "\n")
    f.write("Date Updated : " + str(role.date_updated) + "\n")
    f.write("Friendly Name : " + str(role.friendly_name) + "\n")
    f.write("Permissions : " + str(role.permissions) + "\n")
    f.write("Service SID : " + str(role.service_sid) + "\n")
    f.write("SID : " + str(role.sid) + "\n")
    f.write("Type : " + str(role.type) + "\n")
    f.write("URL : " + str(role.url) + "\n") |
py | 1a4d48a31789d9fae4bd46533007f0ae0ea480b2 | """
Control-structure practice:
1. Selection: area and perimeter of a triangle
2. Loops: primality test, greatest common divisor and least common multiple
"""
import math
class Triangle:
def __init__(self, a, b, c):
if a + b > c and a + c > b and b + c > a:
self.a = a
self.b = b
self.c = c
else:
            print('These sides cannot form a triangle')
    def perimeter(self):
        return self.a + self.b + self.c
    def area(self):
        # Heron's formula uses the semi-perimeter, half of perimeter()
        p = self.perimeter() / 2
        area = math.sqrt(p * (p - self.a) * (p - self.b) * (p - self.c))
        return area
def draw_tri(self):
row = int(self.perimeter())
for i in range(row):
for _ in range(i + 1):
print('*', end='')
print()
for i in range(row):
for j in range(row):
if j < row - i - 1:
print(' ', end='')
else:
print('*', end='')
print()
for i in range(row):
for _ in range(row - i - 1):
print(' ', end='')
for _ in range(2 * i + 1):
print('*', end='')
print()
class IsNum:
def __init__(self):
pass
@staticmethod
def is_prime(a):
end = int(math.sqrt(a))
is_prime = True
        # start from 2: every integer is divisible by 1, which would wrongly mark all numbers composite
        for x in range(2, end + 1):
            if a % x == 0:
                is_prime = False
                break
        if is_prime and a != 1:
            print('{0} is prime'.format(a))
        else:
            print('{0} is not prime'.format(a))
@staticmethod
def mm_judge(a, b):
x = int(a)
y = int(b)
if x > y:
x, y = y, x
for factor in range(x, 0, -1):
if x % factor == 0 and y % factor == 0:
                print('The greatest common divisor of %d and %d is %d' % (x, y, factor))
                print('The least common multiple of %d and %d is %d' % (x, y, x * y // factor))
break
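        # equivalent result via the standard library (sketch): g = math.gcd(x, y); lcm = x * y // g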
if __name__ == "__main__":
tri1 = Triangle(3, 5, 7)
print(tri1.perimeter(), tri1.area())
tri2 = Triangle(3, 4, 10)
tri1.draw_tri()
IsNum.is_prime(11)
IsNum.is_prime(12)
IsNum.mm_judge(3, 5)
|
py | 1a4d4a1d443ca47d82ee109728f33d00e77c01b3 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for pipeline."""
from taskflow.flow import Flow
from taskflow.patterns import linear_flow
class PipelineBase(object):
"""Base class of pipeline.
Subclasses must implement validate_kwargs and do_build_flow methods.
"""
def __init__(self, **kwargs):
self._kwargs = kwargs
self._flow = self.build_flow(**kwargs)
def build_flow(self, **kwargs):
"""Make the task flow based on kwargs."""
self.validate_kwargs(**kwargs)
    # Call do_build_flow implemented by subclass to make the flow.
flow = self.do_build_flow(**kwargs)
if not isinstance(flow, Flow):
raise TypeError('Return type must be taskflow.flow.Flow.')
return flow
def validate_kwargs(self, **kwargs):
"""Abstract method, subclass must implement this method to validate
kwargs.
"""
raise NotImplementedError('Subclass must implement abstract method')
def do_build_flow(self, **kwargs):
"""Abstract method, subclass must implment this method and return a task
instance.
"""
raise NotImplementedError('Subclass must implement abstract method')
@property
def flow(self):
return self._flow
@property
def name(self):
return self.__class__.__name__
@property
def kwargs(self):
return self._kwargs
class EmptyPipeline(PipelineBase):
def do_build_flow(self, **kwargs):
flow = linear_flow.Flow('empty-pipeline')
return flow
def validate_kwargs(self, **kwargs):
pass
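# Hypothetical usage sketch: EmptyPipeline().flow is an empty linear flow that a
# taskflow engine can run, e.g.
#   from taskflow import engines
#   engines.load(EmptyPipeline().flow).run()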
|
py | 1a4d4a77488ac254c259be981eb7b07d3e94b66e | from __future__ import absolute_import
from djaxelrod.celery import app as celery_app
|
py | 1a4d4abce1270ccd6521a7ed3c60f307514c08f4 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import NamedTuple, Optional
import torch
import torch.nn as nn
from ..cameras import get_world_to_view_transform
from .rasterize_meshes import rasterize_meshes
# Class to store the outputs of mesh rasterization
class Fragments(NamedTuple):
pix_to_face: torch.Tensor
zbuf: torch.Tensor
bary_coords: torch.Tensor
dists: torch.Tensor
# Class to store the mesh rasterization params with defaults
class RasterizationSettings:
__slots__ = [
"image_size",
"blur_radius",
"faces_per_pixel",
"bin_size",
"max_faces_per_bin",
"perspective_correct",
"cull_backfaces",
]
def __init__(
self,
image_size: int = 256,
blur_radius: float = 0.0,
faces_per_pixel: int = 1,
bin_size: Optional[int] = None,
max_faces_per_bin: Optional[int] = None,
perspective_correct: bool = False,
cull_backfaces: bool = False,
):
self.image_size = image_size
self.blur_radius = blur_radius
self.faces_per_pixel = faces_per_pixel
self.bin_size = bin_size
self.max_faces_per_bin = max_faces_per_bin
self.perspective_correct = perspective_correct
self.cull_backfaces = cull_backfaces
class MeshRasterizer(nn.Module):
"""
    This class implements methods for rasterizing a batch of heterogeneous
Meshes.
"""
def __init__(self, cameras=None, raster_settings=None):
"""
Args:
cameras: A cameras object which has a `transform_points` method
which returns the transformed points after applying the
world-to-view and view-to-screen
transformations.
            raster_settings: the parameters for rasterization. This should be a
                RasterizationSettings object.
All these initial settings can be overridden by passing keyword
arguments to the forward function.
"""
super().__init__()
if raster_settings is None:
raster_settings = RasterizationSettings()
self.cameras = cameras
self.raster_settings = raster_settings
def transform(self, meshes_world, **kwargs) -> torch.Tensor:
"""
Args:
meshes_world: a Meshes object representing a batch of meshes with
vertex coordinates in world space.
Returns:
meshes_screen: a Meshes object with the vertex positions in screen
space
NOTE: keeping this as a separate function for readability but it could
be moved into forward.
"""
cameras = kwargs.get("cameras", self.cameras)
if cameras is None:
msg = "Cameras must be specified either at initialization \
or in the forward pass of MeshRasterizer"
raise ValueError(msg)
verts_world = meshes_world.verts_padded()
verts_world_packed = meshes_world.verts_packed()
verts_screen = cameras.transform_points(verts_world, **kwargs)
# NOTE: Retaining view space z coordinate for now.
# TODO: Revisit whether or not to transform z coordinate to [-1, 1] or
# [0, 1] range.
view_transform = get_world_to_view_transform(R=cameras.R, T=cameras.T)
verts_view = view_transform.transform_points(verts_world)
verts_screen[..., 2] = verts_view[..., 2]
# Offset verts of input mesh to reuse cached padded/packed calculations.
pad_to_packed_idx = meshes_world.verts_padded_to_packed_idx()
verts_screen_packed = verts_screen.view(-1, 3)[pad_to_packed_idx, :]
verts_packed_offset = verts_screen_packed - verts_world_packed
return meshes_world.offset_verts(verts_packed_offset)
def forward(self, meshes_world, **kwargs) -> Fragments:
"""
Args:
meshes_world: a Meshes object representing a batch of meshes with
coordinates in world space.
Returns:
Fragments: Rasterization outputs as a named tuple.
"""
meshes_screen = self.transform(meshes_world, **kwargs)
raster_settings = kwargs.get("raster_settings", self.raster_settings)
# TODO(jcjohns): Should we try to set perspective_correct automatically
# based on the type of the camera?
pix_to_face, zbuf, bary_coords, dists = rasterize_meshes(
meshes_screen,
image_size=raster_settings.image_size,
blur_radius=raster_settings.blur_radius,
faces_per_pixel=raster_settings.faces_per_pixel,
bin_size=raster_settings.bin_size,
max_faces_per_bin=raster_settings.max_faces_per_bin,
perspective_correct=raster_settings.perspective_correct,
cull_backfaces=raster_settings.cull_backfaces,
)
return Fragments(
pix_to_face=pix_to_face, zbuf=zbuf, bary_coords=bary_coords, dists=dists
)
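# Hypothetical usage sketch (camera and mesh construction assumed elsewhere):
#
#   raster_settings = RasterizationSettings(image_size=512, faces_per_pixel=1)
#   rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
#   fragments = rasterizer(meshes)  # Fragments(pix_to_face, zbuf, bary_coords, dists)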
|
py | 1a4d4ad756309f812aecd41d0cbb3c2f223317ca | import logging
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from dojo.filters import ProductTypeFilter
from dojo.forms import Product_TypeForm, Product_TypeProductForm, Delete_Product_TypeForm
from dojo.models import Product_Type
from dojo.utils import get_page_items, add_breadcrumb
from dojo.notifications.helper import create_notification
from django.db.models import Count, Q
from django.db.models.query import QuerySet
logger = logging.getLogger(__name__)
"""
Jay
Status: in prod
Product Type views
"""
def product_type(request):
    # look up names outside the prefetch query so the complex prefetch does not execute twice
name_words = Product_Type.objects.all().values_list('name', flat=True)
prod_types = Product_Type.objects.all()
ptl = ProductTypeFilter(request.GET, queryset=prod_types)
pts = get_page_items(request, ptl.qs, 25)
pts.object_list = prefetch_for_product_type(pts.object_list)
add_breadcrumb(title="Product Type List", top_level=True, request=request)
return render(request, 'dojo/product_type.html', {
'name': 'Product Type List',
'metric': False,
'user': request.user,
'pts': pts,
'ptl': ptl,
'name_words': name_words})
def prefetch_for_product_type(prod_types):
prefetch_prod_types = prod_types
if isinstance(prefetch_prod_types, QuerySet): # old code can arrive here with prods being a list because the query was already executed
active_findings_query = Q(prod_type__engagement__test__finding__active=True,
prod_type__engagement__test__finding__mitigated__isnull=True,
prod_type__engagement__test__finding__verified=True,
prod_type__engagement__test__finding__false_p=False,
prod_type__engagement__test__finding__duplicate=False,
prod_type__engagement__test__finding__out_of_scope=False)
prefetch_prod_types = prefetch_prod_types.prefetch_related('authorized_users')
prefetch_prod_types = prefetch_prod_types.annotate(findings_count=Count('prod_type__engagement__test__finding__id', filter=active_findings_query))
prefetch_prod_types = prefetch_prod_types.annotate(prod_count=Count('prod_type', distinct=True))
prefetch_prod_types = prefetch_prod_types.annotate(user_count=Count('authorized_users', distinct=True))
else:
logger.debug('unable to prefetch because query was already executed')
return prefetch_prod_types
@user_passes_test(lambda u: u.is_staff)
def add_product_type(request):
form = Product_TypeForm()
if request.method == 'POST':
form = Product_TypeForm(request.POST)
if form.is_valid():
form.save()
messages.add_message(request,
messages.SUCCESS,
'Product type added successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('product_type'))
add_breadcrumb(title="Add Product Type", top_level=False, request=request)
return render(request, 'dojo/new_product_type.html', {
'name': 'Add Product Type',
'metric': False,
'user': request.user,
'form': form,
})
@user_passes_test(lambda u: u.is_staff)
def edit_product_type(request, ptid):
pt = get_object_or_404(Product_Type, pk=ptid)
pt_form = Product_TypeForm(instance=pt)
delete_pt_form = Delete_Product_TypeForm(instance=pt)
if request.method == "POST" and request.POST.get('edit_product_type'):
pt_form = Product_TypeForm(request.POST, instance=pt)
if pt_form.is_valid():
pt = pt_form.save()
messages.add_message(
request,
messages.SUCCESS,
'Product type updated successfully.',
extra_tags="alert-success",
)
return HttpResponseRedirect(reverse("product_type"))
if request.method == "POST" and request.POST.get("delete_product_type"):
form2 = Delete_Product_TypeForm(request.POST, instance=pt)
if form2.is_valid():
pt.delete()
messages.add_message(
request,
messages.SUCCESS,
"Product type Deleted successfully.",
extra_tags="alert-success",
)
create_notification(event='other',
title='Deletion of %s' % pt.name,
description='The product type "%s" was deleted by %s' % (pt.name, request.user),
url=request.build_absolute_uri(reverse('product_type')),
icon="exclamation-triangle")
return HttpResponseRedirect(reverse("product_type"))
add_breadcrumb(title="Edit Product Type", top_level=False, request=request)
return render(request, 'dojo/edit_product_type.html', {
'name': 'Edit Product Type',
'metric': False,
'user': request.user,
'pt_form': pt_form,
'pt': pt})
@user_passes_test(lambda u: u.is_staff)
def add_product_to_product_type(request, ptid):
pt = get_object_or_404(Product_Type, pk=ptid)
form = Product_TypeProductForm(initial={'prod_type': pt})
add_breadcrumb(title="New %s Product" % pt.name, top_level=False, request=request)
return render(request, 'dojo/new_product.html',
{'form': form,
})
|
py | 1a4d4b642bcdbcc59759f868d9276df93a75ca0b | from typing import Any, Dict, List, Optional
import aiohttp
from chia.cmds.units import units
from chia.consensus.block_record import BlockRecord
from chia.rpc.farmer_rpc_client import FarmerRpcClient
from chia.rpc.full_node_rpc_client import FullNodeRpcClient
from chia.rpc.wallet_rpc_client import WalletRpcClient
from chia.util.config import load_config
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.ints import uint16
from chia.util.misc import format_bytes, format_minutes
from chia.util.network import is_localhost
SECONDS_PER_BLOCK = (24 * 3600) / 4608  # mainnet targets 4608 blocks per day
async def get_harvesters(farmer_rpc_port: int) -> Optional[Dict[str, Any]]:
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
plots = await farmer_client.get_harvesters()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'harvester' {e}")
return None
farmer_client.close()
await farmer_client.await_closed()
return plots
async def get_blockchain_state(rpc_port: int) -> Optional[Dict[str, Any]]:
blockchain_state = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
client.close()
await client.await_closed()
return blockchain_state
async def get_average_block_time(rpc_port: int) -> float:
try:
blocks_to_compare = 500
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
curr: Optional[BlockRecord] = blockchain_state["peak"]
if curr is None or curr.height < (blocks_to_compare + 100):
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
while curr is not None and curr.height > 0 and not curr.is_transaction_block:
curr = await client.get_block_record(curr.prev_hash)
if curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
past_curr = await client.get_block_record_by_height(curr.height - blocks_to_compare)
while past_curr is not None and past_curr.height > 0 and not past_curr.is_transaction_block:
past_curr = await client.get_block_record(past_curr.prev_hash)
if past_curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
client.close()
await client.await_closed()
return (curr.timestamp - past_curr.timestamp) / (curr.height - past_curr.height)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
async def get_wallets_stats(wallet_rpc_port: int) -> Optional[Dict[str, Any]]:
amounts = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if wallet_rpc_port is None:
wallet_rpc_port = config["wallet"]["rpc_port"]
wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
amounts = await wallet_client.get_farmed_amount()
#
# Don't catch any exceptions, the caller will handle it
#
finally:
wallet_client.close()
await wallet_client.await_closed()
return amounts
async def is_farmer_running(farmer_rpc_port: int) -> bool:
is_running = False
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
await farmer_client.get_connections()
is_running = True
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
farmer_client.close()
await farmer_client.await_closed()
return is_running
async def get_challenges(farmer_rpc_port: int) -> Optional[List[Dict[str, Any]]]:
signage_points = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
signage_points = await farmer_client.get_signage_points()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
farmer_client.close()
await farmer_client.await_closed()
return signage_points
async def challenges(farmer_rpc_port: int, limit: int) -> None:
signage_points = await get_challenges(farmer_rpc_port)
if signage_points is None:
return None
signage_points.reverse()
if limit != 0:
signage_points = signage_points[:limit]
for signage_point in signage_points:
print(
(
f"Hash: {signage_point['signage_point']['challenge_hash']} "
f"Index: {signage_point['signage_point']['signage_point_index']}"
)
)
async def summary(rpc_port: int, wallet_rpc_port: int, harvester_rpc_port: int, farmer_rpc_port: int) -> None:
all_harvesters = await get_harvesters(farmer_rpc_port)
blockchain_state = await get_blockchain_state(rpc_port)
farmer_running = await is_farmer_running(farmer_rpc_port)
wallet_not_ready: bool = False
wallet_not_running: bool = False
amounts = None
try:
amounts = await get_wallets_stats(wallet_rpc_port)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
wallet_not_running = True
else:
wallet_not_ready = True
print("Farming status: ", end="")
if blockchain_state is None:
print("Not available")
elif blockchain_state["sync"]["sync_mode"]:
print("Syncing")
elif not blockchain_state["sync"]["synced"]:
print("Not synced or not connected to peers")
elif not farmer_running:
print("Not running")
else:
print("Farming")
if amounts is not None:
print(f"Total chia farmed: {amounts['farmed_amount'] / units['chia']}")
print(f"User transaction fees: {amounts['fee_amount'] / units['chia']}")
print(f"Block rewards: {(amounts['farmer_reward_amount'] + amounts['pool_reward_amount']) / units['chia']}")
print(f"Last height farmed: {amounts['last_height_farmed']}")
class PlotStats:
total_plot_size = 0
total_plots = 0
if all_harvesters is not None:
harvesters_local: dict = {}
harvesters_remote: dict = {}
for harvester in all_harvesters["harvesters"]:
ip = harvester["connection"]["host"]
if is_localhost(ip):
harvesters_local[harvester["connection"]["node_id"]] = harvester
else:
if ip not in harvesters_remote:
harvesters_remote[ip] = {}
harvesters_remote[ip][harvester["connection"]["node_id"]] = harvester
def process_harvesters(harvester_peers_in: dict):
for harvester_peer_id, plots in harvester_peers_in.items():
total_plot_size_harvester = sum(map(lambda x: x["file_size"], plots["plots"]))
PlotStats.total_plot_size += total_plot_size_harvester
PlotStats.total_plots += len(plots["plots"])
print(f" {len(plots['plots'])} plots of size: {format_bytes(total_plot_size_harvester)}")
if len(harvesters_local) > 0:
print(f"Local Harvester{'s' if len(harvesters_local) > 1 else ''}")
process_harvesters(harvesters_local)
for harvester_ip, harvester_peers in harvesters_remote.items():
print(f"Remote Harvester{'s' if len(harvester_peers) > 1 else ''} for IP: {harvester_ip}")
process_harvesters(harvester_peers)
print(f"Plot count for all harvesters: {PlotStats.total_plots}")
print("Total size of plots: ", end="")
print(format_bytes(PlotStats.total_plot_size))
else:
print("Plot count: Unknown")
print("Total size of plots: Unknown")
if blockchain_state is not None:
print("Estimated network space: ", end="")
print(format_bytes(blockchain_state["space"]))
else:
print("Estimated network space: Unknown")
minutes = -1
if blockchain_state is not None and all_harvesters is not None:
proportion = PlotStats.total_plot_size / blockchain_state["space"] if blockchain_state["space"] else -1
minutes = int((await get_average_block_time(rpc_port) / 60) / proportion) if proportion else -1
if all_harvesters is not None and PlotStats.total_plots == 0:
print("Expected time to win: Never (no plots)")
else:
print("Expected time to win: " + format_minutes(minutes))
if amounts is None:
if wallet_not_running:
print("For details on farmed rewards and fees you should run 'chia start wallet' and 'chia wallet show'")
elif wallet_not_ready:
print("For details on farmed rewards and fees you should run 'chia wallet show'")
else:
print("Note: log into your key using 'chia wallet show' to see rewards for each key")
|
py | 1a4d4bc8f1a12e9b7ec144b73e1fe07a1aa9b3b3 |
# Fluffy Happiness: Test code to grab pictures of cute animals from
# the Internet
# Usage: >> python get_fluffy.py [options]
# V.A. Moss ([email protected])
__author__ = "V.A. Moss"
__date__ = "$22-oct-2018 22:00:00$"
__version__ = "0.2"
# Imports
import os
import sys
import urllib.request
import urllib.error
import urllib.parse
import ssl
from random import randint
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from argparse import ArgumentParser, RawTextHelpFormatter
parser = ArgumentParser(formatter_class=RawTextHelpFormatter)
parser.add_argument('-k', '--keywords',
default='cute fluffy animal',
type=str,
help='Specify which kind of search to do(default: %(default)s)')
# Parse the arguments above
args = parser.parse_args()
# Path format
path = 'https://imgur.com/search/score?q=%s' % ('+'.join(args.keywords.split()))
# ONLY GET PUPPIES!!!!!
path = 'https://imgur.com/search/score?q=%s' % ('+puppy')
# Get data from website
request = urllib.request.Request(path)
response = urllib.request.urlopen(request, context=ssl._create_unverified_context())
read_response = response.readlines()
# Possible cuteness
possible = []
for line in read_response:
line = line.decode('utf-8')
if '<img alt="" src="' in line:
image_url = line.split('src="//')[1].split('"')[0]
possible.append('http://'+image_url)
# Now select a random image to show
if not possible:
    sys.exit('No images found for this search.')
rand_int = randint(0, len(possible) - 1)
print("I've selected image #%i: %s" % (rand_int, possible[rand_int]))
print ("Prepare to cuddle ......")
# Download the image and display it
# note: imgur adds a b to names for some reason.
img_name = (possible[rand_int].split('b.jpg')[0]+'.jpg').split('/')[-1]
image_path = 'https://i.imgur.com/' + img_name
urllib.request.urlretrieve('%s' % image_path,'%s' % img_name)
# Show the image in matplotlib
img = mpimg.imread(img_name)
imgplot = plt.imshow(img)
plt.show()
|
py | 1a4d4bfb38755f57b0065a78ec01518b4237cd0a | import torch
import torch.nn as nn
import torch.nn.functional as F
from autoencoder import Encoder, Decoder
class BasicBlock(torch.nn.Module):
def __init__(self, filters=64):
'residual basic block'
super().__init__()
self.residual = torch.nn.Sequential(
nn.Conv2d(filters, filters, 3, 1, padding=1, bias=False),
nn.BatchNorm2d(filters),
nn.ReLU(),
nn.Conv2d(filters, filters, 3, 1, padding=1, bias=False),
nn.BatchNorm2d(filters)
)
def forward(self, x):
return x + self.residual(x)
class ELU_BatchNorm2d(torch.nn.Module):
def __init__(self, filters=64):
super().__init__()
self.actnorm = torch.nn.Sequential(
torch.nn.ELU(),
torch.nn.BatchNorm2d(filters),
)
def forward(self, x):
return self.actnorm(x)
class Res_Encoder(Encoder):
def __init__(self, filters=[4, 8, 16, 32], bottleneck=10):
super().__init__()
self.activate = nn.ELU()
self.main = nn.Sequential(
nn.Conv2d(1, filters[0], 3, 1, padding=1),
self.activate,
BasicBlock(filters[0]),
ELU_BatchNorm2d(filters[0]),
nn.Conv2d(filters[0], filters[1], 5, 2),
self.activate,
BasicBlock(filters[1]),
ELU_BatchNorm2d(filters[1]),
nn.Conv2d(filters[1], filters[2], 5, 2),
self.activate,
BasicBlock(filters[2]),
ELU_BatchNorm2d(filters[2]),
nn.Conv2d(filters[2], filters[3], 3, 2),
self.activate
)
self.mean = nn.Conv2d(filters[3], bottleneck, 1, 1)
self.logvar = nn.Conv2d(filters[3], bottleneck, 1, 1)
class Res_Decoder(Decoder):
def __init__(self, filters=[4, 8, 16, 32], bottleneck=10):
super().__init__()
self.activate = nn.ELU()
self.main = nn.Sequential(
nn.Conv2d(bottleneck, filters[-1], 1, 1, bias=False),
self.activate,
BasicBlock(filters[-1]),
ELU_BatchNorm2d(filters[-1]),
nn.ConvTranspose2d(filters[-1], filters[-2], 3, 2, output_padding=1),
self.activate,
BasicBlock(filters[-2]),
ELU_BatchNorm2d(filters[-2]),
nn.ConvTranspose2d(filters[-2], filters[-3], 5, 2, output_padding=1),
self.activate,
BasicBlock(filters[-3]),
ELU_BatchNorm2d(filters[-3]),
nn.ConvTranspose2d(filters[-3], filters[-4], 5, 2, output_padding=1),
self.activate,
BasicBlock(filters[-4]),
ELU_BatchNorm2d(filters[-4]),
nn.Conv2d(filters[-4], 1, 3, 1, padding=1),
nn.Sigmoid()
)
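# Hypothetical shape check (assumes 28x28 single-channel inputs, which the
# stride/kernel choices suggest):
#
#   enc = Res_Encoder()
#   x = torch.randn(1, 1, 28, 28)
#   h = enc.main(x)                          # (1, 32, 1, 1)
#   mu, logvar = enc.mean(h), enc.logvar(h)  # each (1, 10, 1, 1)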
|
py | 1a4d4cabb51dce750045a702ea6ae6a2a370b1ec | import setuptools
setuptools.setup(
name="ajax-examples",
version="0.0.2",
author="Ian Jones",
description="Ajax examples",
    include_package_data=True,
packages=['ajax_examples'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=['openpyxl>=3.0.7'],
)
|
py | 1a4d4d4c07eef1cccc08472e82997dbbc4829dd7 | import datetime as dt
import cx_Oracle
from typing import List
from src.typeDefs.metricsDataRecord import IMetricsDataRecord
def getIexRtmBlockWiseData(appDbConnStr: str, col_attributes: str, startDt: dt.datetime, endDt: dt.datetime) -> List[IMetricsDataRecord]:
targetColumns = ['TRUNC(TIME_STAMP)', 'COL_ATTRIBUTES', 'DATA_VALUE']
metricsFetchSql = """
select {0} from
mo_warehouse.iex_rtm where time_stamp >= :1
and time_stamp < :2
and col_attributes = :3
""".format(','.join(targetColumns), col_attributes)
    # initialise records to be returned
dataRecords: List[IMetricsDataRecord] = []
colNames = []
dbRows = []
dbConn = None
dbCur = None
try:
# get connection with raw data table
dbConn = cx_Oracle.connect(appDbConnStr)
# get cursor and execute fetch sql
dbCur = dbConn.cursor()
dbCur.execute(metricsFetchSql, (startDt, endDt, col_attributes))
colNames = [row[0] for row in dbCur.description]
# fetch all rows
dbRows = dbCur.fetchall()
except Exception as err:
dbRows = []
print('Error while fetching iex rtm data between dates')
print(err)
finally:
# closing database cursor and connection
if dbCur is not None:
dbCur.close()
if dbConn is not None:
dbConn.close()
if (False in [(col in targetColumns) for col in colNames]):
# all desired columns not fetched, hence return empty
return []
# iterate through each row to populate result outage rows
for row in dbRows:
timeStamp: IMetricsDataRecord["time_stamp"] = row[colNames.index(
'TRUNC(TIME_STAMP)')]
metric: IMetricsDataRecord["col_attributes"] = row[colNames.index(
'COL_ATTRIBUTES')]
val: IMetricsDataRecord["data_value"] = row[colNames.index(
'DATA_VALUE')]
sampl: IMetricsDataRecord = {
"time_stamp": timeStamp,
"metric_name": metric,
"data_value": val
}
dataRecords.append(sampl)
return dataRecords
|
py | 1a4d4d822a7e543d71710ae90a76f018d9642813 | # -*- coding: utf-8 -*-
"""KPI views for creating and viewing the kpis."""
import logging
from typing import Any, Dict, List, Optional, Tuple
from flask.blueprints import Blueprint
from flask.globals import request
from flask.json import jsonify
from flask_sqlalchemy import Pagination
from sqlalchemy.orm.attributes import flag_modified
from chaos_genius.controllers.dashboard_controller import (
create_dashboard_kpi_mapper,
disable_mapper_for_kpi_ids,
edit_kpi_dashboards,
enable_mapper_for_kpi_ids,
get_dashboard_list_by_ids,
get_mapper_obj_by_kpi_ids,
kpi_dashboard_mapper_dict,
)
from chaos_genius.controllers.kpi_controller import (
delete_anomaly_output_for_kpi,
delete_rca_output_for_kpi,
get_anomaly_count,
get_kpi_data_from_id,
)
from chaos_genius.core.rca.constants import TIME_RANGES_BY_KEY
from chaos_genius.core.rca.rca_utils.api_utils import kpi_aggregation, kpi_line_data
from chaos_genius.core.utils.kpi_validation import validate_kpi
from chaos_genius.databases.db_utils import chech_editable_field
from chaos_genius.databases.models.dashboard_kpi_mapper_model import DashboardKpiMapper
from chaos_genius.databases.models.data_source_model import DataSource
from chaos_genius.databases.models.kpi_model import Kpi
from chaos_genius.extensions import db
from chaos_genius.settings import DEEPDRILLS_ENABLED_TIME_RANGES
from chaos_genius.utils.pagination import pagination_args, pagination_info
from chaos_genius.utils.search import SEARCH_PARAM_NAME, make_search_filter
blueprint = Blueprint("api_kpi", __name__)
logger = logging.getLogger(__name__)
@blueprint.route("/", methods=["GET", "POST"]) # TODO: Remove this
@blueprint.route("", methods=["GET", "POST"])
def kpi():
"""List KPIs."""
# Handle logging in
if request.method == "POST":
data = request.get_json()
if data is None:
return (
jsonify(
{
"error": "The request payload is not in JSON format",
"status": "failure",
}
),
400,
)
data["dimensions"] = [] if data["dimensions"] is None else data["dimensions"]
if data.get("kpi_query", "").strip():
data["kpi_query"] = data["kpi_query"].strip()
# remove trailing semicolon
if data["kpi_query"][-1] == ";":
data["kpi_query"] = data["kpi_query"][:-1]
new_kpi = Kpi(
name=data.get("name"),
is_certified=data.get("is_certified"),
data_source=data.get("data_source"),
kpi_type=data.get("dataset_type"),
kpi_query=data.get("kpi_query"),
schema_name=data.get("schema_name"),
table_name=data.get("table_name"),
metric=data.get("metric"),
aggregation=data.get("aggregation"),
datetime_column=data.get("datetime_column"),
filters=data.get("filters"),
dimensions=data.get("dimensions"),
)
# Perform KPI Validation
status, message, tz_aware = validate_kpi(new_kpi.as_dict, check_tz_aware=True)
if status is not True:
return jsonify(
{"error": message, "status": "failure", "is_critical": "true"}
)
new_kpi.timezone_aware = tz_aware
new_kpi.save(commit=True)
# Add the dashboard id 0 to the kpi
dashboard_list = data.get("dashboards", []) + [0]
dashboard_list = list(set(dashboard_list))
create_dashboard_kpi_mapper(dashboard_list, [new_kpi.id])
# TODO: Fix circular import error
from chaos_genius.jobs.anomaly_tasks import ready_rca_task
# run rca as soon as new KPI is added
rca_task = ready_rca_task(new_kpi.id)
if rca_task is None:
            logger.warning(
"Could not run RCA task since newly added KPI was not found: "
+ f"{new_kpi.id}"
)
else:
rca_task.apply_async()
return jsonify(
{
"data": {"kpi_id": new_kpi.id},
"message": f"KPI {new_kpi.name} has been created successfully.",
"status": "success",
}
)
elif request.method == "GET":
# TODO: abstract this filter params extraction logic
dashboard_ids_list = request.args.getlist("dashboard_id")
datasource_types_list = request.args.getlist("datasource_type")
paginate = request.args.get("paginate") != "false"
page, per_page = pagination_args(request)
search_query, search_filter = make_search_filter(request, Kpi.name)
filters = [Kpi.active == True] # noqa: E712
if search_filter is not None:
filters.append(search_filter)
if datasource_types_list and datasource_types_list != [""]:
filters.append(
DataSource.connection_type.in_(
[
datasource_type
for datasource_types in datasource_types_list
for datasource_type in datasource_types.split(",")
]
)
)
kpis: List[Tuple[Kpi, DataSource]]
kpis_paginated: Optional[Pagination] = None
if dashboard_ids_list and dashboard_ids_list != [""]:
dashboard_ids = [
int(dashboard_id)
for dashboard_ids in dashboard_ids_list
for dashboard_id in dashboard_ids.split(",")
]
kpis_query = (
db.session.query(Kpi, DataSource)
.join(DataSource, Kpi.data_source == DataSource.id)
.join(DashboardKpiMapper, Kpi.id == DashboardKpiMapper.kpi)
.filter(
*filters,
DashboardKpiMapper.active == True, # noqa: E712
DashboardKpiMapper.dashboard.in_(dashboard_ids),
)
)
# TODO: refactor this to reduce code duplication
if paginate:
kpis_paginated_ = kpis_query.order_by(Kpi.created_at.desc()).paginate(
page=page, per_page=per_page
)
kpis = kpis_paginated_.items
kpis_paginated = kpis_paginated_
else:
kpis = kpis_query.all()
else:
kpis_query = (
db.session.query(Kpi, DataSource)
.join(DataSource, Kpi.data_source == DataSource.id)
.filter(*filters) # noqa: E712
)
if paginate:
kpis_paginated_ = kpis_query.order_by(Kpi.created_at.desc()).paginate(
page=page, per_page=per_page
)
kpis = kpis_paginated_.items
kpis_paginated = kpis_paginated_
else:
kpis = kpis_query.all()
kpi_dashboard_mapper = kpi_dashboard_mapper_dict(
[kpi.id for kpi, _ in kpis], as_dict=True
)
kpi_infos: List[Dict[str, Any]] = []
for row in kpis:
kpi_info = row[0].safe_dict
data_source_info = row[1].safe_dict
kpi_info["data_source"] = data_source_info
dashboards = kpi_dashboard_mapper[kpi_info["id"]]
kpi_info["dashboards"] = dashboards
kpi_infos.append(kpi_info)
return jsonify(
{
"count": len(kpi_infos),
"data": kpi_infos,
"pagination": (
pagination_info(kpis_paginated)
if kpis_paginated is not None
else None
),
SEARCH_PARAM_NAME: search_query,
}
)
@blueprint.route("/get-dashboard-list", methods=["GET"])
def get_all_kpis():
"""List KPIs for a particular dashboard."""
status, message = "success", ""
timeline = request.args.get("timeline", "last_7_days")
dashboard_ids_list = request.args.getlist("dashboard_id")
page, per_page = pagination_args(request)
search_query, search_filter = make_search_filter(request, Kpi.name)
filters = [Kpi.active == True] # noqa: E712
if search_filter is not None:
filters.append(search_filter)
kpis_paginated: Optional[Pagination] = None
ret = []
try:
if dashboard_ids_list and dashboard_ids_list != [""]:
dashboard_ids = [
int(dashboard_id)
for dashboard_ids in dashboard_ids_list
for dashboard_id in dashboard_ids.split(",")
]
kpis_paginated_: Pagination = (
Kpi.query.join(DashboardKpiMapper, DashboardKpiMapper.kpi == Kpi.id)
.filter(
*filters,
DashboardKpiMapper.active == True, # noqa: E712
DashboardKpiMapper.dashboard.in_(dashboard_ids),
)
.order_by(Kpi.created_at.desc())
.paginate(page=page, per_page=per_page)
)
else:
kpis_paginated_: Pagination = (
Kpi.query.filter(*filters)
.order_by(Kpi.created_at.desc())
.paginate(page=page, per_page=per_page)
)
# this is only required to let the type checker know that this is not None here
kpis_paginated = kpis_paginated_
metrics = ["name", "metric", "id"]
for kpi in kpis_paginated.items:
info = {key: getattr(kpi, key) for key in metrics}
_, _, aggregate_data = kpi_aggregation(kpi.id, timeline)
info["prev"] = aggregate_data["aggregation"][0]["value"]
info["current"] = aggregate_data["aggregation"][1]["value"]
info["change"] = aggregate_data["aggregation"][2]["value"]
info["percentage_change"] = aggregate_data["aggregation"][3]["value"]
info["display_value_prev"] = TIME_RANGES_BY_KEY[timeline][
"last_period_name"
]
info["display_value_current"] = TIME_RANGES_BY_KEY[timeline][
"current_period_name"
]
info["anomaly_count"] = get_anomaly_count(kpi.id, timeline)
_, _, info["graph_data"] = kpi_line_data(kpi.id)
ret.append(info)
except Exception as e: # noqa: E722
status = "failure"
message = str(e)
logger.error(message, exc_info=True)
return jsonify(
{
"data": ret,
"message": message,
"status": status,
"pagination": pagination_info(kpis_paginated)
if kpis_paginated is not None
else None,
SEARCH_PARAM_NAME: search_query,
}
)
@blueprint.route("/get-timecuts-list", methods=["GET"])
def get_timecuts_list():
"""Returns all active timecuts."""
status, message = "success", ""
ret = {}
try:
enabled_cuts = [
{**{k: v for k, v in value.items() if k != "function"}, "id": key}
for key, value in TIME_RANGES_BY_KEY.items()
if key in DEEPDRILLS_ENABLED_TIME_RANGES
]
ret = enabled_cuts
message = "All timecuts fetched succesfully."
except Exception as e: # noqa: B902
status = "failure"
message = str(e)
logger.error(message)
return jsonify({"data": ret, "message": message, "status": status})
@blueprint.route("/<int:kpi_id>/disable", methods=["GET"])
def disable_kpi(kpi_id):
"""Disable a KPI."""
status, message = "", ""
try:
kpi_obj = Kpi.get_by_id(kpi_id)
if kpi_obj:
kpi_obj.active = False
kpi_obj.save(commit=True)
disable_mapper_for_kpi_ids([kpi_id])
status = "success"
else:
message = "KPI not found"
status = "failure"
except Exception as err: # noqa: B902
status = "failure"
logger.info(f"Error in disabling the KPI: {err}")
return jsonify({"message": message, "status": status})
@blueprint.route("/<int:kpi_id>/enable", methods=["GET"])
def enable_kpi(kpi_id):
"""Enable a KPI."""
status, message = "", ""
try:
kpi_obj = Kpi.get_by_id(kpi_id)
if kpi_obj:
kpi_obj.active = True
kpi_obj.save(commit=True)
enable_mapper_for_kpi_ids([kpi_id])
status = "success"
else:
message = "KPI not found"
status = "failure"
except Exception as err: # noqa: B902
status = "failure"
logger.info(f"Error in enabling the KPI: {err}")
return jsonify({"message": message, "status": status})
@blueprint.route("/<int:kpi_id>/get-dimensions", methods=["GET"])
def kpi_get_dimensions(kpi_id):
"""Retrieve list of dimensions of a KPI."""
dimensions = []
try:
kpi_info = get_kpi_data_from_id(kpi_id)
dimensions = kpi_info["dimensions"]
except Exception as err: # noqa: B902
logger.info(f"Error Found: {err}")
return jsonify({"dimensions": dimensions, "msg": ""})
@blueprint.route("/meta-info", methods=["GET"])
def kpi_meta_info():
"""Meta info of fields of KPI."""
logger.info("kpi meta info")
return jsonify({"data": Kpi.meta_info()})
@blueprint.route("/<int:kpi_id>/update", methods=["PUT"])
def edit_kpi(kpi_id):
"""Edit a KPI."""
status, message = "", ""
do_not_run_analytics_list = ["name", "dashboards"]
run_analytics = False
try:
kpi_obj = Kpi.get_by_id(kpi_id)
data = request.get_json()
if data is None:
raise Exception("Request body is not a JSON.")
meta_info = Kpi.meta_info()
if kpi_obj and kpi_obj.active is True:
dashboard_id_list = data.pop("dashboards", []) + [0]
dashboard_id_list = list(set(dashboard_id_list))
for key, value in data.items():
if key not in do_not_run_analytics_list:
run_analytics = True
if chech_editable_field(meta_info, key):
setattr(kpi_obj, key, value)
            # check if dimensions are edited
if "dimensions" in data.keys():
# if empty, do not run anomaly on subdim
if len(data["dimensions"]) < 1:
run_optional = {
"data_quality": True,
"overall": True,
"subdim": False,
}
else:
run_optional = {
"data_quality": True,
"overall": True,
"subdim": True,
}
if "run_optional" not in kpi_obj.anomaly_params or (
kpi_obj.anomaly_params["run_optional"]["subdim"]
!= run_optional["subdim"]
):
kpi_obj.anomaly_params["run_optional"] = run_optional
flag_modified(kpi_obj, "anomaly_params")
if run_analytics:
logger.info(
"Deleting analytics output and re-running tasks since KPI was "
+ f"edited for KPI ID: {kpi_id}"
)
from chaos_genius.jobs.anomaly_tasks import ready_rca_task
rca_task = ready_rca_task(kpi_id)
if rca_task is not None:
delete_rca_output_for_kpi(kpi_id)
rca_task.apply_async()
logger.info(f"RCA started for KPI ID after editing: {kpi_id}")
else:
logger.info(
"RCA failed for KPI ID since KPI does not exist after editing:"
+ f" {kpi_id}"
)
from chaos_genius.jobs.anomaly_tasks import ready_anomaly_task
anomaly_task = ready_anomaly_task(kpi_id)
if anomaly_task is not None:
delete_anomaly_output_for_kpi(kpi_id)
anomaly_task.apply_async()
logger.info(f"Anomaly started for KPI ID after editing: {kpi_id}")
else:
logger.info(
"Anomaly failed for KPI ID since KPI does not exist after "
+ f"editing: {kpi_id}"
)
edit_kpi_dashboards(kpi_id, dashboard_id_list)
kpi_obj.save(commit=True)
status = "success"
else:
message = "KPI not found or disabled"
status = "failure"
except Exception as err: # noqa: B902
status = "failure"
logger.info(f"Error in updating the KPI: {err}")
message = str(err)
return jsonify({"message": message, "status": status})
@blueprint.route("/<int:kpi_id>", methods=["GET"])
def get_kpi_info(kpi_id):
"""Retrieve details of a KPI."""
status, message = "", ""
data = None
try:
kpi_obj = get_kpi_data_from_id(kpi_id)
data = kpi_obj
mapper_obj_list = get_mapper_obj_by_kpi_ids([kpi_id])
dashboard_id_list = [mapper.dashboard for mapper in mapper_obj_list]
dashboard_list = get_dashboard_list_by_ids(dashboard_id_list)
dashboard_list = [dashboard.as_dict for dashboard in dashboard_list]
data["dashboards"] = dashboard_list
status = "success"
except Exception as err: # noqa: B902
status = "failure"
message = str(err)
logger.info(f"Error in fetching the KPI: {err}")
return jsonify({"message": message, "status": status, "data": data})
@blueprint.route("/<int:kpi_id>/trigger-analytics", methods=["GET"])
def trigger_analytics(kpi_id):
"""Trigger analytics tasks for a KPI."""
# TODO: Fix circular import error
from chaos_genius.jobs.anomaly_tasks import ready_anomaly_task, ready_rca_task
rca_task = ready_rca_task(kpi_id)
anomaly_task = ready_anomaly_task(kpi_id)
if rca_task is not None and anomaly_task is not None:
rca_task.apply_async()
anomaly_task.apply_async()
else:
logger.warn(f"Could not analytics since KPI was not found: {kpi_id}")
return jsonify({"message": "RCA and Anomaly triggered successfully"})
|
py | 1a4d5117edb5d67f080e64966fe816df0f29d555 | import time, os, sys
import requests
ns = os.getenv("NAMESPACE")
consumption_factor = os.getenv("CONSUMPTION_FACTOR")
pod_name = os.getenv("POD_NAME")
while True:
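    # every 5 seconds, report this pod's GPU usage factor to the timekeeper
    # service and exit cleanly when it tells the pod to terminate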
time.sleep(5)
r = requests.post(f"http://gpu-timekeeper.kube-system/budget/{ns}/report",
json={ "usage_factor": float(consumption_factor),
"pod_name": pod_name})
if r.json()["terminate"]:
sys.exit(0)
|
py | 1a4d51370031e35eef8fda9a9cb5c87cae389f4c | import collections
from typing import List
# 'TreeNode' is assumed to be provided by the judging environment (LeetCode-style)
class Solution:
def findLeaves(self, root: TreeNode) -> List[List[int]]:
d = collections.defaultdict(list)
self.dfs(d, root)
res = []
for v in d.values():
res.append(v)
return res
def dfs(self, d, node):
if not node:
return 0
left = self.dfs(d, node.left)
right = self.dfs(d, node.right)
depth = max(left, right) + 1
d[depth].append(node.val)
return depth |
py | 1a4d51443616d55bc5bb169633008aeb4c3e0246 | from math import atan2
from ..Qt import QtGui, QtCore
from ..Point import Point
from .. import functions as fn
from .GraphicsObject import GraphicsObject
from .UIGraphicsItem import UIGraphicsItem
from .TextItem import TextItem
from .ScatterPlotItem import Symbols, makeCrosshair
from .ViewBox import ViewBox
import string
import warnings
class TargetItem(UIGraphicsItem):
"""Draws a draggable target symbol (circle plus crosshair).
The size of TargetItem will remain fixed on screen even as the view is zoomed.
Includes an optional text label.
"""
sigPositionChanged = QtCore.Signal(object)
sigPositionChangeFinished = QtCore.Signal(object)
def __init__(
self,
pos=None,
size=10,
radii=None,
symbol="crosshair",
pen=None,
hoverPen=None,
brush=None,
hoverBrush=None,
movable=True,
label=None,
labelOpts=None,
):
r"""
Parameters
----------
pos : list, tuple, QPointF, QPoint, Optional
Initial position of the symbol. Default is (0, 0)
size : int
Size of the symbol in pixels. Default is 10.
radii : tuple of int
Deprecated. Gives size of crosshair in screen pixels.
pen : QPen, tuple, list or str
Pen to use when drawing line. Can be any arguments that are valid
for :func:`~pyqtgraph.mkPen`. Default pen is transparent yellow.
brush : QBrush, tuple, list, or str
            Defines the brush used to fill the symbol. Can be any arguments
            that are valid for :func:`~pyqtgraph.mkBrush`. Default is
            transparent blue.
movable : bool
If True, the symbol can be dragged to a new position by the user.
hoverPen : QPen, tuple, list, or str
Pen to use when drawing symbol when hovering over it. Can be any
arguments that are valid for :func:`~pyqtgraph.mkPen`. Default pen
is red.
hoverBrush : QBrush, tuple, list or str
            Brush to use to fill the symbol when hovering over it. Can be any
            arguments that are valid for :func:`~pyqtgraph.mkBrush`. Default is
            transparent blue.
symbol : QPainterPath or str
QPainterPath to use for drawing the target, should be centered at
``(0, 0)`` with ``max(width, height) == 1.0``. Alternatively a string
which can be any symbol accepted by
:func:`~pyqtgraph.ScatterPlotItem.setData`
label : bool, str or callable, optional
Text to be displayed in a label attached to the symbol, or None to
show no label (default is None). May optionally include formatting
strings to display the symbol value, or a callable that accepts x
and y as inputs. If True, the label is ``x = {: >.3n}\ny = {: >.3n}``
False or None will result in no text being displayed
labelOpts : dict
A dict of keyword arguments to use when constructing the text
label. See :class:`TargetLabel` and :class:`~pyqtgraph.TextItem`
"""
super().__init__(self)
self.movable = movable
self.moving = False
self._label = None
self.mouseHovering = False
if radii is not None:
warnings.warn(
"'radii' is now deprecated, and will be removed in 0.13.0. Use 'size' "
"parameter instead",
DeprecationWarning,
stacklevel=2,
)
symbol = makeCrosshair(*radii)
size = 1
if pen is None:
pen = (255, 255, 0)
self.setPen(pen)
if hoverPen is None:
hoverPen = (255, 0, 255)
self.setHoverPen(hoverPen)
if brush is None:
brush = (0, 0, 255, 50)
self.setBrush(brush)
if hoverBrush is None:
hoverBrush = (0, 255, 255, 100)
self.setHoverBrush(hoverBrush)
self.currentPen = self.pen
self.currentBrush = self.brush
self._shape = None
self._pos = Point(0, 0)
if pos is None:
pos = Point(0, 0)
self.setPos(pos)
if isinstance(symbol, str):
try:
self._path = Symbols[symbol]
except KeyError:
raise KeyError("symbol name found in available Symbols")
elif isinstance(symbol, QtGui.QPainterPath):
self._path = symbol
else:
raise TypeError("Unknown type provided as symbol")
self.scale = size
self.setPath(self._path)
self.setLabel(label, labelOpts)
@property
def sigDragged(self):
warnings.warn(
"'sigDragged' has been deprecated and will be removed in 0.13.0. Use "
"`sigPositionChanged` instead",
DeprecationWarning,
stacklevel=2,
)
return self.sigPositionChangeFinished
def setPos(self, pos):
"""Method to set the position to ``(x, y)`` within the plot view
Parameters
----------
pos : tuple, list, QPointF, QPoint, or pg.Point
Container that consists of ``(x, y)`` representation of where the
TargetItem should be placed
Raises
------
TypeError
If the type of ``pos`` does not match the known types to extract
coordinate info from, a TypeError is raised
"""
if isinstance(pos, Point):
newPos = pos
elif isinstance(pos, (tuple, list)):
newPos = Point(pos)
elif isinstance(pos, (QtCore.QPointF, QtCore.QPoint)):
newPos = Point(pos.x(), pos.y())
else:
raise TypeError
if self._pos != newPos:
self._pos = newPos
super().setPos(self._pos)
self.sigPositionChanged.emit(self)
def setBrush(self, *args, **kwargs):
"""Set the brush that fills the symbol. Allowable arguments are any that
are valid for :func:`~pyqtgraph.mkBrush`.
"""
self.brush = fn.mkBrush(*args, **kwargs)
if not self.mouseHovering:
self.currentBrush = self.brush
self.update()
def setHoverBrush(self, *args, **kwargs):
"""Set the brush that fills the symbol when hovering over it. Allowable
arguments are any that are valid for :func:`~pyqtgraph.mkBrush`.
"""
self.hoverBrush = fn.mkBrush(*args, **kwargs)
if self.mouseHovering:
self.currentBrush = self.hoverBrush
self.update()
def setPen(self, *args, **kwargs):
"""Set the pen for drawing the symbol. Allowable arguments are any that
are valid for :func:`~pyqtgraph.mkPen`."""
self.pen = fn.mkPen(*args, **kwargs)
if not self.mouseHovering:
self.currentPen = self.pen
self.update()
def setHoverPen(self, *args, **kwargs):
"""Set the pen for drawing the symbol when hovering over it. Allowable
arguments are any that are valid for
:func:`~pyqtgraph.mkPen`."""
self.hoverPen = fn.mkPen(*args, **kwargs)
if self.mouseHovering:
self.currentPen = self.hoverPen
self.update()
def boundingRect(self):
return self.shape().boundingRect()
def paint(self, p, *_):
p.setPen(self.currentPen)
p.setBrush(self.currentBrush)
p.drawPath(self.shape())
def setPath(self, path):
if path != self._path:
self._path = path
self._shape = None
return None
def shape(self):
if self._shape is None:
s = self.generateShape()
if s is None:
return self._path
self._shape = s
# beware--this can cause the view to adjust
# which would immediately invalidate the shape.
self.prepareGeometryChange()
return self._shape
def generateShape(self):
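        # Map the unit-sized symbol path through the device transform so the
        # symbol keeps a constant on-screen size and orientation regardless of
        # the view's zoom and aspect ratio.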
dt = self.deviceTransform()
if dt is None:
self._shape = self._path
return None
v = dt.map(QtCore.QPointF(1, 0)) - dt.map(QtCore.QPointF(0, 0))
dti = fn.invertQTransform(dt)
devPos = dt.map(QtCore.QPointF(0, 0))
tr = QtGui.QTransform()
tr.translate(devPos.x(), devPos.y())
va = atan2(v.y(), v.x())
tr.rotateRadians(va)
tr.scale(self.scale, self.scale)
return dti.map(tr.map(self._path))
def mouseDragEvent(self, ev):
if not self.movable or int(ev.button() & QtCore.Qt.LeftButton) == 0:
return
ev.accept()
if ev.isStart():
self.symbolOffset = self.pos() - self.mapToView(ev.buttonDownPos())
self.moving = True
if not self.moving:
return
self.setPos(self.symbolOffset + self.mapToView(ev.pos()))
if ev.isFinish():
self.moving = False
self.sigPositionChangeFinished.emit(self)
def mouseClickEvent(self, ev):
if self.moving and ev.button() == QtCore.Qt.RightButton:
ev.accept()
self.moving = False
self.sigPositionChanged.emit(self)
self.sigPositionChangeFinished.emit(self)
def setMouseHover(self, hover):
# Inform the item that the mouse is(not) hovering over it
if self.mouseHovering is hover:
return
self.mouseHovering = hover
if hover:
self.currentBrush = self.hoverBrush
self.currentPen = self.hoverPen
else:
self.currentBrush = self.brush
self.currentPen = self.pen
self.update()
def hoverEvent(self, ev):
if self.movable and (not ev.isExit()) and ev.acceptDrags(QtCore.Qt.LeftButton):
self.setMouseHover(True)
else:
self.setMouseHover(False)
def viewTransformChanged(self):
GraphicsObject.viewTransformChanged(self)
self._shape = None # invalidate shape, recompute later if requested.
self.update()
def pos(self):
"""Provides the current position of the TargetItem
Returns
-------
Point
pg.Point of the current position of the TargetItem
"""
return self._pos
def label(self):
"""Provides the TargetLabel if it exists
Returns
-------
TargetLabel or None
If a TargetLabel exists for this TargetItem, return that, otherwise
return None
"""
return self._label
def setLabel(self, text=None, labelOpts=None):
"""Method to call to enable or disable the TargetLabel for displaying text
Parameters
----------
text : Callable or str, optional
Details how to format the text, by default None
If None, do not show any text next to the TargetItem
If Callable, then the label will display the result of ``text(x, y)``
            If a formatted string, then the output of ``text.format(x, y)`` will be
            displayed
If a non-formatted string, then the text label will display ``text``, by
default None
labelOpts : dictionary, optional
These arguments are passed on to :class:`~pyqtgraph.TextItem`
"""
if not text:
if self._label is not None and self._label.scene() is not None:
# remove the label if it's already added
self._label.scene().removeItem(self._label)
self._label = None
else:
# provide default text if text is True
if text is True:
# convert to default value or empty string
text = "x = {: .3n}\ny = {: .3n}"
labelOpts = {} if labelOpts is None else labelOpts
if self._label is not None:
self._label.scene().removeItem(self._label)
self._label = TargetLabel(self, text=text, **labelOpts)
def setLabelAngle(self, angle):
warnings.warn(
"TargetItem.setLabelAngle is deprecated and will be removed in 0.13.0."
"Use TargetItem.label().setAngle() instead",
DeprecationWarning,
stacklevel=2,
)
if self.label() is not None and angle != self.label().angle:
self.label().setAngle(angle)
return None
class TargetLabel(TextItem):
"""A TextItem that attaches itself to a TargetItem.
This class extends TextItem with the following features :
* Automatically positions adjacent to the symbol at a fixed position.
* Automatically reformats text when the symbol location has changed.
Parameters
----------
target : TargetItem
The TargetItem to which this label will be attached to.
text : str or callable, Optional
Governs the text displayed, can be a fixed string or a format string
that accepts the x, and y position of the target item; or be a callable
method that accepts a tuple (x, y) and returns a string to be displayed.
If None, an empty string is used. Default is None
offset : tuple or list or QPointF or QPoint
Position to set the anchor of the TargetLabel away from the center of
the target in pixels, by default it is (20, 0).
anchor : tuple, list, QPointF or QPoint
        Position to rotate the TargetLabel about, and the anchor point for the
        offset; see :class:`~pyqtgraph.TextItem` for more information.
kwargs : dict of arguments that are passed on to
:class:`~pyqtgraph.TextItem` constructor, excluding text parameter
"""
def __init__(
self,
target,
text="",
offset=(20, 0),
anchor=(0, 0.5),
**kwargs,
):
if isinstance(offset, Point):
self.offset = offset
elif isinstance(offset, (tuple, list)):
self.offset = Point(*offset)
elif isinstance(offset, (QtCore.QPoint, QtCore.QPointF)):
self.offset = Point(offset.x(), offset.y())
else:
raise TypeError("Offset parameter is the wrong data type")
super().__init__(anchor=anchor, **kwargs)
self.setParentItem(target)
self.target = target
self.setFormat(text)
self.target.sigPositionChanged.connect(self.valueChanged)
self.valueChanged()
def format(self):
return self._format
def setFormat(self, text):
"""Method to set how the TargetLabel should display the text. This
method should be called from TargetItem.setLabel directly.
Parameters
----------
text : Callable or str
Details how to format the text.
If Callable, then the label will display the result of ``text(x, y)``
            If a formatted string, then the output of ``text.format(x, y)`` will be
            displayed
If a non-formatted string, then the text label will display ``text``
"""
if not callable(text):
parsed = list(string.Formatter().parse(text))
if parsed and parsed[0][1] is not None:
self.setProperty("formattableText", True)
else:
self.setText(text)
self.setProperty("formattableText", False)
else:
self.setProperty("formattableText", False)
self._format = text
self.valueChanged()
def valueChanged(self):
x, y = self.target.pos()
if self.property("formattableText"):
self.setText(self._format.format(float(x), float(y)))
elif callable(self._format):
self.setText(self._format(x, y))
def viewTransformChanged(self):
viewbox = self.getViewBox()
if isinstance(viewbox, ViewBox):
viewPixelSize = viewbox.viewPixelSize()
scaledOffset = QtCore.QPointF(
self.offset.x() * viewPixelSize[0], self.offset.y() * viewPixelSize[1]
)
self.setPos(scaledOffset)
return super().viewTransformChanged()
def mouseClickEvent(self, ev):
return self.parentItem().mouseClickEvent(ev)
def mouseDragEvent(self, ev):
targetItem = self.parentItem()
if not targetItem.movable or int(ev.button() & QtCore.Qt.LeftButton) == 0:
return
ev.accept()
if ev.isStart():
targetItem.symbolOffset = targetItem.pos() - self.mapToView(
ev.buttonDownPos()
)
targetItem.moving = True
if not targetItem.moving:
return
targetItem.setPos(targetItem.symbolOffset + self.mapToView(ev.pos()))
if ev.isFinish():
targetItem.moving = False
targetItem.sigPositionChangeFinished.emit(self)
|
py | 1a4d514df14dd98e8bd2e737fadbbaeb3fdf3ab7 | from callsmusic.callsmusic import client as USER
from pyrogram import Client, filters
from pyrogram.types import Message, InlineKeyboardButton, InlineKeyboardMarkup
import config
from config import BOT_USERNAME
from pyrogram.errors import UserAlreadyParticipant
from helpers.decorators import errors, authorized_users_only
@Client.on_message(filters.group & filters.command(["userbotjoin"]))
@authorized_users_only
@errors
async def addchannel(client, message):
chid = message.chat.id
try:
invitelink = await client.export_chat_invite_link(chid)
    except Exception:
await message.reply_text(
"<b>Aggiungimi come admin</b>",
)
return
    try:
        user = await USER.get_me()
    except Exception:
        user = None  # the userbot's name is only cosmetic here
try:
await USER.join_chat(invitelink)
        await USER.send_message(message.chat.id, "Joined!")
except UserAlreadyParticipant:
await message.reply_text(
"<b>Userbot già in chat</b>",
)
pass
except Exception as e:
print(e)
await message.reply_text(
f"<b>🛑 Flood Wait Error 🛑</b>",
)
return
await message.reply_text(
"<b>L'userbot è entrato</b>",
)
@USER.on_message(filters.group & filters.command(["userbotleave"]))
async def rem(USER, message):
try:
await USER.leave_chat(message.chat.id)
    except Exception:
        await message.reply_text(
            "<b>Error!</b>",
)
return
|
py | 1a4d5150d927221091cb080928ec9ec9e157ed94 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-28 10:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0006_auto_20160728_1016'),
]
operations = [
migrations.RemoveField(
model_name='motion',
name='answer',
),
migrations.AddField(
model_name='motion',
name='answers',
field=models.ManyToManyField(blank=True, null=True, to='api.Answer'),
),
]
|
py | 1a4d515a4ba3747dd5e75f635635d9c17412e00d | from sqlalchemy import engine_from_config
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import configure_mappers
import zope.sqlalchemy
# import or define all models here to ensure they are attached to the
# Base.metadata prior to any initialization routines
from .horse import Horse
from .race import Race
# run configure_mappers after defining all of the models to ensure
# all relationships can be set up
configure_mappers()
def get_engine(settings, prefix='sqlalchemy.'):
return engine_from_config(settings, prefix)
def get_session_factory(engine):
factory = sessionmaker()
factory.configure(bind=engine)
return factory
def get_tm_session(session_factory, transaction_manager):
"""
Get a ``sqlalchemy.orm.Session`` instance backed by a transaction.
This function will hook the session to the transaction manager which
will take care of committing any changes.
- When using pyramid_tm it will automatically be committed or aborted
depending on whether an exception is raised.
- When using scripts you should wrap the session in a manager yourself.
For example::
import transaction
engine = get_engine(settings)
session_factory = get_session_factory(engine)
with transaction.manager:
dbsession = get_tm_session(session_factory, transaction.manager)
"""
dbsession = session_factory()
zope.sqlalchemy.register(
dbsession, transaction_manager=transaction_manager)
return dbsession
def includeme(config):
"""
Initialize the model for a Pyramid app.
Activate this setup using ``config.include('midway.models')``.
"""
settings = config.get_settings()
settings['tm.manager_hook'] = 'pyramid_tm.explicit_manager'
# use pyramid_tm to hook the transaction lifecycle to the request
config.include('pyramid_tm')
# use pyramid_retry to retry a request when transient exceptions occur
config.include('pyramid_retry')
session_factory = get_session_factory(get_engine(settings))
config.registry['dbsession_factory'] = session_factory
# make request.dbsession available for use in Pyramid
config.add_request_method(
# r.tm is the transaction manager used by pyramid_tm
lambda r: get_tm_session(session_factory, r.tm),
'dbsession',
reify=True
)
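    # Typical wiring (hypothetical): the host app calls
    # config.include('midway.models') after setting 'sqlalchemy.url' in its ini.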
|
py | 1a4d5257e1e662707fc8546974bcdace897bbe26 | #!/usr/bin/python3
import logging
import os
import json
import uuid
import datetime
import urllib.request
from utils import getConfig
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
LOG_FILE = os.path.join(BASE_DIR, 'output.log')
logger = logging.getLogger('transatlanticTorrentExpress')
logger.setLevel(logging.DEBUG)
if not os.path.isfile(LOG_FILE):
print('Log file does not exist yet, creating in project folder')
f = open(LOG_FILE, 'w+')
f.close()
fh = logging.FileHandler(LOG_FILE)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
class ESHandler(logging.Handler):
def __init__(self, *args, **kwargs):
self.host = kwargs.get('host')
self.port = kwargs.get('port') or 9200
self.date = datetime.date.today()
self.sessionID = uuid.uuid4()
        logging.Handler.__init__(self)  # initialize the logging.Handler base class
def emit(self, record):
self.format(record)
indexURL = 'http://{}:{}/transatlantic_torrent_express-{}/_doc'.format(self.host, self.port, self.date.strftime('%Y.%m.%d'))
doc = {
'severity': record.levelname,
'message': record.message,
'@timestamp': int(record.created*1000),
'sessionID': str(self.sessionID)
}
if hasattr(record, 'es'):
for param in record.es.values():
if ': {}'.format(param) in record.message:
doc['message'] = record.message.replace(': {}'.format(str(param)), '')
doc = {**record.es, **doc}
payload = json.dumps(doc).encode('utf8')
req = urllib.request.Request(indexURL, data=payload,
headers={'content-type': 'application/json'})
response = urllib.request.urlopen(req)
response = response.read().decode('utf8')
return response
class ElasticFieldParameterAdapter(logging.LoggerAdapter):
def __init__(self, logger, extra={}):
super().__init__(logger, extra)
def process(self, msg, kwargs):
if kwargs == {}:
return (msg, kwargs)
extra = kwargs.get("extra", {})
extra.update({"es": kwargs.pop("es", True)})
kwargs["extra"] = extra
return (msg, kwargs)
config = getConfig()
esHost = config['ELASTIC']['host']
esPort = config['ELASTIC']['port']
eh = ESHandler(host=esHost, port=esPort)
eh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)8s | %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
logger.addHandler(eh)
logger = ElasticFieldParameterAdapter(logger)
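# Example (hypothetical field name): extra fields passed via 'es' become
# top-level keys of the Elasticsearch document, and ': value' suffixes are
# stripped from the message template.
#   logger.info('Added torrent: {}'.format(name), es={'torrent': name})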
|
py | 1a4d52f5b6791a0da2b19982b1e9c7573aaf22b9 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition
"""MXNet symbol frontend."""
from __future__ import absolute_import as _abs
import json
import tvm
from .. import ir_pass
from .. import expr as _expr
from .. import op as _op
from ... import nd as _nd
from .common import StrAttrsDict
from .nnvm_common import _rename, _binop_scalar, _rbinop_scalar, _reduce
from .nnvm_common import _arg_reduce, _init_op, _softmax_op, _cast
from .nnvm_common import _clip, _transpose, _upsampling
from .nnvm_common import _elemwise_sum, _reshape
from .nnvm_common import _warn_not_used
__all__ = ['from_mxnet']
_activation_map = {
"sigmoid": _op.sigmoid,
"tanh" : _op.tanh,
"relu" : _op.nn.relu
}
def _mx_fully_connected(inputs, attrs):
import mxnet as mx
units = attrs.get_int("num_hidden")
use_bias = not attrs.get_bool("no_bias", False)
try:
_ = mx.sym.FullyConnected(mx.sym.var("x"), num_hidden=1, flatten=True)
has_flatten = True
except mx.base.MXNetError:
# no flatten attribute in old mxnet
has_flatten = False
use_flatten = attrs.get_bool("flatten", True)
if has_flatten and use_flatten:
inputs[0] = _op.nn.batch_flatten(inputs[0])
res = _op.nn.dense(inputs[0], inputs[1], units=units)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=-1)
return res
def _get_channel_axis(layout, op_name):
if layout == "NCHW":
return 1
if layout == "NHWC":
return 3
raise tvm.error.OpAttributeInvalid(
'Value {} in attribute "layout" of operator {} is not valid.'.format(layout, op_name))
def _mx_activations(inputs, attrs):
act_type = attrs.get_str("act_type")
assert len(inputs) == 1
if act_type == "softrelu":
def _stable_softrelu(x):
# log(1 + exp(-abs(x))) + relu(x)
one = _expr.const(1, dtype="float32")
exp_neg_abs_x = _op.exp(_op.negative(_op.abs(x)))
return _op.add(_op.log(_op.add(one, exp_neg_abs_x)),
_op.nn.relu(x))
return _stable_softrelu(inputs[0])
if act_type not in _activation_map:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported for frontend MXNet.'.format(act_type))
return _activation_map[act_type](inputs[0])
def _mx_compare(new_op, wrapper):
def impl(inputs, attrs):
dtype = ir_pass.infer_type(inputs[0]).checked_type.dtype
return wrapper(new_op)(inputs, attrs).astype(dtype)
return impl
def _mx_conv2d(inputs, attrs):
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) != 2:
raise tvm.error.OpAttributeInvalid(
'Non-2D kernels are not supported for operator Conv2D.')
data_layout = attrs.get_str("layout", "NCHW")
channel_axis = _get_channel_axis(data_layout, "conv2d")
if "kernel_layout" in attrs.attrs:
kernel_layout = attrs.get_str("kernel_layout")
else:
kernel_layout = "HWIO" if data_layout == "NHWC" else "OIHW"
new_attrs = {}
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["kernel_size"] = kernel_size
new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0))
new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1, 1))
new_attrs["groups"] = attrs.get_int("num_group", 1)
new_attrs["data_layout"] = data_layout
new_attrs["kernel_layout"] = kernel_layout
use_bias = not attrs.get_bool("no_bias", False)
res = _op.nn.conv2d(inputs[0], inputs[1], **new_attrs)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
return res
def _mx_conv2d_transpose(inputs, attrs):
if "target_shape" in attrs.attrs:
raise tvm.error.OpAttributeUnimplemented(
'Attribute "target_shape" is not supported for operator Conv2D-transpose.')
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) != 2:
raise tvm.error.OpAttributeInvalid(
'Non-2D kernels are not supported for operator Conv2D-transpose.')
data_layout = attrs.get_str("layout", "NCHW")
channel_axis = _get_channel_axis(data_layout, "conv2d_transpose")
if "kernel_layout" in attrs.attrs:
kernel_layout = attrs.get_str("kernel_layout")
else:
kernel_layout = "HWIO" if data_layout == "NHWC" else "OIHW"
new_attrs = {}
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["kernel_size"] = kernel_size
new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1))
new_attrs["output_padding"] = attrs.get_int_tuple("adj", (0, 0))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0))
new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1, 1))
new_attrs["groups"] = attrs.get_int("num_group", 1)
new_attrs["data_layout"] = data_layout
new_attrs["kernel_layout"] = kernel_layout
use_bias = not attrs.get_bool("no_bias", False)
res = _op.nn.conv2d_transpose(inputs[0], inputs[1], **new_attrs)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
return res
def _mx_pooling(inputs, attrs):
global_pool = attrs.get_bool("global_pool", False)
pool_type = attrs.get_str("pool_type")
def _pool2d(new_op, is_avg):
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) != 2:
raise tvm.error.OpAttributeInvalid(
'Only 2D kernels are supported for operator Pool2D.')
new_attrs = {}
new_attrs["pool_size"] = kernel_size
new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0))
new_attrs["ceil_mode"] = (attrs.get_str("pooling_convention", "valid") == "full")
if is_avg:
new_attrs["count_include_pad"] = attrs.get_bool("count_include_pad", True)
return new_op(inputs[0], **new_attrs)
if pool_type == "max":
if global_pool:
return _op.nn.global_max_pool2d(inputs[0])
return _pool2d(_op.nn.max_pool2d, False)
if pool_type == "avg":
if global_pool:
return _op.nn.global_avg_pool2d(inputs[0])
return _pool2d(_op.nn.avg_pool2d, True)
raise tvm.error.OpNotImplemented(
'Operator {} Pooling is not supported for frontend MXNet.'.format(pool_type.capitalize()))
def _mx_adaptive_avg_pooling(inputs, attrs):
output_size = attrs.get_int_tuple("output_size", [])
return _op.contrib.adaptive_avg_pool2d(inputs[0], output_size)
def _mx_dropout(inputs, attrs):
rate = attrs.get_float("p", 0.5)
return _op.nn.dropout(inputs[0], rate=rate)
def _mx_BlockGrad(inputs, attrs): #pylint: disable=unused-argument
return inputs
def _mx_batch_norm(inputs, attrs):
if attrs.get_bool("output_mean_var", False):
raise tvm.error.OpAttributeUnimplemented(
'Attribute "output_mean_var" is not supported for operator Batch Norm.')
if attrs.get_bool("use_global_stats", False):
_warn_not_used("use_global_stats", "batch_norm")
new_attrs = {}
new_attrs["axis"] = attrs.get_int("axis", 1)
new_attrs["epsilon"] = attrs.get_float("eps", 0.001)
new_attrs["center"] = True
new_attrs["scale"] = not attrs.get_bool("fix_gamma", False)
return _op.nn.batch_norm(*inputs, **new_attrs)
def _mx_slice(inputs, attrs):
new_attrs = {}
begin = attrs.get_int_tuple('begin', None)
end = attrs.get_int_tuple('end', None)
stride = attrs.get_int_tuple('step', None)
if begin is None:
raise tvm.error.OpAttributeRequired(
'Attribute "begin" not found in operator Slice.')
if end is None:
raise tvm.error.OpAttributeRequired(
'Attribute "end" not found in operator Slice.')
if None in begin:
raise tvm.error.OpAttributeInvalid(
'Value None in attribute "begin" of operator Slice is not valid.')
if None in end:
raise tvm.error.OpAttributeInvalid(
'Value None in attribute "end" of operator Slice is not valid.')
new_attrs = {'begin': begin, 'end': end}
if stride is not None:
new_attrs['strides'] = stride
return _op.strided_slice(inputs[0], **new_attrs)
def _mx_slice_like(inputs, attrs):
assert len(inputs) == 2
new_attrs = {}
new_attrs["axes"] = attrs.get_int_tuple("axes", None)
return _op.slice_like(*inputs, **new_attrs)
def _mx_slice_axis(inputs, attrs):
assert len(inputs) == 1
shape = ir_pass.infer_type(inputs[0]).checked_type.shape
axis = attrs.get_int("axis")
ax_beg = attrs.get_int("begin")
ax_end = attrs.get_str("end")
if axis < 0:
axis += len(shape)
assert 0 <= axis < len(shape)
if ax_end == "None":
ax_end = int(shape[axis])
else:
ax_end = int(ax_end)
if ax_beg < 0:
ax_beg += int(shape[axis])
if ax_end < 0:
ax_end += int(shape[axis])
assert 0 <= ax_beg < int(shape[axis])
assert ax_beg < ax_end <= int(shape[axis])
begin = []
end = []
for i, dim in enumerate(shape):
if i != axis:
begin.append(0)
end.append(dim)
else:
begin.append(ax_beg)
end.append(ax_end)
return _op.strided_slice(inputs[0], begin, end)
def _mx_split(inputs, attrs):
axis = attrs.get_int("axis", 1)
new_attrs = {}
new_attrs["indices_or_sections"] = attrs.get_int("num_outputs")
new_attrs["axis"] = axis
res = _op.split(inputs[0], **new_attrs)
if attrs.get_bool("squeeze_axis", False):
return tuple([_op.squeeze(x, axis=[axis]) for x in res])
return res
def _mx_softmax_activation(inputs, attrs):
mode = attrs.get_str("mode", "instance")
axis = 0 if mode == "instance" else 1
return _op.nn.softmax(inputs[0], axis=axis)
def _mx_softmax_output(inputs, attrs):
if attrs.get_bool("multi_output", False):
return _op.nn.softmax(inputs[0], axis=1)
return _op.nn.softmax(inputs[0])
def _mx_concat(inputs, attrs):
axis = attrs.get_int("dim", 1)
return _op.concatenate(tuple(inputs), axis=axis)
def _mx_stack(inputs, attrs):
axis = attrs.get_int("axis", 0)
return _op.stack(tuple(inputs), axis=axis)
def _mx_expand_dims(inputs, attrs):
axis = attrs.get_int("axis")
return _op.expand_dims(inputs[0], axis=axis)
def _mx_leaky_relu(inputs, attrs):
act_type = attrs.get_str("act_type")
if act_type == "leaky":
return _op.nn.leaky_relu(inputs[0], alpha=attrs.get_float("slope", 0.25))
if act_type == "prelu":
assert len(inputs) == 2
return _op.nn.prelu(*inputs)
if act_type == "elu":
# -slope * relu(1-exp(x)) + relu(x)
slope = attrs.get_float("slope", 0.25)
one = _expr.const(1, dtype="float32")
x = inputs[0]
mslope = _op.nn.relu(_op.subtract(one, _op.exp(x)))
mslope = _op.multiply(mslope, _expr.const(-slope, dtype="float32"))
return _op.add(mslope, _op.nn.relu(x))
if act_type == "rrelu":
# NOTE this is only converted for inference.
lower_bound = attrs.get_float("lower_bound")
upper_bound = attrs.get_float("upper_bound")
alpha = (lower_bound + upper_bound) / 2.0
return _op.nn.leaky_relu(inputs[0], alpha=alpha)
raise tvm.error.OpNotImplemented(
'Operator {} is not supported for frontend MXNet.'.format(act_type))
def _mx_make_power(power):
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
scalar = _expr.const(power, dtype=None)
# Note: int maps to "int32", float maps to "float32"
return _op.power(inputs[0], scalar)
return _impl
def _mx_make_exponent(base):
# exp(b, x) = e^b * e^x
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
scalar = _op.exp(_expr.const(base, dtype="float32"))
return _op.multiply(inputs[0], scalar)
return _impl
def _mx_make_logarithm(base):
# log(b, x) = log(x) / log(b)
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
scalar = _op.log(_expr.const(base, dtype="float32"))
return _op.divide(inputs[0], scalar)
return _impl
def _mx_expm1():
# exp_minus_1 x = exp(x) - 1
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
one = _expr.const(1, dtype="float32")
        return _op.subtract(_op.exp(inputs[0]), one)
return _impl
def _mx_log1p():
# 1_plus_log x = log(x + 1)
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
one = _expr.const(1, dtype="float32")
return _op.log(_op.add(inputs[0], one))
return _impl
def _mx_lrn(inputs, attrs):
new_attrs = {}
new_attrs["alpha"] = attrs.get_float("alpha", 0.0001)
new_attrs["beta"] = attrs.get_float("beta", 0.75)
new_attrs["bias"] = attrs.get_float("knorm", 2)
# NCHW format and normalization along channel axis
new_attrs["axis"] = 1
new_attrs["size"] = attrs.get_int("nsize")
assert len(inputs) == 1
return _op.nn.lrn(inputs[0], **new_attrs)
def _mx_multibox_prior(inputs, attrs):
new_attrs = {}
new_attrs["sizes"] = attrs.get_float_tuple("sizes", (1.0, ))
new_attrs["steps"] = attrs.get_float_tuple("steps", (-1.0, -1.0))
new_attrs["offsets"] = attrs.get_float_tuple("offsets", (0.5, 0.5))
new_attrs["ratios"] = attrs.get_float_tuple("ratios", (1.0, ))
new_attrs["clip"] = attrs.get_bool("clip", False)
return _op.vision.multibox_prior(inputs[0], **new_attrs)
def _mx_multibox_detection(inputs, attrs):
new_attrs0 = {}
new_attrs0["clip"] = attrs.get_bool("clip", True)
new_attrs0["threshold"] = attrs.get_float("threshold", 0.01)
new_attrs0["variances"] = attrs.get_float_tuple("variances", (0.1, 0.1,
0.2, 0.2))
new_attrs1 = {}
new_attrs1["return_indices"] = False
new_attrs1["iou_threshold"] = attrs.get_float("nms_threshold", 0.5)
new_attrs1["force_suppress"] = attrs.get_bool("force_suppress", False)
new_attrs1["top_k"] = attrs.get_int("nms_topk", -1)
ret = _op.vision.multibox_transform_loc(inputs[0], inputs[1],
inputs[2], **new_attrs0)
return _op.vision.non_max_suppression(ret[0], ret[1], **new_attrs1)
def _mx_batch_dot(inputs, attrs):
assert len(inputs) == 2
a, b = inputs
transpose_a = attrs.get_bool("transpose_a", False)
transpose_b = attrs.get_bool("transpose_b", False)
if transpose_a is True:
msg = 'Value {} in attribute "transpose_a" of operator batch_dot ' \
'is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(transpose_a))
if transpose_b is False:
b = _op.transpose(b, axes=[0, 2, 1])
return _op.nn.batch_matmul(a, b)
def _mx_arange(inputs, attrs):
assert len(inputs) == 0
if attrs.get_int("repeat", 1) != 1:
raise tvm.error.OpAttributeUnimplemented(
'Attribute "repeat" is not supported in operator arange.')
new_attrs = {}
new_attrs["start"] = attrs.get_float("start", 0)
new_attrs["stop"] = attrs.get_float("stop")
new_attrs["step"] = attrs.get_float("step", 1)
new_attrs["dtype"] = attrs.get_str("dtype", "float32")
return _op.arange(**new_attrs)
def _mx_repeat(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["repeats"] = attrs.get_int("repeats")
new_attrs["axis"] = attrs.get_int("axis", 0)
return _op.repeat(inputs[0], **new_attrs)
def _mx_tile(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["reps"] = attrs.get_int_tuple("reps")
return _op.tile(inputs[0], **new_attrs)
def _mx_take(inputs, attrs):
assert len(inputs) == 2
mode = attrs.get_str("mode", "clip")
if mode == "raise":
raise tvm.error.OpAttributeUnimplemented("take with raise mode is not supported yet")
axis = attrs.get_int("axis", 0)
return _op.take(inputs[0], inputs[1].astype("int32"), axis, mode)
def _mx_reverse(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["axis"] = attrs.get_int("axis")
return _op.reverse(inputs[0], **new_attrs)
def _mx_roi_align(inputs, attrs):
new_attrs = {}
new_attrs["pooled_size"] = attrs.get_int_tuple("pooled_size")
new_attrs["spatial_scale"] = attrs.get_float("spatial_scale")
new_attrs["sample_ratio"] = attrs.get_int("sample_ratio", -1)
new_attrs["layout"] = "NCHW"
return _op.vision.roi_align(inputs[0], inputs[1], **new_attrs)
def _mx_resize(inputs, attrs):
scale_height = attrs.get_float("scale_height", None)
scale_width = attrs.get_float("scale_width", None)
height = attrs.get_int("height", 1)
width = attrs.get_int("width", 1)
shape = ir_pass.infer_type(inputs[0]).checked_type.shape
if scale_height is not None:
height = (scale_height * shape[2]).astype("int32")
if scale_width is not None:
width = (scale_width * shape[3]).astype("int32")
size = (height, width)
return _op.image.resize(inputs[0], size, align_corners=True)
def _mx_roi_pooling(inputs, attrs):
new_attrs = {}
new_attrs["pooled_size"] = attrs.get_int_tuple("pooled_size")
new_attrs["spatial_scale"] = attrs.get_float("spatial_scale")
new_attrs["layout"] = "NCHW"
return _op.vision.roi_pool(inputs[0], inputs[1], **new_attrs)
def _mx_proposal(inputs, attrs):
new_attrs = {}
new_attrs["scales"] = attrs.get_float_tuple("scales", (4.0, 8.0, 16.0, 32.0))
new_attrs["ratios"] = attrs.get_float_tuple("ratios", (0.5, 1.0, 2.0))
new_attrs["feature_stride"] = attrs.get_int("feature_stride", 16)
new_attrs["threshold"] = attrs.get_float("threshold", 0.7)
new_attrs["rpn_pre_nms_top_n"] = attrs.get_int("rpn_pre_nms_top_n", 6000)
new_attrs["rpn_post_nms_top_n"] = attrs.get_int("rpn_post_nms_top_n", 300)
new_attrs["rpn_min_size"] = attrs.get_int("rpn_min_size", 16)
new_attrs["iou_loss"] = attrs.get_bool("iou_loss", False)
assert not attrs.get_bool("output_score", False), "proposal doesn't support output score"
return _op.vision.proposal(inputs[0], inputs[1], inputs[2], **new_attrs)
def _mx_box_nms(inputs, attrs):
force_suppress = attrs.get_bool("force_suppress", False)
iou_thresh = attrs.get_float('overlap_thresh', 0.5)
top_k = attrs.get_int('topk', -1)
valid_thresh = attrs.get_float('valid_thresh', 0)
coord_start = attrs.get_int('coord_start', 2)
score_index = attrs.get_int('score_index', 1)
id_index = attrs.get_int('id_index', -1)
in_format = attrs.get_str('in_format', 'corner')
out_format = attrs.get_str('out_format', 'corner')
if in_format != 'corner':
raise tvm.error.OpAttributeInvalid(
'Value of attribute "in_format" must equal "corner" for operator box_nms.')
if out_format != 'corner':
raise tvm.error.OpAttributeInvalid(
'Value of attribute "out_format" must equal "corner" for operator box_nms.')
ret = _op.vision.get_valid_counts(inputs[0], score_threshold=valid_thresh)
nms_out = _op.vision.non_max_suppression(ret[1],
ret[0],
iou_threshold=iou_thresh,
force_suppress=force_suppress,
top_k=top_k,
coord_start=coord_start,
score_index=score_index,
id_index=id_index,
return_indices=False,
invalid_to_bottom=True)
return nms_out
def _mx_l2_normalize(inputs, attrs):
new_attrs = {}
mode = attrs.get_str('mode', 'instance')
if mode != 'channel':
raise tvm.error.OpAttributeInvalid(
'Value of attribute "mode" must equal "channel" for operator l2_normalize.')
new_attrs['eps'] = attrs.get_float('eps', 1e-10)
new_attrs['axis'] = [1]
return _op.nn.l2_normalize(inputs[0], **new_attrs)
def _mx_shape_array(inputs, attrs):
assert len(inputs) == 1
if attrs.get_int("lhs_begin", None) is not None:
raise tvm.error.OpAttributeUnimplemented("shape_array doesn't support lhs_begin")
if attrs.get_int("lhs_end", None) is not None:
raise tvm.error.OpAttributeUnimplemented("shape_array doesn't support lhs_end")
if attrs.get_int("rhs_begin", None) is not None:
raise tvm.error.OpAttributeUnimplemented("shape_array doesn't support rhs_begin")
if attrs.get_int("rhs_end", None) is not None:
raise tvm.error.OpAttributeUnimplemented("shape_array doesn't support rhs_end")
return _op.shape_of(inputs[0], dtype='int64')
def _mx_full(inputs, attrs):
assert len(inputs) == 0
val = attrs.get_float("value")
shape = attrs.get_int_tuple("shape")
dtype = attrs.get_str("dtype", "float32")
return _op.full(_expr.const(val, dtype), shape, dtype)
def _mx_squeeze(inputs, attrs):
assert len(inputs) == 1
axis = attrs.get_int_tuple("axis", None)
return _op.squeeze(inputs[0], axis)
def _mx_broadcast_axis(inputs, attrs):
assert len(inputs) == 1
axis = attrs.get_int_tuple("axis", [])
size = attrs.get_int_tuple("size", [])
assert len(axis) == len(size)
if len(axis) == 0:
return inputs[0]
src_shape = ir_pass.infer_type(inputs[0])._checked_type_.shape
tgt_shape = []
for i, dim in enumerate(src_shape):
if i not in axis:
tgt_shape.append(dim)
else:
assert int(dim) == 1
idx = axis.index(i)
tgt_shape.append(size[idx])
return _op.broadcast_to(inputs[0], tgt_shape)
def _mx_embedding(inputs, _):
assert len(inputs) == 2
indices, weight = inputs
return _op.take(weight, indices.astype('int32'), axis=0)
def _mx_smooth_l1(inputs, attrs):
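    # MXNet's smooth_l1: 0.5 * (scalar * x)^2 where |x| < 1 / scalar^2,
    # and |x| - 0.5 / scalar^2 elsewhere.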
scalar = attrs.get_float("scalar", 1.0)
scalar_sq = scalar * scalar
    mask = _op.less(_op.abs(inputs[0]), _expr.const(1.0 / scalar_sq, dtype='float32'))
return _op.where(mask,
_expr.const(scalar_sq / 2.0, dtype='float32') * inputs[0] * inputs[0],
                     _op.abs(inputs[0]) - _expr.const(0.5 / scalar_sq, dtype='float32'))
def _mx_deformable_convolution(inputs, attrs):
new_attrs = {}
assert attrs.get_bool("no_bias")
new_attrs["kernel_size"] = attrs.get_int_tuple("kernel")
new_attrs["strides"] = attrs.get_int_tuple("stride")
new_attrs["padding"] = attrs.get_int_tuple("pad")
new_attrs["dilation"] = attrs.get_int_tuple("dilate")
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["deformable_groups"] = attrs.get_int("num_deformable_group", 1)
new_attrs["groups"] = attrs.get_int("num_group", 1)
assert attrs.get_str("layout", "NCHW") == "NCHW", "Deformable conv2d only supports NCHW layout"
use_bias = not attrs.get_bool("no_bias", False)
res = _op.nn.deformable_conv2d(inputs[0], inputs[1], inputs[2], **new_attrs)
if use_bias:
assert len(inputs) == 4
res = _op.nn.bias_add(res, inputs[3])
return res
def _mx_argsort(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["axis"] = attrs.get_int("axis", -1)
new_attrs["is_ascend"] = attrs.get_bool("is_ascend", True)
new_attrs["dtype"] = attrs.get_str("dtype", "float32")
return _op.argsort(inputs[0], **new_attrs)
def _mx_rnn_param_concat(inputs, _):
# We don't need to concatenate RNN params because we will unravel the RNN op
return [inputs]
def _mx_rnn_layer(inputs, attrs):
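    # Unroll MXNet's fused RNN op into explicit per-timestep cell updates so it
    # can be expressed with plain dense/split/activation relay ops.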
def _rnn_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias, activation):
i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1)
h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), h2h_bias, axis=-1)
out = _activation_map[activation](i2h + h2h)
return out, [out]
def _gru_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias):
dtype = ir_pass.infer_type(data).checked_type.dtype
i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1)
h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), h2h_bias, axis=-1)
i2h_r, i2h_z, i2h = _op.split(i2h, indices_or_sections=3, axis=1)
h2h_r, h2h_z, h2h = _op.split(h2h, indices_or_sections=3, axis=1)
reset_gate = _activation_map["sigmoid"](i2h_r + h2h_r)
update_gate = _activation_map["sigmoid"](i2h_z + h2h_z)
next_h_tmp = _activation_map["tanh"](reset_gate * h2h + i2h)
next_h = (_expr.const(1, dtype) - update_gate) * next_h_tmp + update_gate * states[0]
return next_h, [next_h]
def _lstm_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias):
i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1)
h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), h2h_bias, axis=-1)
gates = i2h + h2h
slice_gates = _op.split(gates, indices_or_sections=4, axis=1)
in_gate = _activation_map["sigmoid"](slice_gates[0])
forget_gate = _activation_map["sigmoid"](slice_gates[1])
in_transform = _activation_map["tanh"](slice_gates[2])
out_gate = _activation_map["sigmoid"](slice_gates[3])
next_c = forget_gate * states[1] + in_gate * in_transform
next_h = out_gate * _activation_map["tanh"](next_c)
return next_h, [next_h, next_c]
num_layers = attrs.get_int("num_layers", 1)
mode = attrs.get_str("mode")
if mode.startswith("rnn"):
mode, activation = mode.split('_')
assert mode in ["rnn", "gru", "lstm"]
bidirectional = attrs.get_bool("bidirectional", False)
if bidirectional:
raise tvm.error.OpAttributeUnimplemented(
"Bidirectional RNN op is not supported yet")
layout = attrs.get_str("layout", "TNC")
if layout != "TNC":
raise tvm.error.OpAttributeUnimplemented(
"RNN with layout other than TNC is not supported yet")
num_states = 2 if mode == 'lstm' else 1
assert len(inputs) == num_states + 2
seq_data = inputs[0]
concat_weight = inputs[1]
concat_states = inputs[2:]
seq_len = int(ir_pass.infer_type(seq_data).checked_type.shape[0])
assert len(concat_weight) == num_layers * 4
weights = []
bias = []
states = []
for i in range(num_layers):
w = []
b = []
s = []
for j in range(2):
w.append(concat_weight[i*2 + j].args[0])
b.append(concat_weight[num_layers*2 + i*2 + j].args[0])
for state in concat_states:
s.append(_op.take(state, _expr.const(i, "int32"), axis=0))
weights.append(w)
bias.append(b)
states.append(s)
seq_output = []
for t in range(seq_len):
data = _op.take(seq_data, _expr.const(t, "int32"), axis=0)
for l in range(num_layers):
if mode == "rnn":
out, new_states = _rnn_cell(data, states[l], *weights[l], *bias[l], activation)
elif mode == "gru":
out, new_states = _gru_cell(data, states[l], *weights[l], *bias[l])
else: # mode == "lstm"
out, new_states = _lstm_cell(data, states[l], *weights[l], *bias[l])
states[l] = new_states
data = out
seq_output.append(out)
outputs = [_op.stack(seq_output, axis=0)]
for i in range(num_states):
outputs.append(_op.stack([s[i] for s in states], axis=0))
return outputs
# Note: due to attribute conversion constraint
# ops in the identity set must be attribute free
_identity_list = [
"log",
"exp",
"sqrt",
"floor",
"ceil",
"sigmoid",
"tanh",
"negative",
"reshape_like",
"zeros_like",
"ones_like",
"where",
"gather_nd",
]
_convert_map = {
"_copy" : _rename(_op.copy),
"relu" : _rename(_op.nn.relu),
"broadcast_add" : _rename(_op.add),
"broadcast_sub" : _rename(_op.subtract),
"broadcast_mul" : _rename(_op.multiply),
"broadcast_div" : _rename(_op.divide),
"broadcast_mod" : _rename(_op.mod),
"broadcast_maximum" : _rename(_op.maximum),
"broadcast_minimum" : _rename(_op.minimum),
"broadcast_equal" : _mx_compare(_op.equal, _rename),
"broadcast_not_equal" : _mx_compare(_op.not_equal, _rename),
"broadcast_greater" : _mx_compare(_op.greater, _rename),
"broadcast_greater_equal": _mx_compare(_op.greater_equal, _rename),
"broadcast_lesser" : _mx_compare(_op.less, _rename),
"broadcast_lesser_equal" : _mx_compare(_op.less_equal, _rename),
"elemwise_add" : _rename(_op.add),
"elemwise_sub" : _rename(_op.subtract),
"elemwise_mul" : _rename(_op.multiply),
"elemwise_div" : _rename(_op.divide),
"_maximum" : _rename(_op.maximum),
"_minimum" : _rename(_op.minimum),
"flatten" : _rename(_op.nn.batch_flatten),
"Flatten" : _rename(_op.nn.batch_flatten),
# scalar power
"square" : _mx_make_power(2),
"rsqrt" : _mx_make_power(-1/2),
"cbrt" : _mx_make_power(1/3),
"rcbrt" : _mx_make_power(-1/3),
"__pow_scalar__" : _binop_scalar(_op.power),
"_power_scalar" : _binop_scalar(_op.power),
"__rsub_scalar__" : _rbinop_scalar(_op.subtract),
"_rminus_scalar" : _rbinop_scalar(_op.subtract),
"__rdiv_scalar__" : _rbinop_scalar(_op.divide),
"_rdiv_scalar" : _rbinop_scalar(_op.divide),
"__rpow_scalar__" : _rbinop_scalar(_op.power),
# scalar op
"__add_scalar__" : _binop_scalar(_op.add),
"_plus_scalar" : _binop_scalar(_op.add),
"__sub_scalar__" : _binop_scalar(_op.subtract),
"_minus_scalar" : _binop_scalar(_op.subtract),
"__mul_scalar__" : _binop_scalar(_op.multiply),
"_mul_scalar" : _binop_scalar(_op.multiply),
"__div_scalar__" : _binop_scalar(_op.divide),
"_div_scalar" : _binop_scalar(_op.divide),
"log2" : _mx_make_logarithm(2),
"log10" : _mx_make_logarithm(10),
"log1p" : _mx_log1p,
"expm1" : _mx_expm1,
"_equal_scalar" : _mx_compare(_op.equal, _binop_scalar),
"_not_equal_scalar" : _mx_compare(_op.not_equal, _binop_scalar),
"_greater_scalar" : _mx_compare(_op.greater, _binop_scalar),
"_greater_equal_scalar" : _mx_compare(_op.greater_equal, _binop_scalar),
"_lesser_scalar" : _mx_compare(_op.less, _binop_scalar),
"_lesser_equal_scalar" : _mx_compare(_op.less_equal, _binop_scalar),
"_maximum_scalar" : _binop_scalar(_op.maximum),
"_minimum_scalar" : _binop_scalar(_op.minimum),
# reduction ops
"mean" : _reduce(_op.mean),
"max" : _reduce(_op.max),
"min" : _reduce(_op.min),
"sum" : _reduce(_op.sum),
"max_axis" : _reduce(_op.max),
"min_axis" : _reduce(_op.min),
"sum_axis" : _reduce(_op.sum),
"argmax" : _arg_reduce(_op.argmax),
"argmin" : _arg_reduce(_op.argmin),
# init ops
"_ones" : _init_op(_op.ones),
"_zeros" : _init_op(_op.zeros),
# softmax
"softmax" : _softmax_op(_op.nn.softmax),
"log_softmax" : _softmax_op(_op.nn.log_softmax),
"Softmax" : _softmax_op(_op.nn.softmax),
# per op specialization
"Reshape" : _reshape,
"reshape" : _reshape,
"Cast" : _cast,
"clip" : _clip,
"transpose" : _transpose,
"UpSampling" : _upsampling,
"add_n" : _elemwise_sum,
# MXNet specific implementations
"FullyConnected": _mx_fully_connected,
"Activation" : _mx_activations,
"Convolution" : _mx_conv2d,
"Convolution_v1": _mx_conv2d,
"Deconvolution" : _mx_conv2d_transpose,
"Pooling" : _mx_pooling,
"Pooling_v1" : _mx_pooling,
"Dropout" : _mx_dropout,
"BatchNorm" : _mx_batch_norm,
"BatchNorm_v1" : _mx_batch_norm,
"LRN" : _mx_lrn,
"L2Normalization" : _mx_l2_normalize,
"slice" : _mx_slice,
"slice_like" : _mx_slice_like,
"slice_axis" : _mx_slice_axis,
"SliceChannel" : _mx_split,
"split" : _mx_split,
"expand_dims" : _mx_expand_dims,
"Concat" : _mx_concat,
"concat" : _mx_concat,
"stack" : _mx_stack,
"batch_dot" : _mx_batch_dot,
"LeakyReLU" : _mx_leaky_relu,
"_arange" : _mx_arange,
"_full" : _mx_full,
"repeat" : _mx_repeat,
"tile" : _mx_tile,
"take" : _mx_take,
"reverse" : _mx_reverse,
"squeeze" : _mx_squeeze,
"broadcast_axis": _mx_broadcast_axis,
"BlockGrad" : _mx_BlockGrad,
"shape_array" : _mx_shape_array,
"Embedding" : _mx_embedding,
"argsort" : _mx_argsort,
"SoftmaxOutput" : _mx_softmax_output,
"SoftmaxActivation" : _mx_softmax_activation,
"smooth_l1" : _mx_smooth_l1,
# vision
"_contrib_BilinearResize2D" : _mx_resize,
"_contrib_MultiBoxPrior" : _mx_multibox_prior,
"_contrib_MultiBoxDetection" : _mx_multibox_detection,
"_contrib_ROIAlign" : _mx_roi_align,
"ROIPooling" : _mx_roi_pooling,
"_contrib_Proposal" : _mx_proposal,
"_contrib_MultiProposal" : _mx_proposal,
"_contrib_box_nms" : _mx_box_nms,
"_contrib_DeformableConvolution" : _mx_deformable_convolution,
"_contrib_AdaptiveAvgPooling2D" : _mx_adaptive_avg_pooling,
# NLP
"RNN" : _mx_rnn_layer,
"_rnn_param_concat" : _mx_rnn_param_concat,
# List of missing operators that are present in NNVMv1
# TODO(tvm-tvm): support all operators.
#
# "broadcast_to",
# "Crop" : _crop_like,
}
# set identity list
_convert_map.update({k : _rename(k) for k in _identity_list})
def _from_mxnet_impl(symbol, shape_dict, dtype_info):
"""Convert mxnet symbol to compatible relay Function.
Reconstruct a relay Function by traversing the mxnet symbol.
Parameters
----------
symbol : mxnet.sym.Symbol
Incompatible symbol from mxnet.
The op_name and attrs inside are not always compatible.
shape_dict : dict
Known parameter shapes
dtype_info : dict or str.
Known parameter dtypes
    Returns
-------
func : tvm.relay.Function
Converted relay Function
"""
assert symbol is not None
jgraph = json.loads(symbol.tojson())
jnodes = jgraph["nodes"]
node_map = {}
for nid, node in enumerate(jnodes):
children = [node_map[e[0]][e[1]] for e in node["inputs"]]
attrs = StrAttrsDict(node.get("attrs", {}))
node_name = node["name"]
op_name = node["op"]
if op_name == "null":
shape = shape_dict[node_name] if node_name in shape_dict else None
if isinstance(dtype_info, dict):
dtype = dtype_info[node_name] if node_name in dtype_info else "float32"
else:
dtype = dtype_info
node_map[nid] = [_expr.var(node_name, shape=shape, dtype=dtype)]
elif op_name in _convert_map:
res = _convert_map[op_name](children, attrs)
if isinstance(res, (_expr.TupleWrapper, tuple, list)):
pass
elif isinstance(res, _expr.Expr):
res = [res]
else:
raise RuntimeError("unexpected type %s" % type(res))
node_map[nid] = res
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend MXNet.'.format(op_name))
outputs = [node_map[e[0]][e[1]] for e in jgraph["heads"]]
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
func = _expr.Function(ir_pass.free_vars(outputs), outputs)
return func
def _update_shape_dtype(shape, dtype, params):
"""Update shape dtype given params information"""
shape = {} if shape is None else shape
if not params:
return shape, dtype
shape = shape.copy()
shape.update({k : v.shape for k, v in params.items()})
if isinstance(dtype, str):
for k, v in params.items():
if v.dtype != dtype:
raise ValueError(
"%s: dtype not expected %s vs %s" % (k, dtype, v.dtype))
else:
dtype = dtype.copy()
dtype.update({k : str(v.dtype) for k, v in params.items()})
return shape, dtype
def from_mxnet(symbol,
shape=None,
dtype="float32",
arg_params=None,
aux_params=None):
"""Convert from MXNet"s model into compatible relay Function.
Parameters
----------
symbol : mxnet.Symbol or mxnet.gluon.HybridBlock
MXNet symbol.
shape : dict of str to tuple, optional
The input shape to the graph
dtype : str or dict of str to str
The input types to the graph
arg_params : dict of str to mx.NDArray
The argument parameters in mxnet
aux_params : dict of str to mx.NDArray
The auxiliary parameters in mxnet
Returns
-------
sym : tvm.relay.Function
Compatible relay Function
params : dict of str to tvm.NDArray
        The parameter dict to be used by relay
"""
try:
import mxnet as mx
except ImportError as e:
raise ImportError("{}. MXNet is required to parse symbols.".format(e))
if isinstance(symbol, mx.sym.Symbol):
params = {}
arg_params = arg_params if arg_params else {}
aux_params = aux_params if aux_params else {}
for k, v in arg_params.items():
params[k] = _nd.array(v.asnumpy())
for k, v in aux_params.items():
params[k] = _nd.array(v.asnumpy())
shape, dtype = _update_shape_dtype(shape, dtype, params)
sym = _from_mxnet_impl(symbol, shape, dtype)
elif isinstance(symbol, mx.gluon.HybridBlock):
if arg_params is not None or aux_params is not None:
raise ValueError("arg_params and aux_params ae not used when importing HybridBlock")
params = {}
for k, v in symbol.collect_params().items():
params[k] = _nd.array(v.data().asnumpy())
data = mx.sym.Variable("data")
sym = symbol(data)
if isinstance(sym, (list, tuple)):
sym = mx.sym.Group(sym)
shape, dtype = _update_shape_dtype(shape, dtype, params)
sym = _from_mxnet_impl(sym, shape, dtype)
elif isinstance(symbol, mx.gluon.Block):
raise NotImplementedError("Only Hybrid Blocks are supported now.")
else:
msg = "mxnet.Symbol or gluon.HybridBlock expected, got {}".format(type(symbol))
raise ValueError(msg)
return sym, params
|
py | 1a4d530e75439ffcc3e4e53ebbaba9a1700cfda8 | # Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
logger = logging.getLogger(__name__)
import pdo.submitter.sawtooth.sawtooth_submitter as sw_sub
import pdo.submitter.ccf.ccf_submitter as ccf_sub
# -----------------------------------------------------------------
# Create a new Submitter
# -----------------------------------------------------------------
def create_submitter(ledger_config, *args, **kwargs):
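    # Dispatch on the configured ledger type, falling back to the
    # PDO_LEDGER_TYPE environment variable when the config omits it.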
ledger_type = ledger_config.get('LedgerType', os.environ.get('PDO_LEDGER_TYPE'))
if ledger_type == 'sawtooth':
return sw_sub.SawtoothSubmitter(ledger_config, *args, **kwargs)
elif ledger_type == 'ccf':
return ccf_sub.CCFSubmitter(ledger_config, *args, **kwargs)
else:
logger.error("Invalid Ledger Type. Must be either 'sawtooth' or 'ccf'")
raise Exception("Invalid Ledger Type. Must be either 'sawtooth' or 'ccf'") |
py | 1a4d53c9a41608f51bcd82a57034221ae18e42f6 | from PIL import Image, ImageFilter
import time
class MyGaussianBlur(ImageFilter.GaussianBlur):
name = "GaussianBlur"
    def __init__(self, size, radius=2, bounds=None):
super().__init__()
self.radius = radius
self.bounds = bounds
self.size=size
# print(size)
    def filter(self, image):
        # Blur everything outside self.bounds: split the surrounding area into
        # four strips (top, left, bottom, right) and blur each one.
        if self.bounds:
bounds1 = (0, 0, self.size[0], self.bounds[1])
# print(bounds1)
clips = image.crop(bounds1).gaussian_blur(self.radius)
image.paste(clips, bounds1)
bounds2 = (0, self.bounds[1], self.bounds[0], self.bounds[3])
clips = image.crop(bounds2).gaussian_blur(self.radius)
image.paste(clips, bounds2)
bounds3 = (0, self.bounds[3], self.size[0], self.size[1])
clips = image.crop(bounds3).gaussian_blur(self.radius)
image.paste(clips, bounds3)
bounds4 = (self.bounds[2], self.bounds[1], self.size[0], self.bounds[3])
clips = image.crop(bounds4).gaussian_blur(self.radius)
image.paste(clips, bounds4)
return image
else:
return image.gaussian_blur(self.radius)
st = time.process_time()
image = Image.open('p.jpg')
bounds = (150, 130, 280, 250)
image = image.filter(MyGaussianBlur(image.size, radius=2, bounds=bounds))
# print(1)
# image = image.filter(MyGaussianBlur(radius=2, bounds=bounds2))
# image = image.filter(MyGaussianBlur(radius=2, bounds=bounds3))
# image = image.filter(MyGaussianBlur(radius=2, bounds=bounds4))
print(time.process_time() - st)
image.show()
|
py | 1a4d5492e5fd8ae92d3e172eb30ce6bedde40344 | # refining matrices in Python
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
somapar = maior = somacoluna = 0
for l in range(0, 3):
    for c in range(0, 3):
        matriz[l][c] = int(input(f'Enter a value for [{l}, {c}]: '))
print('-=' * 25)
for l in range(0, 3):
    for c in range(0, 3):
        print(f'[{matriz[l][c]:^5}]', end='')
        if matriz[l][c] % 2 == 0:
            somapar += matriz[l][c]
    print()
print('-=' * 25)
print(f'The sum of the even values is {somapar}')
for l in range(0, 3):
    somacoluna += matriz[l][2]
print(f'The sum of the values in the third column is {somacoluna}')
for c in range(0, 3):
    if c == 0:
        maior = matriz[1][c]
    elif matriz[1][c] > maior:
        maior = matriz[1][c]
print(f'The largest value in the second row is {maior}')
|
py | 1a4d557f79f732c222f1b2c12511a7b26ec29422 | # -*- coding: utf-8 -*-
"""Train a CapsNet Network on the MNIST dataset.
See the corresponding paper for explanations of the network
@inproceedings{sabour2017dynamic,
title={Dynamic routing between capsules},
author={Sabour, Sara and Frosst, Nicholas and Hinton, Geoffrey E},
booktitle={Advances in Neural Information Processing Systems},
pages={3859--3869},
year={2017}
}
The network trains to an accuracy of >99% in a few epochs. Most of the epochs are needed to train the reconstruction network.
The implementation is based on the following code (thanks for the great and inspiring implementation!):
Author: Xifeng Guo, E-mail: `[email protected]`, Github: `https://github.com/XifengGuo/CapsNet-Keras`
with changes to incorporate the anysma package.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import argparse
import numpy as np
from PIL import Image
from keras import backend as K
from keras import layers, models, optimizers
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras import callbacks
from anysma import Capsule
from anysma.modules import InputModule
from anysma.modules.final import DynamicRouting
from anysma.modules.transformations import LinearTransformation
from anysma.utils.normalization_funcs import dynamic_routing_squash as squash
from anysma.callbacks import TensorBoard
from anysma.losses import margin_loss
from anysma.datasets import mnist
K.set_image_data_format('channels_last')
class Mask(layers.Layer):
"""
Mask all vectors except the best matching one.
"""
def __init__(self, **kwargs):
super(Mask, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
signals, prob = inputs
mask = K.one_hot(indices=K.argmax(prob, 1), num_classes=prob.get_shape().as_list()[1])
masked = K.batch_flatten(signals * K.expand_dims(mask, -1))
return masked
def compute_output_shape(self, input_shape):
return tuple([None, input_shape[0][1] * input_shape[0][2]])
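# Shape sketch (illustrative): with batch size B and C capsules of dimension D,
# `signals` is (B, C, D) and `prob` is (B, C); argmax + one_hot builds a (B, C)
# mask that keeps only the winning capsule's vector, and batch_flatten returns
# (B, C*D) with every other capsule zeroed out.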
def combine_images(generated_images, height=None, width=None):
num = generated_images.shape[0]
if width is None and height is None:
width = int(math.sqrt(num))
height = int(math.ceil(float(num)/width))
elif width is not None and height is None: # height not given
height = int(math.ceil(float(num)/width))
elif height is not None and width is None: # width not given
width = int(math.ceil(float(num)/height))
shape = generated_images.shape[1:3]
image = np.zeros((height*shape[0], width*shape[1]),
dtype=generated_images.dtype)
for index, img in enumerate(generated_images):
i = int(index/width)
j = index % width
image[i*shape[0]:(i+1)*shape[0], j*shape[1]:(j+1)*shape[1]] = \
img[:, :, 0]
return image
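# Example (illustrative): for 50 reconstructed 28x28x1 digits with no explicit
# width/height, width = int(sqrt(50)) = 7 and height = ceil(50/7) = 8, so the
# returned tile image has shape (8*28, 7*28) = (224, 196).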
def CapsNet(input_shape, n_class, routings):
""" Initialize the CapsNet"""
x = layers.Input(shape=input_shape)
# Layer 1: Just a conventional Conv2D layer
digitcaps = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)
# Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
primary_caps = Capsule(name='PrimaryCaps')
primary_caps.add(layers.Conv2D(filters=8 * 32, kernel_size=9, strides=2, padding='valid', name='primarycap_conv2d'))
primary_caps.add(layers.Reshape(target_shape=[-1, 8], name='primarycap_reshape'))
primary_caps.add(layers.Lambda(squash, name='primarycap_squash'))
digitcaps = primary_caps(digitcaps)
# Layer 3: Capsule layer. Routing algorithm works here.
digit_caps = Capsule(name='digitcaps', prototype_distribution=(1, n_class))
digit_caps.add(InputModule(signal_shape=None, dissimilarity_initializer='zeros', trainable=False))
digit_caps.add(LinearTransformation(output_dim=16, scope='local'))
digit_caps.add(DynamicRouting(iterations=routings, name='capsnet'))
digitcaps = digit_caps(digitcaps)
# Decoder network.
y = layers.Input(shape=(n_class,))
masked_by_y = Mask()([digitcaps[0], y]) # The true label is used to mask the output of capsule layer. For training
masked = Mask()([digitcaps[0], digitcaps[2]]) # Mask using the capsule with maximal length. For prediction
# Shared Decoder model in training and prediction
decoder = models.Sequential(name='decoder')
decoder.add(layers.Dense(512, activation='relu', input_dim=16 * n_class))
decoder.add(layers.Dense(1024, activation='relu'))
decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))
# Models for training and evaluation (prediction)
train_model = models.Model([x, y], [digitcaps[2], decoder(masked_by_y)])
eval_model = models.Model(x, [digitcaps[2], decoder(masked)])
# manipulate model
noise = layers.Input(shape=(n_class, 16))
noised_digitcaps = layers.Add()([digitcaps[0], noise])
masked_noised_y = Mask()([noised_digitcaps, y])
manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
return train_model, eval_model, manipulate_model
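# Shape walkthrough (derived from the layer parameters above, for the default
# 28x28x1 MNIST input):
#   conv1, 9x9 kernel, stride 1, valid:             (20, 20, 256)
#   primarycap_conv2d, 9x9 kernel, stride 2, valid: (6, 6, 256) -> 6*6*32 = 1152 capsules of dim 8
#   digitcaps after dynamic routing:                n_class capsules of dim 16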
def train(model, data, args):
"""
Training
:param model: the model
:param data: a tuple containing training and testing data, like `((x_train, y_train), (x_test, y_test))`
:param args: arguments
:return: The trained model
"""
# unpacking the data
(x_train, y_train), (x_test, y_test) = data
# callbacks
log = callbacks.CSVLogger(args.save_dir + '/log.csv')
tb = TensorBoard(log_dir=args.save_dir + '/tensorboard-logs',
batch_size=args.batch_size, histogram_freq=int(args.debug))
checkpoint = callbacks.ModelCheckpoint(args.save_dir + '/weights-{epoch:02d}.h5', monitor='val_capsnet_acc',
save_best_only=True, save_weights_only=True, verbose=1)
# compile the model
model.compile(optimizer=optimizers.Adam(lr=args.lr),
loss=[margin_loss, 'mse'],
loss_weights=[1., args.lam_recon],
metrics={'capsnet': 'accuracy'})
def train_generator(x, y, batch_size):
train_datagen = ImageDataGenerator(width_shift_range=2,
height_shift_range=2)
generator = train_datagen.flow(x, y, batch_size=batch_size)
        while True:
x_batch, y_batch = generator.next()
yield ([x_batch, y_batch], [y_batch, x_batch])
# Training with data augmentation. If shift_fraction=0., also no augmentation.
model.fit_generator(generator=train_generator(x_train, y_train, args.batch_size),
steps_per_epoch=int(y_train.shape[0] / args.batch_size),
epochs=args.epochs,
validation_data=[[x_test, y_test], [y_test, x_test]],
callbacks=[log, tb, checkpoint])
model.save_weights(args.save_dir + '/trained_model.h5')
print('Trained model saved to \'%s/trained_model.h5\'' % args.save_dir)
return model
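# For reference, the margin loss from the paper (the imported anysma
# `margin_loss` is assumed to implement this form):
#   L_k = T_k * max(0, m_plus - ||v_k||)^2
#         + lambda * (1 - T_k) * max(0, ||v_k|| - m_minus)^2
# with m_plus = 0.9, m_minus = 0.1, lambda = 0.5, summed over the digit capsules.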
def test(model, data, args):
x_test, y_test = data
y_pred, x_recon = model.predict(x_test, batch_size=args.batch_size)
print('-' * 30 + 'Begin: test' + '-' * 30)
print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1)) / y_test.shape[0])
img = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
image = img * 255
Image.fromarray(image.astype(np.uint8)).save(args.save_dir + "/real_and_recon.png")
print()
print('Reconstructed images are saved to %s/real_and_recon.png' % args.save_dir)
print('-' * 30 + 'End: test' + '-' * 30)
def manipulate_latent(model, data, args):
print('-' * 30 + 'Begin: manipulate' + '-' * 30)
x_test, y_test = data
index = np.argmax(y_test, 1) == args.digit
number = np.random.randint(low=0, high=sum(index) - 1)
x, y = x_test[index][number], y_test[index][number]
x, y = np.expand_dims(x, 0), np.expand_dims(y, 0)
noise = np.zeros([1, 10, 16])
x_recons = []
for dim in range(16):
for r in [-0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.05, 0.1, 0.15, 0.2, 0.25]:
tmp = np.copy(noise)
tmp[:, :, dim] = r
x_recon = model.predict([x, y, tmp])
x_recons.append(x_recon)
x_recons = np.concatenate(x_recons)
img = combine_images(x_recons, height=16)
image = img * 255
Image.fromarray(image.astype(np.uint8)).save(args.save_dir + '/manipulate-%d.png' % args.digit)
print('manipulated result saved to %s/manipulate-%d.png' % (args.save_dir, args.digit))
print('-' * 30 + 'End: manipulate' + '-' * 30)
def load_mnist():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.
y_train = to_categorical(y_train.astype('float32'))
y_test = to_categorical(y_test.astype('float32'))
return (x_train, y_train), (x_test, y_test)
if __name__ == "__main__":
# setting the hyper parameters
parser = argparse.ArgumentParser(description="Capsule Network on MNIST.")
parser.add_argument('--epochs', default=50, type=int)
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--lr', default=0.001, type=float,
help="Initial learning rate.")
parser.add_argument('--lam_recon', default=0.392, type=float,
help="The coefficient for the loss of decoder.")
parser.add_argument('-r', '--routings', default=3, type=int,
help="Number of iterations used in routing algorithm. should > 0.")
parser.add_argument('--debug', action='store_true',
help="Save weights by TensorBoard.")
parser.add_argument('--save_dir', default='./output')
parser.add_argument('-t', '--testing', action='store_true',
help="Test the trained model on testing dataset.")
parser.add_argument('--digit', default=5, type=int,
help="Digit to manipulate during test.")
parser.add_argument('-w', '--weights', default=None,
help="The path of the saved weights. Should be specified when testing.")
args = parser.parse_args()
print(args)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
# load data
(x_train, y_train), (x_test, y_test) = load_mnist()
# define model
model, eval_model, manipulate_model = CapsNet(input_shape=x_train.shape[1:],
n_class=len(np.unique(np.argmax(y_train, 1))),
routings=args.routings)
model.summary(line_length=200, positions=[.33, .6, .67, 1.])
# train or test
if args.weights is not None: # init the model weights with provided one
model.load_weights(args.weights)
if not args.testing:
train(model=model, data=((x_train, y_train), (x_test, y_test)), args=args)
else: # as long as weights are given, will run testing
if args.weights is None:
            print('No weights are provided. Will test using randomly initialized weights.')
manipulate_latent(manipulate_model, (x_test, y_test), args)
test(model=eval_model, data=(x_test, y_test), args=args)
|
py | 1a4d56c15fd630f8908f4a959db0f14bc2f6a978 | ## @file test_GitDependency.py
# Unit test suite for the GitDependency class.
#
##
# Copyright (c) 2019, Microsoft Corporation
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
import os
import unittest
import logging
import shutil
import stat
import tempfile
from MuEnvironment import EnvironmentDescriptorFiles as EDF
from MuEnvironment.GitDependency import GitDependency
from MuEnvironment import ShellEnvironment
import copy
test_dir = None
uptodate_version = "7fd1a60b01f91b314f59955a4e4d4e80d8edf11d"
behind_one_version = "762941318ee16e59dabbacb1b4049eec22f0d303"
invalid_version = "762941318ee16e59d123456789049eec22f0d303"
hw_json_template = '''
{
"scope": "global",
"type": "git",
"name": "HelloWorld",
"source": "https://github.com/octocat/Hello-World.git",
"version": "%s",
"flags": []
}
'''
def prep_workspace():
global test_dir
# if test temp dir doesn't exist
if test_dir is None or not os.path.isdir(test_dir):
test_dir = tempfile.mkdtemp()
logging.debug("temp dir is: %s" % test_dir)
else:
clean_workspace()
test_dir = tempfile.mkdtemp()
def clean_workspace():
global test_dir
if test_dir is None:
return
if os.path.isdir(test_dir):
        def dorw(action, name, exc):
            # onerror handler: clear the read-only bit, then retry the removal
            os.chmod(name, stat.S_IWRITE)
if(os.path.isdir(name)):
os.rmdir(name)
else:
os.remove(name)
shutil.rmtree(test_dir, onerror=dorw)
test_dir = None
class TestGitDependency(unittest.TestCase):
def setUp(self):
prep_workspace()
@classmethod
def setUpClass(cls):
logger = logging.getLogger('')
logger.addHandler(logging.NullHandler())
unittest.installHandler()
@classmethod
def tearDownClass(cls):
clean_workspace()
# good case
def test_fetch_verify_good_repo_at_top_of_tree(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % uptodate_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
self.assertTrue(ext_dep.verify(logversion=False))
self.assertEqual(ext_dep.version, uptodate_version)
def test_fetch_verify_good_repo_at_not_top_of_tree(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % behind_one_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
self.assertTrue(ext_dep.verify(logversion=False))
self.assertEqual(ext_dep.version, behind_one_version)
def test_fetch_verify_non_existant_repo_commit_hash(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % invalid_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
self.assertEqual(ext_dep.version, invalid_version)
self.assertFalse(ext_dep.verify(logversion=False), "Should not verify")
def test_verify_no_directory(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % invalid_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
self.assertFalse(ext_dep.verify(logversion=False))
def test_verify_empty_repo_dir(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % invalid_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
os.makedirs(ext_dep._local_repo_root_path, exist_ok=True)
self.assertFalse(ext_dep.verify(logversion=False))
def test_verify_invalid_git_repo(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % invalid_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
os.makedirs(ext_dep._local_repo_root_path, exist_ok=True)
with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), 'a') as myfile:
myfile.write("Test code\n")
self.assertFalse(ext_dep.verify(logversion=False))
def test_verify_dirty_git_repo(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % uptodate_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
# now write a new file
with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), 'a') as myfile:
myfile.write("Test code to make repo dirty\n")
self.assertFalse(ext_dep.verify(logversion=False))
def test_verify_up_to_date(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % uptodate_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
self.assertTrue(ext_dep.verify(logversion=False))
def test_verify_down_level_repo(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % behind_one_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
self.assertTrue(ext_dep.verify(logversion=False), "Confirm valid ext_dep at one commit behind")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % uptodate_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
self.assertFalse(ext_dep.verify(logversion=False), "Confirm downlevel repo fails to verify")
ext_dep.fetch()
self.assertTrue(ext_dep.verify(logversion=False), "Confirm repo can be updated")
# CLEAN TESTS
def test_clean_no_directory(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % uptodate_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
        self.assertFalse(os.path.isdir(ext_dep.contents_dir), "Confirm no ext_dep directory exists before cleaning")
ext_dep.clean()
self.assertFalse(os.path.isdir(ext_dep.contents_dir))
def test_clean_dir_but_not_git_repo(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % invalid_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
os.makedirs(ext_dep._local_repo_root_path, exist_ok=True)
with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), 'a') as myfile:
myfile.write("Test code\n")
ext_dep.clean()
self.assertFalse(os.path.isdir(ext_dep.contents_dir))
def test_clean_dirty_git_repo(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % uptodate_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
self.assertTrue(ext_dep.verify(), "Confirm repo is valid")
# now write a new file
with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), 'a') as myfile:
myfile.write("Test code to make repo dirty\n")
self.assertFalse(ext_dep.verify(), "Confirm repo is dirty")
ext_dep.clean()
self.assertFalse(os.path.isdir(ext_dep.contents_dir))
def test_clean_clean_repo(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % uptodate_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
self.assertTrue(ext_dep.verify(), "Confirm repo is valid and clean")
ext_dep.clean()
self.assertFalse(os.path.isdir(ext_dep.contents_dir))
class TestGitDependencyUrlPatching(unittest.TestCase):
TEST_DESCRIPTOR = {
"descriptor_file": os.path.abspath(__file__),
"scope": "global",
"type": "git",
"name": "HelloWorld",
"source": "https://github.com/octocat/Hello-World.git",
"version": "7fd1a60b01f91b314f59955a4e4d4e80d8edf11d",
"flags": []
}
def tearDown(self):
env = ShellEnvironment.GetEnvironment()
env.restore_checkpoint(TestGitDependencyUrlPatching.env_checkpoint)
@classmethod
def setUpClass(cls):
env = ShellEnvironment.GetEnvironment()
cls.env_checkpoint = env.checkpoint()
#
# URL FORMATTING TESTS
#
def test_url_should_not_be_modified_without_env(self):
my_test_descriptor = copy.copy(TestGitDependencyUrlPatching.TEST_DESCRIPTOR)
# Add the indicator for patching.
my_test_descriptor['url_creds_var'] = 'test_creds_var'
# Initialize the GitDependency object.
gdep = GitDependency(my_test_descriptor)
# Assert that the URL is identical.
self.assertEqual(gdep.source, my_test_descriptor['source'])
def test_url_should_not_be_modified_without_descriptor_field(self):
my_test_descriptor = copy.copy(TestGitDependencyUrlPatching.TEST_DESCRIPTOR)
env = ShellEnvironment.GetEnvironment()
# Add the var to the environment.
env.set_shell_var('test_creds_var', 'my_stuff')
# Initialize the GitDependency object.
gdep = GitDependency(my_test_descriptor)
# Assert that the URL is identical.
self.assertEqual(gdep.source, my_test_descriptor['source'])
def test_url_should_be_modified_if_creds_are_indicated_and_supplied(self):
my_test_descriptor = copy.copy(TestGitDependencyUrlPatching.TEST_DESCRIPTOR)
# Add the indicator for patching.
my_test_descriptor['url_creds_var'] = 'test_creds_var'
env = ShellEnvironment.GetEnvironment()
# Add the var to the environment.
env.set_shell_var('test_creds_var', 'my_stuff')
# Initialize the GitDependency object.
gdep = GitDependency(my_test_descriptor)
        # Assert that the URL now carries the credentials.
        self.assertEqual(gdep.source, "https://my_stuff@github.com/octocat/Hello-World.git")
if __name__ == '__main__':
unittest.main()
|
py | 1a4d56debc0fb04ba84f6ce23e156d382a3d75ce | # coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
# noqa: E241
from __future__ import print_function
from functools import wraps
import glob
import gzip
import itertools
import json
import os
import pipes
import re
import select
import shlex
import shutil
import struct
import subprocess
import sys
import time
import tempfile
import unittest
import uuid
from subprocess import PIPE, STDOUT
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner.py other')
from tools.shared import run_process, try_delete
from tools.shared import EMCC, EMXX, EMAR, EMRANLIB, PYTHON, FILE_PACKAGER, WINDOWS, LLVM_ROOT, EM_BUILD_VERBOSE
from tools.shared import CLANG_CC, CLANG_CXX, LLVM_AR, LLVM_DWARFDUMP
from tools.shared import NODE_JS, SPIDERMONKEY_ENGINE, JS_ENGINES, WASM_ENGINES, V8_ENGINE
from runner import RunnerCore, path_from_root, no_wasm_backend, no_fastcomp, is_slow_test, ensure_dir
from runner import needs_dlfcn, env_modify, no_windows, requires_native_clang, chdir, with_env_modify, create_test_file, parameterized
from jsrun import run_js
from tools import shared, building
import jsrun
import clang_native
import tools.line_endings
import tools.js_optimizer
import tools.tempfiles
import tools.duplicate_function_eliminator
scons_path = shared.which('scons')
emmake = shared.bat_suffix(path_from_root('emmake'))
emcmake = shared.bat_suffix(path_from_root('emcmake'))
emconfigure = shared.bat_suffix(path_from_root('emconfigure'))
emconfig = shared.bat_suffix(path_from_root('em-config'))
emsize = shared.bat_suffix(path_from_root('emsize'))
class temp_directory(object):
def __init__(self, dirname):
self.dir = dirname
def __enter__(self):
self.directory = tempfile.mkdtemp(prefix='emtest_temp_', dir=self.dir)
self.prev_cwd = os.getcwd()
os.chdir(self.directory)
print('temp_directory: ' + self.directory)
return self.directory
def __exit__(self, type, value, traceback):
os.chdir(self.prev_cwd)
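# Illustrative usage sketch (for documentation only):
#   with temp_directory(self.get_dir()) as tempdirname:
#       ...  # cwd is now tempdirname; the previous cwd is restored on exit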
def uses_canonical_tmp(func):
"""Decorator that signals the use of the canonical temp by a test method.
This decorator takes care of cleaning the directory after the
test to satisfy the leak detector.
"""
@wraps(func)
def decorated(self):
# Before running the test completely remove the canonical_tmp
if os.path.exists(self.canonical_temp_dir):
shutil.rmtree(self.canonical_temp_dir)
try:
func(self)
finally:
# Make sure the test isn't lying about the fact that it uses
# canonical_tmp
self.assertTrue(os.path.exists(self.canonical_temp_dir))
# Remove the temp dir in a try-finally, as otherwise if the
# test fails we would not clean it up, and if leak detection
# is set we will show that error instead of the actual one.
shutil.rmtree(self.canonical_temp_dir)
return decorated
def is_python3_version_supported():
"""Retuns True if the installed python3 version is supported by emscripten.
Note: Emscripten requires python3.5 or above since python3.4 and below do not
support circular dependencies."""
try:
print('is_python3_version_supported')
python3 = shared.which('python3')
print(' python3 =', python3)
output = run_process([python3, '--version'], stdout=PIPE).stdout
print(' output =', output, output.split())
output = output.split()[1]
    # ignore the final component, which can contain non-integers (e.g. 'rc1')
version = [int(x) for x in output.split('.')[:2]]
return version >= [3, 5]
except Exception:
# If anything goes wrong (no python3, unexpected output format), then we do
# not support this python3
return False
def encode_leb(number):
  # TODO(sbc): handle larger numbers
  # A single LEB128 byte can only encode values below 128 (the high bit is the
  # continuation flag), so pack as a little-endian i32 and keep only the first
  # (little end) byte.
  assert(number < 128)
  return struct.pack('<i', number)[:1]
def get_fastcomp_src_dir():
"""Locate fastcomp source tree by searching realtive to LLVM_ROOT."""
d = LLVM_ROOT
key_file = 'readme-emscripten-fastcomp.txt'
while d != os.path.dirname(d):
d = os.path.abspath(d)
# when the build directory lives below the source directory
if os.path.exists(os.path.join(d, key_file)):
return d
# when the build directory lives alongside the source directory
elif os.path.exists(os.path.join(d, 'src', key_file)):
return os.path.join(d, 'src')
else:
d = os.path.dirname(d)
return None
def parse_wasm(filename):
wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), filename], stdout=PIPE).stdout
imports = []
exports = []
funcs = []
for line in wat.splitlines():
line = line.strip()
if line.startswith('(import '):
line = line.strip('()')
name = line.split()[2].strip('"')
imports.append(name)
if line.startswith('(export '):
line = line.strip('()')
name = line.split()[1].strip('"')
exports.append(name)
if line.startswith('(func '):
line = line.strip('()')
name = line.split()[1].strip('"')
funcs.append(name)
return imports, exports, funcs
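# Illustrative sketch of the wat lines parse_wasm() matches (hypothetical
# disassembly excerpt, for documentation only):
#   (import "env" "memory" (memory $0 256))  -> imports += ['memory']
#   (export "main" (func $main))             -> exports += ['main']
#   (func $main (result i32) ...)            -> funcs   += ['$main']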
class other(RunnerCore):
# Utility to run a simple test in this suite. This receives a directory which
# should contain a test.cpp and test.out files, compiles the cpp, and runs it
# to verify the output, with optional compile and run arguments.
# TODO: use in more places
def do_other_test(self, dirname, emcc_args=[], run_args=[]):
shutil.copyfile(path_from_root('tests', dirname, 'test.cpp'), 'test.cpp')
run_process([EMCC, 'test.cpp'] + emcc_args)
expected = open(path_from_root('tests', dirname, 'test.out')).read()
seen = run_js('a.out.js', args=run_args, stderr=PIPE, full_output=True) + '\n'
self.assertContained(expected, seen)
# Another utility to run a test in this suite. This receives a source file
# to compile, with optional compiler and execution flags.
# Output can be checked by seeing if literals are contained, and that a list
# of regexes match. The return code can also be checked.
def do_smart_test(self, source, literals=[], regexes=[],
emcc_args=[], run_args=[], assert_returncode=0):
run_process([EMCC, source] + emcc_args)
seen = run_js('a.out.js', args=run_args, stderr=PIPE, full_output=True,
assert_returncode=assert_returncode) + '\n'
for literal in literals:
self.assertContained([literal], seen)
for regex in regexes:
self.assertTrue(re.search(regex, seen), 'Expected regex "%s" to match on:\n%s' % (regex, seen))
def run_on_pty(self, cmd):
master, slave = os.openpty()
output = []
try:
env = os.environ.copy()
env['TERM'] = 'xterm-color'
proc = subprocess.Popen(cmd, stdout=slave, stderr=slave, env=env)
while proc.poll() is None:
r, w, x = select.select([master], [], [], 1)
if r:
output.append(os.read(master, 1024))
return (proc.returncode, b''.join(output))
finally:
os.close(master)
os.close(slave)
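  # Illustrative usage sketch (hypothetical call, for documentation only):
  #   returncode, output = self.run_on_pty([EMCC, '--version'])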
def test_emcc_v(self):
for compiler in [EMCC, EMXX]:
# -v, without input files
proc = run_process([compiler, '-v'], stdout=PIPE, stderr=PIPE)
self.assertContained('clang version %s' % shared.expected_llvm_version(), proc.stderr)
self.assertContained('GNU', proc.stderr)
self.assertNotContained('this is dangerous', proc.stdout)
self.assertNotContained('this is dangerous', proc.stderr)
def test_emcc_generate_config(self):
for compiler in [EMCC, EMXX]:
config_path = './emscripten_config'
run_process([compiler, '--generate-config', config_path])
self.assertExists(config_path, 'A config file should have been created at %s' % config_path)
config_contents = open(config_path).read()
self.assertContained('EMSCRIPTEN_ROOT', config_contents)
self.assertContained('LLVM_ROOT', config_contents)
os.remove(config_path)
def test_emcc_output_mjs(self):
run_process([EMCC, '-o', 'hello_world.mjs', path_from_root('tests', 'hello_world.c')])
with open('hello_world.mjs') as f:
output = f.read()
self.assertContained('export default Module;', output)
# TODO(sbc): Test that this is actually runnable. We currently don't have
# any tests for EXPORT_ES6 but once we do this should be enabled.
# self.assertContained('hello, world!', run_js('hello_world.mjs'))
def test_emcc_out_file(self):
# Verify that "-ofile" works in addition to "-o" "file"
run_process([EMCC, '-c', '-ofoo.o', path_from_root('tests', 'hello_world.c')])
self.assertExists('foo.o')
run_process([EMCC, '-ofoo.js', 'foo.o'])
self.assertExists('foo.js')
@parameterized({
'c': [EMCC, '.c'],
'cxx': [EMXX, '.cpp']})
def test_emcc_basics(self, compiler, suffix):
# emcc src.cpp ==> writes a.out.js and a.out.wasm
run_process([compiler, path_from_root('tests', 'hello_world' + suffix)])
self.assertExists('a.out.js')
self.assertExists('a.out.wasm')
self.assertContained('hello, world!', run_js('a.out.js'))
# --version
output = run_process([compiler, '--version'], stdout=PIPE, stderr=PIPE)
output = output.stdout.replace('\r', '')
self.assertContained('emcc (Emscripten gcc/clang-like replacement)', output)
self.assertContained('''Copyright (C) 2014 the Emscripten authors (see AUTHORS.txt)
This is free and open source software under the MIT license.
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
''', output)
# --help
output = run_process([compiler, '--help'], stdout=PIPE, stderr=PIPE)
self.assertContained('Display this information', output.stdout)
self.assertContained('Most clang options will work', output.stdout)
# -dumpmachine
output = run_process([compiler, '-dumpmachine'], stdout=PIPE, stderr=PIPE)
self.assertContained(shared.get_llvm_target(), output.stdout)
# -dumpversion
output = run_process([compiler, '-dumpversion'], stdout=PIPE, stderr=PIPE)
self.assertEqual(shared.EMSCRIPTEN_VERSION, output.stdout.strip())
# properly report source code errors, and stop there
self.clear()
stderr = self.expect_fail([compiler, path_from_root('tests', 'hello_world_error' + suffix)])
self.assertNotContained('IOError', stderr) # no python stack
self.assertNotContained('Traceback', stderr) # no python stack
self.assertContained('error: invalid preprocessing directive', stderr)
self.assertContained(["error: use of undeclared identifier 'cheez", "error: unknown type name 'cheez'"], stderr)
self.assertContained('errors generated.', stderr.splitlines()[-2])
@parameterized({
'c': [EMCC, '.c'],
'cxx': [EMXX, '.cpp']})
def test_emcc_2(self, compiler, suffix):
# emcc src.cpp -c and emcc src.cpp -o src.[o|bc] ==> should give a .bc file
for args in [['-c'], ['-o', 'src.o'], ['-o', 'src.bc'], ['-o', 'src.so'], ['-O1', '-c', '-o', '/dev/null'], ['-O1', '-o', '/dev/null']]:
print('args:', args)
if '/dev/null' in args and WINDOWS:
print('skip because windows')
continue
target = args[1] if len(args) == 2 else 'hello_world.o'
self.clear()
run_process([compiler, path_from_root('tests', 'hello_world' + suffix)] + args)
if args[-1] == '/dev/null':
print('(no output)')
continue
syms = building.llvm_nm(target)
self.assertIn('main', syms.defs)
if self.is_wasm_backend():
# wasm backend will also have '__original_main' or such
self.assertEqual(len(syms.defs), 2)
else:
self.assertEqual(len(syms.defs), 1)
if target == 'js': # make sure emcc can recognize the target as a bitcode file
shutil.move(target, target + '.bc')
target += '.bc'
run_process([compiler, target, '-o', target + '.js'])
self.assertContained('hello, world!', run_js(target + '.js'))
@parameterized({
'c': [EMCC, '.c'],
'cxx': [EMXX, '.cpp']})
def test_emcc_3(self, compiler, suffix):
# handle singleton archives
run_process([compiler, '-c', path_from_root('tests', 'hello_world' + suffix), '-o', 'a.o'])
run_process([LLVM_AR, 'r', 'a.a', 'a.o'], stdout=PIPE, stderr=PIPE)
run_process([compiler, 'a.a'])
self.assertContained('hello, world!', run_js('a.out.js'))
if not self.is_wasm_backend():
# emcc src.ll ==> generates .js
self.clear()
run_process([compiler, path_from_root('tests', 'hello_world.ll')])
self.assertContained('hello, world!', run_js('a.out.js'))
# emcc [..] -o [path] ==> should work with absolute paths
for path in [os.path.abspath(os.path.join('..', 'file1.js')), os.path.join('b_dir', 'file2.js')]:
print(path)
os.chdir(self.get_dir())
self.clear()
print(os.listdir(os.getcwd()))
ensure_dir(os.path.join('a_dir', 'b_dir'))
os.chdir('a_dir')
# use single file so we don't have more files to clean up
run_process([compiler, path_from_root('tests', 'hello_world' + suffix), '-o', path, '-s', 'SINGLE_FILE=1'])
last = os.getcwd()
os.chdir(os.path.dirname(path))
self.assertContained('hello, world!', run_js(os.path.basename(path)))
os.chdir(last)
try_delete(path)
@parameterized({
'c': [EMCC],
'cxx': [EMXX]})
def test_emcc_4(self, compiler):
# Optimization: emcc src.cpp -o something.js [-Ox]. -O0 is the same as not specifying any optimization setting
for params, opt_level, bc_params, closure, has_malloc in [ # bc params are used after compiling to bitcode
(['-o', 'something.js'], 0, None, 0, 1),
(['-o', 'something.js', '-O0'], 0, None, 0, 0),
(['-o', 'something.js', '-O1'], 1, None, 0, 0),
(['-o', 'something.js', '-O1', '-g'], 1, None, 0, 0), # no closure since debug
(['-o', 'something.js', '-O2'], 2, None, 0, 1),
(['-o', 'something.js', '-O2', '-g'], 2, None, 0, 0),
(['-o', 'something.js', '-Os'], 2, None, 0, 1),
(['-o', 'something.js', '-O3'], 3, None, 0, 1),
# and, test compiling to bitcode first
(['-o', 'something.bc'], 0, [], 0, 0),
(['-o', 'something.bc', '-O0'], 0, [], 0, 0),
(['-o', 'something.bc', '-O1'], 1, ['-O1'], 0, 0),
(['-o', 'something.bc', '-O2'], 2, ['-O2'], 0, 0),
(['-o', 'something.bc', '-O3'], 3, ['-O3'], 0, 0),
(['-O1', '-o', 'something.bc'], 1, [], 0, 0),
# non-wasm
(['-s', 'WASM=0', '-o', 'something.js'], 0, None, 0, 1),
(['-s', 'WASM=0', '-o', 'something.js', '-O0'], 0, None, 0, 0),
(['-s', 'WASM=0', '-o', 'something.js', '-O1'], 1, None, 0, 0),
(['-s', 'WASM=0', '-o', 'something.js', '-O1', '-g'], 1, None, 0, 0), # no closure since debug
(['-s', 'WASM=0', '-o', 'something.js', '-O2'], 2, None, 0, 1),
(['-s', 'WASM=0', '-o', 'something.js', '-O2', '-g'], 2, None, 0, 0),
(['-s', 'WASM=0', '-o', 'something.js', '-Os'], 2, None, 0, 1),
(['-s', 'WASM=0', '-o', 'something.js', '-O3'], 3, None, 0, 1),
# and, test compiling to bitcode first
(['-s', 'WASM=0', '-o', 'something.bc'], 0, ['-s', 'WASM=0'], 0, 0),
(['-s', 'WASM=0', '-o', 'something.bc', '-O0'], 0, ['-s', 'WASM=0'], 0, 0),
(['-s', 'WASM=0', '-o', 'something.bc', '-O1'], 1, ['-s', 'WASM=0', '-O1'], 0, 0),
(['-s', 'WASM=0', '-o', 'something.bc', '-O2'], 2, ['-s', 'WASM=0', '-O2'], 0, 0),
(['-s', 'WASM=0', '-o', 'something.bc', '-O3'], 3, ['-s', 'WASM=0', '-O3'], 0, 0),
(['-s', 'WASM=0', '-O1', '-o', 'something.bc'], 1, ['-s', 'WASM=0'], 0, 0),
]:
if 'WASM=0' in params and self.is_wasm_backend():
continue
print(params, opt_level, bc_params, closure, has_malloc)
self.clear()
keep_debug = '-g' in params
args = [compiler, path_from_root('tests', 'hello_world_loop' + ('_malloc' if has_malloc else '') + '.cpp')] + params
print('..', args)
output = run_process(args, stdout=PIPE, stderr=PIPE)
assert len(output.stdout) == 0, output.stdout
if bc_params is not None:
self.assertExists('something.bc', output.stderr)
bc_args = [compiler, 'something.bc', '-o', 'something.js'] + bc_params
print('....', bc_args)
output = run_process(bc_args, stdout=PIPE, stderr=PIPE)
self.assertExists('something.js', output.stderr)
self.assertContained('hello, world!', run_js('something.js'))
# Verify optimization level etc. in the generated code
# XXX these are quite sensitive, and will need updating when code generation changes
generated = open('something.js').read()
main = self.get_func(generated, '_main') if 'function _main' in generated else generated
assert 'new Uint16Array' in generated and 'new Uint32Array' in generated, 'typed arrays 2 should be used by default'
assert 'SAFE_HEAP' not in generated, 'safe heap should not be used by default'
assert ': while(' not in main, 'when relooping we also js-optimize, so there should be no labelled whiles'
if closure:
if opt_level == 0:
assert '._main =' in generated, 'closure compiler should have been run'
elif opt_level >= 1:
assert '._main=' in generated, 'closure compiler should have been run (and output should be minified)'
else:
# closure has not been run, we can do some additional checks. TODO: figure out how to do these even with closure
assert '._main = ' not in generated, 'closure compiler should not have been run'
if keep_debug:
assert ('switch (label)' in generated or 'switch (label | 0)' in generated) == (opt_level <= 0), 'relooping should be in opt >= 1'
assert ('assert(STACKTOP < STACK_MAX' in generated) == (opt_level == 0), 'assertions should be in opt == 0'
if 'WASM=0' in params:
if opt_level >= 2 and '-g' in params:
assert re.search(r'HEAP8\[\$?\w+ ?\+ ?\(+\$?\w+ ?', generated) or re.search(r'HEAP8\[HEAP32\[', generated) or re.search(r'[i$]\d+ & ~\(1 << [i$]\d+\)', generated), 'eliminator should create compound expressions, and fewer one-time vars' # also in -O1, but easier to test in -O2
looks_unminified = ' = {}' in generated and ' = []' in generated
looks_minified = '={}' in generated and '=[]' and ';var' in generated
assert not (looks_minified and looks_unminified)
if opt_level == 0 or '-g' in params:
assert looks_unminified
elif opt_level >= 2:
assert looks_minified
  @no_wasm_backend('tests for the asm.js optimizer')
@parameterized({
'c': [EMCC],
'cxx': [EMXX]})
def test_emcc_5(self, compiler):
# asm.js optimization levels
for params, test, text in [
(['-O2'], lambda generated: 'function addRunDependency' in generated, 'shell has unminified utilities'),
(['-O2', '--closure', '1'], lambda generated: 'function addRunDependency' not in generated and ';function' in generated, 'closure minifies the shell, removes whitespace'),
(['-O2', '--closure', '1', '-g1'], lambda generated: 'function addRunDependency' not in generated and ';function' not in generated, 'closure minifies the shell, -g1 makes it keep whitespace'),
(['-O2'], lambda generated: 'var b=0' in generated and 'function _main' not in generated, 'registerize/minify is run by default in -O2'),
(['-O2', '--minify', '0'], lambda generated: 'var b = 0' in generated and 'function _main' not in generated, 'minify is cancelled, but not registerize'),
(['-O2', '--js-opts', '0'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'js opts are cancelled'),
(['-O2', '-g'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'registerize/minify is cancelled by -g'),
(['-O2', '-g0'], lambda generated: 'var b=0' in generated and 'function _main' not in generated, 'registerize/minify is run by default in -O2 -g0'),
(['-O2', '-g1'], lambda generated: 'var b = 0' in generated and 'function _main' not in generated, 'compress is cancelled by -g1'),
(['-O2', '-g2'], lambda generated: ('var b = 0' in generated or 'var i1 = 0' in generated) and 'function _main' in generated, 'minify is cancelled by -g2'),
(['-O2', '-g3'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'registerize is cancelled by -g3'),
(['-O2', '--profiling'], lambda generated: ('var b = 0' in generated or 'var i1 = 0' in generated) and 'function _main' in generated, 'similar to -g2'),
(['-O2', '-profiling'], lambda generated: ('var b = 0' in generated or 'var i1 = 0' in generated) and 'function _main' in generated, 'similar to -g2'),
(['-O2', '--profiling-funcs'], lambda generated: 'var b=0' in generated and '"use asm";var a=' in generated and 'function _main' in generated, 'very minified, but retain function names'),
(['-O2', '-profiling-funcs'], lambda generated: 'var b=0' in generated and '"use asm";var a=' in generated and 'function _main' in generated, 'very minified, but retain function names'),
(['-O2'], lambda generated: 'var b=0' in generated and '"use asm";var a=' in generated and 'function _main' not in generated, 'very minified, no function names'),
# (['-O2', '-g4'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'same as -g3 for now'),
(['-s', 'INLINING_LIMIT=0'], lambda generated: 'function _dump' in generated, 'no inlining without opts'),
([], lambda generated: 'Module["_dump"]' not in generated, 'dump is not exported by default'),
(['-s', 'EXPORTED_FUNCTIONS=["_main", "_dump"]'], lambda generated: 'Module["_dump"] =' in generated, 'dump is now exported'),
(['--llvm-opts', '1'], lambda generated: '_puts(' in generated, 'llvm opts requested'),
([], lambda generated: '// Sometimes an existing Module' in generated, 'without opts, comments in shell code'),
(['-O2'], lambda generated: '// Sometimes an existing Module' not in generated, 'with opts, no comments in shell code'),
(['-O2', '-g2'], lambda generated: '// Sometimes an existing Module' not in generated, 'with -g2, no comments in shell code'),
(['-O2', '-g3'], lambda generated: '// Sometimes an existing Module' in generated, 'with -g3, yes comments in shell code'),
]:
print(params, text)
self.clear()
run_process([compiler, path_from_root('tests', 'hello_world_loop.cpp'), '-o', 'a.out.js', '-s', 'WASM=0'] + params)
self.assertContained('hello, world!', run_js('a.out.js'))
assert test(open('a.out.js').read()), text
def test_multiple_sources(self):
# Compiling two sources at a time should work.
cmd = [EMCC, '-c', path_from_root('tests', 'twopart_main.cpp'), path_from_root('tests', 'twopart_side.c')]
run_process(cmd)
# Object files should be generated by default in the current working
# directory, and not alongside the sources.
self.assertExists('twopart_main.o')
self.assertExists('twopart_side.o')
self.assertNotExists(path_from_root('tests', 'twopart_main.o'))
self.assertNotExists(path_from_root('tests', 'twopart_side.o'))
# But it is an error if '-o' is also specified.
self.clear()
err = self.expect_fail(cmd + ['-o', 'out.o'])
self.assertContained('cannot specify -o with -c/-S and multiple source files', err)
self.assertNotExists('twopart_main.o')
self.assertNotExists('twopart_side.o')
self.assertNotExists(path_from_root('tests', 'twopart_main.o'))
self.assertNotExists(path_from_root('tests', 'twopart_side.o'))
def test_combining_object_files(self):
# Compiling two files with -c will generate separate object files
run_process([EMCC, path_from_root('tests', 'twopart_main.cpp'), path_from_root('tests', 'twopart_side.c'), '-c'])
self.assertExists('twopart_main.o')
self.assertExists('twopart_side.o')
# Linking with just one of them is expected to fail
err = self.expect_fail([EMCC, 'twopart_main.o'])
self.assertContained('undefined symbol: theFunc', err)
# Linking with both should work
run_process([EMCC, 'twopart_main.o', 'twopart_side.o'])
self.assertContained('side got: hello from main, over', run_js('a.out.js'))
# Combining object files into another object should also work, using the `-r` flag
run_process([EMCC, '-r', 'twopart_main.o', 'twopart_side.o', '-o', 'combined.o'])
# We also support building without the `-r` flag but expect a warning
err = run_process([EMCC, 'twopart_main.o', 'twopart_side.o', '-o', 'combined2.o'], stderr=PIPE).stderr
self.assertBinaryEqual('combined.o', 'combined2.o')
self.assertContained('warning: Assuming object file output in the absence of `-c`', err)
# Should be two symbols (and in the wasm backend, also __original_main)
syms = building.llvm_nm('combined.o')
self.assertIn('main', syms.defs)
if self.is_wasm_backend():
self.assertEqual(len(syms.defs), 3)
else:
self.assertEqual(len(syms.defs), 2)
run_process([EMCC, 'combined.o', '-o', 'combined.o.js'])
self.assertContained('side got: hello from main, over', run_js('combined.o.js'))
def test_js_transform(self):
with open('t.py', 'w') as f:
f.write('''
import sys
f = open(sys.argv[1], 'a')
f.write('transformed!')
f.close()
''')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '--js-transform', '%s t.py' % (PYTHON)])
self.assertIn('transformed!', open('a.out.js').read())
@no_wasm_backend("wasm backend alwasy embedds memory")
def test_js_mem_file(self):
for opts in [0, 1, 2, 3]:
print('mem init in', opts)
self.clear()
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '-O' + str(opts)])
if opts >= 2:
self.assertExists('a.out.js.mem')
else:
self.assertNotExists('a.out.js.mem')
def test_emcc_asm_v_wasm(self):
for opts in ([], ['-O1'], ['-O2'], ['-O3']):
print('opts', opts)
for mode in ([], ['-s', 'WASM=0']):
self.clear()
wasm = '=0' not in str(mode)
print(' mode', mode, 'wasm?', wasm)
run_process([EMCC, path_from_root('tests', 'hello_world.c')] + opts + mode)
self.assertExists('a.out.js')
if wasm:
self.assertExists('a.out.wasm')
for engine in JS_ENGINES:
print(' engine', engine)
out = run_js('a.out.js', engine=engine, stderr=PIPE, full_output=True)
self.assertContained('hello, world!', out)
if not wasm and engine == SPIDERMONKEY_ENGINE:
self.validate_asmjs(out)
if not wasm and not self.is_wasm_backend():
src = open('a.out.js').read()
if opts == []:
self.assertContained('almost asm', src)
else:
self.assertContained('use asm', src)
def test_emcc_cflags(self):
output = run_process([EMCC, '--cflags'], stdout=PIPE)
flags = output.stdout.strip()
self.assertContained(' '.join(building.doublequote_spaces(shared.emsdk_cflags([], False))), flags)
output = run_process([EMXX, '--cflags'], stdout=PIPE)
flags = output.stdout.strip()
self.assertContained(' '.join(building.doublequote_spaces(shared.emsdk_cflags([], True))), flags)
# check they work
cmd = [CLANG_CXX, path_from_root('tests', 'hello_world.cpp')] + shlex.split(flags.replace('\\', '\\\\')) + ['-c', '-emit-llvm', '-o', 'a.bc']
run_process(cmd)
run_process([EMCC, 'a.bc'])
self.assertContained('hello, world!', run_js('a.out.js'))
def test_emcc_print_search_dirs(self):
result = run_process([EMCC, '-print-search-dirs'], stdout=PIPE, stderr=PIPE)
self.assertContained('programs: =', result.stdout)
self.assertContained('libraries: =', result.stdout)
def test_emar_em_config_flag(self):
    # Test that the --em-config flag is accepted but not passed down to llvm-ar.
# We expand this in case the EM_CONFIG is ~/.emscripten (default)
config = os.path.expanduser(shared.EM_CONFIG)
proc = run_process([EMAR, '--em-config', config, '-version'], stdout=PIPE, stderr=PIPE)
self.assertEqual(proc.stderr, "")
self.assertContained('LLVM', proc.stdout)
def test_emsize(self):
with open(path_from_root('tests', 'other', 'test_emsize.out')) as expected_output:
expected = expected_output.read()
cmd = [emsize, path_from_root('tests', 'other', 'test_emsize.js')]
for command in [cmd, cmd + ['-format=sysv']]:
output = run_process(cmd, stdout=PIPE).stdout
self.assertContained(expected, output)
@parameterized({
# ('directory to the test', 'output filename', ['extra args to pass to
# CMake']) Testing all combinations would be too much work and the test
# would take 10 minutes+ to finish (CMake feature detection is slow), so
# combine multiple features into one to try to cover as much as possible
# while still keeping this test in sensible time limit.
'js': ('target_js', 'test_cmake.js', ['-DCMAKE_BUILD_TYPE=Debug']),
'html': ('target_html', 'hello_world_gles.html', ['-DCMAKE_BUILD_TYPE=Release']),
'library': ('target_library', 'libtest_cmake.a', ['-DCMAKE_BUILD_TYPE=MinSizeRel']),
'static_cpp': ('target_library', 'libtest_cmake.a', ['-DCMAKE_BUILD_TYPE=RelWithDebInfo', '-DCPP_LIBRARY_TYPE=STATIC']),
'stdproperty': ('stdproperty', 'helloworld.js', [])
})
def test_cmake(self, test_dir, output_file, cmake_args):
# Test all supported generators.
if WINDOWS:
generators = ['MinGW Makefiles', 'NMake Makefiles']
else:
generators = ['Unix Makefiles', 'Ninja', 'Eclipse CDT4 - Ninja']
configurations = {'MinGW Makefiles' : {'build' : ['mingw32-make'] }, # noqa
'NMake Makefiles' : {'build' : ['nmake', '/NOLOGO']}, # noqa
'Unix Makefiles' : {'build' : ['make']}, # noqa
'Ninja' : {'build' : ['ninja']}, # noqa
'Eclipse CDT4 - Ninja': {'build' : ['ninja']}, # noqa
}
for generator in generators:
conf = configurations[generator]
if not shared.which(conf['build'][0]):
# Use simple test if applicable
        print('Skipping %s test for CMake support; build tool not found: %s.' % (generator, conf['build'][0]))
continue
cmakelistsdir = path_from_root('tests', 'cmake', test_dir)
with temp_directory(self.get_dir()) as tempdirname:
# Run Cmake
cmd = [emcmake, 'cmake'] + cmake_args + ['-G', generator, cmakelistsdir]
env = os.environ.copy()
# https://github.com/emscripten-core/emscripten/pull/5145: Check that CMake works even if EMCC_SKIP_SANITY_CHECK=1 is passed.
if test_dir == 'target_html':
env['EMCC_SKIP_SANITY_CHECK'] = '1'
print(str(cmd))
ret = run_process(cmd, env=env, stdout=None if EM_BUILD_VERBOSE >= 2 else PIPE, stderr=None if EM_BUILD_VERBOSE >= 1 else PIPE)
if ret.stderr is not None and len(ret.stderr.strip()):
print(ret.stderr) # If there were any errors, print them directly to console for diagnostics.
if ret.stderr is not None and 'error' in ret.stderr.lower():
print('Failed command: ' + ' '.join(cmd))
print('Result:\n' + ret.stderr)
self.fail('cmake call failed!')
# Build
cmd = conf['build']
if EM_BUILD_VERBOSE >= 3 and 'Ninja' not in generator:
cmd += ['VERBOSE=1']
ret = run_process(cmd, stdout=None if EM_BUILD_VERBOSE >= 2 else PIPE)
if ret.stderr is not None and len(ret.stderr.strip()):
print(ret.stderr) # If there were any errors, print them directly to console for diagnostics.
if ret.stdout is not None and 'error' in ret.stdout.lower() and '0 error(s)' not in ret.stdout.lower():
print('Failed command: ' + ' '.join(cmd))
print('Result:\n' + ret.stdout)
self.fail('make failed!')
self.assertExists(tempdirname + '/' + output_file, 'building a cmake-generated Makefile failed to produce an output file %s!' % tempdirname + '/' + output_file)
# Run through node, if CMake produced a .js file.
if output_file.endswith('.js'):
ret = run_process(NODE_JS + [tempdirname + '/' + output_file], stdout=PIPE).stdout
self.assertTextDataIdentical(open(cmakelistsdir + '/out.txt').read().strip(), ret.strip())
# Test that the various CMAKE_xxx_COMPILE_FEATURES that are advertised for the Emscripten toolchain match with the actual language features that Clang supports.
  # If we update the LLVM version and this test fails, copy over the newly advertised features from Clang into cmake/Modules/Platform/Emscripten.cmake.
@no_windows('Skipped on Windows because CMake does not configure native Clang builds well on Windows.')
def test_cmake_compile_features(self):
with temp_directory(self.get_dir()):
cmd = ['cmake', '-DCMAKE_C_COMPILER=' + CLANG_CC, '-DCMAKE_CXX_COMPILER=' + CLANG_CXX, path_from_root('tests', 'cmake', 'stdproperty')]
print(str(cmd))
native_features = run_process(cmd, stdout=PIPE).stdout
with temp_directory(self.get_dir()):
cmd = [emcmake, 'cmake', path_from_root('tests', 'cmake', 'stdproperty')]
print(str(cmd))
emscripten_features = run_process(cmd, stdout=PIPE).stdout
native_features = '\n'.join([x for x in native_features.split('\n') if '***' in x])
emscripten_features = '\n'.join([x for x in emscripten_features.split('\n') if '***' in x])
self.assertTextDataIdentical(native_features, emscripten_features)
# Tests that it's possible to pass C++11 or GNU++11 build modes to CMake by building code that needs C++11 (embind)
def test_cmake_with_embind_cpp11_mode(self):
for args in [[], ['-DNO_GNU_EXTENSIONS=1']]:
with temp_directory(self.get_dir()) as tempdirname:
configure = [emcmake, 'cmake', path_from_root('tests', 'cmake', 'cmake_with_emval')] + args
print(str(configure))
run_process(configure)
build = ['cmake', '--build', '.']
print(str(build))
run_process(build)
ret = run_process(NODE_JS + [os.path.join(tempdirname, 'cpp_with_emscripten_val.js')], stdout=PIPE).stdout.strip()
if '-DNO_GNU_EXTENSIONS=1' in args:
self.assertTextDataIdentical('Hello! __STRICT_ANSI__: 1, __cplusplus: 201103', ret)
else:
self.assertTextDataIdentical('Hello! __STRICT_ANSI__: 0, __cplusplus: 201103', ret)
  # Tests the Emscripten CMake toolchain option EMSCRIPTEN_GENERATE_BITCODE_STATIC_LIBRARIES
def test_cmake_bitcode_static_libraries(self):
if self.is_wasm_backend():
# Test that this option produces an error with the llvm backend
err = self.expect_fail([emcmake, 'cmake', path_from_root('tests', 'cmake', 'static_lib'), '-DEMSCRIPTEN_GENERATE_BITCODE_STATIC_LIBRARIES=ON'])
self.assertContained('EMSCRIPTEN_GENERATE_BITCODE_STATIC_LIBRARIES is not compatible with the', err)
return
# Test that building static libraries by default generates UNIX archives (.a, with the emar tool)
self.clear()
run_process([emcmake, 'cmake', path_from_root('tests', 'cmake', 'static_lib')])
run_process(['cmake', '--build', '.'])
self.assertTrue(building.is_ar('libstatic_lib.a'))
run_process([EMAR, 'x', 'libstatic_lib.a'])
found = False # hashing makes the object name random
for x in os.listdir('.'):
if x.endswith('.o'):
found = True
if self.is_wasm_backend():
assert building.is_wasm(x)
else:
assert building.is_bitcode(x)
assert found
# Test that passing the -DEMSCRIPTEN_GENERATE_BITCODE_STATIC_LIBRARIES=ON
# directive causes CMake to generate LLVM bitcode files as static libraries
# (.bc)
self.clear()
run_process([emcmake, 'cmake', '-DEMSCRIPTEN_GENERATE_BITCODE_STATIC_LIBRARIES=ON', path_from_root('tests', 'cmake', 'static_lib')])
run_process(['cmake', '--build', '.'])
if self.is_wasm_backend():
assert building.is_wasm('libstatic_lib.bc')
else:
assert building.is_bitcode('libstatic_lib.bc')
assert not building.is_ar('libstatic_lib.bc')
# Test that one is able to fake custom suffixes for static libraries.
# (sometimes projects want to emulate stuff, and do weird things like files
# with ".so" suffix which are in fact either ar archives or bitcode files)
self.clear()
run_process([emcmake, 'cmake', '-DSET_FAKE_SUFFIX_IN_PROJECT=1', path_from_root('tests', 'cmake', 'static_lib')])
run_process(['cmake', '--build', '.'])
assert building.is_ar('myprefix_static_lib.somecustomsuffix')
# Tests that the CMake variable EMSCRIPTEN_VERSION is properly provided to user CMake scripts
def test_cmake_emscripten_version(self):
run_process([emcmake, 'cmake', path_from_root('tests', 'cmake', 'emscripten_version')])
def test_system_include_paths(self):
# Verify that all default include paths are within `emscripten/system`
def verify_includes(stderr):
self.assertContained('<...> search starts here:', stderr)
assert stderr.count('End of search list.') == 1, stderr
start = stderr.index('<...> search starts here:')
end = stderr.index('End of search list.')
includes = stderr[start:end]
includes = [i.strip() for i in includes.splitlines()[1:-1]]
for i in includes:
if shared.Cache.dirname in i:
self.assertContained(shared.Cache.dirname, i)
else:
self.assertContained(path_from_root('system'), i)
err = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-v'], stderr=PIPE).stderr
verify_includes(err)
err = run_process([EMXX, path_from_root('tests', 'hello_world.cpp'), '-v'], stderr=PIPE).stderr
verify_includes(err)
def test_failure_error_code(self):
for compiler in [EMCC, EMXX]:
# Test that if one file is missing from the build, then emcc shouldn't succeed, and shouldn't produce an output file.
self.expect_fail([compiler, path_from_root('tests', 'hello_world.c'), 'this_file_is_missing.c', '-o', 'out.js'])
self.assertFalse(os.path.exists('out.js'))
def test_use_cxx(self):
create_test_file('empty_file', ' ')
dash_xc = run_process([EMCC, '-v', '-xc', 'empty_file'], stderr=PIPE).stderr
self.assertNotContained('-x c++', dash_xc)
dash_xcpp = run_process([EMCC, '-v', '-xc++', 'empty_file'], stderr=PIPE).stderr
self.assertContained('-x c++', dash_xcpp)
def test_cxx11(self):
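    # Both the -std=c++11 and --std=c++11 spellings should be accepted,
    # by emcc as well as em++.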
for std in ['-std=c++11', '--std=c++11']:
for compiler in [EMCC, EMXX]:
run_process([compiler, std, path_from_root('tests', 'hello_cxx11.cpp')])
# Regression test for issue #4522: Incorrect CC vs CXX detection
def test_incorrect_c_detection(self):
# This auto-detection only works for the compile phase.
# For linking you need to use `em++` or pass `-x c++`
create_test_file('test.c', 'foo\n')
for compiler in [EMCC, EMXX]:
run_process([compiler, '-c', '--bind', '--embed-file', 'test.c', path_from_root('tests', 'hello_world.cpp')])
def test_odd_suffixes(self):
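    # emcc should accept the full range of C/C++ source suffixes, and be
    # able to consume its own output when given a nonstandard suffix.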
for suffix in ['CPP', 'c++', 'C++', 'cxx', 'CXX', 'cc', 'CC', 'i', 'ii']:
if self.is_wasm_backend() and suffix == 'ii':
        # wasm backend treats .i and .ii specially and considers them already
        # pre-processed. Because of this it strips all the -D command line
        # flags, including the __EMSCRIPTEN__ define, which makes this fail
        # to compile since libcxx/__config depends on __EMSCRIPTEN__.
continue
self.clear()
print(suffix)
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'test.' + suffix)
run_process([EMCC, self.in_dir('test.' + suffix)])
self.assertContained('hello, world!', run_js('a.out.js'))
for suffix in ['lo']:
self.clear()
print(suffix)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'binary.' + suffix])
run_process([EMCC, 'binary.' + suffix])
self.assertContained('hello, world!', run_js('a.out.js'))
@no_wasm_backend('asm.js minification')
def test_asm_minify(self):
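    # -O2 should minify asm.js output, while --minify 0 and -g should
    # produce progressively larger, more readable builds.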
def test(args):
run_process([EMCC, path_from_root('tests', 'hello_world_loop_malloc.cpp'), '-s', 'WASM=0'] + args)
self.assertContained('hello, world!', run_js('a.out.js'))
return open('a.out.js').read()
src = test([])
assert 'function _malloc' in src
src = test(['-O2', '-s', 'ASM_JS=1'])
normal_size = len(src)
print('normal', normal_size)
assert 'function _malloc' not in src
src = test(['-O2', '-s', 'ASM_JS=1', '--minify', '0'])
unminified_size = len(src)
print('unminified', unminified_size)
assert unminified_size > normal_size
assert 'function _malloc' not in src
src = test(['-O2', '-s', 'ASM_JS=1', '-g'])
debug_size = len(src)
print('debug', debug_size)
self.assertGreater(debug_size, unminified_size)
self.assertContained('function _malloc', src)
@no_wasm_backend('tests fastcomp extra assertions for function pointer errors - do we need these?')
def test_dangerous_func_cast(self):
src = r'''
#include <stdio.h>
typedef void (*voidfunc)();
int my_func() {
printf("my func\n");
return 10;
}
int main(int argc, char **argv) {
voidfunc fps[10];
for (int i = 0; i < 10; i++)
fps[i] = (i == argc) ? (void (*)())my_func : NULL;
fps[2 * (argc-1) + 1]();
return 0;
}
'''
create_test_file('src.c', src)
def test(args, expected):
print(args, expected)
run_process([EMCC, 'src.c'] + args, stderr=PIPE)
self.assertContained(expected, run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None))
if self.is_wasm_backend():
return
print('in asm.js')
run_process([EMCC, 'src.c', '-s', 'WASM=0'] + args, stderr=PIPE)
self.assertContained(expected, run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None))
# TODO: emulation function support in wasm is imperfect
print('with emulated function pointers in asm.js')
run_process([EMCC, '-s', 'WASM=0', 'src.c', '-s', 'ASSERTIONS=1'] + args + ['-s', 'EMULATED_FUNCTION_POINTERS=1'], stderr=PIPE)
out = run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None)
self.assertContained(expected, out)
    # fastcomp output is all asm.js, so it can't just work with wrong
    # signatures, but ASSERTIONS=2 gives much better info for debugging
# Case 1: No useful info, but does mention ASSERTIONS
test(['-O1'], 'ASSERTIONS')
# Case 2: Some useful text
test(['-O1', '-s', 'ASSERTIONS=1'], [
'Invalid function pointer',
"called with signature 'v'. Perhaps this is an invalid value",
'Build with ASSERTIONS=2 for more info'
])
# Case 3: actually useful identity of the bad pointer, with comparisons to
# what it would be in other types/tables
test(['-O1', '-s', 'ASSERTIONS=2'], [
'Invalid function pointer',
"called with signature 'v'. Perhaps this is an invalid value",
'This pointer might make sense in another type signature:',
'Invalid function pointer',
"called with signature 'v'. Perhaps this is an invalid value",
"i: asm['_my_func']"
])
# Case 4: emulate so it works
test(['-O1', '-s', 'EMULATE_FUNCTION_POINTER_CASTS=1'], 'my func\n')
@no_wasm_backend('uses EMULATED_FUNCTION_POINTERS')
def test_emulate_function_pointer_casts_assertions_2(self):
# check empty tables work with assertions 2 in this mode (#6554)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EMULATED_FUNCTION_POINTERS=1', '-s', 'ASSERTIONS=2'])
def test_wl_linkflags(self):
# Test path -L and -l via -Wl, arguments and -Wl, response files
create_test_file('main.cpp', '''
extern "C" void printey();
int main() {
printey();
return 0;
}
''')
create_test_file('libfile.cpp', '''
#include <stdio.h>
extern "C" void printey() {
printf("hello from lib\\n");
}
''')
create_test_file('linkflags.txt', '''
-L.
-lfoo
''')
run_process([EMCC, '-o', 'libfile.o', 'libfile.cpp'])
run_process([EMAR, 'cr', 'libfoo.a', 'libfile.o'])
run_process([EMCC, 'main.cpp', '-L.', '-lfoo'])
run_process([EMCC, 'main.cpp', '-Wl,-L.', '-Wl,-lfoo'])
run_process([EMCC, 'main.cpp', '-Wl,@linkflags.txt'])
def test_l_link(self):
# Linking with -lLIBNAME and -L/DIRNAME should work, also should work with spaces
create_test_file('main.cpp', '''
extern void printey();
int main() {
printey();
return 0;
}
''')
create_test_file('libfile.cpp', '''
#include <stdio.h>
void printey() {
printf("hello from lib\\n");
}
''')
ensure_dir('libdir')
libfile = self.in_dir('libdir', 'libfile.so')
aout = 'a.out.js'
def build(path, args):
run_process([EMCC, path] + args)
# Test linking the library built here by emcc
build('libfile.cpp', ['-c'])
shutil.move('libfile.o', libfile)
build('main.cpp', ['-L' + 'libdir', '-lfile'])
self.assertContained('hello from lib', run_js(aout))
# Also test execution with `-l c` and space-separated library linking syntax
os.remove(aout)
build('libfile.cpp', ['-c', '-l', 'c'])
shutil.move('libfile.o', libfile)
build('main.cpp', ['-L', 'libdir', '-l', 'file'])
self.assertContained('hello from lib', run_js(aout))
# Must not leave unneeded linker stubs
self.assertNotExists('a.out')
self.assertNotExists('a.exe')
def test_commons_link(self):
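    # Common symbols (tentative definitions like 'int foo[8];') in an
    # archive member should link and be zero-initialized.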
create_test_file('a.h', r'''
#if !defined(A_H)
#define A_H
extern int foo[8];
#endif
''')
create_test_file('a.c', r'''
#include "a.h"
int foo[8];
''')
create_test_file('main.c', r'''
#include <stdio.h>
#include "a.h"
int main() {
printf("|%d|\n", foo[0]);
return 0;
}
''')
run_process([EMCC, '-o', 'a.o', 'a.c'])
run_process([EMAR, 'rv', 'library.a', 'a.o'])
run_process([EMCC, '-o', 'main.o', 'main.c'])
run_process([EMCC, '-o', 'a.js', 'main.o', 'library.a'])
self.assertContained('|0|', run_js('a.js'))
@parameterized({
'expand_symlinks': [[]],
'no_canonical_prefixes': [['-no-canonical-prefixes']],
})
@no_windows('Windows does not support symlinks')
def test_symlink_points_to_bad_suffix(self, flags):
"""Tests compiling a symlink where foobar.c points to foobar.xxx.
In this case, we should always successfully compile the code."""
create_test_file('foobar.xxx', 'int main(){ return 0; }')
os.symlink('foobar.xxx', 'foobar.c')
run_process([EMCC, 'foobar.c', '-o', 'foobar.bc'] + flags)
@parameterized({
'expand_symlinks': ([], True),
'no_canonical_prefixes': (['-no-canonical-prefixes'], False),
})
@no_windows('Windows does not support symlinks')
def test_symlink_has_bad_suffix(self, flags, expect_success):
"""Tests compiling a symlink where foobar.xxx points to foobar.c.
In this case, setting -no-canonical-prefixes will result in a build failure
due to the inappropriate file suffix on foobar.xxx."""
create_test_file('foobar.c', 'int main(){ return 0; }')
os.symlink('foobar.c', 'foobar.xxx')
proc = run_process([EMCC, 'foobar.xxx', '-o', 'foobar.bc'] + flags, check=expect_success, stderr=PIPE)
if not expect_success:
self.assertNotEqual(proc.returncode, 0)
self.assertContained("unknown suffix", proc.stderr)
def test_multiply_defined_libsymbols(self):
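    # The same "shared library" pulled in via several objects must not
    # produce multiply-defined symbol errors at the final link.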
lib_name = 'libA.c'
a2_name = 'a2.c'
b2_name = 'b2.c'
main_name = 'main.c'
create_test_file(lib_name, 'int mult() { return 1; }')
create_test_file(a2_name, 'void x() {}')
create_test_file(b2_name, 'void y() {}')
create_test_file(main_name, r'''
#include <stdio.h>
int mult();
int main() {
printf("result: %d\n", mult());
return 0;
}
''')
building.emcc(lib_name, output_filename='libA.so')
building.emcc(a2_name, ['-L.', '-lA'])
building.emcc(b2_name, ['-L.', '-lA'])
building.emcc(main_name, ['-L.', '-lA', a2_name + '.o', b2_name + '.o'], output_filename='a.out.js')
self.assertContained('result: 1', run_js('a.out.js'))
def test_multiply_defined_libsymbols_2(self):
a = "int x() { return 55; }"
a_name = 'a.c'
create_test_file(a_name, a)
b = "int y() { return 2; }"
b_name = 'b.c'
create_test_file(b_name, b)
c = "int z() { return 5; }"
c_name = 'c.c'
create_test_file(c_name, c)
main = r'''
#include <stdio.h>
int x();
int y();
int z();
int main() {
printf("result: %d\n", x() + y() + z());
return 0;
}
'''
main_name = 'main.c'
create_test_file(main_name, main)
building.emcc(a_name) # a.c.o
building.emcc(b_name) # b.c.o
building.emcc(c_name) # c.c.o
lib_name = 'libLIB.a'
building.emar('cr', lib_name, [a_name + '.o', b_name + '.o']) # libLIB.a with a and b
# a is in the lib AND in an .o, so should be ignored in the lib. We do still need b from the lib though
building.emcc(main_name, [a_name + '.o', c_name + '.o', '-L.', '-lLIB'], output_filename='a.out.js')
self.assertContained('result: 62', run_js('a.out.js'))
  @no_wasm_backend('not relevant with lld')
def test_link_group(self):
lib_src_name = 'lib.c'
create_test_file(lib_src_name, 'int x() { return 42; }')
main_name = 'main.c'
create_test_file(main_name, r'''
#include <stdio.h>
int x();
int main() {
printf("result: %d\n", x());
return 0;
}
''')
building.emcc(lib_src_name) # lib.c.o
lib_name = 'libLIB.a'
building.emar('cr', lib_name, [lib_src_name + '.o']) # libLIB.a with lib.c.o
def test(lib_args, err_expected):
print(err_expected)
output = run_process([EMCC, main_name, '-o', 'a.out.js'] + lib_args, stdout=PIPE, stderr=PIPE, check=not err_expected)
if err_expected:
self.assertContained(err_expected, output.stderr)
else:
self.assertNotContained('undefined symbol', output.stderr)
out_js = 'a.out.js'
self.assertExists(out_js, output.stdout + '\n' + output.stderr)
self.assertContained('result: 42', run_js(out_js))
test(['-Wl,--start-group', lib_name, '-Wl,--start-group'], 'Nested --start-group, missing --end-group?')
test(['-Wl,--end-group', lib_name, '-Wl,--start-group'], '--end-group without --start-group')
test(['-Wl,--start-group', lib_name, '-Wl,--end-group'], None)
test(['-Wl,--start-group', lib_name], None)
print('embind test with groups')
main_name = 'main.cpp'
create_test_file(main_name, r'''
#include <stdio.h>
#include <emscripten/val.h>
using namespace emscripten;
extern "C" int x();
int main() {
int y = -x();
y = val::global("Math").call<int>("abs", y);
printf("result: %d\n", y);
return 0;
}
''')
test(['-Wl,--start-group', lib_name, '-Wl,--end-group', '--bind'], None)
def test_whole_archive(self):
# Verify that -Wl,--whole-archive includes the static constructor from the
# otherwise unreferenced library.
run_process([EMCC, '-c', '-o', 'main.o', path_from_root('tests', 'test_whole_archive', 'main.c')])
run_process([EMCC, '-c', '-o', 'testlib.o', path_from_root('tests', 'test_whole_archive', 'testlib.c')])
run_process([EMAR, 'crs', 'libtest.a', 'testlib.o'])
run_process([EMCC, '-Wl,--whole-archive', 'libtest.a', '-Wl,--no-whole-archive', 'main.o'])
self.assertContained('foo is: 42\n', run_js('a.out.js'))
run_process([EMCC, '-Wl,-whole-archive', 'libtest.a', '-Wl,-no-whole-archive', 'main.o'])
self.assertContained('foo is: 42\n', run_js('a.out.js'))
# Verify the --no-whole-archive prevents the inclusion of the ctor
run_process([EMCC, '-Wl,-whole-archive', '-Wl,--no-whole-archive', 'libtest.a', 'main.o'])
self.assertContained('foo is: 0\n', run_js('a.out.js'))
def test_link_group_bitcode(self):
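    # --start-group/--end-group should also work when linking to an
    # intermediate bitcode/object output.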
create_test_file('1.c', r'''
int f(void);
int main() {
f();
return 0;
}
''')
create_test_file('2.c', r'''
#include <stdio.h>
int f() {
printf("Hello\n");
return 0;
}
''')
run_process([EMCC, '-o', '1.o', '1.c'])
run_process([EMCC, '-o', '2.o', '2.c'])
run_process([EMAR, 'crs', '2.a', '2.o'])
run_process([EMCC, '-o', 'out.bc', '-Wl,--start-group', '2.a', '1.o', '-Wl,--end-group'])
run_process([EMCC, 'out.bc'])
self.assertContained('Hello', run_js('a.out.js'))
@no_wasm_backend('lld resolves circular lib dependencies')
def test_circular_libs(self):
def tmp_source(name, code):
with open(name, 'w') as f:
f.write(code)
tmp_source('a.c', 'int z(); int x() { return z(); }')
tmp_source('b.c', 'int x(); int y() { return x(); } int z() { return 42; }')
tmp_source('c.c', 'int q() { return 0; }')
tmp_source('main.c', r'''
#include <stdio.h>
int y();
int main() {
printf("result: %d\n", y());
return 0;
}
''')
building.emcc('a.c') # a.c.o
building.emcc('b.c') # b.c.o
building.emcc('c.c')
building.emar('cr', 'libA.a', ['a.c.o', 'c.c.o'])
building.emar('cr', 'libB.a', ['b.c.o', 'c.c.o'])
args = ['main.c', '-o', 'a.out.js']
libs_list = ['libA.a', 'libB.a']
# 'libA.a' does not satisfy any symbols from main, so it will not be included,
# and there will be an undefined symbol.
err = self.expect_fail([EMCC] + args + libs_list)
self.assertContained('error: undefined symbol: x', err)
# -Wl,--start-group and -Wl,--end-group around the libs will cause a rescan
# of 'libA.a' after 'libB.a' adds undefined symbol "x", so a.c.o will now be
# included (and the link will succeed).
libs = ['-Wl,--start-group'] + libs_list + ['-Wl,--end-group']
run_process([EMCC] + args + libs)
self.assertContained('result: 42', run_js('a.out.js'))
# -( and -) should also work.
args = ['main.c', '-o', 'a2.out.js']
libs = ['-Wl,-('] + libs_list + ['-Wl,-)']
run_process([EMCC] + args + libs)
self.assertContained('result: 42', run_js('a2.out.js'))
  # The fastcomp path will deliberately ignore duplicate input files in order
  # to allow "libA.so" on the command line twice. There is no real .so support;
  # the .so files are actually bitcode.
  @no_wasm_backend('tests legacy .so linking behaviour')
@needs_dlfcn
def test_redundant_link(self):
lib = "int mult() { return 1; }"
lib_name = 'libA.c'
create_test_file(lib_name, lib)
main = r'''
#include <stdio.h>
int mult();
int main() {
printf("result: %d\n", mult());
return 0;
}
'''
main_name = 'main.c'
create_test_file(main_name, main)
building.emcc(lib_name, output_filename='libA.so')
building.emcc(main_name, ['libA.so', 'libA.so'], output_filename='a.out.js')
self.assertContained('result: 1', run_js('a.out.js'))
def test_dot_a_all_contents_invalid(self):
# check that we error if an object file in a .a is not valid bitcode.
# do not silently ignore native object files, which may have been
# built by mistake
create_test_file('native.c', 'int native() { return 5; }')
create_test_file('main.c', 'extern int native(); int main() { return native(); }')
run_process([CLANG_CC, 'native.c', '-target', 'x86_64-linux', '-c', '-o', 'native.o'])
run_process([EMAR, 'crs', 'libfoo.a', 'native.o'])
stderr = self.expect_fail([EMCC, 'main.c', 'libfoo.a'])
self.assertContained('unknown file type', stderr)
def test_export_all(self):
lib = r'''
#include <stdio.h>
void libf1() { printf("libf1\n"); }
void libf2() { printf("libf2\n"); }
'''
create_test_file('lib.c', lib)
create_test_file('main.js', '''
var Module = {
onRuntimeInitialized: function() {
_libf1();
_libf2();
}
};
''')
building.emcc('lib.c', ['-s', 'EXPORT_ALL', '-s', 'LINKABLE', '--pre-js', 'main.js'], output_filename='a.out.js')
self.assertContained('libf1\nlibf2\n', run_js('a.out.js'))
def test_export_all_and_exported_functions(self):
    # EXPORT_ALL should not export library functions by default.
    # This means that to export a library function you also need to
    # explicitly list it in EXPORTED_FUNCTIONS.
lib = r'''
#include <stdio.h>
#include <emscripten.h>
EMSCRIPTEN_KEEPALIVE void libfunc() { puts("libfunc\n"); }
'''
create_test_file('lib.c', lib)
create_test_file('main.js', '''
var Module = {
onRuntimeInitialized: function() {
_libfunc();
__get_daylight();
}
};
''')
# __get_daylight should not be linked by default, even with EXPORT_ALL
building.emcc('lib.c', ['-s', 'EXPORT_ALL', '--pre-js', 'main.js'], output_filename='a.out.js')
err = run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None)
self.assertContained('__get_daylight is not defined', err)
building.emcc('lib.c', ['-s', "EXPORTED_FUNCTIONS=['__get_daylight']", '-s', 'EXPORT_ALL', '--pre-js', 'main.js'], output_filename='a.out.js')
self.assertContained('libfunc\n', run_js('a.out.js'))
def test_stdin(self):
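    # Compiled programs should be able to read piped stdin under each JS
    # engine (the v8 shell, which lacks stdin support, is skipped).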
def run_test():
for engine in JS_ENGINES:
if engine == V8_ENGINE:
continue # no stdin support in v8 shell
engine[0] = os.path.normpath(engine[0])
print(engine, file=sys.stderr)
# work around a bug in python's subprocess module
# (we'd use run_js() normally)
try_delete('out.txt')
cmd = jsrun.make_command(os.path.normpath('out.js'), engine)
cmd = ' '.join(building.doublequote_spaces(cmd))
if WINDOWS:
os.system('type "in.txt" | {} >out.txt'.format(cmd))
else: # posix
os.system('cat in.txt | {} > out.txt'.format(cmd))
self.assertContained('abcdef\nghijkl\neof', open('out.txt').read())
building.emcc(path_from_root('tests', 'module', 'test_stdin.c'), output_filename='out.js')
create_test_file('in.txt', 'abcdef\nghijkl')
run_test()
building.emcc(path_from_root('tests', 'module', 'test_stdin.c'),
['-O2', '--closure', '1'], output_filename='out.js')
run_test()
def test_ungetc_fscanf(self):
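    # Characters pushed back with ungetc() must be returned, most recent
    # first, by a following fscanf().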
create_test_file('main.cpp', r'''
#include <stdio.h>
int main(int argc, char const *argv[])
{
char str[4] = {0};
FILE* f = fopen("my_test.input", "r");
if (f == NULL) {
printf("cannot open file\n");
return -1;
}
ungetc('x', f);
ungetc('y', f);
ungetc('z', f);
fscanf(f, "%3s", str);
printf("%s\n", str);
return 0;
}
''')
create_test_file('my_test.input', 'abc')
building.emcc('main.cpp', ['--embed-file', 'my_test.input'], output_filename='a.out.js')
self.assertContained('zyx', run_process(JS_ENGINES[0] + ['a.out.js'], stdout=PIPE, stderr=PIPE).stdout)
def test_abspaths(self):
# Includes with absolute paths are generally dangerous, things like -I/usr/.. will get to system local headers, not our portable ones.
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'main.c')
for args, expected in [(['-I/usr/something', '-Wwarn-absolute-paths'], True),
(['-L/usr/something', '-Wwarn-absolute-paths'], True),
(['-I/usr/something'], False),
(['-L/usr/something'], False),
(['-I/usr/something', '-Wno-warn-absolute-paths'], False),
(['-L/usr/something', '-Wno-warn-absolute-paths'], False),
(['-Isubdir/something', '-Wwarn-absolute-paths'], False),
(['-Lsubdir/something', '-Wwarn-absolute-paths'], False),
([], False)]:
print(args, expected)
proc = run_process([EMCC, 'main.c'] + args, stderr=PIPE)
WARNING = 'encountered. If this is to a local system header/library, it may cause problems (local system files make sense for compiling natively on your system, but not necessarily to JavaScript)'
self.assertContainedIf(WARNING, proc.stderr, expected)
def test_local_link(self):
    # Linking a local library directly, like /usr/lib/libsomething.so, cannot work of course since it
    # doesn't contain bitcode. However, when we see that, we should look for a bitcode file for that
    # library in the -L paths and system/lib.
create_test_file('main.cpp', '''
extern void printey();
int main() {
printey();
return 0;
}
''')
ensure_dir('subdir')
open(os.path.join('subdir', 'libfile.so'), 'w').write('this is not llvm bitcode!')
create_test_file('libfile.cpp', '''
#include <stdio.h>
void printey() {
printf("hello from lib\\n");
}
''')
run_process([EMCC, 'libfile.cpp', '-o', 'libfile.so'], stderr=PIPE)
run_process([EMCC, 'main.cpp', os.path.join('subdir', 'libfile.so'), '-L.'])
self.assertContained('hello from lib', run_js('a.out.js'))
def test_identical_basenames(self):
# Issue 287: files in different dirs but with the same basename get confused as the same,
# causing multiply defined symbol errors
ensure_dir('foo')
ensure_dir('bar')
open(os.path.join('foo', 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
open(os.path.join('bar', 'main.cpp'), 'w').write('''
#include <stdio.h>
void printey() { printf("hello there\\n"); }
''')
run_process([EMCC, os.path.join('foo', 'main.cpp'), os.path.join('bar', 'main.cpp')])
self.assertContained('hello there', run_js('a.out.js'))
# ditto with first creating .o files
try_delete('a.out.js')
run_process([EMCC, os.path.join('foo', 'main.cpp'), '-o', os.path.join('foo', 'main.o')])
run_process([EMCC, os.path.join('bar', 'main.cpp'), '-o', os.path.join('bar', 'main.o')])
run_process([EMCC, os.path.join('foo', 'main.o'), os.path.join('bar', 'main.o')])
self.assertContained('hello there', run_js('a.out.js'))
def test_main_a(self):
# if main() is in a .a, we need to pull in that .a
main_name = 'main.c'
create_test_file(main_name, r'''
#include <stdio.h>
extern int f();
int main() {
printf("result: %d.\n", f());
return 0;
}
''')
other_name = 'other.c'
create_test_file(other_name, r'''
#include <stdio.h>
int f() { return 12346; }
''')
run_process([EMCC, main_name, '-c', '-o', main_name + '.bc'])
run_process([EMCC, other_name, '-c', '-o', other_name + '.bc'])
run_process([EMAR, 'cr', main_name + '.a', main_name + '.bc'])
run_process([EMCC, other_name + '.bc', main_name + '.a'])
self.assertContained('result: 12346.', run_js('a.out.js'))
def test_multiple_archives_duplicate_basenames(self):
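    # Object files with identical basenames coming from different archives
    # must both be linked in.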
create_test_file('common.c', r'''
#include <stdio.h>
void a(void) {
printf("a\n");
}
''')
run_process([EMCC, 'common.c', '-c', '-o', 'common.o'])
try_delete('liba.a')
run_process([EMAR, 'rc', 'liba.a', 'common.o'])
create_test_file('common.c', r'''
#include <stdio.h>
void b(void) {
printf("b\n");
}
''')
run_process([EMCC, 'common.c', '-c', '-o', 'common.o'])
try_delete('libb.a')
run_process([EMAR, 'rc', 'libb.a', 'common.o'])
create_test_file('main.c', r'''
void a(void);
void b(void);
int main() {
a();
b();
}
''')
run_process([EMCC, 'main.c', '-L.', '-la', '-lb'])
self.assertContained('a\nb\n', run_js('a.out.js'))
def test_archive_duplicate_basenames(self):
ensure_dir('a')
create_test_file(os.path.join('a', 'common.c'), r'''
#include <stdio.h>
void a(void) {
printf("a\n");
}
''')
run_process([EMCC, os.path.join('a', 'common.c'), '-c', '-o', os.path.join('a', 'common.o')])
ensure_dir('b')
create_test_file(os.path.join('b', 'common.c'), r'''
#include <stdio.h>
void b(void) {
printf("b...\n");
}
''')
run_process([EMCC, os.path.join('b', 'common.c'), '-c', '-o', os.path.join('b', 'common.o')])
try_delete('liba.a')
run_process([EMAR, 'rc', 'liba.a', os.path.join('a', 'common.o'), os.path.join('b', 'common.o')])
    # Verify that the archive contains basenames with hashes to avoid duplication
text = run_process([EMAR, 't', 'liba.a'], stdout=PIPE).stdout
self.assertEqual(text.count('common'), 2)
for line in text.split('\n'):
# should not have huge hash names
self.assertLess(len(line), 20, line)
create_test_file('main.c', r'''
void a(void);
void b(void);
int main() {
a();
b();
}
''')
err = run_process([EMCC, 'main.c', '-L.', '-la'], stderr=PIPE).stderr
self.assertNotIn('archive file contains duplicate entries', err)
self.assertContained('a\nb...\n', run_js('a.out.js'))
# Using llvm-ar directly should cause duplicate basenames
try_delete('libdup.a')
run_process([LLVM_AR, 'rc', 'libdup.a', os.path.join('a', 'common.o'), os.path.join('b', 'common.o')])
text = run_process([EMAR, 't', 'libdup.a'], stdout=PIPE).stdout
self.assertEqual(text.count('common.o'), 2)
    # With fastcomp we don't support duplicate members, so this should warn
    # and fail to link. With the wasm backend (lld) this is fully supported.
cmd = [EMCC, 'main.c', '-L.', '-ldup']
if self.is_wasm_backend():
run_process(cmd)
self.assertContained('a\nb...\n', run_js('a.out.js'))
else:
err = self.expect_fail(cmd)
self.assertIn('libdup.a: archive file contains duplicate entries', err)
self.assertIn('error: undefined symbol: a', err)
# others are not duplicates - the hashing keeps them separate
self.assertEqual(err.count('duplicate: '), 1)
self.assertContained('a\nb...\n', run_js('a.out.js'))
def test_export_from_archive(self):
export_name = 'this_is_an_entry_point'
full_export_name = '_' + export_name
# The wasm backend exports symbols without the leading '_'
if self.is_wasm_backend():
expect_export = export_name
else:
expect_export = full_export_name
create_test_file('export.c', r'''
#include <stdio.h>
void %s(void) {
printf("Hello, world!\n");
}
''' % export_name)
run_process([EMCC, 'export.c', '-c', '-o', 'export.o'])
run_process([EMAR, 'rc', 'libexport.a', 'export.o'])
create_test_file('main.c', r'''
int main() {
return 0;
}
''')
# Sanity check: the symbol should not be linked in if not requested.
run_process([EMCC, 'main.c', '-L.', '-lexport'])
self.assertFalse(self.is_exported_in_wasm(expect_export, 'a.out.wasm'))
# Sanity check: exporting without a definition does not cause it to appear.
# Note: exporting main prevents emcc from warning that it generated no code.
run_process([EMCC, 'main.c', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0', '-s', "EXPORTED_FUNCTIONS=['_main', '%s']" % full_export_name])
self.assertFalse(self.is_exported_in_wasm(expect_export, 'a.out.wasm'))
# Actual test: defining symbol in library and exporting it causes it to appear in the output.
run_process([EMCC, 'main.c', '-L.', '-lexport', '-s', "EXPORTED_FUNCTIONS=['%s']" % full_export_name])
self.assertTrue(self.is_exported_in_wasm(expect_export, 'a.out.wasm'))
def test_embed_file(self):
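    # --embed-file should bake the file into the output so that fopen()
    # can read it at runtime.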
create_test_file('somefile.txt', 'hello from a file with lots of data and stuff in it thank you very much')
create_test_file('main.cpp', r'''
#include <stdio.h>
int main() {
FILE *f = fopen("somefile.txt", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
return 0;
}
''')
run_process([EMCC, 'main.cpp', '--embed-file', 'somefile.txt'])
self.assertContained('|hello from a file wi|', run_js('a.out.js'))
# preload twice, should not err
run_process([EMCC, 'main.cpp', '--embed-file', 'somefile.txt', '--embed-file', 'somefile.txt'])
self.assertContained('|hello from a file wi|', run_js('a.out.js'))
def test_embed_file_dup(self):
ensure_dir(self.in_dir('tst', 'test1'))
ensure_dir(self.in_dir('tst', 'test2'))
open(self.in_dir('tst', 'aa.txt'), 'w').write('frist')
open(self.in_dir('tst', 'test1', 'aa.txt'), 'w').write('sacond')
open(self.in_dir('tst', 'test2', 'aa.txt'), 'w').write('thard')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
void print_file(const char *name) {
FILE *f = fopen(name, "r");
char buf[100];
memset(buf, 0, 100);
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
}
int main() {
print_file("tst/aa.txt");
print_file("tst/test1/aa.txt");
print_file("tst/test2/aa.txt");
return 0;
}
''')
run_process([EMCC, 'main.cpp', '--embed-file', 'tst'])
self.assertContained('|frist|\n|sacond|\n|thard|\n', run_js('a.out.js'))
def test_exclude_file(self):
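    # --exclude-file patterns should drop matching files and directories
    # from the embedded data, while keeping everything else.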
ensure_dir(self.in_dir('tst', 'abc.exe'))
ensure_dir(self.in_dir('tst', 'abc.txt'))
open(self.in_dir('tst', 'hello.exe'), 'w').write('hello')
open(self.in_dir('tst', 'hello.txt'), 'w').write('world')
open(self.in_dir('tst', 'abc.exe', 'foo'), 'w').write('emscripten')
open(self.in_dir('tst', 'abc.txt', 'bar'), 'w').write('!!!')
create_test_file('main.cpp', r'''
#include <stdio.h>
int main() {
if(fopen("tst/hello.exe", "rb")) printf("Failed\n");
if(!fopen("tst/hello.txt", "rb")) printf("Failed\n");
if(fopen("tst/abc.exe/foo", "rb")) printf("Failed\n");
if(!fopen("tst/abc.txt/bar", "rb")) printf("Failed\n");
return 0;
}
''')
run_process([EMCC, 'main.cpp', '--embed-file', 'tst', '--exclude-file', '*.exe'])
self.assertEqual(run_js('a.out.js').strip(), '')
def test_multidynamic_link(self):
    # Linking the same dynamic library in statically would normally error, since we statically link it, causing duplicate symbols
def test(link_cmd, lib_suffix=''):
print(link_cmd, lib_suffix)
self.clear()
ensure_dir('libdir')
create_test_file('main.cpp', r'''
#include <stdio.h>
extern void printey();
extern void printother();
int main() {
printf("*");
printey();
printf("\n");
printother();
printf("\n");
printf("*");
return 0;
}
''')
open(os.path.join('libdir', 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib");
}
''')
open(os.path.join('libdir', 'libother.cpp'), 'w').write('''
#include <stdio.h>
extern void printey();
void printother() {
printf("|");
printey();
printf("|");
}
''')
compiler = [EMCC]
# Build libfile normally into an .so
run_process(compiler + [os.path.join('libdir', 'libfile.cpp'), '-o', os.path.join('libdir', 'libfile.so' + lib_suffix)])
# Build libother and dynamically link it to libfile
run_process(compiler + [os.path.join('libdir', 'libother.cpp')] + link_cmd + ['-o', os.path.join('libdir', 'libother.so')])
# Build the main file, linking in both the libs
run_process(compiler + [os.path.join('main.cpp')] + link_cmd + ['-lother', '-c'])
print('...')
# The normal build system is over. We need to do an additional step to link in the dynamic libraries, since we ignored them before
run_process([EMCC, 'main.o'] + link_cmd + ['-lother', '-s', 'EXIT_RUNTIME=1'])
self.assertContained('*hello from lib\n|hello from lib|\n*', run_js('a.out.js'))
test(['-L' + 'libdir', '-lfile']) # -l, auto detection from library path
test(['-L' + 'libdir', self.in_dir('libdir', 'libfile.so.3.1.4.1.5.9')], '.3.1.4.1.5.9') # handle libX.so.1.2.3 as well
def test_js_link(self):
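    # --pre-js code must run before anything else, and --post-js code
    # after main has finished.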
create_test_file('main.cpp', '''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
create_test_file('before.js', '''
var MESSAGE = 'hello from js';
      // Module is initialized with an empty object by default, so if there are no keys, nothing has run yet
if (Object.keys(Module).length) throw 'This code should run before anything else!';
''')
create_test_file('after.js', '''
out(MESSAGE);
''')
run_process([EMCC, 'main.cpp', '--pre-js', 'before.js', '--post-js', 'after.js', '-s', 'WASM_ASYNC_COMPILATION=0'])
self.assertContained('hello from main\nhello from js\n', run_js('a.out.js'))
def test_sdl_endianness(self):
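    # SDL's byte-order macros should report little-endian in Emscripten builds.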
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <SDL/SDL.h>
int main() {
printf("%d, %d, %d\n", SDL_BYTEORDER, SDL_LIL_ENDIAN, SDL_BIG_ENDIAN);
return 0;
}
''')
run_process([EMCC, 'main.cpp'])
self.assertContained('1234, 1234, 4321\n', run_js('a.out.js'))
def test_sdl2_mixer(self):
building.emcc(path_from_root('tests', 'sdl2_mixer.c'), ['-s', 'USE_SDL_MIXER=2'], output_filename='a.out.js')
def test_libpng(self):
shutil.copyfile(path_from_root('tests', 'pngtest.png'), 'pngtest.png')
building.emcc(path_from_root('tests', 'pngtest.c'), ['--embed-file', 'pngtest.png', '-s', 'USE_ZLIB=1', '-s', 'USE_LIBPNG=1'], output_filename='a.out.js')
self.assertContained('TESTS PASSED', run_process(JS_ENGINES[0] + ['a.out.js'], stdout=PIPE, stderr=PIPE).stdout)
def test_libjpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
building.emcc(path_from_root('tests', 'jpeg_test.c'), ['--embed-file', 'screenshot.jpg', '-s', 'USE_LIBJPEG=1'], output_filename='a.out.js')
self.assertContained('Image is 600 by 450 with 3 components', run_js('a.out.js', args=['screenshot.jpg'], stdout=PIPE, stderr=PIPE))
def test_bullet(self):
building.emcc(path_from_root('tests', 'bullet_hello_world.cpp'), ['-s', 'USE_BULLET=1'], output_filename='a.out.js')
self.assertContained('BULLET RUNNING', run_process(JS_ENGINES[0] + ['a.out.js'], stdout=PIPE, stderr=PIPE).stdout)
def test_vorbis(self):
# This will also test if ogg compiles, because vorbis depends on ogg
building.emcc(path_from_root('tests', 'vorbis_test.c'), ['-s', 'USE_VORBIS=1'], output_filename='a.out.js')
self.assertContained('ALL OK', run_process(JS_ENGINES[0] + ['a.out.js'], stdout=PIPE, stderr=PIPE).stdout)
def test_bzip2(self):
building.emcc(path_from_root('tests', 'bzip2_test.c'), ['-s', 'USE_BZIP2=1'], output_filename='a.out.js')
self.assertContained("usage: unzcrash filename", run_process(JS_ENGINES[0] + ['a.out.js'], stdout=PIPE, stderr=PIPE).stdout)
def test_freetype(self):
# copy the Liberation Sans Bold truetype file located in the
# <emscripten_root>/tests/freetype to the compilation folder
shutil.copy2(path_from_root('tests/freetype', 'LiberationSansBold.ttf'), os.getcwd())
# build test program with the font file embed in it
building.emcc(path_from_root('tests', 'freetype_test.c'), ['-s', 'USE_FREETYPE=1', '--embed-file', 'LiberationSansBold.ttf'], output_filename='a.out.js')
# the test program will print an ascii representation of a bitmap where the
# 'w' character has been rendered using the Liberation Sans Bold font
expectedOutput = ' \n' + \
' \n' + \
' \n' + \
' \n' + \
'*** +***+ \n' + \
'***+ ***** +\n' + \
'+**+ ***** +\n' + \
'+*** +**+**+ *\n' + \
' ***+ ***+**+ +*\n' + \
' +**+ *** *** +*\n' + \
' +**++**+ +**+**\n' + \
' ***+**+ +**+**\n' + \
' ****** *****\n' + \
' +****+ +****\n' + \
' +****+ +****\n' + \
' **** ****'
self.assertContained(expectedOutput, run_process(JS_ENGINES[0] + ['a.out.js'], stdout=PIPE, stderr=PIPE).stdout)
def test_link_memcpy(self):
    # memcpy can show up *after* optimizations, past our opportunity to link in libc, so it must be special-cased
create_test_file('main.cpp', r'''
#include <stdio.h>
int main(int argc, char **argv) {
int num = argc + 10;
char buf[num], buf2[num];
for (int i = 0; i < num; i++) {
buf[i] = i*i+i/3;
}
for (int i = 1; i < num; i++) {
buf[i] += buf[i-1];
}
for (int i = 0; i < num; i++) {
buf2[i] = buf[i];
}
for (int i = 1; i < num; i++) {
buf2[i] += buf2[i-1];
}
for (int i = 0; i < num; i++) {
printf("%d:%d\n", i, buf2[i]);
}
return 0;
}
''')
run_process([EMCC, '-O2', 'main.cpp'])
output = run_js('a.out.js', full_output=True, stderr=PIPE)
self.assertContained('''0:0
1:1
2:6
3:21
4:53
5:111
6:-49
7:98
8:55
9:96
10:-16
''', output)
self.assertNotContained('warning: library.js memcpy should not be running, it is only for testing!', output)
def test_undefined_function(self):
cmd = [EMCC, path_from_root('tests', 'hello_world.cpp')]
run_process(cmd)
# adding a missing symbol to EXPORTED_FUNCTIONS should cause failure
cmd += ['-s', "EXPORTED_FUNCTIONS=['foobar']"]
err = self.expect_fail(cmd)
self.assertContained('undefined exported function: "foobar"', err)
# setting ERROR_ON_UNDEFINED_SYMBOLS=0 suppresses error
cmd += ['-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0']
run_process(cmd)
def test_undefined_symbols(self):
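    # Exercise the WARN_ON_/ERROR_ON_UNDEFINED_SYMBOLS settings against
    # undefined externs, checking both diagnostics and exit status.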
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <SDL.h>
#include "SDL/SDL_opengl.h"
extern "C" {
void something();
void elsey();
}
int main() {
printf("%p", SDL_GL_GetProcAddress("glGenTextures")); // pull in gl proc stuff, avoid warnings on emulation funcs
something();
elsey();
return 0;
}
''')
for args in ([], ['-O1'], ['-s', 'MAX_WEBGL_VERSION=2']):
for action in ('WARN', 'ERROR', None):
for value in ([0, 1]):
try_delete('a.out.js')
print('checking "%s" %s=%s' % (args, action, value))
extra = ['-s', action + '_ON_UNDEFINED_SYMBOLS=%d' % value] if action else []
proc = run_process([EMCC, 'main.cpp'] + extra + args, stderr=PIPE, check=False)
print(proc.stderr)
if value or action is None:
# The default is that we error in undefined symbols
self.assertContained('error: undefined symbol: something', proc.stderr)
self.assertContained('error: undefined symbol: elsey', proc.stderr)
check_success = False
elif action == 'ERROR' and not value:
# Error disables, should only warn
self.assertContained('warning: undefined symbol: something', proc.stderr)
self.assertContained('warning: undefined symbol: elsey', proc.stderr)
self.assertNotContained('undefined symbol: emscripten_', proc.stderr)
check_success = True
elif action == 'WARN' and not value:
# Disabled warning should imply disabling errors
self.assertNotContained('undefined symbol', proc.stderr)
check_success = True
if check_success:
self.assertEqual(proc.returncode, 0)
self.assertTrue(os.path.exists('a.out.js'))
else:
self.assertNotEqual(proc.returncode, 0)
self.assertFalse(os.path.exists('a.out.js'))
def test_GetProcAddress_LEGACY_GL_EMULATION(self):
# without legacy gl emulation, getting a proc from there should fail
self.do_other_test(os.path.join('other', 'GetProcAddress_LEGACY_GL_EMULATION'), run_args=['0'], emcc_args=['-s', 'LEGACY_GL_EMULATION=0'])
# with it, it should work
self.do_other_test(os.path.join('other', 'GetProcAddress_LEGACY_GL_EMULATION'), run_args=['1'], emcc_args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_prepost(self):
create_test_file('main.cpp', '''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
create_test_file('pre.js', '''
var Module = {
preRun: function() { out('pre-run') },
postRun: function() { out('post-run') }
};
''')
run_process([EMCC, 'main.cpp', '--pre-js', 'pre.js', '-s', 'WASM_ASYNC_COMPILATION=0'])
self.assertContained('pre-run\nhello from main\npost-run\n', run_js('a.out.js'))
    # addRunDependency during preRun should prevent main and post-run from
    # running.
with open('pre.js', 'a') as f:
f.write('Module.preRun = function() { out("add-dep"); addRunDependency(); }\n')
run_process([EMCC, 'main.cpp', '--pre-js', 'pre.js', '-s', 'WASM_ASYNC_COMPILATION=0'])
output = run_js('a.out.js')
self.assertContained('add-dep\n', output)
self.assertNotContained('hello from main\n', output)
self.assertNotContained('post-run\n', output)
# noInitialRun prevents run
for no_initial_run, run_dep in [(0, 0), (1, 0), (0, 1)]:
print(no_initial_run, run_dep)
args = ['-s', 'WASM_ASYNC_COMPILATION=0', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["callMain"]']
if no_initial_run:
args += ['-s', 'INVOKE_RUN=0']
if run_dep:
create_test_file('pre.js', 'Module.preRun = function() { addRunDependency("test"); }')
create_test_file('post.js', 'removeRunDependency("test");')
args += ['--pre-js', 'pre.js', '--post-js', 'post.js']
run_process([EMCC, 'main.cpp'] + args)
output = run_js('a.out.js')
self.assertContainedIf('hello from main', output, not no_initial_run)
if no_initial_run:
# Calling main later should still work, filesystem etc. must be set up.
print('call main later')
src = open('a.out.js').read()
src += '\nModule.callMain();\n'
create_test_file('a.out.js', src)
self.assertContained('hello from main', run_js('a.out.js'))
# Use postInit
create_test_file('pre.js', '''
var Module = {
preRun: function() { out('pre-run') },
postRun: function() { out('post-run') },
preInit: function() { out('pre-init') }
};
''')
run_process([EMCC, 'main.cpp', '--pre-js', 'pre.js'])
self.assertContained('pre-init\npre-run\nhello from main\npost-run\n', run_js('a.out.js'))
def test_prepost2(self):
create_test_file('main.cpp', '''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
create_test_file('pre.js', '''
var Module = {
preRun: function() { out('pre-run') },
};
''')
create_test_file('pre2.js', '''
Module.postRun = function() { out('post-run') };
''')
run_process([EMCC, 'main.cpp', '--pre-js', 'pre.js', '--pre-js', 'pre2.js'])
self.assertContained('pre-run\nhello from main\npost-run\n', run_js('a.out.js'))
def test_prepre(self):
create_test_file('main.cpp', '''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
create_test_file('pre.js', '''
var Module = {
preRun: [function() { out('pre-run') }],
};
''')
create_test_file('pre2.js', '''
Module.preRun.push(function() { out('prepre') });
''')
run_process([EMCC, 'main.cpp', '--pre-js', 'pre.js', '--pre-js', 'pre2.js'])
self.assertContained('prepre\npre-run\nhello from main\n', run_js('a.out.js'))
def test_extern_prepost(self):
create_test_file('extern-pre.js', '''
// I am an external pre.
''')
create_test_file('extern-post.js', '''
// I am an external post.
''')
run_process([EMCC, '-O2', path_from_root('tests', 'hello_world.c'), '--extern-pre-js', 'extern-pre.js', '--extern-post-js', 'extern-post.js'])
# the files should be included, and externally - not as part of optimized
# code, so they are the very first and last things, and they are not
# minified.
with open('a.out.js') as output:
js = output.read()
pre = js.index('// I am an external pre.')
post = js.index('// I am an external post.')
# ignore some slack - newlines and other things. we just care about the
# big picture here
SLACK = 50
self.assertLess(pre, post)
self.assertLess(pre, SLACK)
self.assertGreater(post, len(js) - SLACK)
# make sure the slack is tiny compared to the whole program
self.assertGreater(len(js), 100 * SLACK)
@no_wasm_backend('depends on bc output')
def test_save_bc(self):
cmd = [EMCC, path_from_root('tests', 'hello_world_loop_malloc.cpp'), '--save-bc', 'my_bitcode.bc']
run_process(cmd)
assert 'hello, world!' in run_js('a.out.js')
self.assertExists('my_bitcode.bc')
try_delete('a.out.js')
building.llvm_dis('my_bitcode.bc', 'my_ll.ll')
run_process([EMCC, 'my_ll.ll', '-nostdlib', '-o', 'two.js'])
assert 'hello, world!' in run_js('two.js')
def test_js_optimizer(self):
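    # Run each JS/acorn optimizer pass over a sample input and compare the
    # result against the expected output, including the native optimizer
    # paths on fastcomp.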
ACORN_PASSES = ['JSDCE', 'AJSDCE', 'applyImportAndExportNameChanges', 'emitDCEGraph', 'applyDCEGraphRemovals', 'growableHeap', 'unsignPointers', 'asanify']
for input, expected, passes in [
(path_from_root('tests', 'optimizer', 'eliminateDeadGlobals.js'), open(path_from_root('tests', 'optimizer', 'eliminateDeadGlobals-output.js')).read(),
['eliminateDeadGlobals']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-output.js')).read(),
['hoistMultiples', 'removeAssignsToUndefined', 'simplifyExpressions']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-output.js')).read(),
['asm', 'simplifyExpressions']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-si.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-si-output.js')).read(),
['simplifyIfs']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-regs.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-regs-output.js')).read(),
['registerize']),
(path_from_root('tests', 'optimizer', 'eliminator-test.js'), open(path_from_root('tests', 'optimizer', 'eliminator-test-output.js')).read(),
['eliminate']),
(path_from_root('tests', 'optimizer', 'safe-eliminator-test.js'), open(path_from_root('tests', 'optimizer', 'safe-eliminator-test-output.js')).read(),
['eliminateMemSafe']),
(path_from_root('tests', 'optimizer', 'asm-eliminator-test.js'), open(path_from_root('tests', 'optimizer', 'asm-eliminator-test-output.js')).read(),
['asm', 'eliminate']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-output.js')).read(),
['asm', 'registerize']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-harder.js'), [open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-harder-output.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-harder-output2.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-harder-output3.js')).read()],
['asm', 'registerizeHarder']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-min.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-regs-min-output.js')).read(),
['asm', 'registerize', 'minifyLocals']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-minifyLocals.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-minifyLocals-output.js')).read(),
['minifyLocals']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre.js'), [open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre-output.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre-output2.js')).read()],
['asm', 'simplifyExpressions']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre-f32.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre-output-f32.js')).read(),
['asm', 'asmPreciseF32', 'simplifyExpressions', 'optimizeFrounds']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre-f32.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-pre-output-f32-nosimp.js')).read(),
['asm', 'asmPreciseF32', 'optimizeFrounds']),
(path_from_root('tests', 'optimizer', 'test-reduce-dead-float-return.js'), open(path_from_root('tests', 'optimizer', 'test-reduce-dead-float-return-output.js')).read(),
['asm', 'optimizeFrounds', 'registerizeHarder']),
(path_from_root('tests', 'optimizer', 'test-no-reduce-dead-float-return-to-nothing.js'), open(path_from_root('tests', 'optimizer', 'test-no-reduce-dead-float-return-to-nothing-output.js')).read(),
['asm', 'registerizeHarder']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-last.js'), [open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-lastOpts-output.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-lastOpts-output2.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-lastOpts-output3.js')).read()],
['asm', 'asmLastOpts']),
(path_from_root('tests', 'optimizer', 'asmLastOpts.js'), open(path_from_root('tests', 'optimizer', 'asmLastOpts-output.js')).read(),
['asm', 'asmLastOpts']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-last.js'), [open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-last-output.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-last-output2.js')).read(), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-last-output3.js')).read()],
['asm', 'asmLastOpts', 'last']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-relocate.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-relocate-output.js')).read(),
['asm', 'relocate']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-minlast.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-asm-minlast-output.js')).read(),
['asm', 'minifyWhitespace', 'asmLastOpts', 'last']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-shiftsAggressive.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-shiftsAggressive-output.js')).read(),
['asm', 'aggressiveVariableElimination']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-localCSE.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-localCSE-output.js')).read(),
['asm', 'localCSE']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-ensureLabelSet.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-ensureLabelSet-output.js')).read(),
['asm', 'ensureLabelSet']),
(path_from_root('tests', 'optimizer', '3154.js'), open(path_from_root('tests', 'optimizer', '3154-output.js')).read(),
['asm', 'eliminate', 'registerize', 'asmLastOpts', 'last']),
(path_from_root('tests', 'optimizer', 'safeLabelSetting.js'), open(path_from_root('tests', 'optimizer', 'safeLabelSetting-output.js')).read(),
['asm', 'safeLabelSetting']), # eliminate, just enough to trigger asm normalization/denormalization
(path_from_root('tests', 'optimizer', 'null_if.js'), [open(path_from_root('tests', 'optimizer', 'null_if-output.js')).read(), open(path_from_root('tests', 'optimizer', 'null_if-output2.js')).read()],
['asm', 'registerizeHarder', 'asmLastOpts', 'minifyWhitespace']), # issue 3520
(path_from_root('tests', 'optimizer', 'null_else.js'), [open(path_from_root('tests', 'optimizer', 'null_else-output.js')).read(), open(path_from_root('tests', 'optimizer', 'null_else-output2.js')).read()],
['asm', 'registerizeHarder', 'asmLastOpts', 'minifyWhitespace']), # issue 3549
(path_from_root('tests', 'optimizer', 'test-js-optimizer-splitMemory.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-splitMemory-output.js')).read(),
['splitMemory']),
(path_from_root('tests', 'optimizer', 'JSDCE.js'), open(path_from_root('tests', 'optimizer', 'JSDCE-output.js')).read(),
['JSDCE']),
(path_from_root('tests', 'optimizer', 'JSDCE-hasOwnProperty.js'), open(path_from_root('tests', 'optimizer', 'JSDCE-hasOwnProperty-output.js')).read(),
['JSDCE']),
(path_from_root('tests', 'optimizer', 'JSDCE-fors.js'), open(path_from_root('tests', 'optimizer', 'JSDCE-fors-output.js')).read(),
['JSDCE']),
(path_from_root('tests', 'optimizer', 'AJSDCE.js'), open(path_from_root('tests', 'optimizer', 'AJSDCE-output.js')).read(),
['AJSDCE']),
(path_from_root('tests', 'optimizer', 'emitDCEGraph.js'), open(path_from_root('tests', 'optimizer', 'emitDCEGraph-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'emitDCEGraph2.js'), open(path_from_root('tests', 'optimizer', 'emitDCEGraph2-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'emitDCEGraph3.js'), open(path_from_root('tests', 'optimizer', 'emitDCEGraph3-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'emitDCEGraph4.js'), open(path_from_root('tests', 'optimizer', 'emitDCEGraph4-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'emitDCEGraph5.js'), open(path_from_root('tests', 'optimizer', 'emitDCEGraph5-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'minimal-runtime-applyDCEGraphRemovals.js'), open(path_from_root('tests', 'optimizer', 'minimal-runtime-applyDCEGraphRemovals-output.js')).read(),
['applyDCEGraphRemovals']),
(path_from_root('tests', 'optimizer', 'applyDCEGraphRemovals.js'), open(path_from_root('tests', 'optimizer', 'applyDCEGraphRemovals-output.js')).read(),
['applyDCEGraphRemovals']),
(path_from_root('tests', 'optimizer', 'applyImportAndExportNameChanges.js'), open(path_from_root('tests', 'optimizer', 'applyImportAndExportNameChanges-output.js')).read(),
['applyImportAndExportNameChanges']),
(path_from_root('tests', 'optimizer', 'applyImportAndExportNameChanges2.js'), open(path_from_root('tests', 'optimizer', 'applyImportAndExportNameChanges2-output.js')).read(),
['applyImportAndExportNameChanges']),
(path_from_root('tests', 'optimizer', 'minimal-runtime-emitDCEGraph.js'), open(path_from_root('tests', 'optimizer', 'minimal-runtime-emitDCEGraph-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'minimal-runtime-2-emitDCEGraph.js'), open(path_from_root('tests', 'optimizer', 'minimal-runtime-2-emitDCEGraph-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'standalone-emitDCEGraph.js'), open(path_from_root('tests', 'optimizer', 'standalone-emitDCEGraph-output.js')).read(),
['emitDCEGraph', 'noPrint']),
(path_from_root('tests', 'optimizer', 'emittedJSPreservesParens.js'), open(path_from_root('tests', 'optimizer', 'emittedJSPreservesParens-output.js')).read(),
['asm']),
(path_from_root('tests', 'optimizer', 'test-growableHeap.js'), open(path_from_root('tests', 'optimizer', 'test-growableHeap-output.js')).read(),
['growableHeap']),
(path_from_root('tests', 'optimizer', 'test-unsignPointers.js'), open(path_from_root('tests', 'optimizer', 'test-unsignPointers-output.js')).read(),
['unsignPointers']),
(path_from_root('tests', 'optimizer', 'test-asanify.js'), open(path_from_root('tests', 'optimizer', 'test-asanify-output.js')).read(),
['asanify']),
(path_from_root('tests', 'optimizer', 'test-js-optimizer-minifyGlobals.js'), open(path_from_root('tests', 'optimizer', 'test-js-optimizer-minifyGlobals-output.js')).read(),
['minifyGlobals']),
]:
print(input, passes)
if not isinstance(expected, list):
expected = [expected]
expected = [out.replace('\n\n', '\n').replace('\n\n', '\n') for out in expected]
acorn = any(p in ACORN_PASSES for p in passes)
# test calling optimizer
if not acorn:
print(' js')
output = run_process(NODE_JS + [path_from_root('tools', 'js-optimizer.js'), input] + passes, stdin=PIPE, stdout=PIPE).stdout
else:
print(' acorn')
output = run_process(NODE_JS + [path_from_root('tools', 'acorn-optimizer.js'), input] + passes, stdin=PIPE, stdout=PIPE).stdout
def check_js(js, expected):
# print >> sys.stderr, 'chak\n==========================\n', js, '\n===========================\n'
if 'registerizeHarder' in passes:
          # registerizeHarder is hard to test, as names vary by chance, nondeterministically FIXME
def fix(src):
if type(src) is list:
return list(map(fix, src))
src = '\n'.join([line for line in src.split('\n') if 'var ' not in line]) # ignore vars
def reorder(func):
def swap(func, stuff):
# emit EYE_ONE always before EYE_TWO, replacing i1,i2 or i2,i1 etc
for i in stuff:
if i not in func:
return func
indexes = [[i, func.index(i)] for i in stuff]
indexes.sort(key=lambda x: x[1])
for j in range(len(indexes)):
func = func.replace(indexes[j][0], 'STD_' + str(j))
return func
func = swap(func, ['i1', 'i2', 'i3'])
func = swap(func, ['i1', 'i2'])
func = swap(func, ['i4', 'i5'])
return func
src = 'function '.join(map(reorder, src.split('function ')))
return src
js = fix(js)
expected = fix(expected)
self.assertIdentical(expected, js.replace('\r\n', '\n').replace('\n\n', '\n').replace('\n\n', '\n'))
if input not in [ # blacklist of tests that are native-optimizer only
path_from_root('tests', 'optimizer', 'asmLastOpts.js'),
path_from_root('tests', 'optimizer', '3154.js')
]:
check_js(output, expected)
else:
print('(skip non-native)')
if not self.is_wasm_backend() and tools.js_optimizer.use_native(passes) and tools.js_optimizer.get_native_optimizer():
# test calling native
def check_json():
run_process(NODE_JS + [path_from_root('tools', 'js-optimizer.js'), output_temp, 'receiveJSON'], stdin=PIPE, stdout=open(output_temp + '.js', 'w'))
output = open(output_temp + '.js').read()
check_js(output, expected)
self.clear()
input_temp = 'temp.js'
output_temp = 'output.js'
shutil.copyfile(input, input_temp)
run_process(NODE_JS + [path_from_root('tools', 'js-optimizer.js'), input_temp, 'emitJSON'], stdin=PIPE, stdout=open(input_temp + '.js', 'w'))
original = open(input).read()
if '// EXTRA_INFO:' in original:
json = open(input_temp + '.js').read()
json += '\n' + original[original.find('// EXTRA_INFO:'):]
create_test_file(input_temp + '.js', json)
# last is only relevant when we emit JS
if 'last' not in passes and \
'null_if' not in input and 'null_else' not in input: # null-* tests are js optimizer or native, not a mixture (they mix badly)
print(' native (receiveJSON)')
output = run_process([tools.js_optimizer.get_native_optimizer(), input_temp + '.js'] + passes + ['receiveJSON', 'emitJSON'], stdin=PIPE, stdout=open(output_temp, 'w')).stdout
check_json()
print(' native (parsing JS)')
output = run_process([tools.js_optimizer.get_native_optimizer(), input] + passes + ['emitJSON'], stdin=PIPE, stdout=open(output_temp, 'w')).stdout
check_json()
print(' native (emitting JS)')
output = run_process([tools.js_optimizer.get_native_optimizer(), input] + passes, stdin=PIPE, stdout=PIPE).stdout
check_js(output, expected)
@no_fastcomp('wasm2js-only')
def test_js_optimizer_wasm2js(self):
# run the js optimizer in a similar way as wasm2js does
shutil.copyfile(path_from_root('tests', 'optimizer', 'wasm2js.js'), 'wasm2js.js')
run_process([PYTHON, path_from_root('tools', 'js_optimizer.py'), 'wasm2js.js', 'minifyNames', 'last'])
with open(path_from_root('tests', 'optimizer', 'wasm2js-output.js')) as expected:
with open('wasm2js.js.jsopt.js') as actual:
self.assertIdentical(expected.read(), actual.read())
def test_m_mm(self):
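    # -M and -MM should emit Makefile-style dependency output rather than
    # compiling.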
create_test_file('foo.c', '#include <emscripten.h>')
for opt in ['M', 'MM']:
proc = run_process([EMCC, 'foo.c', '-' + opt], stdout=PIPE, stderr=PIPE)
assert 'foo.o: ' in proc.stdout, '-%s failed to produce the right output: %s' % (opt, proc.stdout)
assert 'error' not in proc.stderr, 'Unexpected stderr: ' + proc.stderr
@uses_canonical_tmp
def test_emcc_debug_files(self):
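    # EMCC_DEBUG=1/2 should save the expected intermediate files into the
    # canonical temp dir; unset, nothing should be written there.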
for opts in [0, 1, 2, 3]:
for debug in [None, '1', '2']:
print(opts, debug)
if os.path.exists(self.canonical_temp_dir):
shutil.rmtree(self.canonical_temp_dir)
env = os.environ.copy()
if debug is None:
env.pop('EMCC_DEBUG', None)
else:
env['EMCC_DEBUG'] = debug
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-O' + str(opts)], stderr=PIPE, env=env)
if debug is None:
self.assertFalse(os.path.exists(self.canonical_temp_dir))
elif debug == '1':
if self.is_wasm_backend():
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-3-original.js'))
else:
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-0-linktime.bc'))
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-1-original.js'))
elif debug == '2':
if self.is_wasm_backend():
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-3-original.js'))
else:
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-0-basebc.bc'))
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-1-linktime.bc'))
self.assertExists(os.path.join(self.canonical_temp_dir, 'emcc-2-original.js'))
def test_debuginfo(self):
for args, expect_debug in [
(['-O0'], False),
(['-O0', '-g'], True),
(['-O0', '-g4'], True),
(['-O1'], False),
(['-O1', '-g'], True),
(['-O2'], False),
(['-O2', '-g'], True),
]:
print(args, expect_debug)
err = run_process([EMCC, '-v', path_from_root('tests', 'hello_world.cpp')] + args, stdout=PIPE, stderr=PIPE).stderr
lines = err.splitlines()
if self.is_wasm_backend():
finalize = [l for l in lines if 'wasm-emscripten-finalize' in l][0]
if expect_debug:
self.assertIn(' -g ', finalize)
else:
self.assertNotIn(' -g ', finalize)
else:
if expect_debug:
self.assertNotIn('strip-debug', err)
else:
self.assertIn('strip-debug', err)
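# -gline-tables-only emits only DWARF line-table debug info, so the binary should be larger than a no-debug build but smaller than a full -g build.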
@no_fastcomp()
def test_debuginfo_line_tables_only(self):
def test(do_compile):
do_compile([])
no_size = os.path.getsize('a.out.wasm')
do_compile(['-gline-tables-only'])
line_size = os.path.getsize('a.out.wasm')
do_compile(['-g'])
full_size = os.path.getsize('a.out.wasm')
return (no_size, line_size, full_size)
def compile_to_object(compile_args):
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-c', '-o', 'a.out.wasm'] + compile_args)
no_size, line_size, full_size = test(compile_to_object)
self.assertLess(no_size, line_size)
self.assertLess(line_size, full_size)
def compile_to_executable(compile_args, link_args):
# compile with the specified args
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-c', '-o', 'a.o'] + compile_args)
# link with debug info
run_process([EMCC, 'a.o'] + link_args)
def compile_to_debug_executable(compile_args):
return compile_to_executable(compile_args, ['-g'])
no_size, line_size, full_size = test(compile_to_debug_executable)
self.assertLess(no_size, line_size)
self.assertLess(line_size, full_size)
def compile_to_release_executable(compile_args):
return compile_to_executable(compile_args, [])
no_size, line_size, full_size = test(compile_to_release_executable)
self.assertEqual(no_size, line_size)
self.assertEqual(line_size, full_size)
@no_fastcomp()
def test_dwarf(self):
def compile_with_dwarf(args, output):
# Test that -g enables dwarf info in object files and linked wasm
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-o', output, '-g'] + args)
def verify(output):
info = run_process([LLVM_DWARFDUMP, '--all', output], stdout=PIPE).stdout
self.assertIn('DW_TAG_subprogram', info) # Ensure there's a subprogram entry in .debug_info
self.assertIn('debug_line[0x', info) # Ensure there's a line table
compile_with_dwarf(['-c'], 'a.o')
verify('a.o')
compile_with_dwarf([], 'a.js')
verify('a.wasm')
@unittest.skipIf(not scons_path, 'scons not found in PATH')
@with_env_modify({'EMSCRIPTEN_ROOT': path_from_root()})
def test_scons(self):
# this test copies the site_scons directory alongside the test
shutil.copytree(path_from_root('tests', 'scons'), 'test')
shutil.copytree(path_from_root('tools', 'scons', 'site_scons'), os.path.join('test', 'site_scons'))
with chdir('test'):
run_process(['scons'])
output = run_js('scons_integration.js', assert_returncode=5)
self.assertContained('If you see this - the world is all right!', output)
@unittest.skipIf(not scons_path, 'scons not found in PATH')
@with_env_modify({'EMSCRIPTEN_TOOLPATH': path_from_root('tools', 'scons', 'site_scons'),
'EMSCRIPTEN_ROOT': path_from_root()})
def test_emscons(self):
# uses the emscons wrapper which requires EMSCRIPTEN_TOOLPATH to find
# site_scons
shutil.copytree(path_from_root('tests', 'scons'), 'test')
with chdir('test'):
run_process([path_from_root('emscons'), 'scons'])
output = run_js('scons_integration.js', assert_returncode=5)
self.assertContained('If you see this - the world is all right!', output)
def test_embind_fail(self):
out = self.expect_fail([EMCC, path_from_root('tests', 'embind', 'test_unsigned.cpp')])
self.assertContained("undefined symbol: _embind_register_function", out)
@is_slow_test
def test_embind(self):
environ = os.environ.copy()
environ['EMCC_CLOSURE_ARGS'] = environ.get('EMCC_CLOSURE_ARGS', '') + " --externs " + pipes.quote(path_from_root('tests', 'embind', 'underscore-externs.js'))
test_cases = [
(['--bind']),
(['--bind', '-O1']),
(['--bind', '-O2']),
(['--bind', '-O2', '-s', 'ALLOW_MEMORY_GROWTH=1', path_from_root('tests', 'embind', 'isMemoryGrowthEnabled=true.cpp')]),
]
without_utf8_args = ['-s', 'EMBIND_STD_STRING_IS_UTF8=0']
test_cases_without_utf8 = []
for args in test_cases:
test_cases_without_utf8.append((args + without_utf8_args))
test_cases += test_cases_without_utf8
test_cases.extend([(args[:] + ['-s', 'DYNAMIC_EXECUTION=0']) for args in test_cases])
# closure compiler doesn't work with DYNAMIC_EXECUTION=0
test_cases.append((['--bind', '-O2', '--closure', '1']))
for args in test_cases:
print(args)
self.clear()
testFiles = [
path_from_root('tests', 'embind', 'underscore-1.4.2.js'),
path_from_root('tests', 'embind', 'imvu_test_adapter.js'),
path_from_root('tests', 'embind', 'embind.test.js'),
]
run_process(
[EMCC, path_from_root('tests', 'embind', 'embind_test.cpp'),
'--pre-js', path_from_root('tests', 'embind', 'test.pre.js'),
'--post-js', path_from_root('tests', 'embind', 'test.post.js'),
'-s', 'WASM_ASYNC_COMPILATION=0',
'-s', 'IN_TEST_HARNESS=1'] + args,
env=environ)
if 'DYNAMIC_EXECUTION=0' in args:
with open('a.out.js') as js_binary_file:
js_binary_str = js_binary_file.read()
self.assertNotContained('new Function(', js_binary_str)
self.assertNotContained('eval(', js_binary_str)
with open('a.out.js', 'ab') as f:
for tf in testFiles:
f.write(open(tf, 'rb').read())
output = run_js('a.out.js', stdout=PIPE, stderr=PIPE, full_output=True)
self.assertNotContained('FAIL', output)
def test_emconfig(self):
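# em-config prints the value of a single configuration variable, e.g. `em-config LLVM_ROOT`; any other usage should be rejected with a usage message.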
output = run_process([emconfig, 'LLVM_ROOT'], stdout=PIPE).stdout.strip()
self.assertEqual(output, LLVM_ROOT)
# EMSCRIPTEN_ROOT is kind of special since it should always report the location of em-config
# itself (it's not configurable via the config file but driven by the location of arg0)
output = run_process([emconfig, 'EMSCRIPTEN_ROOT'], stdout=PIPE).stdout.strip()
self.assertEqual(output, os.path.dirname(emconfig))
invalid = 'Usage: em-config VAR_NAME'
# Don't accept variables that do not exist
output = self.expect_fail([emconfig, 'VAR_WHICH_DOES_NOT_EXIST']).strip()
self.assertEqual(output, invalid)
# Don't accept no arguments
output = self.expect_fail([emconfig]).strip()
self.assertEqual(output, invalid)
# Don't accept more than one variable
output = self.expect_fail([emconfig, 'LLVM_ROOT', 'EMCC']).strip()
self.assertEqual(output, invalid)
# Don't accept arbitrary python code
output = self.expect_fail([emconfig, 'sys.argv[1]']).strip()
self.assertEqual(output, invalid)
def test_link_s(self):
# -s OPT=VALUE can conflict with -s as a linker option. We warn and ignore
create_test_file('main.cpp', r'''
extern "C" {
void something();
}
int main() {
something();
return 0;
}
''')
create_test_file('supp.cpp', r'''
#include <stdio.h>
extern "C" {
void something() {
printf("yello\n");
}
}
''')
run_process([EMCC, 'main.cpp', '-o', 'main.o'])
run_process([EMCC, 'supp.cpp', '-o', 'supp.o'])
run_process([EMCC, 'main.o', '-s', 'supp.o', '-s', 'SAFE_HEAP=1'])
self.assertContained('yello', run_js('a.out.js'))
# Check that the valid -s option had an effect
self.assertContained('SAFE_HEAP', open('a.out.js').read())
def test_conftest_s_flag_passing(self):
create_test_file('conftest.c', r'''
int main() {
return 0;
}
''')
with env_modify({'EMMAKEN_JUST_CONFIGURE': '1'}):
cmd = [EMCC, '-s', 'ASSERTIONS=1', 'conftest.c', '-o', 'conftest']
output = run_process(cmd, stderr=PIPE)
self.assertNotContained('emcc: warning: treating -s as linker option', output.stderr)
self.assertExists('conftest')
def test_file_packager(self):
ensure_dir('subdir')
create_test_file('data1.txt', 'data1')
os.chdir('subdir')
create_test_file('data2.txt', 'data2')
# relative path to below the current dir is invalid
stderr = self.expect_fail([PYTHON, FILE_PACKAGER, 'test.data', '--preload', '../data1.txt'])
self.assertContained('below the current directory', stderr)
# relative path that ends up under us is cool
proc = run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', '../subdir/data2.txt'], stderr=PIPE, stdout=PIPE)
self.assertGreater(len(proc.stdout), 0)
self.assertNotContained('below the current directory', proc.stderr)
# direct path leads to the same code being generated - relative path does not make us do anything different
proc2 = run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data2.txt'], stderr=PIPE, stdout=PIPE)
self.assertGreater(len(proc2.stdout), 0)
self.assertNotContained('below the current directory', proc2.stderr)
def clean(txt):
lines = txt.splitlines()
lines = [l for l in lines if 'PACKAGE_UUID' not in l and 'loadPackage({' not in l]
return ''.join(lines)
self.assertTextDataIdentical(clean(proc.stdout), clean(proc2.stdout))
# verify '--separate-metadata' option produces separate metadata file
os.chdir('..')
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data1.txt', '--preload', 'subdir/data2.txt', '--js-output=immutable.js', '--separate-metadata'])
self.assertExists('immutable.js.metadata')
# verify js output JS file is not touched when the metadata is separated
orig_timestamp = os.path.getmtime('immutable.js')
orig_content = open('immutable.js').read()
# ensure some time passes before running the packager again so that if it does touch the
# js file it will end up with a different timestamp.
time.sleep(1.0)
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data1.txt', '--preload', 'subdir/data2.txt', '--js-output=immutable.js', '--separate-metadata'])
# assert both file content and timestamp are the same as reference copy
self.assertTextDataIdentical(orig_content, open('immutable.js').read())
self.assertEqual(orig_timestamp, os.path.getmtime('immutable.js'))
# verify the content of metadata file is correct
with open('immutable.js.metadata') as f:
metadata = json.load(f)
self.assertEqual(len(metadata['files']), 2)
assert metadata['files'][0]['start'] == 0 and metadata['files'][0]['end'] == len('data1') and metadata['files'][0]['filename'] == '/data1.txt'
assert metadata['files'][1]['start'] == len('data1') and metadata['files'][1]['end'] == len('data1') + len('data2') and metadata['files'][1]['filename'] == '/subdir/data2.txt'
assert metadata['remote_package_size'] == len('data1') + len('data2')
# we can only assert that the uuid format is correct; the uuid's value is expected to differ between invocations
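# (uuid.UUID raises ValueError if the string is not a well-formed UUID, which fails the test)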
uuid.UUID(metadata['package_uuid'], version=4)
def test_file_packager_unicode(self):
unicode_name = 'unicode…☃'
try:
ensure_dir(unicode_name)
except OSError:
print("we failed to even create a unicode dir, so on this OS, we can't test this")
return
full = os.path.join(unicode_name, 'data.txt')
create_test_file(full, 'data')
proc = run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', full], stdout=PIPE, stderr=PIPE)
assert len(proc.stdout), proc.stderr
assert unicode_name in proc.stdout, proc.stdout
print(len(proc.stderr))
def test_file_packager_mention_FORCE_FILESYSTEM(self):
MESSAGE = 'Remember to build the main file with -s FORCE_FILESYSTEM=1 so that it includes support for loading this file package'
create_test_file('data.txt', 'data1')
# mention when running standalone
err = run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=PIPE, stderr=PIPE).stderr
self.assertContained(MESSAGE, err)
# do not mention from emcc
err = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '--preload-file', 'data.txt'], stdout=PIPE, stderr=PIPE).stderr
self.assertEqual(len(err), 0)
def test_headless(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'example.png')
run_process([EMCC, path_from_root('tests', 'sdl_headless.c'), '-s', 'HEADLESS=1'])
output = run_js('a.out.js', stderr=PIPE)
assert '''Init: 0
Font: 0x1
Sum: 0
you should see two lines of text in different colors and a blue rectangle
SDL_Quit called (and ignored)
done.
''' in output, output
def test_preprocess(self):
# Pass -Werror to prevent regressions such as https://github.com/emscripten-core/emscripten/pull/9661
out = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-E', '-Werror'], stdout=PIPE).stdout
self.assertNotExists('a.out.js')
self.assertNotExists('a.out')
# Test explicitly that the output contains a line typically written by the preprocessor.
self.assertContained('# 1 ', out)
self.assertContained('hello_world.c"', out)
self.assertContained('printf("hello, world!', out)
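# -fsyntax-only checks the source for errors but should emit no output file at all.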
def test_syntax_only_valid(self):
result = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-fsyntax-only'], stdout=PIPE, stderr=STDOUT)
self.assertEqual(result.stdout, '')
self.assertNotExists('a.out.js')
def test_syntax_only_invalid(self):
create_test_file('src.c', 'int main() {')
result = run_process([EMCC, 'src.c', '-fsyntax-only'], stdout=PIPE, check=False, stderr=STDOUT)
self.assertNotEqual(result.returncode, 0)
self.assertContained("src.c:1:13: error: expected '}'", result.stdout)
self.assertNotExists('a.out.js')
def test_demangle(self):
create_test_file('src.cpp', '''
#include <stdio.h>
#include <emscripten.h>
void two(char c) {
EM_ASM(out(stackTrace()));
}
void one(int x) {
two(x % 17);
}
int main() {
EM_ASM(out(demangle('__Znwm'))); // check for no aborts
EM_ASM(out(demangle('_main')));
EM_ASM(out(demangle('__Z2f2v')));
EM_ASM(out(demangle('__Z12abcdabcdabcdi')));
EM_ASM(out(demangle('__ZL12abcdabcdabcdi')));
EM_ASM(out(demangle('__Z4testcsifdPvPiPc')));
EM_ASM(out(demangle('__ZN4test5moarrEcslfdPvPiPc')));
EM_ASM(out(demangle('__ZN4Waka1f12a234123412345pointEv')));
EM_ASM(out(demangle('__Z3FooIiEvv')));
EM_ASM(out(demangle('__Z3FooIidEvi')));
EM_ASM(out(demangle('__ZN3Foo3BarILi5EEEvv')));
EM_ASM(out(demangle('__ZNK10__cxxabiv120__si_class_type_info16search_below_dstEPNS_19__dynamic_cast_infoEPKvib')));
EM_ASM(out(demangle('__Z9parsewordRPKciRi')));
EM_ASM(out(demangle('__Z5multiwahtjmxyz')));
EM_ASM(out(demangle('__Z1aA32_iPA5_c')));
EM_ASM(out(demangle('__ZN21FWakaGLXFleeflsMarfooC2EjjjPKvbjj')));
EM_ASM(out(demangle('__ZN5wakaw2Cm10RasterBaseINS_6watwat9PolocatorEE8merbine1INS4_2OREEEvPKjj'))); // we get this wrong, but at least emit a '?'
one(17);
return 0;
}
''')
# full demangle support
run_process([EMCC, 'src.cpp', '-s', 'DEMANGLE_SUPPORT=1'])
output = run_js('a.out.js')
self.assertContained('''operator new(unsigned long)
_main
f2()
abcdabcdabcd(int)
abcdabcdabcd(int)
test(char, short, int, float, double, void*, int*, char*)
test::moarr(char, short, long, float, double, void*, int*, char*)
Waka::f::a23412341234::point()
void Foo<int>()
void Foo<int, double>(int)
void Foo::Bar<5>()
__cxxabiv1::__si_class_type_info::search_below_dst(__cxxabiv1::__dynamic_cast_info*, void const*, int, bool) const
parseword(char const*&, int, int&)
multi(wchar_t, signed char, unsigned char, unsigned short, unsigned int, unsigned long, long long, unsigned long long, ...)
a(int [32], char (*) [5])
FWakaGLXFleeflsMarfoo::FWakaGLXFleeflsMarfoo(unsigned int, unsigned int, unsigned int, void const*, bool, unsigned int, unsigned int)
void wakaw::Cm::RasterBase<wakaw::watwat::Polocator>::merbine1<wakaw::Cm::RasterBase<wakaw::watwat::Polocator>::OR>(unsigned int const*, unsigned int)
''', output)
# test for multiple functions in one stack trace
run_process([EMCC, 'src.cpp', '-s', 'DEMANGLE_SUPPORT=1', '-g'])
output = run_js('a.out.js')
self.assertIn('one(int)', output)
self.assertIn('two(char)', output)
def test_demangle_cpp(self):
create_test_file('src.cpp', '''
#include <stdio.h>
#include <emscripten.h>
#include <cxxabi.h>
#include <assert.h>
int main() {
char out[256];
int status = 1;
size_t length = 255;
abi::__cxa_demangle("_ZN4Waka1f12a234123412345pointEv", out, &length, &status);
assert(status == 0);
printf("%s\\n", out);
return 0;
}
''')
run_process([EMCC, 'src.cpp'])
output = run_js('a.out.js')
self.assertContained('Waka::f::a23412341234::point()', output)
# Test that malloc() -> OOM -> abort() -> stackTrace() -> jsStackTrace() -> demangleAll() -> demangle() -> malloc()
# cycle will not produce an infinite loop.
def test_demangle_malloc_infinite_loop_crash(self):
run_process([EMXX, path_from_root('tests', 'malloc_demangle_infinite_loop.cpp'), '-g', '-s', 'ABORTING_MALLOC=1', '-s', 'DEMANGLE_SUPPORT=1'])
output = run_js('a.out.js', assert_returncode=None, stderr=PIPE)
if output.count('Cannot enlarge memory arrays') > 2:
print(output)
assert(output.count('Cannot enlarge memory arrays') <= 2)
def test_module_exports_with_closure(self):
# This test checks that module.exports is retained when JavaScript is minified by compiling with --closure 1
# This is important because if module.exports is not present the Module object will not be visible to node.js
# Run with ./runner.py other.test_module_exports_with_closure
# First make sure test.js isn't present.
self.clear()
# compile with -O2 --closure 0
run_process([EMCC, path_from_root('tests', 'Module-exports', 'test.c'),
'-o', 'test.js', '-O2', '--closure', '0',
'--pre-js', path_from_root('tests', 'Module-exports', 'setup.js'),
'-s', 'EXPORTED_FUNCTIONS=["_bufferTest"]',
'-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["ccall", "cwrap"]',
'-s', 'WASM_ASYNC_COMPILATION=0'])
# Check that compilation was successful
self.assertExists('test.js')
test_js_closure_0 = open('test.js').read()
# Check that test.js compiled with --closure 0 contains "module['exports'] = Module;"
assert ("module['exports'] = Module;" in test_js_closure_0) or ('module["exports"]=Module' in test_js_closure_0) or ('module["exports"] = Module;' in test_js_closure_0)
# Check that main.js (which requires test.js) completes successfully when run in node.js
# in order to check that the exports are indeed functioning correctly.
shutil.copyfile(path_from_root('tests', 'Module-exports', 'main.js'), 'main.js')
if NODE_JS in JS_ENGINES:
self.assertContained('bufferTest finished', run_js('main.js'))
# Delete test.js again and check it's gone.
try_delete('test.js')
self.assertNotExists('test.js')
# compile with -O2 --closure 1
run_process([EMCC, path_from_root('tests', 'Module-exports', 'test.c'),
'-o', 'test.js', '-O2', '--closure', '1',
'--pre-js', path_from_root('tests', 'Module-exports', 'setup.js'),
'-s', 'EXPORTED_FUNCTIONS=["_bufferTest"]',
'-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["ccall", "cwrap"]',
'-s', 'WASM_ASYNC_COMPILATION=0'])
# Check that compilation was successful
self.assertExists('test.js')
test_js_closure_1 = open('test.js').read()
# Check that test.js compiled with --closure 1 contains "module.exports", we want to verify that
# "module['exports']" got minified to "module.exports" when compiling with --closure 1
self.assertContained("module.exports", test_js_closure_1)
# Check that main.js (which requires test.js) completes successfully when run in node.js
# in order to check that the exports are indeed functioning correctly.
if NODE_JS in JS_ENGINES:
self.assertContained('bufferTest finished', run_js('main.js', engine=NODE_JS))
def test_node_catch_exit(self):
# Test that in node.js exceptions are not caught if NODEJS_CATCH_EXIT=0
if NODE_JS not in JS_ENGINES:
return
create_test_file('count.c', '''
#include <string.h>
int count(const char *str) {
return (int)strlen(str);
}
''')
create_test_file('index.js', '''
const count = require('./count.js');
console.log(xxx); //< here is the ReferenceError
''')
reference_error_text = 'console.log(xxx); //< here is the ReferenceError'
run_process([EMCC, 'count.c', '-o', 'count.js'])
# Check that the ReferenceError is caught and rethrown and thus the original error line is masked
self.assertNotContained(reference_error_text,
run_js('index.js', engine=NODE_JS, stderr=STDOUT, assert_returncode=None))
run_process([EMCC, 'count.c', '-o', 'count.js', '-s', 'NODEJS_CATCH_EXIT=0'])
# Check that the ReferenceError is not caught, so we see the error properly
self.assertContained(reference_error_text,
run_js('index.js', engine=NODE_JS, stderr=STDOUT, assert_returncode=None))
def test_extra_exported_methods(self):
# Test with node.js that the EXTRA_EXPORTED_RUNTIME_METHODS setting is considered by libraries
if NODE_JS not in JS_ENGINES:
self.skipTest("node engine required for this test")
create_test_file('count.c', '''
#include <string.h>
int count(const char *str) {
return (int)strlen(str);
}
''')
create_test_file('index.js', '''
const count = require('./count.js');
console.log(count.FS_writeFile);
''')
reference_error_text = 'undefined'
run_process([EMCC, 'count.c', '-s', 'FORCE_FILESYSTEM=1', '-s',
'EXTRA_EXPORTED_RUNTIME_METHODS=["FS_writeFile"]', '-o', 'count.js'])
# Check that the Module.FS_writeFile exists
self.assertNotContained(reference_error_text,
run_js('index.js', engine=NODE_JS, stderr=STDOUT, assert_returncode=None))
run_process([EMCC, 'count.c', '-s', 'FORCE_FILESYSTEM=1', '-o', 'count.js'])
# Check that the Module.FS_writeFile is not exported
self.assertContained(reference_error_text,
run_js('index.js', engine=NODE_JS, stderr=STDOUT, assert_returncode=None))
def test_fs_stream_proto(self):
open('src.cpp', 'wb').write(br'''
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <errno.h>
#include <string.h>
int main()
{
long file_size = 0;
int h = open("src.cpp", O_RDONLY, 0666);
if (0 != h)
{
FILE* file = fdopen(h, "rb");
if (0 != file)
{
fseek(file, 0, SEEK_END);
file_size = ftell(file);
fseek(file, 0, SEEK_SET);
}
else
{
printf("fdopen() failed: %s\n", strerror(errno));
return 10;
}
close(h);
printf("File size: %ld\n", file_size);
}
else
{
printf("open() failed: %s\n", strerror(errno));
return 10;
}
return 0;
}
''')
run_process([EMCC, 'src.cpp', '--embed-file', 'src.cpp'])
for engine in JS_ENGINES:
out = run_js('a.out.js', engine=engine, stderr=PIPE, full_output=True)
self.assertContained('File size: 724', out)
def test_proxyfs(self):
# This test assumes that 3 different programs share the same directory and files.
# Each program must get its own JS object, but node's 'require' function caches JS objects.
# If we just load the same js file multiple times like the following code,
# these programs (m0, m1, m2) would share the same JS object.
#
# var m0 = require('./proxyfs_test.js');
# var m1 = require('./proxyfs_test.js');
# var m2 = require('./proxyfs_test.js');
#
# To give each program a separate JS object, the following 'require' calls use different js files.
#
# var m0 = require('./proxyfs_test.js');
# var m1 = require('./proxyfs_test1.js');
# var m2 = require('./proxyfs_test2.js');
#
create_test_file('proxyfs_test_main.js', r'''
var m0 = require('./proxyfs_test.js');
var m1 = require('./proxyfs_test1.js');
var m2 = require('./proxyfs_test2.js');
var section;
function print(str){
process.stdout.write(section+":"+str+":");
}
m0.FS.mkdir('/working');
m0.FS.mount(m0.PROXYFS,{root:'/',fs:m1.FS},'/working');
m0.FS.mkdir('/working2');
m0.FS.mount(m0.PROXYFS,{root:'/',fs:m2.FS},'/working2');
section = "child m1 reads and writes local file.";
print("m1 read embed");
m1.ccall('myreade','number',[],[]);
print("m1 write");console.log("");
m1.ccall('mywrite0','number',['number'],[1]);
print("m1 read");
m1.ccall('myread0','number',[],[]);
section = "child m2 reads and writes local file.";
print("m2 read embed");
m2.ccall('myreade','number',[],[]);
print("m2 write");console.log("");
m2.ccall('mywrite0','number',['number'],[2]);
print("m2 read");
m2.ccall('myread0','number',[],[]);
section = "child m1 reads local file.";
print("m1 read");
m1.ccall('myread0','number',[],[]);
section = "parent m0 reads and writes local and children's file.";
print("m0 read embed");
m0.ccall('myreade','number',[],[]);
print("m0 read m1");
m0.ccall('myread1','number',[],[]);
print("m0 read m2");
m0.ccall('myread2','number',[],[]);
section = "m0,m1 and m2 verify local files.";
print("m0 write");console.log("");
m0.ccall('mywrite0','number',['number'],[0]);
print("m0 read");
m0.ccall('myread0','number',[],[]);
print("m1 read");
m1.ccall('myread0','number',[],[]);
print("m2 read");
m2.ccall('myread0','number',[],[]);
print("m0 read embed");
m0.ccall('myreade','number',[],[]);
print("m1 read embed");
m1.ccall('myreade','number',[],[]);
print("m2 read embed");
m2.ccall('myreade','number',[],[]);
section = "parent m0 writes and reads children's files.";
print("m0 write m1");console.log("");
m0.ccall('mywrite1','number',[],[]);
print("m0 read m1");
m0.ccall('myread1','number',[],[]);
print("m0 write m2");console.log("");
m0.ccall('mywrite2','number',[],[]);
print("m0 read m2");
m0.ccall('myread2','number',[],[]);
print("m1 read");
m1.ccall('myread0','number',[],[]);
print("m2 read");
m2.ccall('myread0','number',[],[]);
print("m0 read m0");
m0.ccall('myread0','number',[],[]);
''')
create_test_file('proxyfs_pre.js', r'''
if (typeof Module === 'undefined') Module = {};
Module["noInitialRun"]=true;
noExitRuntime=true;
''')
create_test_file('proxyfs_embed.txt', r'''test
''')
create_test_file('proxyfs_test.c', r'''
#include <stdio.h>
int
mywrite1(){
FILE* out = fopen("/working/hoge.txt","w");
fprintf(out,"test1\n");
fclose(out);
return 0;
}
int
myread1(){
FILE* in = fopen("/working/hoge.txt","r");
char buf[1024];
int len;
if(in==NULL)
printf("open failed\n");
while(! feof(in)){
if(fgets(buf,sizeof(buf),in)==buf){
printf("%s",buf);
}
}
fclose(in);
return 0;
}
int
mywrite2(){
FILE* out = fopen("/working2/hoge.txt","w");
fprintf(out,"test2\n");
fclose(out);
return 0;
}
int
myread2(){
{
FILE* in = fopen("/working2/hoge.txt","r");
char buf[1024];
int len;
if(in==NULL)
printf("open failed\n");
while(! feof(in)){
if(fgets(buf,sizeof(buf),in)==buf){
printf("%s",buf);
}
}
fclose(in);
}
return 0;
}
int
mywrite0(int i){
FILE* out = fopen("hoge.txt","w");
fprintf(out,"test0_%d\n",i);
fclose(out);
return 0;
}
int
myread0(){
{
FILE* in = fopen("hoge.txt","r");
char buf[1024];
int len;
if(in==NULL)
printf("open failed\n");
while(! feof(in)){
if(fgets(buf,sizeof(buf),in)==buf){
printf("%s",buf);
}
}
fclose(in);
}
return 0;
}
int
myreade(){
{
FILE* in = fopen("proxyfs_embed.txt","r");
char buf[1024];
int len;
if(in==NULL)
printf("open failed\n");
while(! feof(in)){
if(fgets(buf,sizeof(buf),in)==buf){
printf("%s",buf);
}
}
fclose(in);
}
return 0;
}
''')
run_process([EMCC,
'-o', 'proxyfs_test.js', 'proxyfs_test.c',
'--embed-file', 'proxyfs_embed.txt', '--pre-js', 'proxyfs_pre.js',
'-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["ccall", "cwrap"]',
'-lproxyfs.js',
'-s', 'WASM_ASYNC_COMPILATION=0',
'-s', 'MAIN_MODULE=1',
'-s', 'EXPORT_ALL=1'])
# The following shutil.copyfile calls just prevent node.js's 'require' from caching the JS object.
# See https://nodejs.org/api/modules.html
shutil.copyfile('proxyfs_test.js', 'proxyfs_test1.js')
shutil.copyfile('proxyfs_test.js', 'proxyfs_test2.js')
out = run_js('proxyfs_test_main.js')
section = "child m1 reads and writes local file."
self.assertContained(section + ":m1 read embed:test", out)
self.assertContained(section + ":m1 write:", out)
self.assertContained(section + ":m1 read:test0_1", out)
section = "child m2 reads and writes local file."
self.assertContained(section + ":m2 read embed:test", out)
self.assertContained(section + ":m2 write:", out)
self.assertContained(section + ":m2 read:test0_2", out)
section = "child m1 reads local file."
self.assertContained(section + ":m1 read:test0_1", out)
section = "parent m0 reads and writes local and children's file."
self.assertContained(section + ":m0 read embed:test", out)
self.assertContained(section + ":m0 read m1:test0_1", out)
self.assertContained(section + ":m0 read m2:test0_2", out)
section = "m0,m1 and m2 verify local files."
self.assertContained(section + ":m0 write:", out)
self.assertContained(section + ":m0 read:test0_0", out)
self.assertContained(section + ":m1 read:test0_1", out)
self.assertContained(section + ":m2 read:test0_2", out)
self.assertContained(section + ":m0 read embed:test", out)
self.assertContained(section + ":m1 read embed:test", out)
self.assertContained(section + ":m2 read embed:test", out)
section = "parent m0 writes and reads children's files."
self.assertContained(section + ":m0 write m1:", out)
self.assertContained(section + ":m0 read m1:test1", out)
self.assertContained(section + ":m0 write m2:", out)
self.assertContained(section + ":m0 read m2:test2", out)
self.assertContained(section + ":m1 read:test1", out)
self.assertContained(section + ":m2 read:test2", out)
self.assertContained(section + ":m0 read m0:test0_0", out)
def test_dependency_file(self):
# Issue 1732: -MMD (and friends) create dependency files that need to be
# copied from the temporary directory.
create_test_file('test.cpp', r'''
#include "test.hpp"
void my_function()
{
}
''')
create_test_file('test.hpp', r'''
void my_function();
''')
run_process([EMCC, '-MMD', '-c', 'test.cpp', '-o', 'test.o'])
self.assertExists('test.d')
deps = open('test.d').read()
# Look for ': ' instead of just ':' to not confuse C:\path\ notation with make "target: deps" rule. Not perfect, but good enough for this test.
head, tail = deps.split(': ', 1)
assert 'test.o' in head, 'Invalid dependency target'
assert 'test.cpp' in tail and 'test.hpp' in tail, 'Invalid dependencies generated'
def test_dependency_file_2(self):
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'a.c')
run_process([EMCC, 'a.c', '-MMD', '-MF', 'test.d', '-c'])
self.assertContained('a.o: a.c', open('test.d').read())
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'a.c')
run_process([EMCC, 'a.c', '-MMD', '-MF', 'test2.d', '-c', '-o', 'test.o'])
self.assertContained('test.o: a.c', open('test2.d').read())
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'a.c')
ensure_dir('obj')
run_process([EMCC, 'a.c', '-MMD', '-MF', 'test3.d', '-c', '-o', 'obj/test.o'])
self.assertContained('obj/test.o: a.c', open('test3.d').read())
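# JS library keys that are not valid JS identifiers (such as '<' or 'white space') must be quoted; check that the library parser accepts them.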
def test_js_lib_quoted_key(self):
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
__internal_data:{
'<' : 0,
'white space' : 1
},
printf__deps: ['__internal_data', 'fprintf']
});
''')
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '--js-library', 'lib.js'])
self.assertContained('hello, world!', run_js('a.out.js'))
def test_js_lib_exported(self):
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
jslibfunc: function(x) { return 2 * x }
});
''')
create_test_file('src.cpp', r'''
#include <emscripten.h>
#include <stdio.h>
extern "C" int jslibfunc(int x);
int main() {
printf("c calling: %d\n", jslibfunc(6));
EM_ASM({
out('js calling: ' + Module['_jslibfunc'](5) + '.');
});
}
''')
run_process([EMCC, 'src.cpp', '--js-library', 'lib.js', '-s', 'EXPORTED_FUNCTIONS=["_main", "_jslibfunc"]'])
self.assertContained('c calling: 12\njs calling: 10.', run_js('a.out.js'))
def test_js_lib_primitive_dep(self):
# Verify that primitive dependencies aren't generated in the output JS.
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
foo__deps: ['Int8Array', 'NonPrimitive'],
foo: function() {},
});
''')
create_test_file('main.c', r'''
void foo(void);
int main(int argc, char** argv) {
foo();
return 0;
}
''')
run_process([EMCC, '-O0', 'main.c', '--js-library', 'lib.js', '-s', 'WARN_ON_UNDEFINED_SYMBOLS=0'])
generated = open('a.out.js').read()
self.assertContained('missing function: NonPrimitive', generated)
self.assertNotContained('missing function: Int8Array', generated)
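# A JS library function may depend on an `__asm` library function, i.e. one compiled into the asm.js module itself (which is why it declares a `__sig`).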
def test_js_lib_using_asm_lib(self):
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
jslibfunc__deps: ['asmlibfunc'],
jslibfunc: function(x) {
return 2 * _asmlibfunc(x);
},
asmlibfunc__asm: true,
asmlibfunc__sig: 'ii',
asmlibfunc: function(x) {
x = x | 0;
return x + 1 | 0;
}
});
''')
create_test_file('src.cpp', r'''
#include <stdio.h>
extern "C" int jslibfunc(int x);
int main() {
printf("c calling: %d\n", jslibfunc(6));
}
''')
run_process([EMCC, 'src.cpp', '--js-library', 'lib.js'])
self.assertContained('c calling: 14\n', run_js('a.out.js'))
def test_EMCC_BUILD_DIR(self):
# EMCC_BUILD_DIR env var contains the dir we were building in, when running the js compiler (e.g. when
# running a js library). We force the cwd to be src/ for technical reasons, so this lets you find out
# where you were.
create_test_file('lib.js', r'''
printErr('dir was ' + process.env.EMCC_BUILD_DIR);
''')
err = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '--js-library', 'lib.js'], stderr=PIPE).stderr
self.assertContained('dir was ' + os.path.realpath(os.path.normpath(self.get_dir())), err)
def test_float_h(self):
process = run_process([EMCC, path_from_root('tests', 'float+.c')], stdout=PIPE, stderr=PIPE)
assert process.returncode == 0, 'float.h should agree with our system: ' + process.stdout + '\n\n\n' + process.stderr
def test_output_is_dir(self):
ensure_dir('out_dir')
err = self.expect_fail([EMCC, '-c', path_from_root('tests', 'hello_world.c'), '-o', 'out_dir/'])
self.assertContained('error: unable to open output file', err)
def test_default_obj_ext(self):
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.c')])
self.assertExists('hello_world.o')
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.c'), '--default-obj-ext', 'obj'])
self.assertExists('hello_world.obj')
def test_doublestart_bug(self):
create_test_file('code.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
void main_loop(void) {
static int cnt = 0;
if (++cnt >= 10) emscripten_cancel_main_loop();
}
int main(void) {
printf("This should only appear once.\n");
emscripten_set_main_loop(main_loop, 10, 0);
return 0;
}
''')
create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
run_process([EMCC, 'code.cpp', '--pre-js', 'pre.js'])
output = run_js('a.out.js')
assert output.count('This should only appear once.') == 1, output
def test_module_print(self):
create_test_file('code.cpp', r'''
#include <stdio.h>
int main(void) {
printf("123456789\n");
return 0;
}
''')
create_test_file('pre.js', r'''
var Module = { print: function(x) { throw '<{(' + x + ')}>' } };
''')
run_process([EMCC, 'code.cpp', '--pre-js', 'pre.js'])
output = run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None)
assert r'<{(123456789)}>' in output, output
def test_precompiled_headers_warnings(self):
# Check that we don't have any underlying warnings from clang; this can happen if we
# pass any link flags when building a pch.
create_test_file('header.h', '#define X 5\n')
run_process([EMCC, '-Werror', '-xc++-header', 'header.h'])
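# -xc++-header tells clang to treat the input as a C++ header and emit a precompiled header for it.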
def test_precompiled_headers(self):
for suffix in ['gch', 'pch']:
print(suffix)
self.clear()
create_test_file('header.h', '#define X 5\n')
run_process([EMCC, '-xc++-header', 'header.h', '-c'])
self.assertExists('header.h.gch') # default output is gch
if suffix != 'gch':
run_process([EMCC, '-xc++-header', 'header.h', '-o', 'header.h.' + suffix])
self.assertBinaryEqual('header.h.gch', 'header.h.' + suffix)
create_test_file('src.cpp', r'''
#include <stdio.h>
int main() {
printf("|%d|\n", X);
return 0;
}
''')
run_process([EMCC, 'src.cpp', '-include', 'header.h'])
output = run_js('a.out.js', stderr=PIPE, full_output=True)
self.assertContained('|5|', output)
# also verify that the gch is actually used
err = run_process([EMCC, 'src.cpp', '-include', 'header.h', '-Xclang', '-print-stats'], stderr=PIPE).stderr
self.assertTextDataContained('*** PCH/Modules Loaded:\nModule: header.h.' + suffix, err)
# and sanity check it is not mentioned when not
try_delete('header.h.' + suffix)
err = run_process([EMCC, 'src.cpp', '-include', 'header.h', '-Xclang', '-print-stats'], stderr=PIPE).stderr
self.assertNotContained('*** PCH/Modules Loaded:\nModule: header.h.' + suffix, err.replace('\r\n', '\n'))
# with specified target via -o
try_delete('header.h.' + suffix)
run_process([EMCC, '-xc++-header', 'header.h', '-o', 'my.' + suffix])
self.assertExists('my.' + suffix)
# -include-pch flag
run_process([EMCC, '-xc++-header', 'header.h', '-o', 'header.h.' + suffix])
run_process([EMCC, 'src.cpp', '-include-pch', 'header.h.' + suffix])
output = run_js('a.out.js')
self.assertContained('|5|', output)
@no_wasm_backend('tests extra fastcomp warnings on unaligned loads/stores, which matter a lot more in asm.js')
def test_warn_unaligned(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
struct packey {
char x;
int y;
double z;
} __attribute__((__packed__));
int main() {
volatile packey p;
p.x = 0;
p.y = 1;
p.z = 2;
return 0;
}
''')
output = run_process([EMCC, 'src.cpp', '-s', 'WASM=0', '-s', 'WARN_UNALIGNED=1', '-g'], stderr=PIPE)
self.assertContained('emcc: warning: unaligned store', output.stderr)
self.assertContained('emcc: warning: unaligned store', output.stderr)
self.assertContained('@line 11 "src.cpp"', output.stderr)
def test_LEGACY_VM_SUPPORT(self):
# when modern features are lacking, we can polyfill them or at least warn
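# (the pre.js below sets Math.imul to undefined, simulating an engine that lacks this modern builtin)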
create_test_file('pre.js', 'Math.imul = undefined;')
def test(expected, opts=[]):
print(opts)
result = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '--pre-js', 'pre.js'] + opts, stderr=PIPE, check=False)
if result.returncode == 0:
self.assertContained(expected, run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None))
else:
self.assertContained(expected, result.stderr)
# when legacy is needed, we show an error indicating so
test('build with LEGACY_VM_SUPPORT')
# legacy + disabling wasm works
if self.is_wasm_backend():
return
test('hello, world!', ['-s', 'LEGACY_VM_SUPPORT=1', '-s', 'WASM=0'])
def test_on_abort(self):
expected_output = 'Module.onAbort was called'
def add_on_abort_and_verify(extra=''):
with open('a.out.js') as f:
js = f.read()
with open('a.out.js', 'w') as f:
f.write("var Module = { onAbort: function() { console.log('%s') } };\n" % expected_output)
f.write(extra + '\n')
f.write(js)
self.assertContained(expected_output, run_js('a.out.js', assert_returncode=None))
# test direct abort() C call
create_test_file('src.c', '''
#include <stdlib.h>
int main() {
abort();
}
''')
run_process([EMCC, 'src.c', '-s', 'WASM_ASYNC_COMPILATION=0'])
add_on_abort_and_verify()
# test direct abort() JS call
create_test_file('src.c', '''
#include <emscripten.h>
int main() {
EM_ASM({ abort() });
}
''')
run_process([EMCC, 'src.c', '-s', 'WASM_ASYNC_COMPILATION=0'])
add_on_abort_and_verify()
# test throwing in an abort handler, and catching that
create_test_file('src.c', '''
#include <emscripten.h>
int main() {
EM_ASM({
try {
out('first');
abort();
} catch (e) {
out('second');
abort();
throw e;
}
});
}
''')
run_process([EMCC, 'src.c', '-s', 'WASM_ASYNC_COMPILATION=0'])
with open('a.out.js') as f:
js = f.read()
with open('a.out.js', 'w') as f:
f.write("var Module = { onAbort: function() { console.log('%s'); throw 're-throw'; } };\n" % expected_output)
f.write(js)
out = run_js('a.out.js', stderr=STDOUT, assert_returncode=None)
print(out)
self.assertContained(expected_output, out)
self.assertContained('re-throw', out)
self.assertContained('first', out)
self.assertContained('second', out)
self.assertEqual(out.count(expected_output), 2)
# test an abort during startup
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
os.remove('a.out.wasm') # trigger onAbort by intentionally causing startup to fail
add_on_abort_and_verify()
def test_no_exit_runtime(self):
create_test_file('code.cpp', r'''
#include <stdio.h>
template<int x>
struct Waste {
Waste() {
printf("coming around %d\n", x);
}
~Waste() {
printf("going away %d\n", x);
}
};
Waste<1> w1;
Waste<2> w2;
Waste<3> w3;
Waste<4> w4;
Waste<5> w5;
int main(int argc, char **argv) {
return 0;
}
''')
for wasm in [0, 1]:
for no_exit in [1, 0]:
for opts in [[], ['-O1'], ['-O2', '-g2'], ['-O2', '-g2', '--llvm-lto', '1']]:
if self.is_wasm_backend() and not wasm:
continue
print(wasm, no_exit, opts)
cmd = [EMCC] + opts + ['code.cpp', '-s', 'EXIT_RUNTIME=' + str(1 - no_exit), '-s', 'WASM=' + str(wasm)]
if wasm:
cmd += ['--profiling-funcs'] # for function names
run_process(cmd)
output = run_js('a.out.js', stderr=PIPE, full_output=True)
src = open('a.out.js').read()
if wasm:
src += '\n' + self.get_wasm_text('a.out.wasm')
exit = 1 - no_exit
print(' exit:', exit, 'opts:', opts)
self.assertContained('coming around', output)
self.assertContainedIf('going away', output, exit)
if not self.is_wasm_backend():
# The wasm backend uses atexit to register destructors when
# constructors are called. There is currently no way to exclude
# these destructors from the wasm binary.
assert ('atexit(' in src) == exit, 'atexit should not appear in src when EXIT_RUNTIME=0'
assert ('_ZN5WasteILi2EED' in src) == exit, 'destructors should not appear if no exit:\n' + src
def test_no_exit_runtime_warnings_flush(self):
# check we warn if there is unflushed info
create_test_file('code.c', r'''
#include <stdio.h>
int main(int argc, char **argv) {
printf("hello\n");
printf("world"); // no newline, not flushed
#if FLUSH
printf("\n");
#endif
}
''')
create_test_file('code.cpp', r'''
#include <iostream>
int main() {
using namespace std;
cout << "hello" << std::endl;
cout << "world"; // no newline, not flushed
#if FLUSH
std::cout << std::endl;
#endif
}
''')
for src in ['code.c', 'code.cpp']:
for no_exit in [0, 1]:
for assertions in [0, 1]:
for flush in [0, 1]:
# TODO: also check FILESYSTEM=0 here; it never worked though, buffered output was not emitted at shutdown
print(src, no_exit, assertions, flush)
cmd = [EMCC, src, '-s', 'EXIT_RUNTIME=%d' % (1 - no_exit), '-s', 'ASSERTIONS=%d' % assertions]
if flush:
cmd += ['-DFLUSH']
run_process(cmd)
output = run_js('a.out.js', stderr=PIPE, full_output=True)
exit = 1 - no_exit
self.assertContained('hello', output)
assert ('world' in output) == (exit or flush), 'unflushed content is shown only when exiting the runtime'
assert (no_exit and assertions and not flush) == ('stdio streams had content in them that was not flushed. you should set EXIT_RUNTIME to 1' in output), 'warning should be shown'
def test_fs_after_main(self):
for args in [[], ['-O1']]:
print(args)
run_process([EMCC, path_from_root('tests', 'fs_after_main.cpp')])
self.assertContained('Test passed.', run_js('a.out.js'))
@no_wasm_backend('tests fastcomp compiler flags')
def test_os_oz(self):
for arg, expect in [
('-O1', '-O1'),
('-O2', '-O3'),
('-Os', '-Os'),
('-Oz', '-Oz'),
('-O3', '-O3'),
]:
print(arg, expect)
proc = run_process([EMCC, '-v', path_from_root('tests', 'hello_world.cpp'), arg], stderr=PIPE)
self.assertContained(expect, proc.stderr)
self.assertContained('hello, world!', run_js('a.out.js'))
def test_oz_size(self):
sizes = {}
for name, args in [
('0', []),
('1', ['-O1']),
('2', ['-O2']),
('s', ['-Os']),
('z', ['-Oz']),
('3', ['-O3']),
]:
print(name, args)
self.clear()
run_process([EMCC, '-c', path_from_root('system', 'lib', 'dlmalloc.c')] + args)
sizes[name] = os.path.getsize('dlmalloc.o')
print(sizes)
opt_min = min(sizes['1'], sizes['2'], sizes['3'], sizes['s'], sizes['z'])
opt_max = max(sizes['1'], sizes['2'], sizes['3'], sizes['s'], sizes['z'])
# opt builds are all fairly close
self.assertLess(opt_max - opt_min, opt_max * 0.1)
# the unopt build is quite a bit larger
self.assertGreater(sizes['0'], (1.20 * opt_max))
@no_wasm_backend('relies on ctor evaluation and dtor elimination')
def test_global_inits(self):
create_test_file('inc.h', r'''
#include <stdio.h>
template<int x>
struct Waste {
int state;
Waste() : state(10) {}
void test(int a) {
printf("%d\n", a + state);
}
~Waste() {
printf("going away %d\n", x);
}
};
Waste<3> *getMore();
''')
create_test_file('main.cpp', r'''
#include "inc.h"
Waste<1> mw1;
Waste<2> mw2;
int main(int argc, char **argv) {
printf("argc: %d\n", argc);
mw1.state += argc;
mw2.state += argc;
mw1.test(5);
mw2.test(6);
getMore()->test(0);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include "inc.h"
Waste<3> sw3;
Waste<3> *getMore() {
return &sw3;
}
''')
for opts, has_global in [
(['-O2', '-g', '-s', 'EXIT_RUNTIME=1'], True),
# no-exit-runtime removes the atexits, and then globalgce can work
# its magic to remove the global initializer entirely
(['-O2', '-g'], False),
(['-Os', '-g', '-s', 'EXIT_RUNTIME=1'], True),
(['-Os', '-g'], False),
(['-O2', '-g', '--llvm-lto', '1', '-s', 'EXIT_RUNTIME=1'], True),
(['-O2', '-g', '--llvm-lto', '1'], False),
]:
print(opts, has_global)
run_process([EMCC, 'main.cpp', '-c'] + opts)
run_process([EMCC, 'side.cpp', '-c'] + opts)
run_process([EMCC, 'main.o', 'side.o'] + opts)
run_js('a.out.js', stderr=PIPE, full_output=True)
src = open('a.out.js').read()
self.assertContained('argc: 1\n16\n17\n10\n', run_js('a.out.js'))
self.assertContainedIf('globalCtors', src, has_global)
# Tests that when there are only 0 or 1 global initializers, a grouped global initializer function will not be generated
# (that would just consume excess code size)
def test_no_global_inits(self):
create_test_file('one_global_initializer.cpp', r'''
#include <emscripten.h>
#include <stdio.h>
double t = emscripten_get_now();
int main() { printf("t:%d\n", (int)(t>0)); }
''')
run_process([EMCC, 'one_global_initializer.cpp'])
# Above file has one global initializer, should not generate a redundant grouped globalCtors function
self.assertNotContained('globalCtors', open('a.out.js').read())
self.assertContained('t:1', run_js('a.out.js'))
create_test_file('zero_global_initializers.cpp', r'''
#include <stdio.h>
int main() { printf("t:1\n"); }
''')
run_process([EMCC, 'zero_global_initializers.cpp'])
# Above file should have zero global initializers, should not generate any global initializer functions
self.assertNotContained('__GLOBAL__sub_', open('a.out.js').read())
self.assertContained('t:1', run_js('a.out.js'))
def test_implicit_func(self):
create_test_file('src.c', r'''
#include <stdio.h>
int main()
{
printf("hello %d\n", strnlen("waka", 2)); // Implicit declaration, no header, for strnlen
int (*my_strnlen)(char*, ...) = strnlen;
printf("hello %d\n", my_strnlen("shaka", 2));
return 0;
}
''')
IMPLICIT_WARNING = "warning: implicit declaration of function 'strnlen' is invalid in C99"
IMPLICIT_ERROR = "error: implicit declaration of function 'strnlen' is invalid in C99"
INCOMPATIBLE_WARNINGS = ('warning: incompatible pointer types', 'warning: incompatible function pointer types')
for opts, expected, compile_expected in [
([], None, [IMPLICIT_ERROR]),
(['-Wno-error=implicit-function-declaration'], ['hello '], [IMPLICIT_WARNING]), # turn error into warning
(['-Wno-implicit-function-declaration'], ['hello '], []), # turn error into nothing at all (runtime output is incorrect)
]:
print(opts, expected)
try_delete('a.out.js')
stderr = run_process([EMCC, 'src.c'] + opts, stderr=PIPE, check=False).stderr
for ce in compile_expected + [INCOMPATIBLE_WARNINGS]:
self.assertContained(ce, stderr)
if expected is None:
self.assertNotExists('a.out.js')
else:
output = run_js('a.out.js', stderr=PIPE, full_output=True)
for e in expected:
self.assertContained(e, output)
@no_wasm_backend('uses prebuilt .ll file')
def test_incorrect_static_call(self):
for wasm in [0, 1]:
for opts in [0, 1]:
for asserts in [0, 1]:
extra = []
if opts != 1 - asserts:
extra = ['-s', 'ASSERTIONS=' + str(asserts)]
cmd = [EMCC, path_from_root('tests', 'sillyfuncast2_noasm.ll'), '-O' + str(opts), '-s', 'WASM=' + str(wasm)] + extra
print(opts, asserts, wasm, cmd)
# Should not need to pipe stdout here but binaryen writes to stdout
# when it really should write to stderr.
stderr = run_process(cmd, stdout=PIPE, stderr=PIPE, check=False).stderr
assert ('unexpected' in stderr) == asserts, stderr
assert ("to 'doit'" in stderr) == asserts, stderr
@no_wasm_backend('fastcomp specific')
def test_llvm_lit(self):
grep_path = shared.which('grep')
if not grep_path:
self.skipTest('This test needs the "grep" tool in PATH. If you are using emsdk on Windows, you can obtain it via installing and activating the gnu package.')
llvm_src = get_fastcomp_src_dir()
if not llvm_src:
self.skipTest('llvm source tree not found')
LLVM_LIT = os.path.join(LLVM_ROOT, 'llvm-lit.py')
if not os.path.exists(LLVM_LIT):
LLVM_LIT = os.path.join(LLVM_ROOT, 'llvm-lit')
if not os.path.exists(LLVM_LIT):
self.skipTest('llvm-lit not found; fastcomp directory is most likely prebuilt')
cmd = [PYTHON, LLVM_LIT, '-v', os.path.join(llvm_src, 'test', 'CodeGen', 'JS')]
print(cmd)
run_process(cmd)
@requires_native_clang
def test_bad_triple(self):
# compile a minimal program, with as few dependencies as possible, as
# native building on CI may not always work well
create_test_file('minimal.cpp', 'int main() { return 0; }')
run_process([CLANG_CXX, 'minimal.cpp', '-target', 'x86_64-linux', '-c', '-emit-llvm', '-o', 'a.bc'] + clang_native.get_clang_native_args(), env=clang_native.get_clang_native_env())
# wasm backend will hard fail where as fastcomp only warns
if self.is_wasm_backend():
err = self.expect_fail([EMCC, 'a.bc'])
self.assertContained('machine type must be wasm32', err)
else:
err = run_process([EMCC, 'a.bc'], stderr=PIPE).stderr
assert 'warning' in err or 'WARNING' in err, err
assert 'incorrect target triple' in err or 'different target triples' in err, err
def test_valid_abspath(self):
# Test whether abspath warning appears
abs_include_path = os.path.abspath(self.get_dir())
err = run_process([EMCC, '-I%s' % abs_include_path, '-Wwarn-absolute-paths', path_from_root('tests', 'hello_world.c')], stderr=PIPE).stderr
warning = '-I or -L of an absolute path "-I%s" encountered. If this is to a local system header/library, it may cause problems (local system files make sense for compiling natively on your system, but not necessarily to JavaScript).' % abs_include_path
self.assertContained(warning, err)
# Passing an absolute path to a directory inside the emscripten tree is always ok and should not issue a warning.
abs_include_path = path_from_root('tests')
err = run_process([EMCC, '-I%s' % abs_include_path, '-Wwarn-absolute-paths', path_from_root('tests', 'hello_world.c')], stderr=PIPE).stderr
warning = '-I or -L of an absolute path "-I%s" encountered. If this is to a local system header/library, it may cause problems (local system files make sense for compiling natively on your system, but not necessarily to JavaScript).' % abs_include_path
self.assertNotContained(warning, err)
# Hide warning for this include path
err = run_process([EMCC, '--valid-abspath', abs_include_path, '-I%s' % abs_include_path, '-Wwarn-absolute-paths', path_from_root('tests', 'hello_world.c')], stderr=PIPE).stderr
self.assertNotContained(warning, err)
def test_valid_abspath_2(self):
if WINDOWS:
abs_include_path = 'C:\\nowhere\\at\\all'
else:
abs_include_path = '/nowhere/at/all'
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '--valid-abspath', abs_include_path, '-I%s' % abs_include_path]
print(' '.join(cmd))
run_process(cmd)
self.assertContained('hello, world!', run_js('a.out.js'))
def test_warn_dylibs(self):
shared_suffixes = ['.so', '.dylib', '.dll']
for suffix in ['.o', '.a', '.bc', '.so', '.lib', '.dylib', '.js', '.html']:
print(suffix)
err = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'out' + suffix], stderr=PIPE).stderr
warning = 'When Emscripten compiles to a typical native suffix for shared libraries (.so, .dylib, .dll) then it emits an object file. You should then compile that to an emscripten SIDE_MODULE (using that flag) with suffix .wasm (for wasm) or .js (for asm.js).'
self.assertContainedIf(warning, err, suffix in shared_suffixes)
def test_side_module_without_proper_target(self):
# SIDE_MODULE is only meaningful when compiling to wasm (or js+wasm)
# otherwise, we are just linking bitcode, and should show an error
for wasm in [0, 1]:
if self.is_wasm_backend() and not wasm:
continue
print(wasm)
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'SIDE_MODULE=1', '-o', 'a.so', '-s', 'WASM=%d' % wasm])
self.assertContained('SIDE_MODULE must only be used when compiling to an executable shared library, and not when emitting an object file', stderr)
@no_wasm_backend('asm.js optimizations')
def test_simplify_ifs(self):
def test(src, nums):
create_test_file('src.c', src)
for opts, ifs in [
[['-g2'], nums[0]],
[['--profiling'], nums[1]],
[['--profiling', '-g2'], nums[2]]
]:
print(opts, ifs)
if type(ifs) == int:
ifs = [ifs]
try_delete('a.out.js')
run_process([EMCC, 'src.c', '-O2', '-s', 'WASM=0'] + opts, stdout=PIPE)
src = open('a.out.js').read()
main = src[src.find('function _main'):src.find('\n}', src.find('function _main'))]
actual_ifs = main.count('if (')
assert actual_ifs in ifs, main + ' : ' + str([ifs, actual_ifs])
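# each call below passes (source, nums) where nums lists the expected 'if (' counts in main for -g2, --profiling, and --profiling -g2 builds; an entry may be a list of acceptable counts.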
test(r'''
#include <stdio.h>
#include <string.h>
int main(int argc, char **argv) {
if (argc > 5 && strlen(argv[0]) > 1 && strlen(argv[1]) > 2) printf("halp");
return 0;
}
''', [3, 1, 1])
test(r'''
#include <stdio.h>
#include <string.h>
int main(int argc, char **argv) {
while (argc % 3 == 0) {
if (argc > 5 && strlen(argv[0]) > 1 && strlen(argv[1]) > 2) {
printf("halp");
argc++;
} else {
while (argc > 0) {
printf("%d\n", argc--);
}
}
}
return 0;
}
''', [8, [5, 7], [5, 7]])
test(r'''
#include <stdio.h>
#include <string.h>
int main(int argc, char **argv) {
while (argc % 17 == 0) argc *= 2;
if (argc > 5 && strlen(argv[0]) > 10 && strlen(argv[1]) > 20) {
printf("halp");
argc++;
} else {
printf("%d\n", argc--);
}
while (argc % 17 == 0) argc *= 2;
return argc;
}
''', [6, 3, 3])
test(r'''
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[]) {
if (getenv("A") && getenv("B")) {
printf("hello world\n");
} else {
printf("goodnight moon\n");
}
printf("and that's that\n");
return 0;
}
''', [[3, 2], 1, 1])
test(r'''
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[]) {
if (getenv("A") || getenv("B")) {
printf("hello world\n");
}
printf("and that's that\n");
return 0;
}
''', [[3, 2], 1, 1])
def test_symbol_map(self):
UNMINIFIED_HEAP8 = 'var HEAP8 = new global.Int8Array'
UNMINIFIED_MIDDLE = 'function middle'
for opts in [['-O2'], ['-O3']]:
for wasm in [0, 1, 2]:
# -s WASM=2 is a WASM_BACKEND-only feature:
if wasm == 2 and not shared.Settings.WASM_BACKEND:
continue
print(opts, wasm)
self.clear()
create_test_file('src.c', r'''
#include <emscripten.h>
EM_JS(int, run_js, (), {
out(new Error().stack);
return 0;
});
EMSCRIPTEN_KEEPALIVE
void middle() {
if (run_js()) {
// fake recursion that is never reached, to avoid inlining in binaryen and LLVM
middle();
}
}
int main() {
EM_ASM({ _middle() });
}
''')
cmd = [EMCC, 'src.c', '--emit-symbol-map'] + opts
cmd += ['-s', 'WASM=%d' % wasm]
run_process(cmd)
# check that the map is correct
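# each line of the .symbols file maps a minified name to its original, e.g. 'a:_middle' (illustrative)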
with open('a.out.js.symbols') as f:
symbols = f.read()
lines = [line.split(':') for line in symbols.strip().split('\n')]
minified_middle = None
for minified, full in lines:
# handle both fastcomp and wasm backend notation
if full == '_middle' or full == 'middle':
minified_middle = minified
break
self.assertNotEqual(minified_middle, None)
if wasm:
# stack traces are standardized enough that we can easily check that the
# minified name is actually in the output
stack_trace_reference = 'wasm-function[%s]' % minified_middle
out = run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None)
self.assertContained(stack_trace_reference, out)
# make sure there are no symbols in the wasm itself
wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), 'a.out.wasm'], stdout=PIPE).stdout
for func_start in ('(func $middle', '(func $_middle'):
self.assertNotContained(func_start, wat)
# check we don't keep unnecessary debug info with wasm2js when emitting
# a symbol map
if self.is_wasm_backend() and wasm == 0 and '-O' in str(opts):
with open('a.out.js') as f:
js = f.read()
self.assertNotContained(UNMINIFIED_HEAP8, js)
self.assertNotContained(UNMINIFIED_MIDDLE, js)
# verify those patterns would exist with more debug info
run_process(cmd + ['--profiling-funcs'])
with open('a.out.js') as f:
js = f.read()
self.assertContained(UNMINIFIED_HEAP8, js)
self.assertContained(UNMINIFIED_MIDDLE, js)
def test_bc_to_bc(self):
# emcc should 'process' bitcode to bitcode. Build systems can request this if,
# e.g., they assume our 'executable' extension is bc and compile an .o to a .bc
# (the user would then need to build bc to js of course, but we need to actually
# emit the bc)
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.c')])
self.assertExists('hello_world.o')
run_process([EMCC, 'hello_world.o', '-o', 'hello_world.bc'])
self.assertExists('hello_world.o')
self.assertExists('hello_world.bc')
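# Calling a function pointer through a mismatched signature traps in wasm; EMULATE_FUNCTION_POINTER_CASTS is expected to make such calls succeed instead.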
def test_bad_function_pointer_cast(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
typedef int (*callback) (int, ...);
int impl(int foo) {
printf("Hello, world.\n");
return 0;
}
int main() {
volatile callback f = (callback) impl;
f(0); /* This fails with or without additional arguments. */
return 0;
}
''')
for opts in [0, 1, 2]:
for safe in [0, 1]:
for emulate_casts in [0, 1]:
for emulate_fps in [0, 1]:
for relocatable in [0, 1]:
for wasm in [0, 1]:
if self.is_wasm_backend() and (not wasm or emulate_fps):
continue
if emulate_casts and self.is_wasm_backend() and relocatable:
# TODO('https://github.com/emscripten-core/emscripten/issues/8507')
continue
cmd = [EMCC, 'src.cpp', '-O' + str(opts)]
if not wasm:
cmd += ['-s', 'WASM=0']
if safe:
cmd += ['-s', 'SAFE_HEAP']
if emulate_casts:
cmd += ['-s', 'EMULATE_FUNCTION_POINTER_CASTS']
if emulate_fps:
cmd += ['-s', 'EMULATED_FUNCTION_POINTERS']
if relocatable:
cmd += ['-s', 'RELOCATABLE'] # disables asm-optimized safe heap
print(cmd)
run_process(cmd)
output = run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None)
if emulate_casts:
# success!
self.assertContained('Hello, world.', output)
else:
# otherwise, the error depends on the mode we are in
if self.is_wasm_backend() or (wasm and (relocatable or emulate_fps)):
# wasm trap raised by the vm
self.assertContained('function signature mismatch', output)
elif opts == 0 and safe and not wasm:
# non-wasm safe mode checks asm.js function table masks
self.assertContained('Function table mask error', output)
elif opts == 0:
# informative error message (assertions are enabled in -O0)
self.assertContained('Invalid function pointer', output)
else:
# non-informative error
self.assertContained(('abort(', 'exception'), output)
@no_wasm_backend('asm.js function table feature')
def test_aliased_func_pointers(self):
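    # with aliasing (the asm.js default), function pointers of different
    # signatures may share index values, so each per-signature FUNCTION_TABLE_*
    # stays small; without it every pointer is globally unique and the tables
    # get padded. we count entries in the emitted tables to compare sizes.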
create_test_file('src.cpp', r'''
#include <stdio.h>
int impl1(int foo) { return foo; }
float impla(float foo) { return foo; }
int impl2(int foo) { return foo+1; }
float implb(float foo) { return foo+1; }
int impl3(int foo) { return foo+2; }
float implc(float foo) { return foo+2; }
int main(int argc, char **argv) {
volatile void *f = (void*)impl1;
if (argc == 50) f = (void*)impla;
if (argc == 51) f = (void*)impl2;
if (argc == 52) f = (void*)implb;
if (argc == 53) f = (void*)impl3;
if (argc == 54) f = (void*)implc;
return (int)f;
}
''')
print('aliasing')
sizes_ii = {}
sizes_dd = {}
for alias in [None, 0, 1]:
cmd = [EMCC, 'src.cpp', '-O1', '-s', 'WASM=0']
if alias is not None:
cmd += ['-s', 'ALIASING_FUNCTION_POINTERS=' + str(alias)]
else:
alias = -1
print(cmd)
run_process(cmd)
src = open('a.out.js').read().split('\n')
for line in src:
if line.strip().startswith('var FUNCTION_TABLE_ii = '):
sizes_ii[alias] = line.count(',')
if line.strip().startswith('var FUNCTION_TABLE_dd = '):
sizes_dd[alias] = line.count(',')
print('ii', sizes_ii)
print('dd', sizes_dd)
for sizes in [sizes_ii, sizes_dd]:
self.assertEqual(sizes[-1], sizes[1]) # default is to alias
self.assertLess(sizes[1], sizes[0]) # without aliasing, we have more unique values and fat tables
def test_bad_export(self):
for m in ['', ' ']:
self.clear()
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EXPORTED_FUNCTIONS=["' + m + '_main"]']
print(cmd)
stderr = run_process(cmd, stderr=PIPE, check=False).stderr
if m:
self.assertContained('undefined exported function: " _main"', stderr)
else:
self.assertContained('hello, world!', run_js('a.out.js'))
def test_no_dynamic_execution(self):
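    # DYNAMIC_EXECUTION=0 must keep eval() and new Function out of the output
    # (including when --preload-file is used), make emscripten_run_script()
    # abort at runtime, and reject RELOCATABLE; DYNAMIC_EXECUTION=2 permits
    # eval but warns with the calling location.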
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-O1', '-s', 'DYNAMIC_EXECUTION=0'])
self.assertContained('hello, world!', run_js('a.out.js'))
src = open('a.out.js').read()
self.assertNotContained('eval(', src)
self.assertNotContained('eval.', src)
self.assertNotContained('new Function', src)
try_delete('a.out.js')
    # Test that --preload-file doesn't add a use of eval().
create_test_file('temp.txt', "foo\n")
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-O1',
'-s', 'DYNAMIC_EXECUTION=0', '--preload-file', 'temp.txt'])
src = open('a.out.js').read()
assert 'eval(' not in src
assert 'eval.' not in src
assert 'new Function' not in src
try_delete('a.out.js')
# Test that -s DYNAMIC_EXECUTION=1 and -s RELOCATABLE=1 are not allowed together.
self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-O1',
'-s', 'DYNAMIC_EXECUTION=0', '-s', 'RELOCATABLE=1'])
try_delete('a.out.js')
create_test_file('test.c', r'''
#include <emscripten/emscripten.h>
int main() {
emscripten_run_script("console.log('hello from script');");
return 0;
}
''')
# Test that emscripten_run_script() aborts when -s DYNAMIC_EXECUTION=0
run_process([EMCC, 'test.c', '-O1', '-s', 'DYNAMIC_EXECUTION=0'])
self.assertContained('DYNAMIC_EXECUTION=0 was set, cannot eval', run_js('a.out.js', assert_returncode=None, full_output=True, stderr=PIPE))
try_delete('a.out.js')
# Test that emscripten_run_script() posts a warning when -s DYNAMIC_EXECUTION=2
run_process([EMCC, 'test.c', '-O1', '-s', 'DYNAMIC_EXECUTION=2'])
self.assertContained('Warning: DYNAMIC_EXECUTION=2 was set, but calling eval in the following location:', run_js('a.out.js', assert_returncode=None, full_output=True, stderr=PIPE))
self.assertContained('hello from script', run_js('a.out.js', assert_returncode=None, full_output=True, stderr=PIPE))
try_delete('a.out.js')
def test_init_file_at_offset(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
int main() {
int data = 0x12345678;
FILE *f = fopen("test.dat", "wb");
fseek(f, 100, SEEK_CUR);
fwrite(&data, 4, 1, f);
fclose(f);
int data2;
f = fopen("test.dat", "rb");
fread(&data2, 4, 1, f); // should read 0s, not that int we wrote at an offset
printf("read: %d\n", data2);
fseek(f, 0, SEEK_END);
long size = ftell(f); // should be 104, not 4
fclose(f);
printf("file size is %ld\n", size);
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('read: 0\nfile size is 104\n', run_js('a.out.js'))
def test_unlink(self):
self.do_other_test(os.path.join('other', 'unlink'))
def test_argv0_node(self):
create_test_file('code.cpp', r'''
#include <stdio.h>
int main(int argc, char **argv) {
printf("I am %s.\n", argv[0]);
return 0;
}
''')
run_process([EMCC, 'code.cpp'])
self.assertContained('I am ' + os.path.realpath(self.get_dir()).replace('\\', '/') + '/a.out.js', run_js('a.out.js').replace('\\', '/'))
def test_returncode(self):
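    # whether main() returns or calls exit(), and regardless of EXIT_RUNTIME
    # and async wasm compilation, the process exit code must match; the
    # "halting execution" warning appears only for exit() without EXIT_RUNTIME.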
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
#if CALL_EXIT
exit(CODE);
#else
return CODE;
#endif
}
''')
for code in [0, 123]:
for no_exit in [0, 1]:
for call_exit in [0, 1]:
for async_compile in [0, 1]:
run_process([EMCC, 'src.cpp', '-DCODE=%d' % code, '-s', 'EXIT_RUNTIME=%d' % (1 - no_exit), '-DCALL_EXIT=%d' % call_exit, '-s', 'WASM_ASYNC_COMPILATION=%d' % async_compile])
for engine in JS_ENGINES:
# async compilation can't return a code in d8
if async_compile and engine == V8_ENGINE:
continue
print(code, no_exit, call_exit, async_compile, engine)
proc = run_process(engine + ['a.out.js'], stderr=PIPE, check=False)
# we always emit the right exit code, whether we exit the runtime or not
self.assertEqual(proc.returncode, code)
msg = 'but EXIT_RUNTIME is not set, so halting execution but not exiting the runtime or preventing further async execution (build with EXIT_RUNTIME=1, if you want a true shutdown)'
if no_exit and call_exit:
self.assertContained(msg, proc.stderr)
else:
self.assertNotContained(msg, proc.stderr)
def test_emscripten_force_exit_NO_EXIT_RUNTIME(self):
create_test_file('src.cpp', r'''
#include <emscripten.h>
int main() {
#if CALL_EXIT
emscripten_force_exit(0);
#endif
}
''')
for no_exit in [0, 1]:
for call_exit in [0, 1]:
run_process([EMCC, 'src.cpp', '-s', 'EXIT_RUNTIME=%d' % (1 - no_exit), '-DCALL_EXIT=%d' % call_exit])
print(no_exit, call_exit)
out = run_js('a.out.js', stdout=PIPE, stderr=PIPE, full_output=True)
assert ('emscripten_force_exit cannot actually shut down the runtime, as the build does not have EXIT_RUNTIME set' in out) == (no_exit and call_exit), out
def test_mkdir_silly(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <dirent.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
int main(int argc, char **argv) {
printf("\n");
for (int i = 1; i < argc; i++) {
printf("%d:\n", i);
int ok = mkdir(argv[i], S_IRWXU|S_IRWXG|S_IRWXO);
printf(" make %s: %d\n", argv[i], ok);
DIR *dir = opendir(argv[i]);
printf(" open %s: %d\n", argv[i], dir != NULL);
if (dir) {
struct dirent *entry;
while ((entry = readdir(dir))) {
printf(" %s, %d\n", entry->d_name, entry->d_type);
}
}
}
}
''')
run_process([EMCC, 'src.cpp'])
# cannot create /, can open
self.assertContained(r'''
1:
make /: -1
open /: 1
., 4
.., 4
tmp, 4
home, 4
dev, 4
proc, 4
''', run_js('a.out.js', args=['/']))
# cannot create empty name, cannot open
self.assertContained(r'''
1:
make : -1
open : 0
''', run_js('a.out.js', args=['']))
# can create unnormalized path, can open
self.assertContained(r'''
1:
make /a//: 0
open /a//: 1
., 4
.., 4
''', run_js('a.out.js', args=['/a//']))
# can create child unnormalized
self.assertContained(r'''
1:
make /a: 0
open /a: 1
., 4
.., 4
2:
make /a//b//: 0
open /a//b//: 1
., 4
.., 4
''', run_js('a.out.js', args=['/a', '/a//b//']))
def test_stat_silly(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <errno.h>
#include <sys/stat.h>
int main(int argc, char **argv) {
for (int i = 1; i < argc; i++) {
const char *path = argv[i];
struct stat path_stat;
if (stat(path, &path_stat) != 0) {
printf("Failed to stat path: %s; errno=%d\n", path, errno);
} else {
printf("ok on %s\n", path);
}
}
}
''')
run_process([EMCC, 'src.cpp'])
# cannot stat ""
self.assertContained(r'''Failed to stat path: /a; errno=44
Failed to stat path: ; errno=44
''', run_js('a.out.js', args=['/a', '']))
def test_symlink_silly(self):
create_test_file('src.cpp', r'''
#include <dirent.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
int main(int argc, char **argv) {
if (symlink(argv[1], argv[2]) != 0) {
printf("Failed to symlink paths: %s, %s; errno=%d\n", argv[1], argv[2], errno);
} else {
printf("ok\n");
}
}
''')
run_process([EMCC, 'src.cpp'])
# cannot symlink nonexistents
self.assertContained(r'Failed to symlink paths: , abc; errno=44', run_js('a.out.js', args=['', 'abc']))
self.assertContained(r'Failed to symlink paths: , ; errno=44', run_js('a.out.js', args=['', '']))
self.assertContained(r'ok', run_js('a.out.js', args=['123', 'abc']))
self.assertContained(r'Failed to symlink paths: abc, ; errno=44', run_js('a.out.js', args=['abc', '']))
def test_rename_silly(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <errno.h>
int main(int argc, char **argv) {
if (rename(argv[1], argv[2]) != 0) {
printf("Failed to rename paths: %s, %s; errno=%d\n", argv[1], argv[2], errno);
} else {
printf("ok\n");
}
}
''')
run_process([EMCC, 'src.cpp'])
    # renaming nonexistent sources or using empty paths fails with ENOENT
self.assertContained(r'Failed to rename paths: , abc; errno=44', run_js('a.out.js', args=['', 'abc']))
self.assertContained(r'Failed to rename paths: , ; errno=44', run_js('a.out.js', args=['', '']))
self.assertContained(r'Failed to rename paths: 123, abc; errno=44', run_js('a.out.js', args=['123', 'abc']))
self.assertContained(r'Failed to rename paths: abc, ; errno=44', run_js('a.out.js', args=['abc', '']))
def test_readdir_r_silly(self):
create_test_file('src.cpp', r'''
#include <iostream>
#include <cstring>
#include <cerrno>
#include <unistd.h>
#include <fcntl.h>
#include <cstdlib>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/types.h>
using std::endl;
namespace
{
void check(const bool result)
{
if(not result) {
std::cout << "Check failed!" << endl;
throw "bad";
}
}
// Do a recursive directory listing of the directory whose path is specified
// by \a name.
void ls(const std::string& name, std::size_t indent = 0)
{
::DIR *dir;
struct ::dirent *entry;
if(indent == 0) {
std::cout << name << endl;
++indent;
}
// Make sure we can open the directory. This should also catch cases where
// the empty string is passed in.
if (not (dir = ::opendir(name.c_str()))) {
const int error = errno;
std::cout
<< "Failed to open directory: " << name << "; " << error << endl;
return;
}
// Just checking the sanity.
if (name.empty()) {
std::cout
<< "Managed to open a directory whose name was the empty string.."
<< endl;
check(::closedir(dir) != -1);
return;
}
// Iterate over the entries in the directory.
while ((entry = ::readdir(dir))) {
const std::string entryName(entry->d_name);
if (entryName == "." || entryName == "..") {
// Skip the dot entries.
continue;
}
const std::string indentStr(indent * 2, ' ');
if (entryName.empty()) {
std::cout
<< indentStr << "\"\": Found empty string as a "
<< (entry->d_type == DT_DIR ? "directory" : "file")
<< " entry!" << endl;
continue;
} else {
std::cout << indentStr << entryName
<< (entry->d_type == DT_DIR ? "/" : "") << endl;
}
if (entry->d_type == DT_DIR) {
// We found a subdirectory; recurse.
ls(std::string(name + (name == "/" ? "" : "/" ) + entryName),
indent + 1);
}
}
// Close our handle.
check(::closedir(dir) != -1);
}
void touch(const std::string &path)
{
const int fd = ::open(path.c_str(), O_CREAT | O_TRUNC, 0644);
check(fd != -1);
check(::close(fd) != -1);
}
}
int main()
{
check(::mkdir("dir", 0755) == 0);
touch("dir/a");
touch("dir/b");
touch("dir/c");
touch("dir/d");
touch("dir/e");
std::cout << "Before:" << endl;
ls("dir");
std::cout << endl;
// Attempt to delete entries as we walk the (single) directory.
::DIR * const dir = ::opendir("dir");
check(dir != NULL);
struct ::dirent *entry;
while((entry = ::readdir(dir)) != NULL) {
const std::string name(entry->d_name);
// Skip "." and "..".
if(name == "." || name == "..") {
continue;
}
// Unlink it.
std::cout << "Unlinking " << name << endl;
check(::unlink(("dir/" + name).c_str()) != -1);
}
check(::closedir(dir) != -1);
std::cout << "After:" << endl;
ls("dir");
std::cout << endl;
return 0;
}
''')
run_process([EMCC, 'src.cpp'])
    # entries can be unlinked while iterating; afterwards the directory lists as empty
self.assertContained(r'''Before:
dir
a
b
c
d
e
Unlinking a
Unlinking b
Unlinking c
Unlinking d
Unlinking e
After:
dir
''', run_js('a.out.js', args=['', 'abc']))
def test_emversion(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
int main() {
printf("major: %d\n", __EMSCRIPTEN_major__);
printf("minor: %d\n", __EMSCRIPTEN_minor__);
printf("tiny: %d\n", __EMSCRIPTEN_tiny__);
}
''')
run_process([EMCC, 'src.cpp'])
expected = '''\
major: %d
minor: %d
tiny: %d
''' % (shared.EMSCRIPTEN_VERSION_MAJOR, shared.EMSCRIPTEN_VERSION_MINOR, shared.EMSCRIPTEN_VERSION_TINY)
self.assertContained(expected, run_js('a.out.js'))
def test_libc_files_without_syscalls(self):
# a program which includes FS due to libc js library support, but has no syscalls,
# so full FS support would normally be optimized out
create_test_file('src.cpp', r'''
#include <sys/time.h>
#include <stddef.h>
int main() {
return utimes(NULL, NULL);
}''')
run_process([EMCC, 'src.cpp'])
def test_syscall_without_filesystem(self):
# a program which includes a non-trivial syscall, but disables the filesystem.
create_test_file('src.c', r'''
#include <sys/time.h>
#include <stddef.h>
extern int __sys_openat(int);
int main() {
return __sys_openat(0);
}''')
run_process([EMCC, 'src.c', '-s', 'NO_FILESYSTEM=1'])
def test_dashS(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-S'])
self.assertExists('hello_world.s')
def test_dashS_stdout(self):
stdout = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-S', '-o', '-'], stdout=PIPE).stdout
self.assertEqual(os.listdir('.'), [])
self.assertContained('hello_world.c', stdout)
def test_emit_llvm(self):
# TODO(https://github.com/emscripten-core/emscripten/issues/9016):
# We shouldn't need to copy the file here but if we don't then emcc will
# internally clobber the hello_world.ll in tests.
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'hello_world.c')
run_process([EMCC, 'hello_world.c', '-S', '-emit-llvm'])
self.assertExists('hello_world.ll')
bitcode = open('hello_world.ll').read()
self.assertContained('target triple = "', bitcode)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-c', '-emit-llvm'])
self.assertTrue(building.is_bitcode('hello_world.bc'))
def test_dashE(self):
create_test_file('src.cpp', r'''#include <emscripten.h>
__EMSCRIPTEN_major__ __EMSCRIPTEN_minor__ __EMSCRIPTEN_tiny__ EMSCRIPTEN_KEEPALIVE
''')
def test(args=[]):
print(args)
out = run_process([EMCC, 'src.cpp', '-E'] + args, stdout=PIPE).stdout
self.assertContained('%d %d %d __attribute__((used))' % (shared.EMSCRIPTEN_VERSION_MAJOR, shared.EMSCRIPTEN_VERSION_MINOR, shared.EMSCRIPTEN_VERSION_TINY), out)
test()
test(['--bind'])
def test_dashE_respect_dashO(self):
# issue #3365
with_dash_o = run_process([EMXX, path_from_root('tests', 'hello_world.cpp'), '-E', '-o', 'ignored.js'], stdout=PIPE, stderr=PIPE).stdout
without_dash_o = run_process([EMXX, path_from_root('tests', 'hello_world.cpp'), '-E'], stdout=PIPE, stderr=PIPE).stdout
self.assertEqual(len(with_dash_o), 0)
self.assertNotEqual(len(without_dash_o), 0)
def test_dashM(self):
out = run_process([EMXX, path_from_root('tests', 'hello_world.cpp'), '-M'], stdout=PIPE).stdout
self.assertContained('hello_world.o:', out) # Verify output is just a dependency rule instead of bitcode or js
def test_dashM_respect_dashO(self):
# issue #3365
with_dash_o = run_process([EMXX, path_from_root('tests', 'hello_world.cpp'), '-M', '-o', 'ignored.js'], stdout=PIPE).stdout
without_dash_o = run_process([EMXX, path_from_root('tests', 'hello_world.cpp'), '-M'], stdout=PIPE).stdout
self.assertEqual(len(with_dash_o), 0)
self.assertNotEqual(len(without_dash_o), 0)
def test_malloc_implicit(self):
self.do_other_test(os.path.join('other', 'malloc_implicit'))
def test_switch64phi(self):
# issue 2539, fastcomp segfault on phi-i64 interaction
create_test_file('src.cpp', r'''
#include <cstdint>
#include <limits>
#include <cstdio>
//============================================================================
namespace
{
class int_adapter {
public:
typedef ::int64_t int_type;
int_adapter(int_type v = 0)
: value_(v)
{}
static const int_adapter pos_infinity()
{
return (::std::numeric_limits<int_type>::max)();
}
static const int_adapter neg_infinity()
{
return (::std::numeric_limits<int_type>::min)();
}
static const int_adapter not_a_number()
{
return (::std::numeric_limits<int_type>::max)()-1;
}
static bool is_neg_inf(int_type v)
{
return (v == neg_infinity().as_number());
}
static bool is_pos_inf(int_type v)
{
return (v == pos_infinity().as_number());
}
static bool is_not_a_number(int_type v)
{
return (v == not_a_number().as_number());
}
bool is_infinity() const
{
return (value_ == neg_infinity().as_number() ||
value_ == pos_infinity().as_number());
}
bool is_special() const
{
return(is_infinity() || value_ == not_a_number().as_number());
}
bool operator<(const int_adapter& rhs) const
{
if(value_ == not_a_number().as_number()
|| rhs.value_ == not_a_number().as_number()) {
return false;
}
if(value_ < rhs.value_) return true;
return false;
}
int_type as_number() const
{
return value_;
}
int_adapter operator-(const int_adapter& rhs)const
{
if(is_special() || rhs.is_special())
{
if (rhs.is_pos_inf(rhs.as_number()))
{
return int_adapter(1);
}
if (rhs.is_neg_inf(rhs.as_number()))
{
return int_adapter();
}
}
return int_adapter();
}
private:
int_type value_;
};
class time_iterator {
public:
time_iterator(int_adapter t, int_adapter d)
: current_(t),
offset_(d)
{}
time_iterator& operator--()
{
current_ = int_adapter(current_ - offset_);
return *this;
}
bool operator>=(const int_adapter& t)
{
return not (current_ < t);
}
private:
int_adapter current_;
int_adapter offset_;
};
void iterate_backward(const int_adapter *answers, const int_adapter& td)
{
int_adapter end = answers[0];
time_iterator titr(end, td);
std::puts("");
for (; titr >= answers[0]; --titr) {
}
}
}
int
main()
{
const int_adapter answer1[] = {};
iterate_backward(NULL, int_adapter());
iterate_backward(answer1, int_adapter());
}
''')
run_process([EMCC, 'src.cpp', '-O2', '-s', 'SAFE_HEAP=1'])
@parameterized({
'none': [{'EMCC_FORCE_STDLIBS': None}, False],
# forced libs is ok, they were there anyhow
'normal': [{'EMCC_FORCE_STDLIBS': 'libc,libc++abi,libc++'}, False],
# partial list, but ok since we grab them as needed
    'partial': [{'EMCC_FORCE_STDLIBS': 'libc++'}, False],
# fail! not enough stdlibs
'partial_only': [{'EMCC_FORCE_STDLIBS': 'libc++,libc,libc++abi', 'EMCC_ONLY_FORCED_STDLIBS': '1'}, True],
# force all the needed stdlibs, so this works even though we ignore the input file
'full_only': [{'EMCC_FORCE_STDLIBS': 'libc,libc++abi,libc++,libpthread,libmalloc', 'EMCC_ONLY_FORCED_STDLIBS': '1'}, False],
})
def test_only_force_stdlibs(self, env, fail):
with env_modify(env):
run_process([EMXX, path_from_root('tests', 'hello_libcxx.cpp'), '-s', 'WARN_ON_UNDEFINED_SYMBOLS=0'])
if fail:
output = self.expect_fail(NODE_JS + ['a.out.js'], stdout=PIPE)
self.assertContained('missing function', output)
else:
self.assertContained('hello, world!', run_js('a.out.js'))
def test_only_force_stdlibs_2(self):
create_test_file('src.cpp', r'''
#include <iostream>
#include <stdexcept>
int main()
{
try {
throw std::exception();
std::cout << "got here" << std::endl;
}
catch (const std::exception& ex) {
std::cout << "Caught exception: " << ex.what() << std::endl;
}
}
''')
with env_modify({'EMCC_FORCE_STDLIBS': 'libc,libc++abi,libc++,libmalloc,libpthread', 'EMCC_ONLY_FORCED_STDLIBS': '1'}):
run_process([EMXX, 'src.cpp', '-s', 'DISABLE_EXCEPTION_CATCHING=0'])
self.assertContained('Caught exception: std::exception', run_js('a.out.js', stderr=PIPE))
def test_strftime_zZ(self):
create_test_file('src.cpp', r'''
#include <cerrno>
#include <cstring>
#include <ctime>
#include <iostream>
int main()
{
// Buffer to hold the current hour of the day. Format is HH + nul
// character.
char hour[3];
// Buffer to hold our ISO 8601 formatted UTC offset for the current
// timezone. Format is [+-]hhmm + nul character.
char utcOffset[6];
// Buffer to hold the timezone name or abbreviation. Just make it
// sufficiently large to hold most timezone names.
char timezone[128];
std::tm tm;
// Get the current timestamp.
const std::time_t now = std::time(NULL);
// What time is that here?
if (::localtime_r(&now, &tm) == NULL) {
const int error = errno;
std::cout
<< "Failed to get localtime for timestamp=" << now << "; errno=" << error
<< "; " << std::strerror(error) << std::endl;
return 1;
}
size_t result = 0;
// Get the formatted hour of the day.
if ((result = std::strftime(hour, 3, "%H", &tm)) != 2) {
const int error = errno;
std::cout
<< "Failed to format hour for timestamp=" << now << "; result="
<< result << "; errno=" << error << "; " << std::strerror(error)
<< std::endl;
return 1;
}
std::cout << "The current hour of the day is: " << hour << std::endl;
// Get the formatted UTC offset in ISO 8601 format.
if ((result = std::strftime(utcOffset, 6, "%z", &tm)) != 5) {
const int error = errno;
std::cout
<< "Failed to format UTC offset for timestamp=" << now << "; result="
<< result << "; errno=" << error << "; " << std::strerror(error)
<< std::endl;
return 1;
}
std::cout << "The current timezone offset is: " << utcOffset << std::endl;
// Get the formatted timezone name or abbreviation. We don't know how long
// this will be, so just expect some data to be written to the buffer.
if ((result = std::strftime(timezone, 128, "%Z", &tm)) == 0) {
const int error = errno;
std::cout
<< "Failed to format timezone for timestamp=" << now << "; result="
<< result << "; errno=" << error << "; " << std::strerror(error)
<< std::endl;
return 1;
}
std::cout << "The current timezone is: " << timezone << std::endl;
std::cout << "ok!\n";
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('ok!', run_js('a.out.js'))
def test_strptime_symmetry(self):
building.emcc(path_from_root('tests', 'strptime_symmetry.cpp'), output_filename='a.out.js')
self.assertContained('TEST PASSED', run_js('a.out.js'))
def test_truncate_from_0(self):
create_test_file('src.cpp', r'''
#include <cerrno>
#include <cstring>
#include <iostream>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
using std::endl;
//============================================================================
// :: Helpers
namespace
{
// Returns the size of the regular file specified as 'path'.
::off_t getSize(const char* const path)
{
// Stat the file and make sure that it's the expected size.
struct ::stat path_stat;
if (::stat(path, &path_stat) != 0) {
const int error = errno;
std::cout
<< "Failed to lstat path: " << path << "; errno=" << error << "; "
<< std::strerror(error) << endl;
return -1;
}
std::cout
<< "Size of file is: " << path_stat.st_size << endl;
return path_stat.st_size;
}
// Causes the regular file specified in 'path' to have a size of 'length'
// bytes.
void resize(const char* const path,
const ::off_t length)
{
std::cout
<< "Truncating file=" << path << " to length=" << length << endl;
if (::truncate(path, length) == -1)
{
const int error = errno;
std::cout
<< "Failed to truncate file=" << path << "; errno=" << error
<< "; " << std::strerror(error) << endl;
}
const ::off_t size = getSize(path);
if (size != length) {
std::cout
<< "Failed to truncate file=" << path << " to length=" << length
<< "; got size=" << size << endl;
}
}
// Helper to create a file with the given content.
void createFile(const std::string& path, const std::string& content)
{
std::cout
<< "Creating file: " << path << " with content=" << content << endl;
const int fd = ::open(path.c_str(), O_CREAT | O_WRONLY, 0644);
if (fd == -1) {
const int error = errno;
std::cout
<< "Failed to open file for writing: " << path << "; errno=" << error
<< "; " << std::strerror(error) << endl;
return;
}
if (::write(fd, content.c_str(), content.size()) != content.size()) {
const int error = errno;
std::cout
<< "Failed to write content=" << content << " to file=" << path
<< "; errno=" << error << "; " << std::strerror(error) << endl;
// Fall through to close FD.
}
::close(fd);
}
}
//============================================================================
// :: Entry Point
int main()
{
const char* const file = "/tmp/file";
createFile(file, "This is some content");
getSize(file);
resize(file, 32);
resize(file, 17);
resize(file, 0);
        // Resizing back up from 0 should also succeed.
resize(file, 32);
return 0;
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained(r'''Creating file: /tmp/file with content=This is some content
Size of file is: 20
Truncating file=/tmp/file to length=32
Size of file is: 32
Truncating file=/tmp/file to length=17
Size of file is: 17
Truncating file=/tmp/file to length=0
Size of file is: 0
Truncating file=/tmp/file to length=32
Size of file is: 32
''', run_js('a.out.js'))
def test_create_readonly(self):
create_test_file('src.cpp', r'''
#include <cerrno>
#include <cstring>
#include <iostream>
#include <fcntl.h>
#include <unistd.h>
using std::endl;
//============================================================================
// :: Helpers
namespace
{
// Helper to create a read-only file with content.
void readOnlyFile(const std::string& path, const std::string& content)
{
std::cout
<< "Creating file: " << path << " with content of size="
<< content.size() << endl;
const int fd = ::open(path.c_str(), O_CREAT | O_WRONLY, 0400);
if (fd == -1) {
const int error = errno;
std::cout
<< "Failed to open file for writing: " << path << "; errno=" << error
<< "; " << std::strerror(error) << endl;
return;
}
// Write the content to the file.
ssize_t result = 0;
if ((result = ::write(fd, content.data(), content.size()))
!= ssize_t(content.size()))
{
const int error = errno;
std::cout
<< "Failed to write to file=" << path << "; errno=" << error
<< "; " << std::strerror(error) << endl;
// Fall through to close the file.
}
else {
std::cout
<< "Data written to file=" << path << "; successfully wrote "
<< result << " bytes" << endl;
}
::close(fd);
}
}
//============================================================================
// :: Entry Point
int main()
{
const char* const file = "/tmp/file";
unlink(file);
readOnlyFile(file, "This content should get written because the file "
"does not yet exist and so, only the mode of the "
"containing directory will influence my ability to "
"create and open the file. The mode of the file only "
"applies to opening of the stream, not subsequent stream "
"operations after stream has opened.\n\n");
readOnlyFile(file, "This should not get written because the file already "
"exists and is read-only.\n\n");
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained(r'''Creating file: /tmp/file with content of size=292
Data written to file=/tmp/file; successfully wrote 292 bytes
Creating file: /tmp/file with content of size=79
Failed to open file for writing: /tmp/file; errno=2; Permission denied
''', run_js('a.out.js'))
def test_embed_file_large(self):
# If such long files are encoded on one line,
# they overflow the interpreter's limit
large_size = int(1500000)
create_test_file('large.txt', 'x' * large_size)
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <unistd.h>
int main()
{
FILE* fp = fopen("large.txt", "r");
if (fp) {
printf("ok\n");
fseek(fp, 0L, SEEK_END);
printf("%ld\n", ftell(fp));
} else {
printf("failed to open large file.txt\n");
}
return 0;
}
''')
run_process([EMCC, 'src.cpp', '--embed-file', 'large.txt'])
for engine in JS_ENGINES:
if engine == V8_ENGINE:
continue # ooms
print(engine)
self.assertContained('ok\n' + str(large_size) + '\n', run_js('a.out.js', engine=engine))
def test_force_exit(self):
create_test_file('src.cpp', r'''
#include <emscripten/emscripten.h>
namespace
{
extern "C"
EMSCRIPTEN_KEEPALIVE
void callback()
{
EM_ASM({ out('callback pre()') });
::emscripten_force_exit(42);
EM_ASM({ out('callback post()') });
}
}
int
main()
{
EM_ASM({ setTimeout(function() { out("calling callback()"); _callback() }, 100) });
::emscripten_exit_with_live_runtime();
return 123;
}
''')
run_process([EMCC, 'src.cpp'])
output = run_js('a.out.js', assert_returncode=42)
assert 'callback pre()' in output
assert 'callback post()' not in output
def test_bad_locale(self):
create_test_file('src.cpp', r'''
#include <locale.h>
#include <stdio.h>
#include <wctype.h>
int
main(const int argc, const char * const * const argv)
{
const char * const locale = (argc > 1 ? argv[1] : "C");
const char * const actual = setlocale(LC_ALL, locale);
if(actual == NULL) {
printf("%s locale not supported\n",
locale);
return 0;
}
printf("locale set to %s: %s\n", locale, actual);
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('locale set to C: C;C;C;C;C;C',
run_js('a.out.js', args=['C']))
self.assertContained('locale set to waka: waka;waka;waka;waka;waka;waka',
run_js('a.out.js', args=['waka']))
def test_browser_language_detection(self):
# Test HTTP Accept-Language parsing by simulating navigator.languages #8751
run_process([EMCC,
path_from_root('tests', 'test_browser_language_detection.c')])
self.assertContained('C.UTF-8', run_js('a.out.js'))
# Accept-Language: fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3
create_test_file('preamble.js', r'''navigator = {};
navigator.languages = [ "fr", "fr-FR", "en-US", "en" ];''')
run_process([EMCC, '--pre-js', 'preamble.js',
path_from_root('tests', 'test_browser_language_detection.c')])
self.assertContained('fr.UTF-8', run_js('a.out.js'))
# Accept-Language: fr-FR,fr;q=0.8,en-US;q=0.5,en;q=0.3
create_test_file('preamble.js', r'''navigator = {};
navigator.languages = [ "fr-FR", "fr", "en-US", "en" ];''')
run_process([EMCC, '--pre-js', 'preamble.js',
path_from_root('tests', 'test_browser_language_detection.c')])
self.assertContained('fr_FR.UTF-8', run_js('a.out.js'))
def test_js_main(self):
    # try to add a main() from JS at runtime. this is not supported (the
    # compiler needs to know about main at compile time).
create_test_file('pre_main.js', r'''
var Module = {
'_main': function() {
}
};
''')
create_test_file('src.cpp', '')
run_process([EMCC, 'src.cpp', '--pre-js', 'pre_main.js'])
self.assertContained('compiled without a main, but one is present. if you added it from JS, use Module["onRuntimeInitialized"]',
run_js('a.out.js', assert_returncode=None, stderr=PIPE))
def test_js_malloc(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
for (var i = 0; i < 1000; i++) {
var ptr = Module._malloc(1024 * 1024); // only done in JS, but still must not leak
Module._free(ptr);
}
});
printf("ok.\n");
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('ok.', run_js('a.out.js', args=['C']))
def test_locale_wrong(self):
create_test_file('src.cpp', r'''
#include <locale>
#include <iostream>
#include <stdexcept>
int
main(const int argc, const char * const * const argv)
{
const char * const name = argc > 1 ? argv[1] : "C";
try {
const std::locale locale(name);
std::cout
<< "Constructed locale \"" << name << "\"\n"
<< "This locale is "
<< (locale == std::locale::global(locale) ? "" : "not ")
<< "the global locale.\n"
<< "This locale is " << (locale == std::locale::classic() ? "" : "not ")
<< "the C locale." << std::endl;
} catch(const std::runtime_error &ex) {
std::cout
<< "Can't construct locale \"" << name << "\": " << ex.what()
<< std::endl;
return 1;
} catch(...) {
std::cout
<< "FAIL: Unexpected exception constructing locale \"" << name << '\"'
<< std::endl;
return 127;
}
}
''')
run_process([EMCC, 'src.cpp', '-s', 'EXIT_RUNTIME=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0'])
self.assertContained('Constructed locale "C"\nThis locale is the global locale.\nThis locale is the C locale.', run_js('a.out.js', args=['C']))
self.assertContained('''Can't construct locale "waka": collate_byname<char>::collate_byname failed to construct for waka''', run_js('a.out.js', args=['waka'], assert_returncode=1))
def test_cleanup_os(self):
# issue 2644
def test(args, be_clean):
print(args)
self.clear()
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'a.c')
create_test_file('b.c', ' ')
run_process([EMCC, 'a.c', 'b.c'] + args)
clutter = glob.glob('*.o')
if be_clean:
assert len(clutter) == 0, 'should not leave clutter ' + str(clutter)
else:
assert len(clutter) == 2, 'should leave .o files'
test(['-o', 'c.bc'], True)
test(['-o', 'c.js'], True)
test(['-o', 'c.html'], True)
test(['-c'], False)
@no_wasm_backend('asm.js debug info')
def test_js_dash_g(self):
create_test_file('src.c', '''
#include <stdio.h>
#include <assert.h>
void checker(int x) {
x += 20;
assert(x < 15); // this is line 7!
}
int main() {
checker(10);
return 0;
}
''')
def check(has):
print(has)
lines = open('a.out.js').readlines()
lines = [line for line in lines if '___assert_fail(' in line or '___assert_func(' in line]
found_line_num = any(('//@line 7 "' in line) for line in lines)
found_filename = any(('src.c"\n' in line) for line in lines)
assert found_line_num == has, 'Must have debug info with the line number'
assert found_filename == has, 'Must have debug info with the filename'
run_process([EMCC, '-s', 'WASM=0', 'src.c', '-g'])
check(True)
run_process([EMCC, '-s', 'WASM=0', 'src.c'])
check(False)
run_process([EMCC, '-s', 'WASM=0', 'src.c', '-g0'])
check(False)
run_process([EMCC, '-s', 'WASM=0', 'src.c', '-g0', '-g']) # later one overrides
check(True)
run_process([EMCC, '-s', 'WASM=0', 'src.c', '-g', '-g0']) # later one overrides
check(False)
def test_dash_g_bc(self):
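    # -g (and -g3/-g4) should add LLVM debug info to emitted bitcode, growing
    # the file; -g0/-g1/-g2 should match the no-flag size.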
def test(opts):
print(opts)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'a_.bc'] + opts)
sizes = {'_': os.path.getsize('a_.bc')}
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-g', '-o', 'ag.bc'] + opts)
sizes['g'] = os.path.getsize('ag.bc')
for i in range(0, 5):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-g' + str(i), '-o', 'a' + str(i) + '.bc'] + opts)
sizes[i] = os.path.getsize('a' + str(i) + '.bc')
print(' ', sizes)
assert sizes['_'] == sizes[0] == sizes[1] == sizes[2], 'no debug means no llvm debug info ' + str(sizes)
assert sizes['g'] == sizes[3] == sizes[4], '-g or -g4 means llvm debug info ' + str(sizes)
assert sizes['_'] < sizes['g'], 'llvm debug info has positive size ' + str(sizes)
test([])
test(['-O1'])
def test_no_filesystem(self):
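    # linking in full FS support should depend on actual syscall use: fopen()
    # pulls in `var FS` and roughly 100KB of code, while plain hello_world
    # drops it automatically.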
FS_MARKER = 'var FS'
# fopen forces full filesystem support
run_process([EMCC, path_from_root('tests', 'hello_world_fopen.c'), '-s', 'ASSERTIONS=0'])
yes_size = os.path.getsize('a.out.js')
self.assertContained('hello, world!', run_js('a.out.js'))
self.assertContained(FS_MARKER, open('a.out.js').read())
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'ASSERTIONS=0'])
no_size = os.path.getsize('a.out.js')
self.assertContained('hello, world!', run_js('a.out.js'))
self.assertNotContained(FS_MARKER, open('a.out.js').read())
print('yes fs, no fs:', yes_size, no_size)
# ~100K of FS code is removed
self.assertGreater(yes_size - no_size, 90000)
self.assertLess(no_size, 360000)
def test_no_filesystem_libcxx(self):
run_process([EMCC, path_from_root('tests', 'hello_libcxx.cpp'), '-s', 'FILESYSTEM=0'])
self.assertContained('hello, world!', run_js('a.out.js'))
def test_no_nuthin(self):
# check FILESYSTEM is automatically set, and effective
def test(opts, absolute):
print('opts, absolute:', opts, absolute)
sizes = {}
def do(name, source, moar_opts):
self.clear()
        # pad the name to a common length so that it doesn't affect the size of
        # the output
padded_name = name + '_' * (20 - len(name))
run_process([EMCC, path_from_root('tests', source), '-o', padded_name + '.js'] + opts + moar_opts)
sizes[name] = os.path.getsize(padded_name + '.js')
if os.path.exists(padded_name + '.wasm'):
sizes[name] += os.path.getsize(padded_name + '.wasm')
self.assertContained('hello, world!', run_js(padded_name + '.js'))
do('normal', 'hello_world_fopen.c', [])
do('no_fs', 'hello_world.c', []) # without fopen, we should auto-detect we do not need full fs support and can do FILESYSTEM=0
do('no_fs_manual', 'hello_world.c', ['-s', 'FILESYSTEM=0'])
print(' ', sizes)
self.assertLess(sizes['no_fs'], sizes['normal'])
self.assertLess(sizes['no_fs'], absolute)
# manual can usually remove a tiny bit more
self.assertLess(sizes['no_fs_manual'], sizes['no_fs'] + 30)
test(['-s', 'ASSERTIONS=0'], 120000) # we don't care about code size with assertions
test(['-O1'], 91000)
test(['-O2'], 46000)
test(['-O3', '--closure', '1'], 17000)
# asm.js too
if not self.is_wasm_backend():
test(['-O3', '--closure', '1', '-s', 'WASM=0'], 36000)
test(['-O3', '--closure', '2', '-s', 'WASM=0'], 33000) # might change now and then
def test_no_browser(self):
BROWSER_INIT = 'var Browser'
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
self.assertNotContained(BROWSER_INIT, open('a.out.js').read())
run_process([EMCC, path_from_root('tests', 'browser_main_loop.c')]) # uses emscripten_set_main_loop, which needs Browser
self.assertContained(BROWSER_INIT, open('a.out.js').read())
def test_EXPORTED_RUNTIME_METHODS(self):
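    # only methods listed in EXPORTED_RUNTIME_METHODS should be attached to
    # Module; EXTRA_EXPORTED_RUNTIME_METHODS extends rather than replaces the
    # list.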
def test(opts, has, not_has):
print(opts, has, not_has)
self.clear()
# check without assertions, as with assertions we add stubs for the things we remove (which
# print nice error messages)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'ASSERTIONS=0'] + opts)
self.assertContained('hello, world!', run_js('a.out.js'))
src = open('a.out.js').read()
self.assertContained(has, src)
self.assertNotContained(not_has, src)
test([], 'Module["', 'Module["waka')
test(['-s', 'EXPORTED_RUNTIME_METHODS=[]'], '', 'Module["addRunDependency')
test(['-s', 'EXPORTED_RUNTIME_METHODS=["addRunDependency"]'], 'Module["addRunDependency', 'Module["waka')
test(['-s', 'EXPORTED_RUNTIME_METHODS=[]', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["addRunDependency"]'], 'Module["addRunDependency', 'Module["waka')
def test_stat_fail_alongtheway(self):
create_test_file('src.cpp', r'''
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#define CHECK(expression) \
if(!(expression)) { \
error = errno; \
printf("FAIL: %s\n", #expression); fail = 1; \
} else { \
error = errno; \
printf("pass: %s\n", #expression); \
} \
int
main()
{
int error;
int fail = 0;
CHECK(mkdir("path", 0777) == 0);
CHECK(close(open("path/file", O_CREAT | O_WRONLY, 0644)) == 0);
{
struct stat st;
CHECK(stat("path", &st) == 0);
CHECK(st.st_mode = 0777);
}
{
struct stat st;
CHECK(stat("path/nosuchfile", &st) == -1);
printf("info: errno=%d %s\n", error, strerror(error));
CHECK(error == ENOENT);
}
{
struct stat st;
CHECK(stat("path/file", &st) == 0);
CHECK(st.st_mode = 0666);
}
{
struct stat st;
CHECK(stat("path/file/impossible", &st) == -1);
printf("info: errno=%d %s\n", error, strerror(error));
CHECK(error == ENOTDIR);
}
{
struct stat st;
CHECK(lstat("path/file/impossible", &st) == -1);
printf("info: errno=%d %s\n", error, strerror(error));
CHECK(error == ENOTDIR);
}
return fail;
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained(r'''pass: mkdir("path", 0777) == 0
pass: close(open("path/file", O_CREAT | O_WRONLY, 0644)) == 0
pass: stat("path", &st) == 0
pass: st.st_mode = 0777
pass: stat("path/nosuchfile", &st) == -1
info: errno=44 No such file or directory
pass: error == ENOENT
pass: stat("path/file", &st) == 0
pass: st.st_mode = 0666
pass: stat("path/file/impossible", &st) == -1
info: errno=54 Not a directory
pass: error == ENOTDIR
pass: lstat("path/file/impossible", &st) == -1
info: errno=54 Not a directory
pass: error == ENOTDIR
''', run_js('a.out.js'))
def test_link_with_a_static(self):
create_test_file('x.c', r'''
int init_weakref(int a, int b) {
return a + b;
}
''')
create_test_file('y.c', r'''
static int init_weakref(void) { // inlined in -O2, not in -O0 where it shows up in llvm-nm as 't'
return 150;
}
int testy(void) {
return init_weakref();
}
''')
create_test_file('z.c', r'''
extern int init_weakref(int, int);
extern int testy(void);
int main(void) {
return testy() + init_weakref(5, 6);
}
''')
run_process([EMCC, 'x.c', '-o', 'x.o'])
run_process([EMCC, 'y.c', '-o', 'y.o'])
run_process([EMCC, 'z.c', '-o', 'z.o'])
try_delete('libtest.a')
run_process([EMAR, 'rc', 'libtest.a', 'y.o'])
run_process([EMAR, 'rc', 'libtest.a', 'x.o'])
run_process([EMRANLIB, 'libtest.a'])
for args in [[], ['-O2']]:
print('args:', args)
run_process([EMCC, 'z.o', 'libtest.a', '-s', 'EXIT_RUNTIME=1'] + args)
run_js('a.out.js', assert_returncode=161)
def test_link_with_bad_o_in_a(self):
    # when linking with a .a, we force-include all the objects inside it, but
    # some may not be valid bitcode, e.g. if an entry contains metadata or
    # something else weird; we should just ignore those
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.c'), '-o', 'hello_world.o'])
create_test_file('bad.obj', 'this is not a good file, it should be ignored!')
run_process([LLVM_AR, 'cr', 'libfoo.a', 'hello_world.o', 'bad.obj'])
run_process([EMCC, 'libfoo.a'])
self.assertContained('hello, world!', run_js('a.out.js'))
def test_require(self):
inname = path_from_root('tests', 'hello_world.c')
building.emcc(inname, args=['-s', 'ASSERTIONS=0'], output_filename='a.out.js')
output = run_process(NODE_JS + ['-e', 'require("./a.out.js")'], stdout=PIPE, stderr=PIPE)
assert output.stdout == 'hello, world!\n' and output.stderr == '', 'expected no output, got\n===\nSTDOUT\n%s\n===\nSTDERR\n%s\n===\n' % (output.stdout, output.stderr)
def test_require_modularize(self):
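    # MODULARIZE=1 wraps the output in a factory exported via module.exports
    # (under EXPORT_NAME), so require() yields a callable that instantiates the
    # module; requiring it twice must keep working.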
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MODULARIZE=1', '-s', 'ASSERTIONS=0'])
src = open('a.out.js').read()
self.assertContained('module.exports = Module;', src)
output = run_process(NODE_JS + ['-e', 'var m = require("./a.out.js"); m();'], stdout=PIPE, stderr=PIPE)
self.assertFalse(output.stderr)
self.assertEqual(output.stdout, 'hello, world!\n')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="NotModule"', '-s', 'ASSERTIONS=0'])
src = open('a.out.js').read()
self.assertContained('module.exports = NotModule;', src)
output = run_process(NODE_JS + ['-e', 'var m = require("./a.out.js"); m();'], stdout=PIPE, stderr=PIPE)
self.assertFalse(output.stderr)
self.assertEqual(output.stdout, 'hello, world!\n')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MODULARIZE=1'])
    # We call require() twice to ensure it returns a wrapper function each time
output = run_process(NODE_JS + ['-e', 'require("./a.out.js")();var m = require("./a.out.js"); m();'], stdout=PIPE, stderr=PIPE)
self.assertFalse(output.stderr)
self.assertEqual(output.stdout, 'hello, world!\nhello, world!\n')
def test_define_modularize(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MODULARIZE=1', '-s', 'ASSERTIONS=0'])
with open('a.out.js') as f:
src = 'var module = 0; ' + f.read()
create_test_file('a.out.js', src)
assert "define([], function() { return Module; });" in src
output = run_process(NODE_JS + ['-e', 'var m; (global.define = function(deps, factory) { m = factory(); }).amd = true; require("./a.out.js"); m();'], stdout=PIPE, stderr=PIPE)
assert output.stdout == 'hello, world!\n' and output.stderr == '', 'expected output, got\n===\nSTDOUT\n%s\n===\nSTDERR\n%s\n===\n' % (output.stdout, output.stderr)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="NotModule"', '-s', 'ASSERTIONS=0'])
with open('a.out.js') as f:
src = 'var module = 0; ' + f.read()
create_test_file('a.out.js', src)
assert "define([], function() { return NotModule; });" in src
output = run_process(NODE_JS + ['-e', 'var m; (global.define = function(deps, factory) { m = factory(); }).amd = true; require("./a.out.js"); m();'], stdout=PIPE, stderr=PIPE)
assert output.stdout == 'hello, world!\n' and output.stderr == '', 'expected output, got\n===\nSTDOUT\n%s\n===\nSTDERR\n%s\n===\n' % (output.stdout, output.stderr)
def test_EXPORT_NAME_with_html(self):
result = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'a.html', '-s', 'EXPORT_NAME=Other'], stdout=PIPE, check=False, stderr=STDOUT)
self.assertNotEqual(result.returncode, 0)
self.assertContained('Customizing EXPORT_NAME requires that the HTML be customized to use that name', result.stdout)
@no_wasm_backend('tests fastcomp specific passes')
def test_emcc_c_multi(self):
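    # passing several .c files in one emcc invocation should run the optimizer
    # once per input (observable via -v), and the resulting .o files should
    # link and run normally.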
def test(args, llvm_opts=None):
print(args)
lib = r'''
int mult() { return 1; }
'''
lib_name = 'libA.c'
create_test_file(lib_name, lib)
main = r'''
#include <stdio.h>
int mult();
int main() {
printf("result: %d\n", mult());
return 0;
}
'''
main_name = 'main.c'
create_test_file(main_name, main)
err = run_process([EMCC, '-v', '-c', main_name, lib_name] + args, stderr=PIPE).stderr
VECTORIZE = '-disable-loop-vectorization'
if args:
assert err.count(VECTORIZE) == 2, err # specified twice, once per file
# corresponding to exactly once per invocation of optimizer
assert err.count(os.path.sep + 'opt') == 2, err
else:
assert err.count(VECTORIZE) == 0, err # no optimizations
run_process([EMCC, main_name.replace('.c', '.o'), lib_name.replace('.c', '.o')])
self.assertContained('result: 1', run_js('a.out.js'))
test([])
test(['-O2'], '-O3')
test(['-Oz'], '-Oz')
test(['-Os'], '-Os')
def test_export_all_3142(self):
create_test_file('src.cpp', r'''
typedef unsigned int Bit32u;
struct S_Descriptor {
Bit32u limit_0_15 :16;
Bit32u base_0_15 :16;
Bit32u base_16_23 :8;
};
class Descriptor
{
public:
Descriptor() { saved.fill[0]=saved.fill[1]=0; }
union {
S_Descriptor seg;
Bit32u fill[2];
} saved;
};
Descriptor desc;
''')
try_delete('a.out.js')
run_process([EMCC, 'src.cpp', '-O2', '-s', 'EXPORT_ALL'])
self.assertExists('a.out.js')
@no_wasm_backend('tests PRECISE_F32=1')
def test_f0(self):
run_process([EMCC, path_from_root('tests', 'fasta.cpp'), '-O2', '-s', 'PRECISE_F32=1', '-profiling', '-s', 'WASM=0'])
src = open('a.out.js').read()
assert ' = f0;' in src or ' = f0,' in src
def test_emmake_emconfigure(self):
def check(what, args, fail=True, expect=''):
args = [what] + args
print(what, args, fail, expect)
output = run_process(args, stdout=PIPE, stderr=PIPE, check=False)
assert ('is a helper for' in output.stderr) == fail
assert ('Typical usage' in output.stderr) == fail
self.assertContained(expect, output.stdout)
check(emmake, [])
check(emconfigure, [])
check(emmake, ['--version'])
check(emconfigure, ['--version'])
check(emmake, ['make'], fail=False)
check(emconfigure, ['configure'], fail=False)
check(emconfigure, ['./configure'], fail=False)
check(emcmake, ['cmake'], fail=False)
create_test_file('test.py', '''
import os
print(os.environ.get('CROSS_COMPILE'))
''')
check(emconfigure, [PYTHON, 'test.py'], expect=path_from_root('em'), fail=False)
check(emmake, [PYTHON, 'test.py'], expect=path_from_root('em'), fail=False)
create_test_file('test.py', '''
import os
print(os.environ.get('NM'))
''')
check(emconfigure, [PYTHON, 'test.py'], expect=shared.LLVM_NM, fail=False)
def test_emmake_python(self):
    # simulates a configure/make script that looks for things like CC, AR, etc.; we
    # should not confuse it by setting those vars to something containing `python X`,
    # since the script checks for the existence of an executable.
run_process([emmake, PYTHON, path_from_root('tests', 'emmake', 'make.py')])
def test_sdl2_config(self):
for args, expected in [
[['--version'], '2.0.0'],
[['--cflags'], '-s USE_SDL=2'],
[['--libs'], '-s USE_SDL=2'],
[['--cflags', '--libs'], '-s USE_SDL=2'],
]:
print(args, expected)
out = run_process([PYTHON, path_from_root('system', 'bin', 'sdl2-config')] + args, stdout=PIPE, stderr=PIPE).stdout
assert expected in out, out
print('via emmake')
out = run_process([emmake, 'sdl2-config'] + args, stdout=PIPE, stderr=PIPE).stdout
assert expected in out, out
def test_module_onexit(self):
create_test_file('src.cpp', r'''
#include <emscripten.h>
int main() {
EM_ASM({
Module['onExit'] = function(status) { out('exiting now, status ' + status) };
});
return 14;
}
''')
try_delete('a.out.js')
run_process([EMCC, 'src.cpp', '-s', 'EXIT_RUNTIME=1'])
self.assertContained('exiting now, status 14', run_js('a.out.js', assert_returncode=14))
def test_NO_aliasing(self):
# the NO_ prefix flips boolean options
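    # e.g. -s NO_EXIT_RUNTIME=0 parses as -s EXIT_RUNTIME=1, so the first two
    # builds below must be byte-identical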
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EXIT_RUNTIME=1'])
exit_1 = open('a.out.js').read()
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'NO_EXIT_RUNTIME=0'])
no_exit_0 = open('a.out.js').read()
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EXIT_RUNTIME=0'])
exit_0 = open('a.out.js').read()
assert exit_1 == no_exit_0
assert exit_1 != exit_0
def test_underscore_exit(self):
create_test_file('src.cpp', r'''
#include <unistd.h>
int main() {
_exit(0); // should not end up in an infinite loop with non-underscore exit
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('', run_js('a.out.js', assert_returncode=0))
def test_file_packager_huge(self):
MESSAGE = 'warning: file packager is creating an asset bundle of 257 MB. this is very large, and browsers might have trouble loading it'
create_test_file('huge.dat', 'a' * (1024 * 1024 * 257))
create_test_file('tiny.dat', 'a')
err = run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'tiny.dat'], stdout=PIPE, stderr=PIPE).stderr
self.assertNotContained(MESSAGE, err)
err = run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'huge.dat'], stdout=PIPE, stderr=PIPE).stderr
self.assertContained(MESSAGE, err)
self.clear()
def test_massive_alloc(self):
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
volatile int x = (int)malloc(1024 * 1024 * 1400);
return x == 0; // can't alloc it, but don't fail catastrophically, expect null
}
''')
run_process([EMCC, 'main.cpp', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'WASM=0'])
# just care about message regarding allocating over 1GB of memory
output = run_js('a.out.js', stderr=PIPE, full_output=True)
if self.is_wasm_backend():
self.assertContained('''Warning: Enlarging memory arrays, this is not fast! 16777216,1473314816\n''', output)
else:
self.assertContained('''Warning: Enlarging memory arrays, this is not fast! 16777216,1476395008\n''', output)
print('wasm')
run_process([EMCC, 'main.cpp', '-s', 'ALLOW_MEMORY_GROWTH=1'])
# no message about growth, just check return code
run_js('a.out.js', stderr=PIPE, full_output=True)
def test_failing_alloc(self):
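    # when memory cannot grow, behavior hinges on ABORTING_MALLOC and
    # ALLOW_MEMORY_GROWTH: aborting builds must abort with actionable advice,
    # while non-aborting builds must return NULL so the program can free
    # memory and retry, without corrupting DYNAMICTOP.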
for pre_fail, post_fail, opts in [
('', '', []),
('EM_ASM( Module.temp = HEAP32[DYNAMICTOP_PTR>>2] );', 'EM_ASM( assert(Module.temp === HEAP32[DYNAMICTOP_PTR>>2], "must not adjust DYNAMICTOP when an alloc fails!") );', []),
# also test non-wasm in normal mode
('', '', ['-s', 'WASM=0']),
('EM_ASM( Module.temp = HEAP32[DYNAMICTOP_PTR>>2] );', 'EM_ASM( assert(Module.temp === HEAP32[DYNAMICTOP_PTR>>2], "must not adjust DYNAMICTOP when an alloc fails!") );', ['-s', 'WASM=0']),
]:
for growth in [0, 1]:
for aborting_args in [[], ['-s', 'ABORTING_MALLOC=0'], ['-s', 'ABORTING_MALLOC=1']]:
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <assert.h>
#include <emscripten.h>
#define CHUNK_SIZE (10 * 1024 * 1024)
int main() {
std::vector<void*> allocs;
bool has = false;
while (1) {
printf("trying an allocation\n");
%s
void* curr = malloc(CHUNK_SIZE);
if (!curr) {
%s
break;
}
has = true;
printf("allocated another chunk, %%zu so far\n", allocs.size());
allocs.push_back(curr);
}
assert(has);
printf("an allocation failed!\n");
#ifdef SPLIT
return 0;
#endif
while (1) {
assert(allocs.size() > 0);
void *curr = allocs.back();
allocs.pop_back();
free(curr);
printf("freed one\n");
if (malloc(CHUNK_SIZE)) break;
}
printf("managed another malloc!\n");
}
''' % (pre_fail, post_fail))
args = [EMCC, 'main.cpp'] + opts + aborting_args
          args += ['-s', 'TEST_MEMORY_GROWTH_FAILS=1'] # In this test, force memory growth to fail
if growth:
args += ['-s', 'ALLOW_MEMORY_GROWTH=1']
# growth disables aborting by default, but it can be overridden
aborting = 'ABORTING_MALLOC=1' in aborting_args or (not aborting_args and not growth)
print('test_failing_alloc', args, pre_fail)
run_process(args)
# growth also disables aborting
can_manage_another = not aborting
split = '-DSPLIT' in args
print('can manage another:', can_manage_another, 'split:', split, 'aborting:', aborting)
output = run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=0 if can_manage_another else None)
if can_manage_another:
self.assertContained('an allocation failed!\n', output)
if not split:
# split memory allocation may fail due to GC objects no longer being allocatable,
# and we can't expect to recover from that deterministically. So just check we
# get to the fail.
# otherwise, we should fail eventually, then free, then succeed
self.assertContained('managed another malloc!\n', output)
else:
# we should see an abort
self.assertContained('abort(Cannot enlarge memory arrays', output)
if growth:
# when growth is enabled, the default is to not abort, so just explain that
self.assertContained('If you want malloc to return NULL (0) instead of this abort, do not link with -s ABORTING_MALLOC=1', output)
else:
# when growth is not enabled, suggest 3 possible solutions (start with more memory, allow growth, or don't abort)
self.assertContained(('higher than the current value 16777216,', 'higher than the current value 33554432,'), output)
self.assertContained('compile with -s ALLOW_MEMORY_GROWTH=1 ', output)
self.assertContained('compile with -s ABORTING_MALLOC=0 ', output)
def test_failing_growth_2gb(self):
create_test_file('test.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
void* out;
int main() {
while (1) {
puts("loop...");
out = malloc(1024 * 1024);
if (!out) {
puts("done");
return 0;
}
}
}
''')
run_process([EMCC, '-O1', 'test.cpp', '-s', 'ALLOW_MEMORY_GROWTH'])
self.assertContained('done', run_js('a.out.js'))
def test_libcxx_minimal(self):
create_test_file('vector.cpp', r'''
#include <vector>
int main(int argc, char** argv) {
std::vector<void*> v;
for (int i = 0 ; i < argc; i++) {
v.push_back(nullptr);
}
return v.size();
}
''')
run_process([EMCC, '-O2', 'vector.cpp', '-o', 'vector.js'])
run_process([EMCC, '-O2', path_from_root('tests', 'hello_libcxx.cpp'), '-o', 'iostream.js'])
vector = os.path.getsize('vector.js')
iostream = os.path.getsize('iostream.js')
print(vector, iostream)
self.assertGreater(vector, 1000)
# we can strip out almost all of libcxx when just using vector
self.assertLess(2.25 * vector, iostream)
@no_wasm_backend('relies on EMULATED_FUNCTION_POINTERS')
def test_emulated_function_pointers(self):
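    # in asm.js, EMULATED_FUNCTION_POINTERS keeps the function tables outside
    # the asm module, so they become visible to (and patchable from) EM_ASM
    # code; otherwise no FUNCTION_TABLE_* is reachable from JS.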
create_test_file('src.c', r'''
#include <emscripten.h>
typedef void (*fp)();
int main(int argc, char **argv) {
volatile fp f = 0;
EM_ASM({
if (typeof FUNCTION_TABLE_v !== 'undefined') {
out('function table: ' + FUNCTION_TABLE_v);
} else {
out('no visible function tables');
}
});
if (f) f();
return 0;
}
''')
def test(args, expected):
print(args, expected)
run_process([EMCC, 'src.c', '-s', 'WASM=0'] + args, stderr=PIPE)
self.assertContained(expected, run_js('a.out.js'))
for opts in [0, 1, 2, 3]:
test(['-O' + str(opts)], 'no visible function tables')
test(['-O' + str(opts), '-s', 'EMULATED_FUNCTION_POINTERS=1'], 'function table: ')
@no_wasm_backend('relies on EMULATED_FUNCTION_POINTERS')
def test_emulated_function_pointers_2(self):
create_test_file('src.c', r'''
#include <emscripten.h>
typedef void (*fp)();
static void one() { EM_ASM( out('one') ); }
static void two() { EM_ASM( out('two') ); }
void test() {
volatile fp f = one;
f();
f = two;
f();
}
int main(int argc, char **argv) {
test();
// swap them!
EM_ASM_INT({
var one = $0;
var two = $1;
if (typeof FUNCTION_TABLE_v === 'undefined') {
out('no');
return;
}
var temp = FUNCTION_TABLE_v[one];
FUNCTION_TABLE_v[one] = FUNCTION_TABLE_v[two];
FUNCTION_TABLE_v[two] = temp;
}, (int)&one, (int)&two);
test();
return 0;
}
''')
flipped = 'one\ntwo\ntwo\none\n'
unchanged = 'one\ntwo\none\ntwo\n'
no_table = 'one\ntwo\nno\none\ntwo\n'
def test(args, expected):
print(args, expected.replace('\n', ' '))
run_process([EMCC, 'src.c', '-s', 'WASM=0'] + args)
self.assertContained(expected, run_js('a.out.js'))
for opts in [0, 1, 2]:
test(['-O' + str(opts)], no_table)
test(['-O' + str(opts), '-s', 'EMULATED_FUNCTION_POINTERS=1'], flipped)
test(['-O' + str(opts), '-s', 'EMULATED_FUNCTION_POINTERS=2'], flipped)
test(['-O' + str(opts), '-s', 'EMULATED_FUNCTION_POINTERS=1', '-s', 'RELOCATABLE=1'], flipped)
test(['-O' + str(opts), '-s', 'EMULATED_FUNCTION_POINTERS=2', '-s', 'RELOCATABLE=1'], unchanged) # with both of those, we optimize and you cannot flip them
test(['-O' + str(opts), '-s', 'MAIN_MODULE=1'], unchanged) # default for modules is optimized
test(['-O' + str(opts), '-s', 'MAIN_MODULE=1', '-s', 'EMULATED_FUNCTION_POINTERS=2'], unchanged)
test(['-O' + str(opts), '-s', 'MAIN_MODULE=1', '-s', 'EMULATED_FUNCTION_POINTERS=1'], flipped) # but you can disable that
def test_minimal_dynamic(self):
def run(wasm):
print('wasm?', wasm)
library_file = 'library.wasm' if wasm else 'library.js'
def test(main_args, library_args=[], expected='hello from main\nhello from library'):
print('testing', main_args, library_args)
self.clear()
create_test_file('library.c', r'''
#include <stdio.h>
void library_func() {
#ifdef USE_PRINTF
printf("hello from library: %p\n", &library_func);
#else
puts("hello from library");
#endif
}
''')
# -fno-builtin to prevent printf -> iprintf optimization
run_process([EMCC, 'library.c', '-fno-builtin', '-s', 'SIDE_MODULE=1', '-O2', '-o', library_file, '-s', 'WASM=' + str(wasm), '-s', 'EXPORT_ALL'] + library_args)
create_test_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
int main() {
puts("hello from main");
void *lib_handle = dlopen("%s", 0);
if (!lib_handle) {
puts("cannot load side module");
return 1;
}
typedef void (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x) puts("cannot find side function");
else x();
}
''' % library_file)
run_process([EMCC, 'main.c', '--embed-file', library_file, '-O2', '-s', 'WASM=' + str(wasm)] + main_args)
self.assertContained(expected, run_js('a.out.js', assert_returncode=None, stderr=STDOUT))
size = os.path.getsize('a.out.js')
if wasm:
size += os.path.getsize('a.out.wasm')
side_size = os.path.getsize(library_file)
print(' sizes:', size, side_size)
return (size, side_size)
def percent_diff(x, y):
small = min(x, y)
large = max(x, y)
return float(100 * large) / small - 100
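# Quick sanity illustration (always true): percent_diff is symmetric and
# reports how much larger the bigger value is relative to the smaller,
# e.g. 100 vs 104 is a 4% difference in either argument order.
assert percent_diff(100, 104) == percent_diff(104, 100) == 4.0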
full = test(main_args=['-s', 'MAIN_MODULE=1'])
# printf is not used in main, but libc was linked in, so it's there
printf = test(main_args=['-s', 'MAIN_MODULE=1'], library_args=['-DUSE_PRINTF'])
# main module tests
# dce in main, and it fails since puts is not exported
dce = test(main_args=['-s', 'MAIN_MODULE=2'], expected=('cannot', 'undefined'))
# with exporting, it works
dce = test(main_args=['-s', 'MAIN_MODULE=2', '-s', 'EXPORTED_FUNCTIONS=["_main", "_puts"]'])
# printf is not used in main, and we dce, so we fail
dce_fail = test(main_args=['-s', 'MAIN_MODULE=2'], library_args=['-DUSE_PRINTF'], expected=('cannot', 'undefined'))
# exporting printf in main keeps it alive for the library
dce_save = test(main_args=['-s', 'MAIN_MODULE=2', '-s', 'EXPORTED_FUNCTIONS=["_main", "_printf", "_puts"]'], library_args=['-DUSE_PRINTF'])
self.assertLess(percent_diff(full[0], printf[0]), 4)
self.assertLess(percent_diff(dce[0], dce_fail[0]), 4)
self.assertLess(dce[0], 0.2 * full[0]) # big effect, 80%+ is gone
self.assertGreater(dce_save[0], 1.05 * dce[0]) # exporting printf kept all of it alive, growing the main module
# side module tests
# mode 2, so dce in side, but library_func is not exported, so it is dce'd
side_dce_fail = test(main_args=['-s', 'MAIN_MODULE=1'], library_args=['-s', 'SIDE_MODULE=2'], expected='cannot find side function')
# mode 2, so dce in side, but library_func is exported, so it is kept
side_dce_work = test(main_args=['-s', 'MAIN_MODULE=1'], library_args=['-s', 'SIDE_MODULE=2', '-s', 'EXPORTED_FUNCTIONS=["_library_func"]'], expected='hello from library')
self.assertLess(side_dce_fail[1], 0.95 * side_dce_work[1]) # removing that function saves a chunk
run(wasm=1)
if not self.is_wasm_backend():
run(wasm=0)
def test_ld_library_path(self):
create_test_file('hello1.c', r'''
#include <stdio.h>
void
hello1 ()
{
printf ("Hello1\n");
return;
}
''')
create_test_file('hello2.c', r'''
#include <stdio.h>
void
hello2 ()
{
printf ("Hello2\n");
return;
}
''')
create_test_file('hello3.c', r'''
#include <stdio.h>
void
hello3 ()
{
printf ("Hello3\n");
return;
}
''')
create_test_file('hello4.c', r'''
#include <stdio.h>
#include <math.h>
double
hello4 (double x)
{
printf ("Hello4\n");
return fmod(x, 2.0);
}
''')
create_test_file('pre.js', r'''
Module['preRun'].push(function (){
ENV['LD_LIBRARY_PATH']='/lib:/usr/lib';
});
''')
create_test_file('main.c', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dlfcn.h>
int
main()
{
void *h;
void (*f) ();
double (*f2) (double);
h = dlopen ("libhello1.wasm", RTLD_NOW);
f = dlsym (h, "hello1");
f();
dlclose (h);
h = dlopen ("libhello2.wasm", RTLD_NOW);
f = dlsym (h, "hello2");
f();
dlclose (h);
h = dlopen ("libhello3.wasm", RTLD_NOW);
f = dlsym (h, "hello3");
f();
dlclose (h);
h = dlopen ("/usr/local/lib/libhello4.wasm", RTLD_NOW);
f2 = dlsym (h, "hello4");
double result = f2(5.5);
dlclose (h);
if (result == 1.5) {
printf("Ok\n");
}
return 0;
}
''')
run_process([EMCC, '-o', 'libhello1.wasm', 'hello1.c', '-s', 'SIDE_MODULE=1', '-s', 'EXPORT_ALL=1'])
run_process([EMCC, '-o', 'libhello2.wasm', 'hello2.c', '-s', 'SIDE_MODULE=1', '-s', 'EXPORT_ALL=1'])
run_process([EMCC, '-o', 'libhello3.wasm', 'hello3.c', '-s', 'SIDE_MODULE=1', '-s', 'EXPORT_ALL=1'])
run_process([EMCC, '-o', 'libhello4.wasm', 'hello4.c', '-s', 'SIDE_MODULE=1', '-s', 'EXPORT_ALL=1'])
run_process([EMCC, '-o', 'main.js', 'main.c', '-s', 'MAIN_MODULE=1', '-s', 'INITIAL_MEMORY=' + str(32 * 1024 * 1024),
'--embed-file', 'libhello1.wasm@/lib/libhello1.wasm',
'--embed-file', 'libhello2.wasm@/usr/lib/libhello2.wasm',
'--embed-file', 'libhello3.wasm@/libhello3.wasm',
'--embed-file', 'libhello4.wasm@/usr/local/lib/libhello4.wasm',
'--pre-js', 'pre.js'])
out = run_js('main.js')
self.assertContained('Hello1', out)
self.assertContained('Hello2', out)
self.assertContained('Hello3', out)
self.assertContained('Hello4', out)
self.assertContained('Ok', out)
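# A minimal sketch (an assumption about behavior, not the runtime's actual
# implementation) of the lookup order the test above relies on: a bare
# dlopen() name is tried against each LD_LIBRARY_PATH entry in turn, while
# an absolute path such as /usr/local/lib/libhello4.wasm bypasses the search.
def resolve_dlopen_name(name, ld_library_path, exists):
if name.startswith('/'):
return name # absolute paths are used as-is
for d in ld_library_path.split(':'):
candidate = d.rstrip('/') + '/' + name
if exists(candidate):
return candidate
return None
embedded = {'/lib/libhello1.wasm', '/usr/lib/libhello2.wasm'}
assert resolve_dlopen_name('libhello1.wasm', '/lib:/usr/lib', embedded.__contains__) == '/lib/libhello1.wasm'
assert resolve_dlopen_name('libhello2.wasm', '/lib:/usr/lib', embedded.__contains__) == '/usr/lib/libhello2.wasm'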
def test_dlopen_rtld_global(self):
# This test checks RTLD_GLOBAL where a module is loaded
# before the module that provides a global it needs. In asm.js we use JS
# to create a redirection function. In wasm we just have wasm, so we
# need to introspect the wasm module. Browsers may add that eventually,
# or we could ship a little library that does it.
create_test_file('hello1.c', r'''
#include <stdio.h>
extern int hello1_val;
int hello1_val=3;
void
hello1 (int i)
{
printf ("hello1_val by hello1:%d\n",hello1_val);
printf ("Hello%d\n",i);
}
''')
create_test_file('hello2.c', r'''
#include <stdio.h>
extern int hello1_val;
extern void hello1 (int);
void
hello2 (int i)
{
void (*f) (int);
printf ("hello1_val by hello2:%d\n",hello1_val);
f = hello1;
f(i);
}
''')
create_test_file('main.c', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dlfcn.h>
int
main(int argc,char** argv)
{
void *h;
void *h2;
void (*f) (int);
h = dlopen ("libhello1.wasm", RTLD_NOW|RTLD_GLOBAL);
h2 = dlopen ("libhello2.wasm", RTLD_NOW|RTLD_GLOBAL);
f = dlsym (h, "hello1");
f(1);
f = dlsym (h2, "hello2");
f(2);
dlclose (h);
dlclose (h2);
return 0;
}
''')
run_process([EMCC, '-o', 'libhello1.js', 'hello1.c', '-s', 'SIDE_MODULE=1', '-s', 'EXPORT_ALL=1'])
run_process([EMCC, '-o', 'libhello2.js', 'hello2.c', '-s', 'SIDE_MODULE=1', '-s', 'EXPORT_ALL=1'])
run_process([EMCC, '-o', 'main.js', 'main.c', '-s', 'MAIN_MODULE=1',
'--embed-file', 'libhello1.wasm',
'--embed-file', 'libhello2.wasm'])
out = run_js('main.js')
self.assertContained('Hello1', out)
self.assertContained('Hello2', out)
self.assertContained('hello1_val by hello1:3', out)
self.assertContained('hello1_val by hello2:3', out)
@no_fastcomp()
def test_main_module_without_exceptions_message(self):
# A side module that needs exceptions needs a main module with that
# support enabled; show a clear message in that case.
create_test_file('side.cpp', r'''
#include <exception>
#include <stdio.h>
extern "C" void test_throw() {
try {
throw 42;
} catch(int x) {
printf("catch %d.\n", x);
return;
}
puts("bad location");
}
''')
create_test_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dlfcn.h>
typedef void (*voidf)();
int main() {
void* h = dlopen ("libside.wasm", RTLD_NOW|RTLD_GLOBAL);
assert(h);
voidf f = (voidf)dlsym(h, "test_throw");
assert(f);
f();
return 0;
}
''')
run_process([EMCC, '-o', 'libside.wasm', 'side.cpp', '-s', 'SIDE_MODULE=1', '-fexceptions'])
def build_main(args):
print(args)
with env_modify({'EMCC_FORCE_STDLIBS': 'libc++abi'}):
run_process([EMCC, 'main.cpp', '-s', 'MAIN_MODULE=1',
'--embed-file', 'libside.wasm'] + args)
build_main([])
out = run_js('a.out.js', assert_returncode=None, stderr=STDOUT)
self.assertContained('Exception catching is disabled, this exception cannot be caught.', out)
self.assertContained('note: in dynamic linking, if a side module wants exceptions, the main module must be built with that support', out)
build_main(['-fexceptions'])
out = run_js('a.out.js')
self.assertContained('catch 42', out)
def test_debug_asmLastOpts(self):
create_test_file('src.c', r'''
#include <stdio.h>
struct Dtlink_t
{ struct Dtlink_t* right; /* right child */
union
{ unsigned int _hash; /* hash value */
struct Dtlink_t* _left; /* left child */
} hl;
};
int treecount(register struct Dtlink_t* e)
{
return e ? treecount(e->hl._left) + treecount(e->right) + 1 : 0;
}
int main() {
printf("hello, world!\n");
}
''')
run_process([EMCC, 'src.c', '-s', 'EXPORTED_FUNCTIONS=["_main", "_treecount"]', '--minify', '0', '-g4', '-Oz'])
self.assertContained('hello, world!', run_js('a.out.js'))
@no_wasm_backend('MEM_INIT_METHOD not supported under wasm')
def test_meminit_crc(self):
create_test_file('src.c', r'''
#include <stdio.h>
int main() { printf("Mary had a little lamb.\n"); }
''')
run_process([EMCC, 'src.c', '--memory-init-file', '0', '-s', 'MEM_INIT_METHOD=2', '-s', 'ASSERTIONS=1', '-s', 'WASM=0'])
with open('a.out.js') as f:
d = f.read()
self.assertContained('Mary had', d)
d = d.replace('Mary had', 'Paul had')
create_test_file('a.out.js', d)
out = run_js('a.out.js', assert_returncode=None, stderr=STDOUT)
self.assertContained('Assertion failed: memory initializer checksum', out)
def test_emscripten_print_double(self):
create_test_file('src.c', r'''
#include <stdio.h>
#include <assert.h>
#include <emscripten.h>
void test(double d) {
char buffer[100], buffer2[100];
unsigned len, len2, len3;
len = emscripten_print_double(d, NULL, -1);
len2 = emscripten_print_double(d, buffer, len+1);
assert(len == len2);
buffer[len] = 0;
len3 = snprintf(buffer2, 100, "%g", d);
printf("|%g : %u : %s : %s : %d|\n", d, len, buffer, buffer2, len3);
}
int main() {
printf("\n");
test(0);
test(1);
test(-1);
test(1.234);
test(-1.234);
test(1.1234E20);
test(-1.1234E20);
test(1.1234E-20);
test(-1.1234E-20);
test(1.0/0.0);
test(-1.0/0.0);
}
''')
run_process([EMCC, 'src.c'])
out = run_js('a.out.js')
self.assertContained('''
|0 : 1 : 0 : 0 : 1|
|1 : 1 : 1 : 1 : 1|
|-1 : 2 : -1 : -1 : 2|
|1.234 : 5 : 1.234 : 1.234 : 5|
|-1.234 : 6 : -1.234 : -1.234 : 6|
|1.1234e+20 : 21 : 112340000000000000000 : 1.1234e+20 : 10|
|-1.1234e+20 : 22 : -112340000000000000000 : -1.1234e+20 : 11|
|1.1234e-20 : 10 : 1.1234e-20 : 1.1234e-20 : 10|
|-1.1234e-20 : 11 : -1.1234e-20 : -1.1234e-20 : 11|
|inf : 8 : Infinity : inf : 3|
|-inf : 9 : -Infinity : -inf : 4|
''', out)
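# The C code above uses the common two-call idiom: first call with a NULL
# buffer to measure the required length, then call again with a buffer of
# len+1 bytes. A rough Python analogue of that contract, for illustration
# only — '%g' is a stand-in; the real function matches JS number
# stringification, which differs at large magnitudes, as the expected
# output above shows:
def print_double_like(value, buf_len=None):
s = '%g' % value # hypothetical stand-in for the actual formatting
if buf_len is None:
return len(s) # measuring call: report length, write nothing
return s[:buf_len] # writing call: fill up to buf_len characters
n = print_double_like(1.234) # == 5
assert print_double_like(1.234, n + 1) == '1.234'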
def test_emscripten_scan_stack(self):
create_test_file('src.cpp', r'''
#include <set>
#include <emscripten.h>
#include <stdio.h>
#include <assert.h>
std::set<int> seenInts;
void scan(void* x, void* y) {
printf("scan\n");
int* p = (int*)x;
int* q = (int*)y;
// The callback sends us the [low, high) range.
assert(p < q);
// The range is of a reasonable size - not all of memory.
assert(q - p < 100);
while (p < q) {
seenInts.insert(*p);
p++;
}
}
int main() {
int x;
int* y = &x;
*y = 12345678;
emscripten_scan_stack(scan);
assert(seenInts.count(12345678));
puts("ok");
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('ok', run_js('a.out.js'))
def test_no_warn_exported_jslibfunc(self):
err = run_process([EMCC, path_from_root('tests', 'hello_world.c'),
'-s', 'DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=["alGetError"]',
'-s', 'EXPORTED_FUNCTIONS=["_main", "_alGetError"]'], stderr=PIPE).stderr
self.assertNotContained('function requested to be exported, but not implemented: "_alGetError"', err)
@no_wasm_backend()
def test_almost_asm_warning(self):
def run(args, expected):
print(args, expected)
err = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0'] + args, stderr=PIPE).stderr
if expected:
self.assertContained('[-Walmost-asm]', err)
else:
self.assertEqual(err, '')
run(['-O1', '-s', 'ALLOW_MEMORY_GROWTH=1'], True) # default
# suppress almost-asm warning manually
run(['-O1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-Wno-almost-asm'], False)
# last warning flag should "win"
run(['-O1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-Wno-almost-asm', '-Walmost-asm'], True)
def test_musl_syscalls(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
src = open('a.out.js').read()
# there should be no musl syscalls in hello world output
self.assertNotContained('__syscall', src)
@no_windows('posix-only')
def test_emcc_dev_null(self):
out = run_process([EMCC, '-dM', '-E', '-x', 'c', '/dev/null'], stdout=PIPE).stdout
self.assertContained('#define __EMSCRIPTEN__ 1', out) # all our defines should show up
def test_umask_0(self):
create_test_file('src.c', r'''
#include <sys/stat.h>
#include <stdio.h>
int main() {
umask(0);
printf("hello, world!\n");
}''')
run_process([EMCC, 'src.c'])
self.assertContained('hello, world!', run_js('a.out.js'))
def test_no_missing_symbols(self): # simple hello world should not show any missing symbols
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
# main() is implemented in C, and even if requested from JS, we should not warn
create_test_file('library_foo.js', '''
mergeInto(LibraryManager.library, {
my_js__deps: ['main'],
my_js: (function() {
return function() {
console.log("hello " + _nonexistingvariable);
};
}()),
});
''')
create_test_file('test.cpp', '''
#include <stdio.h>
#include <stdlib.h>
extern "C" {
extern void my_js();
}
int main() {
my_js();
return EXIT_SUCCESS;
}
''')
run_process([EMCC, 'test.cpp', '--js-library', 'library_foo.js'])
# but we do error on a missing js var
create_test_file('library_foo_missing.js', '''
mergeInto(LibraryManager.library, {
my_js__deps: ['main', 'nonexistingvariable'],
my_js: (function() {
return function() {
console.log("hello " + _nonexistingvariable);
};
}()),
});
''')
err = self.expect_fail([EMCC, 'test.cpp', '--js-library', 'library_foo_missing.js'])
self.assertContained('undefined symbol: nonexistingvariable', err)
# and also for missing C code, of course (without the --js-library, it's just a missing C method)
err = self.expect_fail([EMCC, 'test.cpp'])
self.assertContained('undefined symbol: my_js', err)
@no_fastcomp('fastcomp links in memset in JS in a hackish way')
def test_js_lib_to_system_lib(self):
# memset is in compiled code, so a js library __deps can't access it. It
# would need to be in deps_info.json or EXPORTED_FUNCTIONS
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
depper__deps: ['memset'],
depper: function(ptr) {
_memset(ptr, 'd'.charCodeAt(0), 10);
},
});
''')
create_test_file('test.cpp', r'''
#include <string.h>
#include <stdio.h>
extern "C" {
extern void depper(char*);
}
int main(int argc, char** argv) {
char buffer[11];
buffer[10] = '\0';
// call by a pointer, to force linking of memset, no llvm intrinsic here
volatile auto ptr = memset;
(*ptr)(buffer, 'a', 10);
depper(buffer);
puts(buffer);
}
''')
err = self.expect_fail([EMCC, 'test.cpp', '--js-library', 'lib.js'])
self.assertContained('_memset may need to be added to EXPORTED_FUNCTIONS if it arrives from a system library', err)
# without the dep, and with EXPORTED_FUNCTIONS, it works ok
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
depper: function(ptr) {
_memset(ptr, 'd'.charCodeAt(0), 10);
},
});
''')
run_process([EMCC, 'test.cpp', '--js-library', 'lib.js', '-s', 'EXPORTED_FUNCTIONS=[_main,_memset]'])
self.assertContained('dddddddddd', run_js('a.out.js'))
def test_realpath(self):
create_test_file('src.c', r'''
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#define TEST_PATH "/boot/README.txt"
int
main(int argc, char **argv)
{
errno = 0;
char *t_realpath_buf = realpath(TEST_PATH, NULL);
if (NULL == t_realpath_buf) {
perror("Resolve failed");
return 1;
} else {
printf("Resolved: %s\n", t_realpath_buf);
free(t_realpath_buf);
return 0;
}
}
''')
ensure_dir('boot')
create_test_file(os.path.join('boot', 'README.txt'), ' ')
run_process([EMCC, 'src.c', '--embed-file', 'boot'])
self.assertContained('Resolved: /boot/README.txt', run_js('a.out.js'))
def test_realpath_nodefs(self):
create_test_file('src.c', r'''
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <emscripten.h>
#define TEST_PATH "/working/TEST_NODEFS.txt"
int
main(int argc, char **argv)
{
errno = 0;
EM_ASM({
FS.mkdir('/working');
FS.mount(NODEFS, { root: '.' }, '/working');
});
char *t_realpath_buf = realpath(TEST_PATH, NULL);
if (NULL == t_realpath_buf) {
perror("Resolve failed");
return 1;
} else {
printf("Resolved: %s\n", t_realpath_buf);
free(t_realpath_buf);
return 0;
}
}
''')
create_test_file('TEST_NODEFS.txt', ' ')
run_process([EMCC, 'src.c', '-lnodefs.js'])
self.assertContained('Resolved: /working/TEST_NODEFS.txt', run_js('a.out.js'))
def test_realpath_2(self):
ensure_dir('Folder')
create_test_file('src.c', r'''
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
int testrealpath(const char* path) {
errno = 0;
char *t_realpath_buf = realpath(path, NULL);
if (NULL == t_realpath_buf) {
printf("Resolve failed: \"%s\"\n",path);fflush(stdout);
return 1;
} else {
printf("Resolved: \"%s\" => \"%s\"\n", path, t_realpath_buf);fflush(stdout);
free(t_realpath_buf);
return 0;
}
}
int main(int argc, char **argv)
{
// files:
testrealpath("testfile.txt");
testrealpath("Folder/testfile.txt");
testrealpath("testnonexistentfile.txt");
// folders
testrealpath("Folder");
testrealpath("/Folder");
testrealpath("./");
testrealpath("");
testrealpath("/");
return 0;
}
''')
create_test_file('testfile.txt', '')
create_test_file(os.path.join('Folder', 'testfile.txt'), '')
run_process([EMCC, 'src.c', '--embed-file', 'testfile.txt', '--embed-file', 'Folder'])
self.assertContained('''Resolved: "testfile.txt" => "/testfile.txt"
Resolved: "Folder/testfile.txt" => "/Folder/testfile.txt"
Resolve failed: "testnonexistentfile.txt"
Resolved: "Folder" => "/Folder"
Resolved: "/Folder" => "/Folder"
Resolved: "./" => "/"
Resolve failed: ""
Resolved: "/" => "/"
''', run_js('a.out.js'))
def test_no_warnings(self):
# build once before to make sure system libs etc. exist
run_process([EMCC, path_from_root('tests', 'hello_libcxx.cpp')])
# check that there is nothing in stderr for a regular compile
err = run_process([EMCC, path_from_root('tests', 'hello_libcxx.cpp')], stderr=PIPE).stderr
self.assertEqual(err, '')
@no_wasm_backend("llvm-lto is fastcomp only flag")
def test_llvm_lto(self):
sizes = {}
lto_levels = [0, 1, 2, 3]
for lto in lto_levels:
cmd = [EMCC, path_from_root('tests', 'hello_libcxx.cpp'), '-O2', '--llvm-lto', str(lto)]
if self.is_wasm_backend():
cmd += ['-flto']
print(cmd)
run_process(cmd)
self.assertContained('hello, world!', run_js('a.out.js'))
sizes[lto] = os.path.getsize('a.out.wasm')
print(sizes)
# LTO sizes should be distinct
assert len(set(sizes.values())) == len(lto_levels), 'each LTO level should produce a distinct size: %s' % sizes
# LTO should reduce code size
# Skip mode 2 because it has historically increased code size (though not always)
self.assertLess(sizes[1], sizes[0])
if not self.is_wasm_backend():
self.assertLess(sizes[3], sizes[0])
def test_dlmalloc_modes(self):
create_test_file('src.cpp', r'''
#include <stdlib.h>
#include <stdio.h>
int main() {
void* c = malloc(1024);
free(c);
free(c);
printf("double-freed\n");
}
''')
run_process([EMCC, 'src.cpp'])
self.assertContained('double-freed', run_js('a.out.js'))
# in debug mode, the double-free is caught
run_process([EMCC, 'src.cpp', '-s', 'ASSERTIONS=2'])
seen_error = False
out = '?'
try:
out = run_js('a.out.js')
except Exception:
seen_error = True
self.assertTrue(seen_error, out)
def test_mallocs(self):
def run(opts):
print(opts)
sizes = {}
for malloc, name in (
('dlmalloc', 'dlmalloc'),
(None, 'default'),
('emmalloc', 'emmalloc')
):
print(malloc, name)
cmd = [EMCC, path_from_root('tests', 'hello_libcxx.cpp'), '-o', 'a.out.js'] + opts
if malloc:
cmd += ['-s', 'MALLOC="%s"' % malloc]
print(cmd)
run_process(cmd)
sizes[name] = os.path.getsize('a.out.wasm')
print(sizes)
# dlmalloc is the default
self.assertEqual(sizes['dlmalloc'], sizes['default'])
# emmalloc is much smaller
self.assertLess(sizes['emmalloc'], sizes['dlmalloc'] - 5000)
run([])
run(['-O2'])
@no_fastcomp("fastcomp doesn't support 2GB+")
def test_emmalloc_2GB(self):
def test(args, text=None):
if text:
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MALLOC=emmalloc'] + args)
self.assertContained(text, stderr)
else:
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MALLOC=emmalloc'] + args)
test(['-s', 'INITIAL_MEMORY=2GB'], 'INITIAL_MEMORY must be less than 2GB due to current spec limitations')
# emmalloc allows growth by default (as the max size is fine), but not if
# a too-high max is set
test(['-s', 'ALLOW_MEMORY_GROWTH'])
test(['-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=1GB'])
test(['-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=3GB'], 'emmalloc only works on <2GB of memory. Use the default allocator, or decrease MAXIMUM_MEMORY')
@no_fastcomp("fastcomp doesn't support 2GB+")
def test_2GB_plus(self):
# when the heap size can be over 2GB, we rewrite pointers to be unsigned
def test(page_diff):
args = [EMCC, path_from_root('tests', 'hello_world.c'), '-O2', '-s', 'ALLOW_MEMORY_GROWTH']
if page_diff is not None:
args += ['-s', 'MAXIMUM_MEMORY=%d' % (2**31 + page_diff * 64 * 1024)]
print(args)
run_process(args)
return os.path.getsize('a.out.js')
less = test(-1)
equal = test(0)
more = test(1)
none = test(None)
# exactly 2GB still doesn't require unsigned pointers, as we can't address
# the 2GB location in memory
self.assertEqual(less, equal)
self.assertLess(equal, more)
# not specifying maximum memory does not result in unsigned pointers, as the
# default maximum memory is 2GB.
self.assertEqual(less, none)
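# Background sketch: with heaps over 2GB, addresses above 2^31 no longer fit
# in a signed 32-bit int, so pointers must be reinterpreted as unsigned (in
# JS, `ptr >>> 0`). The same coercion in Python, for illustration:
def to_unsigned32(ptr):
return ptr & 0xFFFFFFFF
assert to_unsigned32(-2147483648) == 2147483648 # 0x80000000, the 2GB boundary
assert to_unsigned32(-1) == 4294967295 # highest addressable byte in a 4GB space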
@no_fastcomp('depends on wasm-emscripten-finalize')
@parameterized({
'normal': (['-s', 'WASM_BIGINT=0'], 'testbind.js'),
'bigint': (['-s', 'WASM_BIGINT=1'], 'testbind_bigint.js'),
})
def test_sixtyfour_bit_return_value(self, args, bind_js):
# This test checks that the most significant 32 bits of a 64 bit long are correctly made available
# to native JavaScript applications that wish to interact with compiled code returning 64 bit longs.
# The MS 32 bits should be available in Runtime.getTempRet0() even when compiled with -O2 --closure 1
# Compile test.c and wrap it in a native JavaScript binding so we can call our compiled function from JS.
run_process([EMCC, path_from_root('tests', 'return64bit', 'test.c'),
'--pre-js', path_from_root('tests', 'return64bit', 'testbindstart.js'),
'--pre-js', path_from_root('tests', 'return64bit', bind_js),
'--post-js', path_from_root('tests', 'return64bit', 'testbindend.js'),
'-s', 'EXPORTED_FUNCTIONS=["_test_return64"]', '-o', 'test.js', '-O2',
'--closure', '1', '-g1', '-s', 'WASM_ASYNC_COMPILATION=0'] + args)
# Simple test program to load the test.js binding library and call the binding to the
# C function returning the 64 bit long.
create_test_file('testrun.js', '''
var test = require("./test.js");
test.runtest();
''')
# Run the test and confirm the output is as expected.
out = run_js('testrun.js', engine=NODE_JS + ['--experimental-wasm-bigint'])
self.assertContained('''\
input = 0xaabbccdd11223344
low = 5678
high = 1234
input = 0xabcdef1912345678
low = 5678
high = 1234
''', out)
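# For context: the 64-bit return convention splits the value into 32-bit
# halves — the low half is returned normally and the high half is fetched
# via getTempRet0(). A sketch of reassembling them (the masks assume two's
# complement 32-bit halves):
def combine_i64(low, high):
return ((high & 0xFFFFFFFF) << 32) | (low & 0xFFFFFFFF)
assert combine_i64(low=5678, high=1234) == (1234 << 32) | 5678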
def test_lib_include_flags(self):
run_process([EMCC] + '-l m -l c -I'.split() + [path_from_root('tests', 'include_test'), path_from_root('tests', 'lib_include_flags.c')])
def test_dash_s(self):
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s'])
self.assertContained('hello, world!', run_js('a.out.js'))
def test_dash_s_response_file_string(self):
create_test_file('response_file', '"MyModule"\n')
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'EXPORT_NAME=@response_file'])
def test_dash_s_response_file_list(self):
create_test_file('response_file', '["_main", "_malloc"]\n')
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'EXPORTED_FUNCTIONS=@response_file'])
def test_dash_s_response_file_missing(self):
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'EXPORTED_FUNCTIONS=@foo'])
self.assertContained('error: foo: file not found parsing argument: EXPORTED_FUNCTIONS=@foo', err)
def test_dash_s_unclosed_quote(self):
# Unclosed quote
err = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), "-s", "TEST_KEY='MISSING_QUOTE"], stderr=PIPE, check=False).stderr
self.assertNotContained('AssertionError', err) # Do not mention that it is an assertion error
self.assertContained('unclosed opened quoted string. expected final character to be "\'"', err)
def test_dash_s_single_quote(self):
# Only one quote
err = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), "-s", "TEST_KEY='"], stderr=PIPE, check=False).stderr
self.assertNotContained('AssertionError', err) # Do not mention that it is an assertion error
self.assertContained('unclosed opened quoted string.', err)
def test_dash_s_unclosed_list(self):
# Unclosed list
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), "-s", "TEST_KEY=[Value1, Value2"])
self.assertNotContained('AssertionError', err) # Do not mention that it is an assertion error
self.assertContained('unclosed opened string list. expected final character to be "]"', err)
def test_dash_s_valid_list(self):
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), "-s", "TEST_KEY=[Value1, \"Value2\"]"])
self.assertNotContained('a problem occurred in evaluating the content after a "-s", specifically', err)
def test_dash_s_wrong_type(self):
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'EXPORTED_FUNCTIONS=foo'])
self.assertContained("error: setting `EXPORTED_FUNCTIONS` expects `<class 'list'>` but got `<class 'str'>`", err)
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'EXIT_RUNTIME=[foo,bar]'])
self.assertContained("error: setting `EXIT_RUNTIME` expects `<class 'int'>` but got `<class 'list'>`", err)
def test_dash_s_typo(self):
# with suggestions
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'DISABLE_EXCEPTION_CATCH=1'])
self.assertContained("Attempt to set a non-existent setting: 'DISABLE_EXCEPTION_CATCH'", stderr)
self.assertContained('did you mean one of DISABLE_EXCEPTION_CATCHING', stderr)
# no suggestions
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'CHEEZ=1'])
self.assertContained("perhaps a typo in emcc\'s -s X=Y notation?", stderr)
self.assertContained('(see src/settings.js for valid values)', stderr)
# suggestions do not include renamed legacy settings
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'ZBINARYEN_ASYNC_COMPILATION'])
self.assertContained("Attempt to set a non-existent setting: 'ZBINARYEN_ASYNC_COMPILATION'", stderr)
self.assertNotContained(' BINARYEN_ASYNC_COMPILATION', stderr)
def test_python_2_3(self):
# check emcc/em++ can be called by any python
def trim_py_suffix(filename):
"""remove .py from EMCC(=emcc.py)"""
return filename[:-3] if filename.endswith('.py') else filename
def run(python):
if python == 'python3':
has = is_python3_version_supported()
else:
has = shared.which(python) is not None
print(python, has)
if has:
print(' checking emcc.py...')
run_process([python, path_from_root('emcc.py'), '--version'], stdout=PIPE)
print(' checking em++.py...')
run_process([python, path_from_root('em++.py'), '--version'], stdout=PIPE)
run('python')
run('python2')
run('python3')
def test_zeroinit(self):
create_test_file('src.c', r'''
#include <stdio.h>
int buf[1048576];
int main() {
printf("hello, world! %d\n", buf[123456]);
return 0;
}
''')
run_process([EMCC, 'src.c', '-O2', '-g'])
size = os.path.getsize('a.out.wasm')
# size should be much smaller than the size of that zero-initialized buffer
self.assertLess(size, 123456 / 2)
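# Why the size check works: the 4MB buffer (1048576 ints * 4 bytes) is
# zero-initialized, so it lives in BSS and is materialized at runtime rather
# than stored byte-for-byte in a.out.wasm; the binary therefore stays far
# below the buffer's own size.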
@no_wasm_backend('asm.js')
def test_separate_asm_warning(self):
# Test that -s PRECISE_F32=2 emits a warning that --separate-asm is implied.
stderr = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '-s', 'PRECISE_F32=2', '-o', 'a.html'], stderr=PIPE).stderr
self.assertContained('forcing separate asm output', stderr)
# Test that -s PRECISE_F32=2 --separate-asm does not emit that warning.
stderr = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '-s', 'PRECISE_F32=2', '-o', 'a.html', '--separate-asm'], stderr=PIPE).stderr
self.assertNotContained('forcing separate asm output', stderr)
# Test that -s PRECISE_F32=1 does not emit a warning.
stderr = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '-s', 'PRECISE_F32=1', '-o', 'a.html'], stderr=PIPE).stderr
self.assertNotContained('forcing separate asm output', stderr)
# Manually doing separate asm should show a warning, if not targeting html
warning = '--separate-asm works best when compiling to HTML'
stderr = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '--separate-asm'], stderr=PIPE).stderr
self.assertContained(warning, stderr)
stderr = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '--separate-asm', '-o', 'a.html'], stderr=PIPE).stderr
self.assertNotContained(warning, stderr)
# test that the warning can be suppressed
stderr = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '--separate-asm', '-Wno-separate-asm'], stderr=PIPE).stderr
self.assertNotContained(warning, stderr)
def test_canonicalize_nan_warning(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
union U {
int x;
float y;
} a;
int main() {
a.x = 0x7FC01234;
printf("%f\n", a.y);
printf("0x%x\n", a.x);
return 0;
}
''')
stderr = run_process([EMCC, 'src.cpp', '-O1'], stderr=PIPE).stderr
if not self.is_wasm_backend():
self.assertContained("emcc: warning: cannot represent a NaN literal", stderr)
stderr = run_process([EMCC, 'src.cpp', '-O1', '-g'], stderr=PIPE).stderr
self.assertContained("emcc: warning: cannot represent a NaN literal", stderr)
self.assertContained('//@line 12 "src.cpp"', stderr)
else:
out = run_js('a.out.js')
self.assertContained('nan\n', out)
self.assertContained('0x7fc01234\n', out)
@no_wasm_backend('tests our python linking logic')
def test_link_response_file_does_not_force_absolute_paths(self):
with_space = 'with space'
ensure_dir(with_space)
create_test_file(os.path.join(with_space, 'main.cpp'), '''
int main() {
return 0;
}
''')
building.emcc(os.path.join(with_space, 'main.cpp'), ['-g'])
with chdir(with_space):
link_args = building.link(['main.cpp.o'], 'all.bc', just_calculate=True)
time.sleep(0.2) # Wait for Windows FS to release access to the directory
shutil.rmtree(with_space)
# We want only the relative path to be in the linker args, it should not be converted to an absolute path.
if hasattr(self, 'assertCountEqual'):
self.assertCountEqual(link_args, ['main.cpp.o'])
else:
# Python 2 compatibility
self.assertItemsEqual(link_args, ['main.cpp.o'])
def test_memory_growth_noasm(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-O2', '-s', 'ALLOW_MEMORY_GROWTH=1'])
src = open('a.out.js').read()
assert 'use asm' not in src
def test_EM_ASM_i64(self):
create_test_file('src.cpp', '''
#include <stdint.h>
#include <emscripten.h>
int main() {
EM_ASM({
out('inputs: ' + $0 + ', ' + $1 + '.');
}, int64_t(0x12345678ABCDEF1FLL));
}
''')
stderr = self.expect_fail([EMCC, 'src.cpp', '-Oz'])
if not self.is_wasm_backend():
self.assertContained('EM_ASM should not receive i64s as inputs, they are not valid in JS', stderr)
def test_eval_ctors_non_terminating(self):
for wasm in (1, 0):
if self.is_wasm_backend() and not wasm:
continue
print('wasm', wasm)
src = r'''
struct C {
C() {
volatile int y = 0;
while (y == 0) {}
}
};
C always;
int main() {}
'''
create_test_file('src.cpp', src)
run_process([EMCC, 'src.cpp', '-O2', '-s', 'EVAL_CTORS=1', '-profiling-funcs', '-s', 'WASM=%d' % wasm])
@no_wasm_backend('EVAL_CTORS is monolithic with the wasm backend')
def test_eval_ctors(self):
for wasm in (1, 0):
if self.is_wasm_backend() and not wasm:
continue
print('wasm', wasm)
print('check no ctors is ok')
# on by default in -Oz, but user-overridable
def get_size(args):
print('get_size', args)
run_process([EMCC, path_from_root('tests', 'hello_libcxx.cpp'), '-s', 'WASM=%d' % wasm] + args)
self.assertContained('hello, world!', run_js('a.out.js'))
if wasm:
codesize = self.count_wasm_contents('a.out.wasm', 'funcs')
memsize = self.count_wasm_contents('a.out.wasm', 'memory-data')
else:
codesize = os.path.getsize('a.out.js')
memsize = os.path.getsize('a.out.js.mem')
return (codesize, memsize)
def check_size(left, right):
# can't measure just the mem out of the wasm, so ignore [1] for wasm
if left[0] == right[0] and left[1] == right[1]:
return 0
if left[0] < right[0] and left[1] > right[1]:
return -1 # smaller code, bigger mem
if left[0] > right[0] and left[1] < right[1]:
return 1
assert False, [left, right]
o2_size = get_size(['-O2'])
assert check_size(get_size(['-O2']), o2_size) == 0, 'deterministic'
assert check_size(get_size(['-O2', '-s', 'EVAL_CTORS=1']), o2_size) < 0, 'eval_ctors works if user asks for it'
oz_size = get_size(['-Oz'])
assert check_size(get_size(['-Oz']), oz_size) == 0, 'deterministic'
assert check_size(get_size(['-Oz', '-s', 'EVAL_CTORS=1']), oz_size) == 0, 'eval_ctors is on by default in oz'
assert check_size(get_size(['-Oz', '-s', 'EVAL_CTORS=0']), oz_size) == 1, 'eval_ctors can be turned off'
linkable_size = get_size(['-Oz', '-s', 'EVAL_CTORS=1', '-s', 'LINKABLE=1'])
assert check_size(get_size(['-Oz', '-s', 'EVAL_CTORS=0', '-s', 'LINKABLE=1']), linkable_size) == 1, 'noticeable difference in linkable too'
def test_eval_ctor_ordering(self):
# ensure order of execution remains correct, even with a bad ctor
wasm = 1 # the helper below builds with WASM=%d; only the wasm path is exercised here
def test(p1, p2, p3, last, expected):
src = r'''
#include <stdio.h>
#include <stdlib.h>
volatile int total = 0;
struct C {
C(int x) {
volatile int y = x;
y++;
y--;
if (y == 0xf) {
printf("you can't eval me ahead of time\n"); // bad ctor
}
total <<= 4;
total += int(y);
}
};
C __attribute__((init_priority(%d))) c1(0x5);
C __attribute__((init_priority(%d))) c2(0x8);
C __attribute__((init_priority(%d))) c3(%d);
int main() {
printf("total is 0x%%x.\n", total);
}
''' % (p1, p2, p3, last)
create_test_file('src.cpp', src)
run_process([EMCC, 'src.cpp', '-O2', '-s', 'EVAL_CTORS=1', '-profiling-funcs', '-s', 'WASM=%d' % wasm])
self.assertContained('total is %s.' % hex(expected), run_js('a.out.js'))
shutil.copyfile('a.out.js', 'x' + hex(expected) + '.js')
if wasm:
shutil.copyfile('a.out.wasm', 'x' + hex(expected) + '.wasm')
return self.count_wasm_contents('a.out.wasm', 'funcs')
else:
return open('a.out.js').read().count('function _')
print('no bad ctor')
first = test(1000, 2000, 3000, 0xe, 0x58e) # noqa
second = test(3000, 1000, 2000, 0xe, 0x8e5) # noqa
third = test(2000, 3000, 1000, 0xe, 0xe58) # noqa
print(first, second, third)
assert first == second and second == third
print('with bad ctor')
first = test(1000, 2000, 3000, 0xf, 0x58f) # noqa; 2 will succeed
second = test(3000, 1000, 2000, 0xf, 0x8f5) # noqa; 1 will succeed
third = test(2000, 3000, 1000, 0xf, 0xf58) # noqa; 0 will succeed
print(first, second, third)
assert first < second and second < third, [first, second, third]
@uses_canonical_tmp
@with_env_modify({'EMCC_DEBUG': '1'})
def test_eval_ctors_debug_output(self):
for wasm in (1, 0):
print('wasm', wasm)
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
external_thing: function() {}
});
''')
create_test_file('src.cpp', r'''
extern "C" void external_thing();
struct C {
C() { external_thing(); } // don't remove this!
};
C c;
int main() {}
''')
err = run_process([EMCC, 'src.cpp', '--js-library', 'lib.js', '-Oz', '-s', 'WASM=%d' % wasm], stderr=PIPE).stderr
if self.is_wasm_backend():
# disabled in the wasm backend
self.assertContained('Ctor evalling in the wasm backend is disabled', err)
self.assertNotContained('ctor_evaller: not successful', err) # with logging
else:
self.assertContained('external_thing', err) # the failing call should be mentioned
if not wasm and not self.is_wasm_backend(): # asm.js will show a stack trace
self.assertContained('ctorEval.js', err) # with a stack trace
self.assertContained('ctor_evaller: not successful', err) # with logging
def test_override_js_execution_environment(self):
create_test_file('main.cpp', r'''
#include <emscripten.h>
int main() {
EM_ASM({
out('environment is WEB? ' + ENVIRONMENT_IS_WEB);
out('environment is WORKER? ' + ENVIRONMENT_IS_WORKER);
out('environment is NODE? ' + ENVIRONMENT_IS_NODE);
out('environment is SHELL? ' + ENVIRONMENT_IS_SHELL);
});
}
''')
# use SINGLE_FILE since we don't want to depend on loading a side .wasm file on the environment in this test;
# with the wrong env we have very odd failures
run_process([EMCC, 'main.cpp', '-s', 'SINGLE_FILE=1'])
src = open('a.out.js').read()
envs = ['web', 'worker', 'node', 'shell']
for env in envs:
for engine in JS_ENGINES:
if engine == V8_ENGINE:
continue # ban v8, weird failures
actual = 'NODE' if engine == NODE_JS else 'SHELL'
print(env, actual, engine)
module = {'ENVIRONMENT': env}
if env != actual:
# avoid problems with arguments detection, which may cause very odd failures with the wrong environment code
module['arguments'] = []
curr = 'var Module = %s;\n' % str(module)
print(' ' + curr)
create_test_file('test.js', curr + src)
seen = run_js('test.js', engine=engine, stderr=PIPE, full_output=True, assert_returncode=None)
self.assertContained('Module.ENVIRONMENT has been deprecated. To force the environment, use the ENVIRONMENT compile-time option (for example, -s ENVIRONMENT=web or -s ENVIRONMENT=node', seen)
def test_override_c_environ(self):
create_test_file('pre.js', r'''
var Module = {
preRun: [function() { ENV.hello = 'world' }]
};
''')
create_test_file('src.cpp', r'''
#include <stdlib.h>
#include <stdio.h>
int main() {
printf("|%s|\n", getenv("hello"));
}
''')
run_process([EMCC, 'src.cpp', '--pre-js', 'pre.js'])
self.assertContained('|world|', run_js('a.out.js'))
create_test_file('pre.js', r'''
var Module = {
preRun: [function(module) { module.ENV.hello = 'world' }]
};
''')
run_process([EMCC, 'src.cpp', '--pre-js', 'pre.js', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["ENV"]'])
self.assertContained('|world|', run_js('a.out.js'))
run_process([EMCC, 'src.cpp', '--pre-js', 'pre.js', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["ENV"]', '-s', 'MODULARIZE=1'])
output = run_process(NODE_JS + ['-e', 'require("./a.out.js")();'], stdout=PIPE, stderr=PIPE)
self.assertContained('|world|', output.stdout)
def test_warn_no_filesystem(self):
WARNING = 'Filesystem support (FS) was not included. The problem is that you are using files from JS, but files were not used from C/C++, so filesystem support was not auto-included. You can force-include filesystem support with -s FORCE_FILESYSTEM=1'
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
seen = run_js('a.out.js', stderr=PIPE)
assert WARNING not in seen
def test(contents):
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({ %s });
printf("hello, world!\n");
return 0;
}
''' % contents)
run_process([EMCC, 'src.cpp'])
self.assertContained(WARNING, run_js('a.out.js', stderr=PIPE, assert_returncode=None))
# might appear in handwritten code
test("FS.init()")
test("FS.createPreloadedFile('waka waka, just warning check')")
test("FS.createDataFile('waka waka, just warning check')")
test("FS.analyzePath('waka waka, just warning check')")
test("FS.loadFilesFromDB('waka waka, just warning check')")
# might appear in filesystem code from a separate script tag
test("Module['FS_createDataFile']('waka waka, just warning check')")
test("Module['FS_createPreloadedFile']('waka waka, just warning check')")
# text is in the source when needed, but when forcing FS, it isn't there
run_process([EMCC, 'src.cpp'])
self.assertContained(WARNING, open('a.out.js').read())
run_process([EMCC, 'src.cpp', '-s', 'FORCE_FILESYSTEM=1']) # forcing FS means no need
self.assertNotContained(WARNING, open('a.out.js').read())
run_process([EMCC, 'src.cpp', '-s', 'ASSERTIONS=0']) # no assertions, no need
self.assertNotContained(WARNING, open('a.out.js').read())
run_process([EMCC, 'src.cpp', '-O2']) # optimized, so no assertions
self.assertNotContained(WARNING, open('a.out.js').read())
def test_warn_module_print_err(self):
ERROR = 'was not exported. add it to EXTRA_EXPORTED_RUNTIME_METHODS (see the FAQ)'
def test(contents, expected, args=[]):
create_test_file('src.cpp', r'''
#include <emscripten.h>
int main() {
EM_ASM({ %s });
return 0;
}
''' % contents)
run_process([EMCC, 'src.cpp'] + args)
self.assertContained(expected, run_js('a.out.js', stderr=STDOUT, assert_returncode=None))
# error shown (when assertions are on)
test("Module.print('x')", ERROR)
test("Module['print']('x')", ERROR)
test("Module.printErr('x')", ERROR)
test("Module['printErr']('x')", ERROR)
# when exported, all good
test("Module['print']('print'); Module['printErr']('err'); ", 'print\nerr', ['-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print", "printErr"]'])
def test_warn_unexported_main(self):
WARNING = 'main() is in the input files, but "_main" is not in EXPORTED_FUNCTIONS, which means it may be eliminated as dead code. Export it if you want main() to run.'
proc = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EXPORTED_FUNCTIONS=[]'], stderr=PIPE)
self.assertContained(WARNING, proc.stderr)
############################################################
# Function eliminator tests
############################################################
def normalize_line_endings(self, input):
# normalize CRLF to LF, then collapse runs of blank lines; applying the
# newline replace twice collapses runs of up to four newlines into one
return input.replace('\r\n', '\n').replace('\n\n', '\n').replace('\n\n', '\n')
def get_file_contents(self, file):
with open(file) as fout:
file_contents = fout.read()
return self.normalize_line_endings(file_contents)
def function_eliminator_test_helper(self, input_file, expected_output_file, use_hash_info=False):
input_file = path_from_root('tests', 'optimizer', input_file)
expected_output_file = path_from_root('tests', 'optimizer', expected_output_file)
command = [path_from_root('tools', 'eliminate-duplicate-functions.js'), input_file, '--no-minimize-whitespace', '--use-asm-ast']
if use_hash_info:
command.append('--use-hash-info')
proc = run_process(NODE_JS + command, stdin=PIPE, stderr=PIPE, stdout=PIPE)
assert proc.stderr == '', proc.stderr
expected_output = self.get_file_contents(expected_output_file)
output = self.normalize_line_endings(proc.stdout)
self.assertIdentical(expected_output, output)
def test_function_eliminator_simple(self):
self.function_eliminator_test_helper('test-function-eliminator-simple.js',
'test-function-eliminator-simple-output.js')
def test_function_eliminator_replace_function_call(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-function-call.js',
'test-function-eliminator-replace-function-call-output.js')
def test_function_eliminator_replace_function_call_two_passes(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-function-call-output.js',
'test-function-eliminator-replace-function-call-two-passes-output.js')
def test_function_eliminator_replace_array_value(self):
output_file = 'output.js'
try:
shared.safe_copy(path_from_root('tests', 'optimizer', 'test-function-eliminator-replace-array-value.js'), output_file)
tools.duplicate_function_eliminator.run(output_file)
output_file_contents = self.get_file_contents(output_file)
expected_file_contents = self.get_file_contents(path_from_root('tests', 'optimizer', 'test-function-eliminator-replace-array-value-output.js'))
self.assertIdentical(expected_file_contents, output_file_contents)
finally:
tools.tempfiles.try_delete(output_file)
def test_function_eliminator_replace_object_value_assignment(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-object-value-assignment.js',
'test-function-eliminator-replace-object-value-assignment-output.js')
def test_function_eliminator_variable_clash(self):
self.function_eliminator_test_helper('test-function-eliminator-variable-clash.js',
'test-function-eliminator-variable-clash-output.js')
def test_function_eliminator_replace_variable_value(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-variable-value.js',
'test-function-eliminator-replace-variable-value-output.js')
@no_wasm_backend('tests native asm.js optimizer, which is never built for wasm backend')
def test_function_eliminator_double_parsed_correctly(self):
# This is a test that makes sure that when we perform final optimization on
# the JS file, doubles are preserved (and not converted to ints).
output_file = 'output.js'
try:
shared.safe_copy(path_from_root('tests', 'optimizer', 'test-function-eliminator-double-parsed-correctly.js'), output_file)
# Run duplicate function elimination
tools.duplicate_function_eliminator.run(output_file)
# Run last opts
shutil.move(tools.js_optimizer.run(output_file, ['last', 'asm']), output_file)
output_file_contents = self.get_file_contents(output_file)
# Compare
expected_file_contents = self.get_file_contents(path_from_root('tests', 'optimizer', 'test-function-eliminator-double-parsed-correctly-output.js'))
self.assertIdentical(expected_file_contents, output_file_contents)
finally:
tools.tempfiles.try_delete(output_file)
# Now do the same, but using a pre-generated equivalent function hash info that
# comes in handy for parallel processing
def test_function_eliminator_simple_with_hash_info(self):
self.function_eliminator_test_helper('test-function-eliminator-simple-with-hash-info.js',
'test-function-eliminator-simple-output.js',
use_hash_info=True)
def test_function_eliminator_replace_function_call_with_hash_info(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-function-call-with-hash-info.js',
'test-function-eliminator-replace-function-call-output.js',
use_hash_info=True)
def test_function_eliminator_replace_function_call_two_passes_with_hash_info(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-function-call-output-with-hash-info.js',
'test-function-eliminator-replace-function-call-two-passes-output.js',
use_hash_info=True)
def test_function_eliminator_replace_object_value_assignment_with_hash_info(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-object-value-assignment-with-hash-info.js',
'test-function-eliminator-replace-object-value-assignment-output.js',
use_hash_info=True)
def test_function_eliminator_variable_clash_with_hash_info(self):
self.function_eliminator_test_helper('test-function-eliminator-variable-clash-with-hash-info.js',
'test-function-eliminator-variable-clash-output.js',
use_hash_info=True)
def test_function_eliminator_replace_variable_value_with_hash_info(self):
self.function_eliminator_test_helper('test-function-eliminator-replace-variable-value-with-hash-info.js',
'test-function-eliminator-replace-variable-value-output.js',
use_hash_info=True)
@no_wasm_backend('uses CYBERDWARF')
def test_cyberdwarf_pointers(self):
run_process([EMCC, path_from_root('tests', 'debugger', 'test_pointers.cpp'), '-Oz', '-s', 'CYBERDWARF=1',
'--pre-js', path_from_root('tests', 'debugger', 'test_preamble.js'), '-o', 'test_pointers.js'])
run_js('test_pointers.js')
@no_wasm_backend('uses CYBERDWARF')
def test_cyberdwarf_union(self):
run_process([EMCC, path_from_root('tests', 'debugger', 'test_union.cpp'), '-Oz', '-s', 'CYBERDWARF=1',
'--pre-js', path_from_root('tests', 'debugger', 'test_preamble.js'), '-o', 'test_union.js'])
run_js('test_union.js')
def test_source_file_with_fixed_language_mode(self):
create_test_file('src_tmp_fixed_lang', '''
#include <string>
#include <iostream>
int main() {
std::cout << "Test_source_fixed_lang_hello" << std::endl;
return 0;
}
''')
run_process([EMCC, '-Wall', '-x', 'c++', 'src_tmp_fixed_lang'])
self.assertContained("Test_source_fixed_lang_hello", run_js('a.out.js'))
stderr = self.expect_fail([EMCC, '-Wall', 'src_tmp_fixed_lang'])
self.assertContained("Input file has an unknown suffix, don't know what to do with it!", stderr)
def test_disable_inlining(self):
create_test_file('test.c', r'''
#include <stdio.h>
void foo() {
printf("foo\n");
}
int main() {
foo();
return 0;
}
''')
# Without the 'INLINING_LIMIT=1', -O2 inlines foo()
cmd = [EMCC, 'test.c', '-O2', '-o', 'test.bc', '-s', 'INLINING_LIMIT=1']
if self.is_wasm_backend():
cmd += ['-flto']
run_process(cmd)
# If foo() had been wrongly inlined above, internalizing foo and running
# global DCE makes foo DCE'd
building.llvm_opt('test.bc', ['-internalize', '-internalize-public-api-list=main', '-globaldce'], 'test2.bc')
# For this test to be successful, foo() shouldn't have been inlined above and
# foo() should be in the function list
syms = building.llvm_nm('test2.bc', include_internal=True)
assert 'foo' in syms.defs, 'foo() should not be inlined'
@no_wasm_backend('--separate-asm')
def test_output_eol(self):
# --separate-asm only makes sense without wasm (no asm.js with wasm)
for params in [[], ['--separate-asm', '-s', 'WASM=0'], ['--proxy-to-worker'], ['--proxy-to-worker', '--separate-asm', '-s', 'WASM=0']]:
for output_suffix in ['html', 'js']:
for eol in ['windows', 'linux']:
files = ['a.js']
if '--separate-asm' in params:
files += ['a.asm.js']
if output_suffix == 'html':
files += ['a.html']
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'a.' + output_suffix, '--output_eol', eol] + params
run_process(cmd)
for f in files:
print(str(cmd) + ' ' + str(params) + ' ' + eol + ' ' + f)
assert os.path.isfile(f)
if eol == 'linux':
expected_ending = '\n'
else:
expected_ending = '\r\n'
ret = tools.line_endings.check_line_endings(f, expect_only=expected_ending)
assert ret == 0
for f in files:
try_delete(f)
@no_wasm_backend('asm2wasm specific')
@uses_canonical_tmp
def test_binaryen_opts(self):
with env_modify({'EMCC_DEBUG': '1'}):
for args, expect_js_opts, expect_wasm_opts, expect_only_wasm in [
([], False, False, True),
(['-O0'], False, False, True),
(['-O1'], False, True, True),
(['-O2'], False, True, True),
(['-O2', '--js-opts', '1'], True, True, False), # user asked
(['-O2', '-s', 'EVAL_CTORS=1'], False, True, True), # ctor evaller turned off since only-wasm
(['-O3'], False, True, True),
(['-Os'], False, True, True),
(['-Oz'], False, True, True), # ctor evaller turned off since only-wasm
]:
try_delete('a.out.js')
try_delete('a.out.wasm')
try_delete('a.out.wat')
cmd = [EMCC, path_from_root('tests', 'core', 'test_i64.c')] + args
print(args, 'js opts:', expect_js_opts, 'only-wasm:', expect_only_wasm, ' ', ' '.join(cmd))
err = run_process(cmd, stdout=PIPE, stderr=PIPE).stderr
assert expect_js_opts == ('applying js optimization passes:' in err), err
if not self.is_wasm_backend():
assert expect_only_wasm == ('-emscripten-only-wasm' in err and '--wasm-only' in err), err # check both flag to fastcomp and to asm2wasm
wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), 'a.out.wasm'], stdout=PIPE).stdout
# i64s
i64s = wat.count('(i64.')
print(' seen i64s:', i64s)
assert expect_only_wasm == (i64s > 30), 'i64 opts can be emitted in only-wasm mode, but not normally' # note we emit a few i64s even without wasm-only, when we replace udivmoddi (around 15 such)
selects = wat.count('(select')
print(' seen selects:', selects)
if expect_wasm_opts:
# when optimizing we should create selects
self.assertGreater(selects, 15)
else:
# when not optimizing for size we should not
self.assertEqual(selects, 0)
# asm2wasm opt line
asm2wasm_line = [line for line in err.split('\n') if 'asm2wasm' in line]
asm2wasm_line = '' if not asm2wasm_line else asm2wasm_line[0]
if '-O0' in args or '-O' not in str(args):
assert '-O' not in asm2wasm_line, 'no opts should be passed to asm2wasm: ' + asm2wasm_line
else:
opts_str = args[0]
assert opts_str.startswith('-O')
assert opts_str in asm2wasm_line, 'expected opts: ' + asm2wasm_line
@no_wasm_backend('fastcomp specific')
def test_binaryen_and_precise_f32(self):
for args, expect in [
([], True),
(['-s', 'PRECISE_F32=0'], True), # disabled, but no asm.js, so we definitely want f32
(['-s', 'PRECISE_F32=1'], True),
(['-s', 'PRECISE_F32=2'], True),
]:
print(args, expect)
try_delete('a.out.js')
err = run_process([EMCC, '-v', path_from_root('tests', 'hello_world.cpp'), '-s', 'BINARYEN=1'] + args, stderr=PIPE).stderr
assert expect == (' -emscripten-precise-f32' in err), err
self.assertContained('hello, world!', run_js('a.out.js'))
def test_binaryen_names(self):
sizes = {}
for args, expect_names in [
([], False),
(['-g'], True),
(['-O1'], False),
(['-O2'], False),
(['-O2', '-g'], True),
(['-O2', '-g1'], False),
(['-O2', '-g2'], True),
(['-O2', '--profiling'], True),
(['-O2', '--profiling-funcs'], True),
]:
print(args, expect_names)
try_delete('a.out.js')
# we use dlmalloc here, as emmalloc has a bunch of asserts that contain the text "malloc" in them, which makes counting harder
run_process([EMCC, path_from_root('tests', 'hello_world.cpp')] + args + ['-s', 'MALLOC="dlmalloc"'])
code = open('a.out.wasm', 'rb').read()
if expect_names:
# name section adds the name of malloc (there is also another one for the export)
self.assertEqual(code.count(b'malloc'), 2)
else:
# should be just malloc for the export
self.assertEqual(code.count(b'malloc'), 1)
sizes[str(args)] = os.path.getsize('a.out.wasm')
print(sizes)
self.assertLess(sizes["['-O2']"], sizes["['-O2', '--profiling-funcs']"], 'when -profiling-funcs, the size increases due to function names')
def test_binaryen_warn_mem(self):
# if user changes INITIAL_MEMORY at runtime, the wasm module may not accept the memory import if it is too big/small
create_test_file('pre.js', 'var Module = { INITIAL_MEMORY: 50 * 1024 * 1024 };\n')
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'INITIAL_MEMORY=' + str(16 * 1024 * 1024), '--pre-js', 'pre.js', '-s', 'WASM_ASYNC_COMPILATION=0'])
out = run_js('a.out.js', full_output=True, stderr=PIPE, assert_returncode=None)
self.assertContained('LinkError', out)
self.assertContained('Memory size incompatibility issues may be due to changing INITIAL_MEMORY at runtime to something too large. Use ALLOW_MEMORY_GROWTH to allow any size memory (and also make sure not to set INITIAL_MEMORY at runtime to something smaller than it was at compile time).', out)
self.assertNotContained('hello, world!', out)
# and with memory growth, all should be good
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'INITIAL_MEMORY=' + str(16 * 1024 * 1024), '--pre-js', 'pre.js', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'WASM_ASYNC_COMPILATION=0'])
self.assertContained('hello, world!', run_js('a.out.js'))
@no_wasm_backend('asm.js specific')
def test_binaryen_asmjs_outputs(self):
# Test that an .asm.js file is outputted exactly when it is requested.
for args, output_asmjs in [
([], False),
(['-s', 'MAIN_MODULE=2'], False),
]:
with temp_directory(self.get_dir()) as temp_dir:
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-o', os.path.join(temp_dir, 'a.js')] + args
print(' '.join(cmd))
run_process(cmd)
if output_asmjs:
self.assertExists(os.path.join(temp_dir, 'a.asm.js'))
self.assertNotExists(os.path.join(temp_dir, 'a.temp.asm.js'))
# Test that outputting to .wasm does not nuke an existing .asm.js file, if
# user wants to manually dual-deploy both to same directory.
with temp_directory(self.get_dir()) as temp_dir:
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=0', '-o', os.path.join(temp_dir, 'a.js'), '--separate-asm']
print(' '.join(cmd))
run_process(cmd)
self.assertExists(os.path.join(temp_dir, 'a.asm.js'))
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-o', os.path.join(temp_dir, 'a.js')]
print(' '.join(cmd))
run_process(cmd)
self.assertExists(os.path.join(temp_dir, 'a.asm.js'))
self.assertExists(os.path.join(temp_dir, 'a.wasm'))
self.assertNotExists(os.path.join(temp_dir, 'a.temp.asm.js'))
def test_binaryen_mem(self):
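# Check the initial and maximum sizes declared on the wasm memory import, in
# 64KiB wasm pages: 20971520 bytes = 320 pages, 41943040 bytes = 640 pages.
# With ALLOW_MEMORY_GROWTH and no MAXIMUM_MEMORY, no maximum is declared.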
for args, expect_initial, expect_max in [
(['-s', 'INITIAL_MEMORY=20971520'], 320, 320),
(['-s', 'INITIAL_MEMORY=20971520', '-s', 'ALLOW_MEMORY_GROWTH=1'], 320, None),
(['-s', 'INITIAL_MEMORY=20971520', '-s', 'MAXIMUM_MEMORY=41943040'], 320, 640),
(['-s', 'INITIAL_MEMORY=20971520', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=41943040'], 320, 640),
]:
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'WASM=1', '-O2'] + args
print(' '.join(cmd))
run_process(cmd)
wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), 'a.out.wasm'], stdout=PIPE).stdout
for line in wat.split('\n'):
if '(import "env" "memory" (memory ' in line:
parts = line.strip().replace('(', '').replace(')', '').split(' ')
print(parts)
self.assertEqual(parts[5], str(expect_initial))
if not expect_max:
self.assertEqual(len(parts), 6)
else:
self.assertEqual(parts[6], str(expect_max))
def test_invalid_mem(self):
# A large amount is fine, multiple of 16MB or not
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=33MB'])
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=32MB'])
# But not in asm.js
if not self.is_wasm_backend():
ret = self.expect_fail([EMCC, '-s', 'WASM=0', path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=33MB'])
self.assertContained('INITIAL_MEMORY must be a multiple of 16MB', ret)
# A tiny amount is fine in wasm
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=65536', '-s', 'TOTAL_STACK=1024'])
# And the program works!
self.assertContained('hello, world!', run_js('a.out.js'))
# But not in asm.js
if not self.is_wasm_backend():
ret = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=65536', '-s', 'WASM=0'])
self.assertContained('INITIAL_MEMORY must be at least 16MB', ret)
# Must be a multiple of 64KB
ret = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=33554433']) # 32MB + 1 byte
self.assertContained('INITIAL_MEMORY must be a multiple of 64KB', ret)
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MAXIMUM_MEMORY=33MB'])
ret = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MAXIMUM_MEMORY=34603009']) # 33MB + 1 byte
self.assertContained('MAXIMUM_MEMORY must be a multiple of 64KB', ret)
def test_invalid_output_dir(self):
ret = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-o', os.path.join('NONEXISTING_DIRECTORY', 'out.js')])
self.assertContained('specified output file (NONEXISTING_DIRECTORY%sout.js) is in a directory that does not exist' % os.path.sep, ret)
def test_binaryen_ctors(self):
# ctor order must be identical to js builds, deterministically
create_test_file('src.cpp', r'''
#include <stdio.h>
struct A {
A() { puts("constructing A!"); }
};
A a;
struct B {
B() { puts("constructing B!"); }
};
B b;
int main() {}
''')
run_process([EMCC, 'src.cpp'])
correct = run_js('a.out.js')
for args in [[], ['-s', 'RELOCATABLE=1']]:
print(args)
run_process([EMCC, 'src.cpp', '-s', 'WASM=1', '-o', 'b.out.js'] + args)
seen = run_js('b.out.js')
assert correct == seen, correct + '\n vs \n' + seen
# test debug info and debuggability of JS output
@uses_canonical_tmp
def test_binaryen_debug(self):
with env_modify({'EMCC_DEBUG': '1'}):
for args, expect_dash_g, expect_emit_text, expect_clean_js, expect_whitespace_js, expect_closured in [
(['-O0'], False, False, False, True, False),
(['-O0', '-g1'], False, False, False, True, False),
(['-O0', '-g2'], True, False, False, True, False), # in -g2+, we emit -g to asm2wasm so function names are saved
(['-O0', '-g'], True, True, False, True, False),
(['-O0', '--profiling-funcs'], True, False, False, True, False),
(['-O1'], False, False, False, True, False),
(['-O2'], False, False, True, False, False),
(['-O2', '-g1'], False, False, True, True, False),
(['-O2', '-g'], True, True, False, True, False),
(['-O2', '--closure', '1'], False, False, True, False, True),
(['-O2', '--closure', '1', '-g1'], False, False, True, True, True),
(['-O2', '--js-opts', '1'], False, False, True, False, False),
]:
print(args, expect_dash_g, expect_emit_text)
try_delete('a.out.wat')
cmd = [EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'WASM=1'] + args
print(' '.join(cmd))
err = run_process(cmd, stdout=PIPE, stderr=PIPE).stderr
if not self.is_wasm_backend():
asm2wasm_line = [x for x in err.split('\n') if 'asm2wasm' in x][0]
asm2wasm_line = asm2wasm_line.strip() + ' ' # ensure it ends with a space, for simpler searches below
print('|' + asm2wasm_line + '|')
assert expect_dash_g == (' -g ' in asm2wasm_line)
assert expect_emit_text == (' -S ' in asm2wasm_line)
if expect_emit_text:
text = open('a.out.wat').read()
assert ';;' in text, 'must see debug info comment'
assert 'hello_world.cpp:12' in text, 'must be file:line info'
js = open('a.out.js').read()
assert expect_clean_js == ('// ' not in js), 'cleaned-up js must not have comments'
assert expect_whitespace_js == ('{\n ' in js), 'whitespace-minified js must not have excess spacing'
assert expect_closured == ('var a;' in js or 'var a,' in js or 'var a=' in js or 'var a ' in js), 'closured js must have tiny variable names'
@uses_canonical_tmp
def test_binaryen_ignore_implicit_traps(self):
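# BINARYEN_IGNORE_IMPLICIT_TRAPS lets the binaryen optimizer assume that
# implicitly-trapping operations (loads, divisions, etc.) never trap, which
# enables more aggressive optimization. Verify the flag is passed through and
# that it actually changes the emitted code size.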
sizes = []
with env_modify({'EMCC_DEBUG': '1'}):
for args, expect in [
([], False),
(['-s', 'BINARYEN_IGNORE_IMPLICIT_TRAPS=1'], True),
]:
print(args, expect)
cmd = [EMCC, path_from_root('tests', 'hello_libcxx.cpp'), '-s', 'WASM=1', '-O3'] + args
print(' '.join(cmd))
err = run_process(cmd, stdout=PIPE, stderr=PIPE).stderr
self.assertContainedIf('--ignore-implicit-traps ', err, expect)
sizes.append(os.path.getsize('a.out.wasm'))
print('sizes:', sizes)
# sizes must be different, as the flag has an impact
self.assertEqual(len(set(sizes)), 2)
@no_fastcomp('BINARYEN_EXTRA_PASSES is used to optimize only in the wasm backend (fastcomp uses flags to asm2wasm)')
def test_binaryen_passes_extra(self):
def build(args=[]):
return run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-O3'] + args, stdout=PIPE).stdout
build()
base_size = os.path.getsize('a.out.wasm')
out = build(['-s', 'BINARYEN_EXTRA_PASSES="--metrics"'])
# and --metrics output appears
self.assertContained('[funcs]', out)
# adding --metrics should not affect code size
self.assertEqual(base_size, os.path.getsize('a.out.wasm'))
def assertFileContents(self, filename, contents):
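# Helper: compare a file against expected contents, normalizing line endings.
# When EMTEST_REBASELINE is set in the environment, the expectation file is
# rewritten instead of checked.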
contents = contents.replace('\r', '')
if os.environ.get('EMTEST_REBASELINE'):
with open(filename, 'w') as f:
f.write(contents)
return
if not os.path.exists(filename):
self.fail('Test expectation file not found: ' + filename + '.\n' +
'Run with EMTEST_REBASELINE to generate.')
expected_content = open(filename).read()
message = "Run with EMTEST_REBASELINE=1 to automatically update expectations"
self.assertTextDataIdentical(expected_content, contents, message,
filename, filename + '.new')
def run_metadce_test(self, filename, args, expected_exists, expected_not_exists, expected_size,
check_sent=True, check_imports=True, check_exports=True, check_funcs=True):
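# Builds the given file, scrapes the imports sent from the JS glue
# (asmLibraryArg), checks them against the expected lists, verifies the wasm
# size is within size_slack of the expectation, and compares the parsed
# imports/exports/funcs against checked-in .sent/.imports/.exports/.funcs
# expectation files.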
size_slack = 0.05
# in -Os, -Oz, we remove imports wasm doesn't need
print('Running metadce test: %s:' % filename, args, expected_exists,
expected_not_exists, expected_size, check_sent, check_imports, check_exports, check_funcs)
filename = path_from_root('tests', 'other', 'metadce', filename)
def clean_arg(arg):
return arg.replace('-', '')
def args_to_filename(args):
result = ''
for a in args:
if a == '-s':
continue
a = a.replace('-', '')
a = a.replace('=1', '')
a = a.replace('=[]', '_NONE')
a = a.replace('=', '_')
if a:
result += '_' + a
return result
expected_basename = os.path.splitext(filename)[0]
if not self.is_wasm_backend():
expected_basename += '_fastcomp'
expected_basename += args_to_filename(args)
run_process([EMCC, filename, '-g2'] + args)
# find the imports we send from JS
js = open('a.out.js').read()
start = js.find('asmLibraryArg = ')
end = js.find('}', start) + 1
start = js.find('{', start)
relevant = js[start + 2:end - 2]
relevant = relevant.replace(' ', '').replace('"', '').replace("'", '').split(',')
sent = [x.split(':')[0].strip() for x in relevant]
sent = [x for x in sent if x]
sent.sort()
for exists in expected_exists:
self.assertIn(exists, sent)
for not_exists in expected_not_exists:
self.assertNotIn(not_exists, sent)
wasm_size = os.path.getsize('a.out.wasm')
if expected_size is not None:
ratio = abs(wasm_size - expected_size) / float(expected_size)
print(' seen wasm size: %d (expected: %d), ratio to expected: %f' % (wasm_size, expected_size, ratio))
self.assertLess(ratio, size_slack)
imports, exports, funcs = parse_wasm('a.out.wasm')
imports.sort()
exports.sort()
funcs.sort()
# filter out _NNN suffixes that can be the result of bitcode linking when
# internal symbol names collide.
def strip_numeric_suffixes(funcname):
parts = funcname.split('_')
while parts:
if parts[-1].isdigit():
parts.pop()
else:
break
return '_'.join(parts)
funcs = [strip_numeric_suffixes(f) for f in funcs]
if check_sent:
sent_file = expected_basename + '.sent'
sent_data = '\n'.join(sent) + '\n'
self.assertFileContents(sent_file, sent_data)
if check_imports:
filename = expected_basename + '.imports'
data = '\n'.join(imports) + '\n'
self.assertFileContents(filename, data)
if check_exports:
filename = expected_basename + '.exports'
data = '\n'.join(exports) + '\n'
self.assertFileContents(filename, data)
if check_funcs:
filename = expected_basename + '.funcs'
data = '\n'.join(funcs) + '\n'
self.assertFileContents(filename, data)
@parameterized({
'O0': ([], [], ['waka'], 9766), # noqa
'O1': (['-O1'], [], ['waka'], 7886), # noqa
'O2': (['-O2'], [], ['waka'], 7871), # noqa
# in -O3, -Os and -Oz we metadce, and they shrink it down to the minimal output we want
'O3': (['-O3'], [], [], 85), # noqa
'Os': (['-Os'], [], [], 85), # noqa
'Oz': (['-Oz'], [], [], 85), # noqa
'Os_mr': (['-Os', '-s', 'MINIMAL_RUNTIME'], [], [], 85), # noqa
})
@no_fastcomp()
def test_metadce_minimal(self, *args):
self.run_metadce_test('minimal.c', *args)
@parameterized({
'O0': ([], ['abort'], ['waka'], 22712), # noqa
'O1': (['-O1'], ['abort'], ['waka'], 10450), # noqa
'O2': (['-O2'], ['abort'], ['waka'], 10440), # noqa
# in -O3, -Os and -Oz we metadce, and they shrink it down to the minimal output we want
'O3': (['-O3'], [], [], 55), # noqa
'Os': (['-Os'], [], [], 55), # noqa
'Oz': (['-Oz'], [], [], 55), # noqa
})
@no_wasm_backend()
def test_metadce_minimal_fastcomp(self, *args):
self.run_metadce_test('minimal.c', *args)
@parameterized({
'noexcept': (['-O2'], [], ['waka'], 218988), # noqa
# exceptions increases code size significantly
'except': (['-O2', '-fexceptions'], [], ['waka'], 279827), # noqa
# exceptions does not pull in demangling by default, which increases code size
'mangle': (['-O2', '-fexceptions',
'-s', 'DEMANGLE_SUPPORT'], [], ['waka'], 408028), # noqa
})
@no_fastcomp()
def test_metadce_cxx(self, *args):
self.run_metadce_test('hello_libcxx.cpp', *args)
@parameterized({
'normal': (['-O2'], ['abort'], ['waka'], 186423),
'emulated_function_pointers':
(['-O2', '-s', 'EMULATED_FUNCTION_POINTERS=1'], ['abort'], ['waka'], 188310),
})
@no_wasm_backend()
def test_metadce_cxx_fastcomp(self, *args):
# test on libc++: see effects of emulated function pointers
self.run_metadce_test('hello_libcxx.cpp', *args)
@parameterized({
'O0': ([], [], ['waka'], 22849), # noqa
'O1': (['-O1'], [], ['waka'], 10533), # noqa
'O2': (['-O2'], [], ['waka'], 10256), # noqa
'O3': (['-O3'], [], [], 1999), # noqa; in -O3, -Os and -Oz we metadce
'Os': (['-Os'], [], [], 2010), # noqa
'Oz': (['-Oz'], [], [], 2004), # noqa
# finally, check what happens when we export nothing. wasm should be almost empty
'export_nothing':
(['-Os', '-s', 'EXPORTED_FUNCTIONS=[]'], [], [], 61), # noqa
# we don't metadce with linkable code! other modules may want stuff
# don't compare the # of functions in a main module, which changes a lot
# TODO(sbc): Investigate why the number of exports is an order of magnitude
# larger for the wasm backend.
'main_module_2': (['-O3', '-s', 'MAIN_MODULE=2'], [], [], 10652, True, True, True, False), # noqa
})
@no_fastcomp()
def test_metadce_hello(self, *args):
self.run_metadce_test('hello_world.cpp', *args)
@parameterized({
'O0': ([], ['abort'], ['waka'], 42701), # noqa
'O1': (['-O1'], ['abort'], ['waka'], 13199), # noqa
'O2': (['-O2'], ['abort'], ['waka'], 12425), # noqa
'O3': (['-O3'], [], [], 2045), # noqa; in -O3, -Os and -Oz we metadce
'Os': (['-Os'], [], [], 2064), # noqa
'Oz': (['-Oz'], [], [], 2045), # noqa
# finally, check what happens when we export nothing. wasm should be almost empty
'export_nothing':
(['-Os', '-s', 'EXPORTED_FUNCTIONS=[]'], [], [], 8), # noqa; totally empty!
# we don't metadce with linkable code! other modules may want stuff
# don't compare the # of functions in a main module, which changes a lot
'main_module_2': (['-O3', '-s', 'MAIN_MODULE=2'], [], [], 10017), # noqa
})
@no_wasm_backend()
def test_metadce_hello_fastcomp(self, *args):
self.run_metadce_test('hello_world.cpp', *args)
@parameterized({
'O3': ('mem.c', ['-O3'],
[], [], 6100), # noqa
# argc/argv support code etc. is in the wasm
'O3_standalone': ('mem.c', ['-O3', '-s', 'STANDALONE_WASM'],
[], [], 6309), # noqa
# without argc/argv, no support code for them is emitted
'O3_standalone_narg': ('mem_no_argv.c', ['-O3', '-s', 'STANDALONE_WASM'],
[], [], 6309), # noqa
# without main, no support code for argc/argv is emitted either
'O3_standalone_lib': ('mem_no_main.c', ['-O3', '-s', 'STANDALONE_WASM', '--no-entry'],
[], [], 6309), # noqa
# Growth support code is in JS, no significant change in the wasm
'O3_grow': ('mem.c', ['-O3', '-s', 'ALLOW_MEMORY_GROWTH'],
[], [], 6098), # noqa
# Growth support code is in the wasm
'O3_grow_standalone': ('mem.c', ['-O3', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'STANDALONE_WASM'],
[], [], 6449), # noqa
# without argc/argv, no support code for them is emitted, even with lto
'O3_standalone_narg_flto':
('mem_no_argv.c', ['-O3', '-s', 'STANDALONE_WASM', '-flto'],
[], [], 4971), # noqa
})
@no_fastcomp()
def test_metadce_mem(self, filename, *args):
self.run_metadce_test(filename, *args)
@parameterized({
'O3': ('libcxxabi_message.cpp', ['-O3'],
[], [], 128), # noqa
# argc/argv support code etc. is in the wasm
'O3_standalone': ('libcxxabi_message.cpp', ['-O3', '-s', 'STANDALONE_WASM'],
[], [], 174), # noqa
})
@no_fastcomp()
def test_metadce_libcxxabi_message(self, filename, *args):
self.run_metadce_test(filename, *args)
# ensures runtime exports work, even with metadce
def test_extra_runtime_exports(self):
exports = ['stackSave', 'stackRestore', 'stackAlloc', 'FS']
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'WASM=1', '-Os', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=%s' % str(exports)])
js = open('a.out.js').read()
for export in exports:
assert ('Module["%s"]' % export) in js, export
def test_legalize_js_ffi(self):
# test disabling of JS FFI legalization
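# Background: JS cannot represent wasm i64 values, so by default the FFI is
# "legalized": i64 params/results are split into pairs of i32s, and f32s are
# promoted to f64 at the JS boundary. LEGALIZE_JS_FFI=0 keeps the original
# signatures, which only makes sense when the caller is not JS (e.g. for
# wasm-to-wasm dynamic linking).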
wasm_dis = os.path.join(building.get_binaryen_bin(), 'wasm-dis')
for (args, js_ffi) in [
(['-s', 'LEGALIZE_JS_FFI=1', '-s', 'SIDE_MODULE=1', '-O1', '-s', 'EXPORT_ALL=1'], True),
(['-s', 'LEGALIZE_JS_FFI=0', '-s', 'SIDE_MODULE=1', '-O1', '-s', 'EXPORT_ALL=1'], False),
(['-s', 'LEGALIZE_JS_FFI=0', '-s', 'SIDE_MODULE=1', '-O0', '-s', 'EXPORT_ALL=1'], False),
(['-s', 'LEGALIZE_JS_FFI=0', '-s', 'WARN_ON_UNDEFINED_SYMBOLS=0', '-O0'], False),
]:
if self.is_wasm_backend() and 'SIDE_MODULE=1' in args:
continue
print(args)
try_delete('a.out.wasm')
try_delete('a.out.wat')
cmd = [EMCC, path_from_root('tests', 'other', 'ffi.c'), '-g', '-o', 'a.out.js'] + args
print(' '.join(cmd))
run_process(cmd)
run_process([wasm_dis, 'a.out.wasm', '-o', 'a.out.wat'])
text = open('a.out.wat').read()
# remove internal comments and extra whitespace
text = re.sub(r'\(;[^;]+;\)', '', text)
text = re.sub(r'\$var\$*.', '', text)
text = re.sub(r'param \$\d+', 'param ', text)
text = re.sub(r' +', ' ', text)
# TODO: remove the unnecessary ".*" in e_* regexes after binaryen #2510 lands
e_add_f32 = re.search(r'func \$_?add_f .*\(param f32\) \(param f32\) \(result f32\)', text)
i_i64_i32 = re.search(r'import .*"_?import_ll" .*\(param i32 i32\) \(result i32\)', text)
i_f32_f64 = re.search(r'import .*"_?import_f" .*\(param f64\) \(result f64\)', text)
i_i64_i64 = re.search(r'import .*"_?import_ll" .*\(param i64\) \(result i64\)', text)
i_f32_f32 = re.search(r'import .*"_?import_f" .*\(param f32\) \(result f32\)', text)
e_i64_i32 = re.search(r'func \$_?add_ll .*\(param i32\) \(param i32\) \(param i32\) \(param i32\) \(result i32\)', text)
e_f32_f64 = re.search(r'func \$legalstub\$_?add_f .*\(param f64\) \(param f64\) \(result f64\)', text)
e_i64_i64 = re.search(r'func \$_?add_ll .*\(param i64\) \(param i64\) \(result i64\)', text)
assert e_add_f32, 'add_f export missing'
if js_ffi:
assert i_i64_i32, 'i64 not converted to i32 in imports'
assert i_f32_f64, 'f32 not converted to f64 in imports'
assert not i_i64_i64, 'i64 not converted to i32 in imports'
assert not i_f32_f32, 'f32 not converted to f64 in imports'
assert e_i64_i32, 'i64 not converted to i32 in exports'
assert not e_f32_f64, 'f32 not converted to f64 in exports'
assert not e_i64_i64, 'i64 not converted to i64 in exports'
else:
assert not i_i64_i32, 'i64 converted to i32 in imports'
assert not i_f32_f64, 'f32 converted to f64 in imports'
assert i_i64_i64, 'i64 converted to i32 in imports'
assert i_f32_f32, 'f32 converted to f64 in imports'
assert not e_i64_i32, 'i64 converted to i32 in exports'
assert not e_f32_f64, 'f32 converted to f64 in exports'
assert e_i64_i64, 'i64 converted to i64 in exports'
def test_no_legalize_js_ffi(self):
# test minimal JS FFI legalization for invoke and dyncalls
if self.is_wasm_backend():
self.skipTest('not testing legalize with main module and wasm backend')
wasm_dis = os.path.join(building.get_binaryen_bin(), 'wasm-dis')
for (args, js_ffi) in [
(['-s', 'LEGALIZE_JS_FFI=0', '-s', 'MAIN_MODULE=2', '-O3', '-s', 'DISABLE_EXCEPTION_CATCHING=0'], False),
]:
print(args)
try_delete('a.out.wasm')
try_delete('a.out.wat')
with env_modify({'EMCC_FORCE_STDLIBS': 'libc++'}):
cmd = [EMCC, path_from_root('tests', 'other', 'noffi.cpp'), '-g', '-o', 'a.out.js'] + args
print(' '.join(cmd))
run_process(cmd)
run_process([wasm_dis, 'a.out.wasm', '-o', 'a.out.wat'])
text = open('a.out.wat').read()
# remove internal comments and extra whitespace
text = re.sub(r'\(;[^;]+;\)', '', text)
text = re.sub(r'\$var\$*.', '', text)
text = re.sub(r'param \$\d+', 'param ', text)
text = re.sub(r' +', ' ', text)
# print("text: %s" % text)
i_legalimport_i64 = re.search(r'\(import.*\$legalimport\$invoke_j.*', text)
e_legalstub_i32 = re.search(r'\(func.*\$legalstub\$dyn.*\(result i32\)', text)
assert i_legalimport_i64, 'legal import not generated for invoke call'
assert e_legalstub_i32, 'legal stub not generated for dyncall'
def test_export_aliasee(self):
# build side module
args = ['-s', 'SIDE_MODULE=1']
cmd = [EMCC, path_from_root('tests', 'other', 'alias', 'side.c'), '-g', '-o', 'side.wasm'] + args
print(' '.join(cmd))
run_process(cmd)
# build main module
args = ['-s', 'EXPORTED_FUNCTIONS=["_main", "_foo"]', '-s', 'MAIN_MODULE=2', '-s', 'EXIT_RUNTIME=1', '-lnodefs.js']
cmd = [EMCC, path_from_root('tests', 'other', 'alias', 'main.c'), '-o', 'main.js'] + args
print(' '.join(cmd))
run_process(cmd)
# run the program
self.assertContained('success', run_js('main.js'))
def test_sysconf_phys_pages(self):
def run(args, expected):
if self.is_wasm_backend() and 'WASM=0' in args:
return
cmd = [EMCC, path_from_root('tests', 'unistd', 'sysconf_phys_pages.c')] + args
print(str(cmd))
run_process(cmd)
result = run_js('a.out.js').strip()
self.assertEqual(result, str(expected) + ', errno: 0')
run([], 1024)
run(['-s', 'INITIAL_MEMORY=32MB'], 2048)
run(['-s', 'INITIAL_MEMORY=32MB', '-s', 'ALLOW_MEMORY_GROWTH=1'], (2 * 1024 * 1024 * 1024) // 16384)
run(['-s', 'INITIAL_MEMORY=32MB', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'WASM=0'], (2 * 1024 * 1024 * 1024) // 16384)
def test_wasm_target_and_STANDALONE_WASM(self):
# STANDALONE_WASM means we never minify imports and exports.
for opts, potentially_expect_minified_exports_and_imports in (
([], False),
(['-s', 'STANDALONE_WASM'], False),
(['-O2'], False),
(['-O3'], True),
(['-O3', '-s', 'STANDALONE_WASM'], False),
(['-Os'], True),
):
if 'STANDALONE_WASM' in opts and not self.is_wasm_backend():
continue
# targeting .wasm (without .js) means we enable STANDALONE_WASM automatically, and don't minify imports/exports
for target in ('out.js', 'out.wasm'):
expect_minified_exports_and_imports = potentially_expect_minified_exports_and_imports and target.endswith('.js')
standalone = target.endswith('.wasm') or 'STANDALONE_WASM' in opts
print(opts, potentially_expect_minified_exports_and_imports, target, ' => ', expect_minified_exports_and_imports, standalone)
self.clear()
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-o', target] + opts)
self.assertExists('out.wasm')
if target.endswith('.wasm'):
# only wasm requested
self.assertNotExists('out.js')
wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), 'out.wasm'], stdout=PIPE).stdout
wat_lines = wat.split('\n')
exports = [line.strip().split(' ')[1].replace('"', '') for line in wat_lines if "(export " in line]
imports = [line.strip().split(' ')[2].replace('"', '') for line in wat_lines if "(import " in line]
exports_and_imports = exports + imports
print(' exports', exports)
print(' imports', imports)
if expect_minified_exports_and_imports:
assert 'a' in exports_and_imports
else:
assert 'a' not in exports_and_imports
assert 'memory' in exports_and_imports or 'fd_write' in exports_and_imports, 'some things are not minified anyhow'
# verify the wasm runs with the JS
if target.endswith('.js'):
self.assertContained('hello, world!', run_js('out.js'))
# verify a standalone wasm
if standalone and self.is_wasm_backend():
for engine in WASM_ENGINES:
print(engine)
self.assertContained('hello, world!', run_js('out.wasm', engine=engine))
def test_wasm_targets_side_module(self):
# side modules do allow a wasm target
for opts, target in [([], 'a.out.wasm'), (['-o', 'lib.wasm'], 'lib.wasm')]:
# specified target
print('building: ' + target)
self.clear()
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'SIDE_MODULE=1'] + opts)
for x in os.listdir('.'):
assert not x.endswith('.js'), 'we should not emit js when making a wasm side module: ' + x
self.assertIn(b'dylink', open(target, 'rb').read())
@no_fastcomp('test wasm object files')
def test_wasm_backend_lto(self):
# test building of non-wasm-object-files libraries, building with them, and running them
src = path_from_root('tests', 'hello_libcxx.cpp')
# test codegen in lto mode, and compare to normal (wasm object) mode
for args in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os'], ['-Oz']]:
print(args)
print('wasm in object')
run_process([EMXX, src] + args + ['-c', '-o', 'hello_obj.o'])
self.assertTrue(building.is_wasm('hello_obj.o'))
self.assertFalse(building.is_bitcode('hello_obj.o'))
print('bitcode in object')
run_process([EMXX, src] + args + ['-c', '-o', 'hello_bitcode.o', '-flto'])
self.assertFalse(building.is_wasm('hello_bitcode.o'))
self.assertTrue(building.is_bitcode('hello_bitcode.o'))
print('use bitcode object (LTO)')
run_process([EMXX, 'hello_bitcode.o'] + args + ['-flto'])
self.assertContained('hello, world!', run_js('a.out.js'))
print('use bitcode object (non-LTO)')
run_process([EMXX, 'hello_bitcode.o'] + args)
self.assertContained('hello, world!', run_js('a.out.js'))
print('use native object (LTO)')
run_process([EMXX, 'hello_obj.o'] + args + ['-flto'])
self.assertContained('hello, world!', run_js('a.out.js'))
print('use native object (non-LTO)')
run_process([EMXX, 'hello_obj.o'] + args)
self.assertContained('hello, world!', run_js('a.out.js'))
@parameterized({
'except': [],
'noexcept': ['-s', 'DISABLE_EXCEPTION_CATCHING=0']
})
@no_fastcomp('test wasm object files')
def test_wasm_backend_lto_libcxx(self, *args):
run_process([EMXX, path_from_root('tests', 'hello_libcxx.cpp'), '-flto'] + list(args))
@no_fastcomp('wasm backend lto specific')
def test_lto_flags(self):
for flags, expect_bitcode in [
([], False),
(['-flto'], True),
(['-flto=thin'], True),
(['-s', 'WASM_OBJECT_FILES=0'], True),
(['-s', 'WASM_OBJECT_FILES=1'], False),
]:
run_process([EMCC, path_from_root('tests', 'hello_world.cpp')] + flags + ['-c', '-o', 'a.o'])
seen_bitcode = building.is_bitcode('a.o')
self.assertEqual(expect_bitcode, seen_bitcode, 'must emit LTO-capable bitcode when flags indicate so (%s)' % str(flags))
def test_wasm_nope(self):
for opts in [[], ['-O2']]:
print(opts)
# check we show a good error message if there is no wasm support
create_test_file('pre.js', 'WebAssembly = undefined;\n')
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '--pre-js', 'pre.js'] + opts)
out = run_js('a.out.js', stderr=STDOUT, assert_returncode=None)
self.assertContained('no native wasm support detected', out)
def test_jsrun(self):
print(NODE_JS)
jsrun.WORKING_ENGINES = {}
# Test that engine check passes
self.assertTrue(jsrun.check_engine(NODE_JS))
# Run it a second time (cache hit)
self.assertTrue(jsrun.check_engine(NODE_JS))
# Test that engine check fails
bogus_engine = ['/fake/inline4']
self.assertFalse(jsrun.check_engine(bogus_engine))
self.assertFalse(jsrun.check_engine(bogus_engine))
# Test the other possible way (list vs string) to express an engine
if type(NODE_JS) is list:
engine2 = NODE_JS[0]
else:
engine2 = [NODE_JS]
self.assertTrue(jsrun.check_engine(engine2))
# Test that run_js requires the engine
run_js(path_from_root('tests', 'hello_world.js'), NODE_JS)
caught_exit = 0
try:
run_js(path_from_root('tests', 'hello_world.js'), bogus_engine)
except SystemExit as e:
caught_exit = e.code
self.assertEqual(1, caught_exit, 'Did not catch SystemExit with bogus JS engine')
def test_error_on_missing_libraries(self):
# -llsomenonexistingfile is an error by default
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-lsomenonexistingfile'])
if self.is_wasm_backend():
self.assertContained('wasm-ld: error: unable to find library -lsomenonexistingfile', err)
else:
self.assertContained('emcc: cannot find library "somenonexistingfile"', err)
# Tests that if user accidentally attempts to link native object code, we show an error
def test_native_link_error_message(self):
run_process([CLANG_CC, '-c', path_from_root('tests', 'hello_123.c'), '-o', 'hello_123.o'])
err = self.expect_fail([EMCC, 'hello_123.o', '-o', 'hello_123.js'])
self.assertContained('hello_123.o is not a valid input', err)
# Tests that we should give a clear error on INITIAL_MEMORY not being enough for static initialization + stack
def test_clear_error_on_massive_static_data(self):
with open('src.cpp', 'w') as f:
f.write('''
char muchData[128 * 1024];
int main() {
return (int)&muchData;
}
''')
err = self.expect_fail([EMCC, 'src.cpp', '-s', 'TOTAL_STACK=1KB', '-s', 'INITIAL_MEMORY=64KB'])
if self.is_wasm_backend():
self.assertContained('wasm-ld: error: initial memory too small', err)
else:
self.assertContained('Memory is not large enough for static data (134000) plus the stack (1024), please increase INITIAL_MEMORY (65536)', err)
def test_o_level_clamp(self):
for level in [3, 4, 20]:
err = run_process([EMCC, '-O' + str(level), path_from_root('tests', 'hello_world.c')], stderr=PIPE).stderr
self.assertContainedIf("optimization level '-O" + str(level) + "' is not supported; using '-O3' instead", err, level > 3)
# Tests that if user specifies multiple -o output directives, then the last one will take precedence
def test_multiple_o_files(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'a.js', '-o', 'b.js'])
assert os.path.isfile('b.js')
assert not os.path.isfile('a.js')
# Tests that Emscripten-provided header files can be cleanly included in C code
def test_include_system_header_in_c(self):
for std in [[], ['-std=c89']]: # Test oldest C standard, and the default C standard
for directory, headers in [
('emscripten', ['dom_pk_codes.h', 'em_asm.h', 'emscripten.h', 'fetch.h', 'html5.h', 'key_codes.h', 'threading.h', 'trace.h']), # This directory also has bind.h, val.h and wire.h, which require C++11
('AL', ['al.h', 'alc.h']),
('EGL', ['egl.h', 'eglplatform.h']),
('GL', ['freeglut_std.h', 'gl.h', 'glew.h', 'glfw.h', 'glu.h', 'glut.h']),
('GLES', ['gl.h', 'glplatform.h']),
('GLES2', ['gl2.h', 'gl2platform.h']),
('GLES3', ['gl3.h', 'gl3platform.h', 'gl31.h', 'gl32.h']),
('GLFW', ['glfw3.h']),
('KHR', ['khrplatform.h'])]:
for h in headers:
inc = '#include <' + directory + '/' + h + '>'
print(inc)
create_test_file('a.c', inc)
create_test_file('b.c', inc)
run_process([EMCC] + std + ['a.c', 'b.c'])
@is_slow_test
def test_single_file(self):
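# SINGLE_FILE=1 embeds the wasm (and any memory init file) into the JS
# output, so no separate a.out.wasm / a.out.mem should be emitted; check that
# against all combinations of the other flags.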
for (single_file_enabled,
meminit1_enabled,
debug_enabled,
closure_enabled,
wasm_enabled) in itertools.product([True, False], repeat=5):
# skip unhelpful option combinations
if wasm_enabled and meminit1_enabled:
continue
if closure_enabled and debug_enabled:
continue
expect_wasm = wasm_enabled
expect_meminit = meminit1_enabled and not wasm_enabled
expect_wat = debug_enabled and wasm_enabled and not self.is_wasm_backend()
cmd = [EMCC, path_from_root('tests', 'hello_world.c')]
if single_file_enabled:
expect_meminit = False
expect_wasm = False
cmd += ['-s', 'SINGLE_FILE=1']
if meminit1_enabled:
cmd += ['--memory-init-file', '1']
if debug_enabled:
cmd += ['-g']
if closure_enabled:
cmd += ['--closure', '1']
if not wasm_enabled:
cmd += ['-s', 'WASM=0']
self.clear()
def do_test(cmd):
print(' '.join(cmd))
run_process(cmd)
print(os.listdir('.'))
assert expect_meminit == (os.path.exists('a.out.mem') or os.path.exists('a.out.js.mem'))
assert expect_wasm == os.path.exists('a.out.wasm')
assert expect_wat == os.path.exists('a.out.wat')
self.assertContained('hello, world!', run_js('a.out.js'))
do_test(cmd)
# additional combinations that are not part of the big product()
if self.is_wasm_backend() and debug_enabled:
separate_dwarf_cmd = cmd + ['-gseparate-dwarf']
if wasm_enabled:
do_test(separate_dwarf_cmd)
self.assertExists('a.out.wasm.debug.wasm')
else:
self.expect_fail(separate_dwarf_cmd)
def test_emar_M(self):
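# emar's -M flag reads an ar MRI script from stdin; verify that a combined
# archive built that way contains the members of both input archives.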
create_test_file('file1', ' ')
create_test_file('file2', ' ')
run_process([EMAR, 'cr', 'file1.a', 'file1'])
run_process([EMAR, 'cr', 'file2.a', 'file2'])
run_process([EMAR, '-M'], input='''create combined.a
addlib file1.a
addlib file2.a
save
end
''')
result = run_process([EMAR, 't', 'combined.a'], stdout=PIPE).stdout
self.assertContained('file1', result)
self.assertContained('file2', result)
def test_emar_duplicate_inputs(self):
# Verify that we can supply the same input multiple times without
# confusing emar.py:
# See https://github.com/emscripten-core/emscripten/issues/9733
create_test_file('file1', ' ')
run_process([EMAR, 'cr', 'file1.a', 'file1', 'file1'])
# Temporarily disabled to allow this llvm change to roll
# https://reviews.llvm.org/D69665
@no_windows('Temporarily disabled under windows')
def test_emar_response_file(self):
# Test that special characters such as single quotes in filenames survive being
# sent via response file
create_test_file("file'1", ' ')
create_test_file("file'2", ' ')
building.emar('cr', 'libfoo.a', ("file'1", "file'2"))
def test_archive_empty(self):
# This test was added because we had an issue with AUTO_ARCHIVE_INDEXES failing on empty
# archives (which inherently don't have indexes).
run_process([EMAR, 'crS', 'libfoo.a'])
run_process([EMCC, '-Werror', 'libfoo.a', path_from_root('tests', 'hello_world.c')])
def test_archive_no_index(self):
create_test_file('foo.c', 'int foo = 1;')
run_process([EMCC, '-c', 'foo.c'])
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.c')])
# The `S` flag means don't add an archive index
run_process([EMAR, 'crS', 'libfoo.a', 'foo.o'])
# The llvm backend (like GNU ld and lld) doesn't support linking archives with no index.
# However we have logic that will automatically add indexes (unless running with
# NO_AUTO_ARCHIVE_INDEXES).
if self.is_wasm_backend():
stderr = self.expect_fail([EMCC, '-s', 'NO_AUTO_ARCHIVE_INDEXES', 'libfoo.a', 'hello_world.o'])
self.assertContained('libfoo.a: archive has no index; run ranlib to add one', stderr)
# The default behavior is to add archive indexes automatically.
run_process([EMCC, 'libfoo.a', 'hello_world.o'])
@no_fastcomp('AUTO_ARCHIVE_INDEXES only applies to wasm backend')
def test_archive_non_objects(self):
create_test_file('file.txt', 'test file')
# llvm-nm has issues with files that start with two or more null bytes since it thinks they
# are COFF files. Ensure that we correctly ignore such files when we process them.
create_test_file('zeros.bin', '\0\0\0\0')
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.c')])
# No index added.
# --format=darwin (the default on OSX) has a strange issue where it adds extra
# newlines to files: https://bugs.llvm.org/show_bug.cgi?id=42562
run_process([EMAR, 'crS', '--format=gnu', 'libfoo.a', 'file.txt', 'zeros.bin', 'hello_world.o'])
run_process([EMCC, path_from_root('tests', 'hello_world.c'), 'libfoo.a'])
def test_flag_aliases(self):
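# Legacy setting names (e.g. TOTAL_MEMORY for INITIAL_MEMORY) must behave as
# exact aliases: builds using either name should produce byte-identical JS
# and wasm output.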
def assert_aliases_match(flag1, flag2, flagarg, extra_args=[]):
results = {}
for f in (flag1, flag2):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', f + '=' + flagarg] + extra_args)
with open('a.out.js') as out:
results[f + '.js'] = out.read()
with open('a.out.wasm', 'rb') as out:
results[f + '.wasm'] = out.read()
self.assertEqual(results[flag1 + '.js'], results[flag2 + '.js'], 'js results should be identical')
self.assertEqual(results[flag1 + '.wasm'], results[flag2 + '.wasm'], 'wasm results should be identical')
assert_aliases_match('INITIAL_MEMORY', 'TOTAL_MEMORY', '16777216')
assert_aliases_match('INITIAL_MEMORY', 'TOTAL_MEMORY', '64MB')
assert_aliases_match('MAXIMUM_MEMORY', 'WASM_MEM_MAX', '16777216', ['-s', 'ALLOW_MEMORY_GROWTH'])
assert_aliases_match('MAXIMUM_MEMORY', 'BINARYEN_MEM_MAX', '16777216', ['-s', 'ALLOW_MEMORY_GROWTH'])
def test_IGNORE_CLOSURE_COMPILER_ERRORS(self):
create_test_file('pre.js', r'''
// make closure compiler very very angry
var dupe = 1;
var dupe = 2;
function Node() {
throw 'Node is a DOM thing too, and use the ' + dupe;
}
function Node() {
throw '(duplicate) Node is a DOM thing too, and also use the ' + dupe;
}
''')
def test(check, extra=[]):
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-O2', '--closure', '1', '--pre-js', 'pre.js'] + extra
proc = run_process(cmd, check=check, stderr=PIPE)
if not check:
self.assertNotEqual(proc.returncode, 0)
return proc
WARNING = 'Variable dupe declared more than once'
proc = test(check=False)
self.assertContained(WARNING, proc.stderr)
proc = test(check=True, extra=['-s', 'IGNORE_CLOSURE_COMPILER_ERRORS=1'])
self.assertNotContained(WARNING, proc.stderr)
def test_closure_full_js_library(self):
# test for closure errors in the entire JS library
# We must ignore various types of errors that are expected in this situation, as we
# are including a lot of JS without corresponding compiled code for it. This still
# lets us catch all other errors.
with env_modify({'EMCC_CLOSURE_ARGS': '--jscomp_off undefinedVars'}):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-O1', '--closure', '1', '-g1', '-s', 'INCLUDE_FULL_LIBRARY=1', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0'])
# Tests --closure-args command line flag
def test_closure_externs(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '--closure', '1', '--pre-js', path_from_root('tests', 'test_closure_externs_pre_js.js'), '--closure-args', '--externs "' + path_from_root('tests', 'test_closure_externs.js') + '"'])
def test_toolchain_profiler(self):
environ = os.environ.copy()
environ['EM_PROFILE_TOOLCHAIN'] = '1'
# replaced subprocess functions should not cause errors
run_process([EMCC, path_from_root('tests', 'hello_world.c')], env=environ)
def test_noderawfs(self):
fopen_write = open(path_from_root('tests', 'asmfs', 'fopen_write.cpp')).read()
create_test_file('main.cpp', fopen_write)
run_process([EMCC, 'main.cpp', '-s', 'NODERAWFS=1'])
self.assertContained("read 11 bytes. Result: Hello data!", run_js('a.out.js'))
# NODERAWFS should directly write on OS file system
self.assertEqual("Hello data!", open('hello_file.txt').read())
def test_noderawfs_disables_embedding(self):
expected = '--preload-file and --embed-file cannot be used with NODERAWFS which disables virtual filesystem'
base = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'NODERAWFS=1']
err = self.expect_fail(base + ['--preload-file', 'somefile'])
self.assertContained(expected, err)
err = self.expect_fail(base + ['--embed-file', 'somefile'])
self.assertContained(expected, err)
def test_node_code_caching(self):
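# NODE_CODE_CACHING writes a .cached file with compiled code alongside the
# wasm on the first run under node; a corrupted cache file must produce a
# clear error and then be regenerated with valid contents.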
run_process([EMCC, path_from_root('tests', 'hello_world.c'),
'-s', 'NODE_CODE_CACHING',
'-s', 'WASM_ASYNC_COMPILATION=0'])
def get_cached():
cached = glob.glob('a.out.wasm.*.cached')
if not cached:
return None
self.assertEqual(len(cached), 1)
return cached[0]
# running the program makes it cache the code
self.assertFalse(get_cached())
self.assertEqual('hello, world!', run_js('a.out.js').strip())
self.assertTrue(get_cached(), 'should be a cache file')
# it's hard to test that the cache actually speeds things up, but we can
# at least test that it tries to deserialize it
with open(get_cached(), 'w') as f:
f.write('waka waka')
ERROR = 'NODE_CODE_CACHING: failed to deserialize, bad cache file?'
self.assertContained(ERROR, run_js('a.out.js', stderr=PIPE, full_output=True))
# we cached proper code after showing that error
with open(get_cached(), 'rb') as f:
self.assertEqual(f.read().count(b'waka'), 0)
self.assertNotContained(ERROR, run_js('a.out.js', stderr=PIPE, full_output=True))
def test_autotools_shared_check(self):
env = os.environ.copy()
env['LC_ALL'] = 'C'
expected = ': supported targets:.* elf'
for python in [PYTHON, 'python', 'python2', 'python3']:
if not shared.which(python):
continue
if python == 'python3' and not is_python3_version_supported():
continue
print(python)
out = run_process([python, path_from_root('emcc.py'), '--help'], stdout=PIPE, env=env).stdout
assert re.search(expected, out)
def test_ioctl_window_size(self):
self.do_other_test(os.path.join('other', 'ioctl', 'window_size'))
def test_fd_closed(self):
self.do_other_test(os.path.join('other', 'fd_closed'))
def test_fflush(self):
# fflush without the full filesystem won't quite work
self.do_other_test(os.path.join('other', 'fflush'))
def test_fflush_fs(self):
# fflush with the full filesystem will flush from libc, but not the JS logging, which awaits a newline
self.do_other_test(os.path.join('other', 'fflush_fs'), emcc_args=['-s', 'FORCE_FILESYSTEM=1'])
def test_fflush_fs_exit(self):
# on exit, we can send out a newline as no more code will run
self.do_other_test(os.path.join('other', 'fflush_fs_exit'), emcc_args=['-s', 'FORCE_FILESYSTEM=1', '-s', 'EXIT_RUNTIME=1'])
def test_extern_weak(self):
self.do_other_test(os.path.join('other', 'extern_weak'))
if not self.is_wasm_backend(): # TODO: wasm backend main module
self.do_other_test(os.path.join('other', 'extern_weak'), emcc_args=['-s', 'MAIN_MODULE=1', '-DLINKABLE'])
def test_main_module_without_main(self):
create_test_file('pre.js', r'''
var Module = {
onRuntimeInitialized: function() {
Module._foo();
}
};
''')
create_test_file('src.c', r'''
#include <emscripten.h>
EMSCRIPTEN_KEEPALIVE void foo() {
EM_ASM({ console.log("bar") });
}
''')
run_process([EMCC, 'src.c', '--pre-js', 'pre.js', '-s', 'MAIN_MODULE=2'])
self.assertContained('bar', run_js('a.out.js'))
def test_js_optimizer_parse_error(self):
# check we show a proper understandable error for JS parse problems
create_test_file('src.cpp', r'''
#include <emscripten.h>
int main() {
EM_ASM({
var x = !<->5.; // wtf
});
}
''')
stderr = self.expect_fail([EMCC, 'src.cpp', '-O2'])
# wasm backend output doesn't have spaces in the EM_ASM function bodies
self.assertContained(('''
var ASM_CONSTS = [function() { var x = !<->5.; }];
^
''', '''
1024: function() {var x = !<->5.;}
^
'''), stderr)
@no_fastcomp('wasm2js only')
def test_js_optimizer_chunk_size_determinism(self):
def build():
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-O3', '-s', 'WASM=0'])
with open('a.out.js') as f:
# FIXME: newline differences can exist, ignore for now
return f.read().replace('\n', '')
normal = build()
with env_modify({
'EMCC_JSOPT_MIN_CHUNK_SIZE': '1',
'EMCC_JSOPT_MAX_CHUNK_SIZE': '1'
}):
tiny = build()
with env_modify({
'EMCC_JSOPT_MIN_CHUNK_SIZE': '4294967296',
'EMCC_JSOPT_MAX_CHUNK_SIZE': '4294967296'
}):
huge = build()
self.assertIdentical(normal, tiny)
self.assertIdentical(normal, huge)
def test_EM_ASM_ES6(self):
create_test_file('src.cpp', r'''
#include <emscripten.h>
int main() {
EM_ASM({
var x = (a, b) => 5; // valid ES6
async function y() {} // valid ES2017
out('hello!');
});
}
''')
run_process([EMCC, 'src.cpp', '-O2'])
self.assertContained('hello!', run_js('a.out.js'))
def test_check_sourcemapurl(self):
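# -g4 emits a source map; the wasm should carry a sourceMappingURL custom
# section whose URL is prefixed by --source-map-base, and no DWARF sections.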
if not self.is_wasm():
self.skipTest('only supported with wasm')
run_process([EMCC, path_from_root('tests', 'hello_123.c'), '-g4', '-o', 'a.js', '--source-map-base', 'dir/'])
output = open('a.wasm', 'rb').read()
# has sourceMappingURL section content and points to 'dir/a.wasm.map' file
source_mapping_url_content = encode_leb(len('sourceMappingURL')) + b'sourceMappingURL' + encode_leb(len('dir/a.wasm.map')) + b'dir/a.wasm.map'
self.assertEqual(output.count(source_mapping_url_content), 1)
# make sure no DWARF debug info sections remain - they would just waste space
self.assertNotIn(b'.debug_', output)
def test_check_source_map_args(self):
# -g4 is needed for source maps; -g is not enough
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-g'])
self.assertNotExists('a.out.wasm.map')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-g4'])
self.assertExists('a.out.wasm.map')
@parameterized({
'normal': [],
'profiling': ['--profiling'] # -g4 --profiling should still emit a source map; see #8584
})
def test_check_sourcemapurl_default(self, *args):
print(args)
if not self.is_wasm():
self.skipTest('only supported with wasm')
try_delete('a.wasm.map')
run_process([EMCC, path_from_root('tests', 'hello_123.c'), '-g4', '-o', 'a.js'] + list(args))
output = open('a.wasm', 'rb').read()
# has sourceMappingURL section content and points to 'a.wasm.map' file
source_mapping_url_content = encode_leb(len('sourceMappingURL')) + b'sourceMappingURL' + encode_leb(len('a.wasm.map')) + b'a.wasm.map'
self.assertIn(source_mapping_url_content, output)
def test_wasm_sourcemap(self):
# no_main.c will be read (from a relative location) due to the specified "-s"
shutil.copyfile(path_from_root('tests', 'other', 'wasm_sourcemap', 'no_main.c'), 'no_main.c')
wasm_map_cmd = [PYTHON, path_from_root('tools', 'wasm-sourcemap.py'),
'--sources', '--prefix', '=wasm-src://',
'--load-prefix', '/emscripten/tests/other/wasm_sourcemap=.',
'--dwarfdump-output',
path_from_root('tests', 'other', 'wasm_sourcemap', 'foo.wasm.dump'),
'-o', 'a.out.wasm.map',
path_from_root('tests', 'other', 'wasm_sourcemap', 'foo.wasm'),
'--basepath=' + os.getcwd()]
run_process(wasm_map_cmd)
output = open('a.out.wasm.map').read()
# has "sources" entry with file (includes also `--prefix =wasm-src:///` replacement)
self.assertIn('wasm-src:///emscripten/tests/other/wasm_sourcemap/no_main.c', output)
# has "sourcesContent" entry with source code (included with `-s` option)
self.assertIn('int foo()', output)
# has some entries
self.assertRegexpMatches(output, r'"mappings":\s*"[A-Za-z0-9+/]')
def test_wasm_sourcemap_dead(self):
wasm_map_cmd = [PYTHON, path_from_root('tools', 'wasm-sourcemap.py'),
'--dwarfdump-output',
path_from_root('tests', 'other', 'wasm_sourcemap_dead', 't.wasm.dump'),
'-o', 'a.out.wasm.map',
path_from_root('tests', 'other', 'wasm_sourcemap_dead', 't.wasm'),
'--basepath=' + os.getcwd()]
run_process(wasm_map_cmd, stdout=PIPE, stderr=PIPE)
output = open('a.out.wasm.map').read()
# has only two entries
self.assertRegexpMatches(output, r'"mappings":\s*"[A-Za-z0-9+/]+,[A-Za-z0-9+/]+"')
@no_fastcomp()
def test_wasm_sourcemap_relative_paths(self):
def test(infile, source_map_added_dir=''):
expected_source_map_path = 'a.cpp'
if source_map_added_dir:
expected_source_map_path = source_map_added_dir + '/' + expected_source_map_path
print(infile, expected_source_map_path)
shutil.copyfile(path_from_root('tests', 'hello_123.c'), infile)
infiles = [
infile,
os.path.abspath(infile),
'./' + infile
]
for curr in infiles:
print(' ', curr)
run_process([EMCC, curr, '-g4'])
with open('a.out.wasm.map', 'r') as f:
self.assertIn('"%s"' % expected_source_map_path, str(f.read()))
test('a.cpp')
ensure_dir('inner')
test('inner/a.cpp', 'inner')
@no_fastcomp('dwarf')
def test_separate_dwarf(self):
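# -gseparate-dwarf keeps the main wasm small by moving the DWARF data into a
# side *.debug.wasm file, which the main wasm references via an
# external_debug_info custom section.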
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-g'])
self.assertExists('a.out.wasm')
self.assertNotExists('a.out.wasm.debug.wasm')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-gseparate-dwarf'])
self.assertExists('a.out.wasm')
self.assertExists('a.out.wasm.debug.wasm')
self.assertLess(os.path.getsize('a.out.wasm'), os.path.getsize('a.out.wasm.debug.wasm'))
# the special section should also exist, that refers to the side debug file
with open('a.out.wasm', 'rb') as f:
wasm = f.read()
self.assertIn(b'external_debug_info', wasm)
self.assertIn(b'a.out.wasm.debug.wasm', wasm)
@no_fastcomp('dwarf')
def test_separate_dwarf_with_filename(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-gseparate-dwarf=with_dwarf.wasm'])
self.assertNotExists('a.out.wasm.debug.wasm')
self.assertExists('with_dwarf.wasm')
# the correct notation is to have exactly one '=' and in the right place
for invalid in ('-gseparate-dwarf=x=', '-gseparate-dwarfy=', '-gseparate-dwarf-hmm'):
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), invalid])
self.assertContained('invalid -gseparate-dwarf=FILENAME notation', stderr)
def test_wasm_producers_section(self):
# no producers section by default
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
with open('a.out.wasm', 'rb') as f:
self.assertNotIn('clang', str(f.read()))
size = os.path.getsize('a.out.wasm')
if self.is_wasm_backend():
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'EMIT_PRODUCERS_SECTION=1'])
with open('a.out.wasm', 'rb') as f:
self.assertIn('clang', str(f.read()))
size_with_section = os.path.getsize('a.out.wasm')
self.assertLess(size, size_with_section)
def test_html_preprocess(self):
test_file = path_from_root('tests', 'module', 'test_stdin.c')
output_file = 'test_stdin.html'
shell_file = path_from_root('tests', 'module', 'test_html_preprocess.html')
run_process([EMCC, '-o', output_file, test_file, '--shell-file', shell_file, '-s', 'ASSERTIONS=0'], stdout=PIPE, stderr=PIPE)
output = open(output_file).read()
self.assertContained("""<style>
/* Disable preprocessing inside style block as syntax is ambiguous with CSS */
#include {background-color: black;}
#if { background-color: red;}
#else {background-color: blue;}
#endif {background-color: green;}
#xxx {background-color: purple;}
</style>
T1:(else) ASSERTIONS != 1
T2:ASSERTIONS != 1
T3:ASSERTIONS < 2
T4:(else) ASSERTIONS <= 1
T5:(else) ASSERTIONS
T6:!ASSERTIONS""", output)
run_process([EMCC, '-o', output_file, test_file, '--shell-file', shell_file, '-s', 'ASSERTIONS=1'], stdout=PIPE, stderr=PIPE)
output = open(output_file).read()
self.assertContained("""<style>
/* Disable preprocessing inside style block as syntax is ambiguous with CSS */
#include {background-color: black;}
#if { background-color: red;}
#else {background-color: blue;}
#endif {background-color: green;}
#xxx {background-color: purple;}
</style>
T1:ASSERTIONS == 1
T2:(else) ASSERTIONS == 1
T3:ASSERTIONS < 2
T4:(else) ASSERTIONS <= 1
T5:ASSERTIONS
T6:(else) !ASSERTIONS""", output)
run_process([EMCC, '-o', output_file, test_file, '--shell-file', shell_file, '-s', 'ASSERTIONS=2'], stdout=PIPE, stderr=PIPE)
output = open(output_file).read()
self.assertContained("""<style>
/* Disable preprocessing inside style block as syntax is ambiguous with CSS */
#include {background-color: black;}
#if { background-color: red;}
#else {background-color: blue;}
#endif {background-color: green;}
#xxx {background-color: purple;}
</style>
T1:(else) ASSERTIONS != 1
T2:ASSERTIONS != 1
T3:(else) ASSERTIONS >= 2
T4:ASSERTIONS > 1
T5:ASSERTIONS
T6:(else) !ASSERTIONS""", output)
# Tests that Emscripten-compiled applications can be run from a relative path when node's current working directory differs from the directory containing the script.
def test_node_js_run_from_different_directory(self):
ensure_dir('subdir')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', os.path.join('subdir', 'a.js'), '-O3'])
ret = run_process(NODE_JS + [os.path.join('subdir', 'a.js')], stdout=PIPE).stdout
self.assertContained('hello, world!', ret)
# Tests that a pthreads + modularize build can be run in node js
@no_fastcomp('node pthreads only supported on wasm backend')
def test_node_js_pthread_module(self):
# create module loader script
moduleLoader = 'moduleLoader.js'
moduleLoaderContents = '''
const test_module = require("./module");
test_module().then((test_module_instance) => {
test_module_instance._main();
process.exit(0);
});
'''
ensure_dir('subdir')
create_test_file(os.path.join('subdir', moduleLoader), moduleLoaderContents)
# build hello_world.c
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-o', os.path.join('subdir', 'module.js'), '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=test_module', '-s', 'ENVIRONMENT=worker,node'])
# run the module
ret = run_process(NODE_JS + ['--experimental-wasm-threads'] + [os.path.join('subdir', moduleLoader)], stdout=PIPE).stdout
self.assertContained('hello, world!', ret)
@no_windows('node system() does not seem to work, see https://github.com/emscripten-core/emscripten/pull/10547')
def test_node_js_system(self):
run_process([EMCC, '-DENV_NODE', path_from_root('tests', 'system.c'), '-o', 'a.js', '-O3'])
ret = run_process(NODE_JS + ['a.js'], stdout=PIPE).stdout
self.assertContained('OK', ret)
def test_is_bitcode(self):
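# building.is_bitcode should recognize both raw bitcode (leading 'BC') and
# the bitcode wrapper header (magic 0x0B17C0DE, written little-endian below).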
fname = 'tmp.o'
with open(fname, 'wb') as f:
f.write(b'foo')
self.assertFalse(building.is_bitcode(fname))
with open(fname, 'wb') as f:
f.write(b'\xDE\xC0\x17\x0B')
f.write(16 * b'\x00')
f.write(b'BC')
self.assertTrue(building.is_bitcode(fname))
with open(fname, 'wb') as f:
f.write(b'BC')
self.assertTrue(building.is_bitcode(fname))
def test_is_ar(self):
fname = 'tmp.a'
with open(fname, 'wb') as f:
f.write(b'foo')
self.assertFalse(building.is_ar(fname))
with open(fname, 'wb') as f:
f.write(b'!<arch>\n')
self.assertTrue(building.is_ar(fname))
def test_emcc_parsing(self):
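# Exercise EXPORTED_FUNCTIONS parsing: inline lists and @response files must
# be accepted (ignoring stray whitespace), while malformed lists (stray
# backslashes, missing commas) must fail with a clear error message.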
create_test_file('src.c', r'''
#include <stdio.h>
void a() { printf("a\n"); }
void b() { printf("b\n"); }
void c() { printf("c\n"); }
void d() { printf("d\n"); }
''')
create_test_file('response', r'''[
"_a",
"_b",
"_c",
"_d"
]
''')
for export_arg, expected in [
# extra space at end - should be ignored
("EXPORTED_FUNCTIONS=['_a', '_b', '_c', '_d' ]", ''),
# extra newline in response file - should be ignored
("EXPORTED_FUNCTIONS=@response", ''),
# stray slash
("EXPORTED_FUNCTIONS=['_a', '_b', \\'_c', '_d']", '''undefined exported function: "\\\\'_c'"'''),
# stray slash
("EXPORTED_FUNCTIONS=['_a', '_b',\\ '_c', '_d']", '''undefined exported function: "\\\\ '_c'"'''),
# stray slash
('EXPORTED_FUNCTIONS=["_a", "_b", \\"_c", "_d"]', 'undefined exported function: "\\\\"_c""'),
# stray slash
('EXPORTED_FUNCTIONS=["_a", "_b",\\ "_c", "_d"]', 'undefined exported function: "\\\\ "_c"'),
# missing comma
('EXPORTED_FUNCTIONS=["_a", "_b" "_c", "_d"]', 'undefined exported function: "_b" "_c"'),
]:
print(export_arg)
proc = run_process([EMCC, 'src.c', '-s', export_arg], stdout=PIPE, stderr=PIPE, check=not expected)
print(proc.stderr)
if not expected:
self.assertFalse(proc.stderr)
else:
self.assertNotEqual(proc.returncode, 0)
self.assertContained(expected, proc.stderr)
@no_fastcomp('uses new ASYNCIFY')
def test_asyncify_escaping(self):
proc = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'ASYNCIFY=1', '-s', "ASYNCIFY_ONLY=[DOS_ReadFile(unsigned short, unsigned char*, unsigned short*, bool)]"], stdout=PIPE, stderr=PIPE)
self.assertContained('emcc: ASYNCIFY list contains an item without balanced parentheses', proc.stderr)
self.assertContained(' DOS_ReadFile(unsigned short', proc.stderr)
self.assertContained('Try to quote the entire argument', proc.stderr)
@no_fastcomp('uses new ASYNCIFY')
def test_asyncify_response_file(self):
return self.skipTest('TODO: remove the support for multiple binaryen versions of this warning output ("function name" vs "pattern" etc.)')
create_test_file('a.txt', r'''[
"DOS_ReadFile(unsigned short, unsigned char*, unsigned short*, bool)"
]
''')
proc = run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'ASYNCIFY=1', '-s', "[email protected]"], stdout=PIPE, stderr=PIPE)
# we should parse the response file properly, and then issue a proper warning for the missing function
self.assertContained(
'Asyncify onlylist contained a non-matching pattern: DOS_ReadFile(unsigned short, unsigned char*, unsigned short*, bool)',
proc.stderr)
# Sockets and networking
def test_inet(self):
self.do_run(open(path_from_root('tests', 'sha1.c')).read(), 'SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6')
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
int main() {
printf("*%x,%x,%x,%x,%x,%x*\n", htonl(0xa1b2c3d4), htonl(0xfe3572e0), htonl(0x07abcdf0), htons(0xabcd), ntohl(0x43211234), ntohs(0xbeaf));
in_addr_t i = inet_addr("190.180.10.78");
printf("%x\n", i);
return 0;
}
'''
self.do_run(src, '*d4c3b2a1,e07235fe,f0cdab07,cdab,34122143,afbe*\n4e0ab4be\n')
def test_inet2(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
int main() {
struct in_addr x, x2;
int *y = (int*)&x;
*y = 0x12345678;
printf("%s\n", inet_ntoa(x));
int r = inet_aton(inet_ntoa(x), &x2);
printf("%s\n", inet_ntoa(x2));
return 0;
}
'''
self.do_run(src, '120.86.52.18\n120.86.52.18\n')
def test_inet3(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
int main() {
char dst[64];
struct in_addr x, x2;
int *y = (int*)&x;
*y = 0x12345678;
printf("%s\n", inet_ntop(AF_INET,&x,dst,sizeof dst));
int r = inet_aton(inet_ntoa(x), &x2);
printf("%s\n", inet_ntop(AF_INET,&x2,dst,sizeof dst));
return 0;
}
'''
self.do_run(src, '120.86.52.18\n120.86.52.18\n')
def test_inet4(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
void test(const char *test_addr, bool first=true){
char str[40];
struct in6_addr addr;
unsigned char *p = (unsigned char*)&addr;
int ret;
ret = inet_pton(AF_INET6,test_addr,&addr);
if(ret == -1) return;
if(ret == 0) return;
if(inet_ntop(AF_INET6,&addr,str,sizeof(str)) == NULL ) return;
printf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x - %s\n",
p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9],p[10],p[11],p[12],p[13],p[14],p[15],str);
if (first) test(str, false); // check again, on our output
}
int main(){
test("::");
test("::1");
test("::1.2.3.4");
test("::17.18.19.20");
test("::ffff:1.2.3.4");
test("1::ffff");
test("::255.255.255.255");
test("0:ff00:1::");
test("0:ff::");
test("abcd::");
test("ffff::a");
test("ffff::a:b");
test("ffff::a:b:c");
test("ffff::a:b:c:d");
test("ffff::a:b:c:d:e");
test("::1:2:0:0:0");
test("0:0:1:2:3::");
test("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff");
test("1::255.255.255.255");
// below should fail and not produce results
test("1.2.3.4");
test("");
test("-");
printf("ok.\n");
}
'''
self.do_run(src, r'''0000:0000:0000:0000:0000:0000:0000:0000 - ::
0000:0000:0000:0000:0000:0000:0000:0000 - ::
0000:0000:0000:0000:0000:0000:0000:0001 - ::1
0000:0000:0000:0000:0000:0000:0000:0001 - ::1
0000:0000:0000:0000:0000:0000:0102:0304 - ::102:304
0000:0000:0000:0000:0000:0000:0102:0304 - ::102:304
0000:0000:0000:0000:0000:0000:1112:1314 - ::1112:1314
0000:0000:0000:0000:0000:0000:1112:1314 - ::1112:1314
0000:0000:0000:0000:0000:ffff:0102:0304 - ::ffff:1.2.3.4
0000:0000:0000:0000:0000:ffff:0102:0304 - ::ffff:1.2.3.4
0001:0000:0000:0000:0000:0000:0000:ffff - 1::ffff
0001:0000:0000:0000:0000:0000:0000:ffff - 1::ffff
0000:0000:0000:0000:0000:0000:ffff:ffff - ::ffff:ffff
0000:0000:0000:0000:0000:0000:ffff:ffff - ::ffff:ffff
0000:ff00:0001:0000:0000:0000:0000:0000 - 0:ff00:1::
0000:ff00:0001:0000:0000:0000:0000:0000 - 0:ff00:1::
0000:00ff:0000:0000:0000:0000:0000:0000 - 0:ff::
0000:00ff:0000:0000:0000:0000:0000:0000 - 0:ff::
abcd:0000:0000:0000:0000:0000:0000:0000 - abcd::
abcd:0000:0000:0000:0000:0000:0000:0000 - abcd::
ffff:0000:0000:0000:0000:0000:0000:000a - ffff::a
ffff:0000:0000:0000:0000:0000:0000:000a - ffff::a
ffff:0000:0000:0000:0000:0000:000a:000b - ffff::a:b
ffff:0000:0000:0000:0000:0000:000a:000b - ffff::a:b
ffff:0000:0000:0000:0000:000a:000b:000c - ffff::a:b:c
ffff:0000:0000:0000:0000:000a:000b:000c - ffff::a:b:c
ffff:0000:0000:0000:000a:000b:000c:000d - ffff::a:b:c:d
ffff:0000:0000:0000:000a:000b:000c:000d - ffff::a:b:c:d
ffff:0000:0000:000a:000b:000c:000d:000e - ffff::a:b:c:d:e
ffff:0000:0000:000a:000b:000c:000d:000e - ffff::a:b:c:d:e
0000:0000:0000:0001:0002:0000:0000:0000 - ::1:2:0:0:0
0000:0000:0000:0001:0002:0000:0000:0000 - ::1:2:0:0:0
0000:0000:0001:0002:0003:0000:0000:0000 - 0:0:1:2:3::
0000:0000:0001:0002:0003:0000:0000:0000 - 0:0:1:2:3::
ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff - ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff - ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
0001:0000:0000:0000:0000:0000:ffff:ffff - 1::ffff:ffff
0001:0000:0000:0000:0000:0000:ffff:ffff - 1::ffff:ffff
ok.
''')
def test_getsockname_unconnected_socket(self):
self.do_run(r'''
#include <sys/socket.h>
#include <stdio.h>
#include <assert.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
int main() {
int fd;
int z;
fd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
struct sockaddr_in adr_inet;
socklen_t len_inet = sizeof adr_inet;
z = getsockname(fd, (struct sockaddr *)&adr_inet, &len_inet);
if (z != 0) {
perror("getsockname error");
return 1;
}
char buffer[1000];
sprintf(buffer, "%s:%u", inet_ntoa(adr_inet.sin_addr), (unsigned)ntohs(adr_inet.sin_port));
const char *correct = "0.0.0.0:0";
printf("got (expected) socket: %s (%s), size %lu (%lu)\n", buffer, correct, strlen(buffer), strlen(correct));
assert(strlen(buffer) == strlen(correct));
assert(strcmp(buffer, correct) == 0);
puts("success.");
}
''', 'success.')
def test_getpeername_unconnected_socket(self):
self.do_run(r'''
#include <sys/socket.h>
#include <stdio.h>
#include <assert.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
int main() {
int fd;
int z;
fd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
struct sockaddr_in adr_inet;
socklen_t len_inet = sizeof adr_inet;
z = getpeername(fd, (struct sockaddr *)&adr_inet, &len_inet);
if (z != 0) {
perror("getpeername error");
return 1;
}
puts("unexpected success.");
}
''', 'getpeername error: Socket not connected', assert_returncode=None)
def test_getaddrinfo(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_getaddrinfo.c')).read(), 'success')
def test_getnameinfo(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_getnameinfo.c')).read(), 'success')
def test_gethostbyname(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_gethostbyname.c')).read(), 'success')
def test_getprotobyname(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_getprotobyname.c')).read(), 'success')
def test_socketpair(self):
self.do_run(r'''
#include <sys/socket.h>
#include <stdio.h>
int main() {
int fd[2];
int err;
err = socketpair(AF_INET, SOCK_STREAM, 0, fd);
if (err != 0) {
perror("socketpair error");
return 1;
}
puts("unexpected success.");
}
''', 'socketpair error: Function not implemented', assert_returncode=None)
def test_link(self):
self.do_run(r'''
#include <netdb.h>
#include <sys/types.h>
#include <sys/socket.h>
int main () {
void* thing = gethostbyname("bing.com");
ssize_t rval = recv (0, thing, 0, 0);
rval = send (0, thing, 0, 0);
return 0;
}''', '', force_c=True)
# This test verifies that function names embedded into the build with --js-library (JS functions imported to asm.js/wasm)
# are minified when -O3 is used
def test_js_function_names_are_minified(self):
def check_size(f, expected_size):
if not os.path.isfile(f):
return # A nonexistent file trivially passes this check
obtained_size = os.path.getsize(f)
print('size of generated ' + f + ': ' + str(obtained_size))
try_delete(f)
self.assertLess(obtained_size, expected_size)
run_process([PYTHON, path_from_root('tests', 'gen_many_js_functions.py'), 'library_long.js', 'main_long.c'])
for wasm in [['-s', 'WASM=1'], ['-s', 'WASM=0']]:
if self.is_wasm_backend() and 'WASM=0' in wasm:
continue
# Currently we rely on Closure for full minification of every appearance of JS function names.
# TODO: Add minification also for non-Closure users and add [] to this list to test minification without Closure.
for closure in [['--closure', '1']]:
args = [EMCC, '-O3', '--js-library', 'library_long.js', 'main_long.c', '-o', 'a.html'] + wasm + closure
print(' '.join(args))
run_process(args)
ret = run_process(NODE_JS + ['a.js'], stdout=PIPE).stdout
self.assertTextDataIdentical('Sum of numbers from 1 to 1000: 500500 (expected 500500)', ret.strip())
check_size('a.js', 150000)
check_size('a.wasm', 80000)
# Checks that C++ exceptions managing invoke_*() wrappers will not be generated if exceptions are disabled
def test_no_invoke_functions_are_generated_if_exception_catching_is_disabled(self):
self.skipTest('Skipping other.test_no_invoke_functions_are_generated_if_exception_catching_is_disabled: Enable after new version of fastcomp has been tagged')
for args in [[], ['-s', 'WASM=0']]:
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'DISABLE_EXCEPTION_CATCHING=1', '-o', 'a.html'] + args)
output = open('a.js').read()
self.assertContained('_main', output) # Smoke test that we actually compiled
self.assertNotContained('invoke_', output)
# Verifies that only the minimal needed set of invoke_*() functions will be generated when C++ exceptions are enabled
def test_no_excessive_invoke_functions_are_generated_when_exceptions_are_enabled(self):
self.skipTest('Skipping other.test_no_excessive_invoke_functions_are_generated_when_exceptions_are_enabled: Enable after new version of fastcomp has been tagged')
for args in [[], ['-s', 'WASM=0']]:
run_process([EMCC, path_from_root('tests', 'invoke_i.cpp'), '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-o', 'a.html'] + args)
output = open('a.js').read()
self.assertContained('invoke_i', output)
self.assertNotContained('invoke_ii', output)
self.assertNotContained('invoke_v', output)
def test_emscripten_metadata(self):
run_process([EMCC, path_from_root('tests', 'hello_world.c')])
self.assertNotIn(b'emscripten_metadata', open('a.out.wasm', 'rb').read())
run_process([EMCC, path_from_root('tests', 'hello_world.c'),
'-s', 'EMIT_EMSCRIPTEN_METADATA'])
self.assertIn(b'emscripten_metadata', open('a.out.wasm', 'rb').read())
# make sure wasm executes correctly
ret = run_process(NODE_JS + ['a.out.js'], stdout=PIPE).stdout
self.assertTextDataIdentical('hello, world!\n', ret)
@parameterized({
'O0': (False, ['-O0']), # noqa
'O0_emit': (True, ['-O0', '-s', 'EMIT_EMSCRIPTEN_LICENSE']), # noqa
'O2': (False, ['-O2']), # noqa
'O2_emit': (True, ['-O2', '-s', 'EMIT_EMSCRIPTEN_LICENSE']), # noqa
'O2_js_emit': (True, ['-O2', '-s', 'EMIT_EMSCRIPTEN_LICENSE', '-s', 'WASM=0']), # noqa
'O2_closure': (False, ['-O2', '--closure', '1']), # noqa
'O2_closure_emit': (True, ['-O2', '-s', 'EMIT_EMSCRIPTEN_LICENSE', '--closure', '1']), # noqa
'O2_closure_js_emit': (True, ['-O2', '-s', 'EMIT_EMSCRIPTEN_LICENSE', '--closure', '1', '-s', 'WASM=0']), # noqa
})
@no_fastcomp('EMIT_EMSCRIPTEN_LICENSE is upstream only')
def test_emscripten_license(self, expect_license, args):
# fastcomp does not support the new license flag
if not self.is_wasm_backend():
expect_license = False
run_process([EMCC, path_from_root('tests', 'hello_world.c')] + args)
with open('a.out.js') as f:
js = f.read()
licenses_found = len(re.findall('Copyright [0-9]* The Emscripten Authors', js))
if expect_license:
self.assertNotEqual(licenses_found, 0, 'Unable to find license block in output file!')
self.assertEqual(licenses_found, 1, 'Found too many license blocks in the output file!')
else:
self.assertEqual(licenses_found, 0, 'Found a license block in the output file, but it should not have been there!')
# This test verifies that the generated exports from the asm.js/wasm module reference the
# unminified export name exactly once. (The name must appear once so that calling code can
# access the export unminified, but it should not appear more than once, since that would
# be wasteful for size.)
def test_function_exports_are_small(self):
def test(wasm, closure, opt):
extra_args = wasm + opt + closure
print(extra_args)
args = [EMCC, path_from_root('tests', 'long_function_name_in_export.c'), '-o', 'a.html', '-s', 'ENVIRONMENT=web', '-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-Werror'] + extra_args
run_process(args)
output = open('a.js', 'r').read()
try_delete('a.js')
self.assertNotContained('asm["_thisIsAFunctionExportedFromAsmJsOrWasmWithVeryLongFunction"]', output)
# TODO: Add stricter testing when Wasm side is also optimized: (currently Wasm does still need
# to reference exports multiple times)
if 'WASM=1' not in wasm:
num_times_export_is_referenced = output.count('thisIsAFunctionExportedFromAsmJsOrWasmWithVeryLongFunction')
self.assertEqual(num_times_export_is_referenced, 1)
for closure in [[], ['--closure', '1']]:
for opt in [['-O2'], ['-O3'], ['-Os']]:
test(['-s', 'WASM=0'], closure, opt)
test(['-s', 'WASM=1', '-s', 'WASM_ASYNC_COMPILATION=0'], closure, opt)
def test_minimal_runtime_code_size(self):
smallest_code_size_args = ['-s', 'MINIMAL_RUNTIME=2',
'-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1',
'-s', 'ENVIRONMENT=web',
'-s', 'TEXTDECODER=2',
'-s', 'ABORTING_MALLOC=0',
'-s', 'ALLOW_MEMORY_GROWTH=0',
'-s', 'SUPPORT_ERRNO=0',
'-s', 'DECLARE_ASM_MODULE_EXPORTS=1',
'-s', 'MALLOC=emmalloc',
'-s', 'GL_EMULATE_GLES_VERSION_STRING_FORMAT=0',
'-s', 'GL_EXTENSIONS_IN_PREFIXED_FORMAT=0',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0',
'-s', 'GL_TRACK_ERRORS=0',
'-s', 'GL_SUPPORT_EXPLICIT_SWAP_CONTROL=0',
'-s', 'GL_POOL_TEMP_BUFFERS=0',
'-s', 'FAST_UNROLLED_MEMCPY_AND_MEMSET=0',
'-s', 'MIN_CHROME_VERSION=58',
'-s', 'NO_FILESYSTEM=1',
'--output_eol', 'linux',
'-Oz',
'--closure', '1',
'-DNDEBUG',
'-ffast-math']
asmjs = ['-s', 'WASM=0', '--separate-asm', '-s', 'ELIMINATE_DUPLICATE_FUNCTIONS=1', '--memory-init-file', '1']
wasm2js = ['-s', 'WASM=0', '--memory-init-file', '1']
hello_world_sources = [path_from_root('tests', 'small_hello_world.c'),
'-s', 'RUNTIME_FUNCS_TO_IMPORT=[]',
'-s', 'USES_DYNAMIC_ALLOC=0',
'-s', 'ASM_PRIMITIVE_VARS=[STACKTOP]']
random_printf_sources = [path_from_root('tests', 'hello_random_printf.c'),
'-s', 'RUNTIME_FUNCS_TO_IMPORT=[]',
'-s', 'USES_DYNAMIC_ALLOC=0',
'-s', 'ASM_PRIMITIVE_VARS=[STACKTOP]',
'-s', 'SINGLE_FILE=1']
hello_webgl_sources = [path_from_root('tests', 'minimal_webgl', 'main.cpp'),
path_from_root('tests', 'minimal_webgl', 'webgl.c'),
'--js-library', path_from_root('tests', 'minimal_webgl', 'library_js.js'),
'-s', 'RUNTIME_FUNCS_TO_IMPORT=[]',
'-s', 'USES_DYNAMIC_ALLOC=1', '-lwebgl.js',
'-s', 'MODULARIZE=1']
hello_webgl2_sources = hello_webgl_sources + ['-s', 'MAX_WEBGL_VERSION=2']
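# Helper: format a relative size delta for display; e.g. print_percent(105, 100) returns ' (+5.00%)'.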
def print_percent(actual, expected):
if actual == expected:
return ''
return ' ({:+.2f}%)'.format((actual - expected) * 100.0 / expected)
for js in [False, True]:
for sources, name in [
[hello_world_sources, 'hello_world'],
[random_printf_sources, 'random_printf'],
[hello_webgl_sources, 'hello_webgl'],
[hello_webgl2_sources, 'hello_webgl2']
]:
outputs = ['a.html', 'a.js']
test_name = name
args = smallest_code_size_args[:]
if not self.is_wasm_backend():
test_name += '_fastcomp'
if js:
outputs += ['a.mem']
if self.is_wasm_backend():
args += wasm2js
test_name += '_wasm2js'
else:
args += asmjs
outputs += ['a.asm.js']
test_name += '_asmjs'
else:
outputs += ['a.wasm']
test_name += '_wasm'
if 'SINGLE_FILE=1' in sources:
outputs = ['a.html']
results_file = path_from_root('tests', 'code_size', test_name + '.json')
print('\n-----------------------------\n' + test_name)
expected_results = {}
try:
expected_results = json.loads(open(results_file, 'r').read())
except Exception:
if not os.environ.get('EMTEST_REBASELINE'):
raise
args = [EMCC, '-o', 'a.html'] + args + sources
print('\n' + ' '.join(args))
run_process(args)
print('\n')
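# Gzip-compress f to a temporary .gz file and return the compressed size (the temporary file is deleted).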
def get_file_gzipped_size(f):
f_gz = f + '.gz'
with gzip.open(f_gz, 'wb') as gzf:
gzf.write(open(f, 'rb').read())
size = os.path.getsize(f_gz)
try_delete(f_gz)
return size
obtained_results = {}
total_output_size = 0
total_expected_size = 0
total_output_size_gz = 0
total_expected_size_gz = 0
for f in outputs:
f_gz = f + '.gz'
expected_size = expected_results[f] if f in expected_results else float('inf')
expected_size_gz = expected_results[f_gz] if f_gz in expected_results else float('inf')
size = os.path.getsize(f)
size_gz = get_file_gzipped_size(f)
obtained_results[f] = size
obtained_results[f_gz] = size_gz
if size != expected_size and (f.endswith('.js') or f.endswith('.html')):
print('Contents of ' + f + ': ')
print(open(f, 'r').read())
print('size of ' + f + ' == ' + str(size) + ', expected ' + str(expected_size) + ', delta=' + str(size - expected_size) + print_percent(size, expected_size))
print('size of ' + f_gz + ' == ' + str(size_gz) + ', expected ' + str(expected_size_gz) + ', delta=' + str(size_gz - expected_size_gz) + print_percent(size_gz, expected_size_gz))
# Hack: Generated .mem initializer files have different sizes on different
# platforms (Windows gives x, CircleCI Linux gives x-17 bytes, my home
# Linux gives x+2 bytes...). Likewise asm.js files seem to be affected by
# the LLVM IR text names, which lead to asm.js names, which lead to
# different code sizes, which lead to different relooper choices,
# as a result leading to slightly different total code sizes.
# TODO: identify what is causing this. Meanwhile allow some amount of slop.
mem_slop = 10 if self.is_wasm_backend() else 50
if size <= expected_size + mem_slop and size >= expected_size - mem_slop:
size = expected_size
# N.B. even though the test code above prints out gzip compressed sizes, regression testing is done against uncompressed sizes
# this is because optimizing for compressed sizes can be unpredictable and sometimes counterproductive
total_output_size += size
total_expected_size += expected_size
total_output_size_gz += size_gz
total_expected_size_gz += expected_size_gz
obtained_results['total'] = total_output_size
obtained_results['total_gz'] = total_output_size_gz
print('Total output size=' + str(total_output_size) + ' bytes, expected total size=' + str(total_expected_size) + ', delta=' + str(total_output_size - total_expected_size) + print_percent(total_output_size, total_expected_size))
print('Total output size gzipped=' + str(total_output_size_gz) + ' bytes, expected total size gzipped=' + str(total_expected_size_gz) + ', delta=' + str(total_output_size_gz - total_expected_size_gz) + print_percent(total_output_size_gz, total_expected_size_gz))
if os.environ.get('EMTEST_REBASELINE'):
open(results_file, 'w').write(json.dumps(obtained_results, indent=2) + '\n')
else:
if total_output_size > total_expected_size:
print('Oops, overall generated code size regressed by ' + str(total_output_size - total_expected_size) + ' bytes!')
if total_output_size < total_expected_size:
print('Hey amazing, overall generated code size was improved by ' + str(total_expected_size - total_output_size) + ' bytes! Rerun test with other.test_minimal_runtime_code_size with EMTEST_REBASELINE=1 to update the expected sizes!')
self.assertEqual(total_output_size, total_expected_size)
# Test that legacy settings that have been fixed to a specific value can no longer be changed.
def test_legacy_settings_forbidden_to_change(self):
stderr = self.expect_fail([EMCC, '-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=0', path_from_root('tests', 'hello_world.c')])
self.assertContained('MEMFS_APPEND_TO_TYPED_ARRAYS=0 is no longer supported', stderr)
run_process([EMCC, '-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1', path_from_root('tests', 'hello_world.c')])
run_process([EMCC, '-s', 'PRECISE_I64_MATH=2', path_from_root('tests', 'hello_world.c')])
@no_fastcomp('depends on wasm backend .a linking')
def test_jsmath(self):
run_process([EMCC, path_from_root('tests', 'other', 'jsmath.cpp'), '-Os', '-o', 'normal.js', '--closure', '0'])
normal_js_size = os.path.getsize('normal.js')
normal_wasm_size = os.path.getsize('normal.wasm')
run_process([EMCC, path_from_root('tests', 'other', 'jsmath.cpp'), '-Os', '-o', 'jsmath.js', '-s', 'JS_MATH', '--closure', '0'])
jsmath_js_size = os.path.getsize('jsmath.js')
jsmath_wasm_size = os.path.getsize('jsmath.wasm')
# js math increases JS size, but decreases wasm, and wins overall
# it would win more with closure, but there's no point in making the test slower
self.assertLess(normal_js_size, jsmath_js_size)
self.assertLess(jsmath_wasm_size, normal_wasm_size)
self.assertLess(jsmath_js_size + jsmath_wasm_size, 0.90 * (normal_js_size + normal_wasm_size))
# js math has almost identical output, but misses some corner cases (4 out of 34)
normal = run_js('normal.js').splitlines()
jsmath = run_js('jsmath.js').splitlines()
assert len(normal) == len(jsmath)
diff = 0
for i in range(len(normal)):
if normal[i] != jsmath[i]:
diff += 1
self.assertEqual(diff, 4)
def test_strict_mode_hello_world(self):
# Verify that strict mode can be used for simple hello world program both
# via the environment EMCC_STRICT=1 and from the command line `-s STRICT`
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'STRICT=1']
run_process(cmd)
with env_modify({'EMCC_STRICT': '1'}):
self.do_run(open(path_from_root('tests', 'hello_world.c')).read(), 'hello, world!')
def test_legacy_settings(self):
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'SPLIT_MEMORY=0']
# By default warnings are not shown
stderr = run_process(cmd, stderr=PIPE).stderr
self.assertNotContained('warning', stderr)
# Adding -Wlegacy-settings enables the warning
stderr = run_process(cmd + ['-Wlegacy-settings'], stderr=PIPE).stderr
self.assertContained('warning: use of legacy setting: SPLIT_MEMORY', stderr)
self.assertContained('[-Wlegacy-settings]', stderr)
def test_strict_mode_legacy_settings(self):
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'SPLIT_MEMORY=0']
run_process(cmd)
stderr = self.expect_fail(cmd + ['-s', 'STRICT=1'])
self.assertContained('legacy setting used in strict mode: SPLIT_MEMORY', stderr)
with env_modify({'EMCC_STRICT': '1'}):
stderr = self.expect_fail(cmd)
self.assertContained('legacy setting used in strict mode: SPLIT_MEMORY', stderr)
def test_strict_mode_legacy_settings_runtime(self):
# Verify that legacy settings are not accessible at runtime under strict
# mode.
self.set_setting('RETAIN_COMPILER_SETTINGS', 1)
src = r'''\
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("BINARYEN_METHOD: %s\n", (char*)emscripten_get_compiler_setting("BINARYEN_METHOD"));
return 0;
}
'''
self.do_run(src, 'BINARYEN_METHOD: native-wasm')
with env_modify({'EMCC_STRICT': '1'}):
self.do_run(src, 'invalid compiler setting: BINARYEN_METHOD')
self.set_setting('STRICT', 1)
self.do_run(src, 'invalid compiler setting: BINARYEN_METHOD')
def test_renamed_setting(self):
# Verify that renamed settings are available by either name (when not in
# strict mode).
self.set_setting('RETAIN_COMPILER_SETTINGS', 1)
src = r'''\
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("%d %d\n",
emscripten_get_compiler_setting("BINARYEN_ASYNC_COMPILATION"),
emscripten_get_compiler_setting("WASM_ASYNC_COMPILATION"));
return 0;
}
'''
# Setting the new name should set both
self.set_setting('WASM_ASYNC_COMPILATION', 0)
self.do_run(src, '0 0')
self.set_setting('WASM_ASYNC_COMPILATION', 1)
self.do_run(src, '1 1')
self.clear_setting('WASM_ASYNC_COMPILATION')
# Setting the old name should set both
self.set_setting('BINARYEN_ASYNC_COMPILATION', 0)
self.do_run(src, '0 0')
self.set_setting('BINARYEN_ASYNC_COMPILATION', 1)
self.do_run(src, '1 1')
def test_strict_mode_legacy_settings_library(self):
create_test_file('lib.js', r'''
#if SPLIT_MEMORY
#endif
''')
cmd = [EMCC, path_from_root('tests', 'hello_world.c'), '-o', 'out.js', '--js-library', 'lib.js']
run_process(cmd)
self.assertContained('ReferenceError: SPLIT_MEMORY is not defined', self.expect_fail(cmd + ['-s', 'STRICT=1']))
with env_modify({'EMCC_STRICT': '1'}):
self.assertContained('ReferenceError: SPLIT_MEMORY is not defined', self.expect_fail(cmd))
def test_safe_heap_log(self):
self.set_setting('SAFE_HEAP')
self.set_setting('SAFE_HEAP_LOG')
self.set_setting('EXIT_RUNTIME')
src = open(path_from_root('tests', 'hello_world.c')).read()
self.do_run(src, 'SAFE_HEAP load: ')
if not self.is_wasm_backend():
self.set_setting('WASM', 0)
self.do_run(src, 'SAFE_HEAP load: ')
@no_fastcomp('iprintf/__small_printf are wasm-backend-only features')
def test_mini_printfs(self):
def test(code):
with open('src.c', 'w') as f:
f.write('''
#include <stdio.h>
void* unknown_value;
int main() {
%s
}
''' % code)
run_process([EMCC, 'src.c', '-O1'])
return os.path.getsize('a.out.wasm')
i = test('printf("%d", *(int*)unknown_value);')
f = test('printf("%f", *(double*)unknown_value);')
lf = test('printf("%Lf", *(long double*)unknown_value);')
both = test('printf("%d", *(int*)unknown_value); printf("%Lf", *(long double*)unknown_value);')
print(i, f, lf, both)
# iprintf is much smaller than printf with float support
self.assertGreater(i, f - 3400)
self.assertLess(i, f - 3000)
# __small_printf is somewhat smaller than printf with long double support
self.assertGreater(f, lf - 900)
self.assertLess(f, lf - 500)
# both is a little bigger still
self.assertGreater(lf, both - 100)
self.assertLess(lf, both - 50)
@parameterized({
'normal': ([], '''\
0.000051 => -5.123719529365189373493194580078e-05
0.000051 => -5.123719300544352718866300544498e-05
0.000051 => -5.123719300544352718866300544498e-05
'''),
'full_long_double': (['-s', 'PRINTF_LONG_DOUBLE'], '''\
0.000051 => -5.123719529365189373493194580078e-05
0.000051 => -5.123719300544352718866300544498e-05
0.000051 => -5.123719300544352710023893104250e-05
'''),
})
@no_fastcomp('float128 is wasm backend only')
def test_long_double_printing(self, args, expected):
create_test_file('src.cpp', r'''
#include <stdio.h>
int main(void) {
float f = 5.123456789e-5;
double d = 5.123456789e-5;
long double ld = 5.123456789e-5;
printf("%f => %.30e\n", f, f / (f - 1));
printf("%f => %.30e\n", d, d / (d - 1));
printf("%Lf => %.30Le\n", ld, ld / (ld - 1));
}
''')
run_process([EMCC, 'src.cpp'] + args)
self.assertContained(expected, run_js('a.out.js'))
# Tests that passing -s MALLOC=none will not include system malloc() to the build.
def test_malloc_none(self):
stderr = self.expect_fail([EMCC, path_from_root('tests', 'malloc_none.c'), '-s', 'MALLOC=none'])
self.assertContained('undefined symbol: malloc', stderr)
@parameterized({
'c': ['c'],
'cpp': ['cpp'],
})
@no_fastcomp('lsan not supported on fastcomp')
def test_lsan_leaks(self, ext):
self.do_smart_test(path_from_root('tests', 'other', 'test_lsan_leaks.' + ext),
emcc_args=['-fsanitize=leak', '-s', 'ALLOW_MEMORY_GROWTH=1'],
assert_returncode=None, literals=[
'Direct leak of 2048 byte(s) in 1 object(s) allocated from',
'Direct leak of 1337 byte(s) in 1 object(s) allocated from',
'Direct leak of 42 byte(s) in 1 object(s) allocated from',
])
@parameterized({
'c': ['c', [
r'in malloc.*a\.out\.wasm\+0x',
r'(?im)in f (|[/a-z\.]:).*/test_lsan_leaks\.c:6:21$',
r'(?im)in main (|[/a-z\.]:).*/test_lsan_leaks\.c:10:16$',
r'(?im)in main (|[/a-z\.]:).*/test_lsan_leaks\.c:12:3$',
r'(?im)in main (|[/a-z\.]:).*/test_lsan_leaks\.c:13:3$',
]],
'cpp': ['cpp', [
r'in operator new\[\]\(unsigned long\).*a\.out\.wasm\+0x',
r'(?im)in f\(\) (|[/a-z\.]:).*/test_lsan_leaks\.cpp:4:21$',
r'(?im)in main (|[/a-z\.]:).*/test_lsan_leaks\.cpp:8:16$',
r'(?im)in main (|[/a-z\.]:).*/test_lsan_leaks\.cpp:10:3$',
r'(?im)in main (|[/a-z\.]:).*/test_lsan_leaks\.cpp:11:3$',
]],
})
@no_fastcomp('lsan not supported on fastcomp')
def test_lsan_stack_trace(self, ext, regexes):
self.do_smart_test(path_from_root('tests', 'other', 'test_lsan_leaks.' + ext),
emcc_args=['-fsanitize=leak', '-s', 'ALLOW_MEMORY_GROWTH=1', '-g4'],
assert_returncode=None, literals=[
'Direct leak of 2048 byte(s) in 1 object(s) allocated from',
'Direct leak of 1337 byte(s) in 1 object(s) allocated from',
'Direct leak of 42 byte(s) in 1 object(s) allocated from',
], regexes=regexes)
@parameterized({
'c': ['c'],
'cpp': ['cpp'],
})
@no_fastcomp('lsan not supported on fastcomp')
def test_lsan_no_leak(self, ext):
self.do_smart_test(path_from_root('tests', 'other', 'test_lsan_no_leak.' + ext),
emcc_args=['-fsanitize=leak', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'ASSERTIONS=0'],
regexes=[r'^\s*$'])
@no_fastcomp('lsan not supported on fastcomp')
def test_lsan_no_stack_trace(self):
self.do_smart_test(path_from_root('tests', 'other', 'test_lsan_leaks.c'),
emcc_args=['-fsanitize=leak', '-s', 'ALLOW_MEMORY_GROWTH=1', '-DDISABLE_CONTEXT'],
assert_returncode=None, literals=[
'Direct leak of 3427 byte(s) in 3 object(s) allocated from:',
'SUMMARY: LeakSanitizer: 3427 byte(s) leaked in 3 allocation(s).',
])
@no_fastcomp('asan is not supported on fastcomp')
def test_asan_null_deref(self):
self.do_smart_test(path_from_root('tests', 'other', 'test_asan_null_deref.c'),
emcc_args=['-fsanitize=address', '-s', 'ALLOW_MEMORY_GROWTH=1'],
assert_returncode=None, literals=[
'AddressSanitizer: null-pointer-dereference on address',
])
@no_fastcomp('asan is not supported on fastcomp')
def test_asan_no_stack_trace(self):
self.do_smart_test(path_from_root('tests', 'other', 'test_lsan_leaks.c'),
emcc_args=['-fsanitize=address', '-s', 'ALLOW_MEMORY_GROWTH=1', '-DDISABLE_CONTEXT', '-s', 'EXIT_RUNTIME'],
assert_returncode=None, literals=[
'Direct leak of 3427 byte(s) in 3 object(s) allocated from:',
'SUMMARY: AddressSanitizer: 3427 byte(s) leaked in 3 allocation(s).',
])
@no_fastcomp('asan is not supported on fastcomp')
def test_asan_pthread_stubs(self):
self.do_smart_test(path_from_root('tests', 'other', 'test_asan_pthread_stubs.c'), emcc_args=['-fsanitize=address', '-s', 'ALLOW_MEMORY_GROWTH=1'])
@parameterized({
'async': ['-s', 'WASM_ASYNC_COMPILATION=1'],
'sync': ['-s', 'WASM_ASYNC_COMPILATION=0'],
})
@no_fastcomp('offset converter is not supported on fastcomp')
def test_offset_converter(self, *args):
self.do_smart_test(path_from_root('tests', 'other', 'test_offset_converter.c'),
emcc_args=['-s', 'USE_OFFSET_CONVERTER', '-g4'] + list(args), literals=['ok'])
@no_windows('ptys and select are not available on windows')
@no_fastcomp('fastcomp clang detects colors differently')
def test_build_error_color(self):
create_test_file('src.c', 'int main() {')
returncode, output = self.run_on_pty([EMCC, 'src.c'])
self.assertNotEqual(returncode, 0)
self.assertIn(b"\x1b[1msrc.c:1:13: \x1b[0m\x1b[0;1;31merror: \x1b[0m\x1b[1mexpected '}'\x1b[0m", output)
self.assertIn(b"\x1b[31merror: ", output)
@parameterized({
'fno_diagnostics_color': ['-fno-diagnostics-color'],
'fdiagnostics_color_never': ['-fdiagnostics-color=never'],
})
@no_windows('ptys and select are not available on windows')
def test_pty_no_color(self, flag):
with open('src.c', 'w') as f:
f.write('int main() {')
returncode, output = self.run_on_pty([EMCC, flag, 'src.c'])
self.assertNotEqual(returncode, 0)
self.assertNotIn(b'\x1b', output)
@no_fastcomp('sanitizers are not supported on fastcomp')
def test_sanitizer_color(self):
create_test_file('src.c', '''
#include <emscripten.h>
int main() {
int *p = 0, q;
EM_ASM({ Module.printWithColors = true; });
q = *p;
}
''')
run_process([EMCC, '-fsanitize=null', 'src.c'])
output = run_js('a.out.js', stderr=PIPE, full_output=True)
self.assertIn('\x1b[1msrc.c', output)
@no_fastcomp('main param optimizations are upstream-only')
def test_main_reads_params(self):
create_test_file('no.c', '''
int main() {
return 42;
}
''')
run_process([EMCC, 'no.c', '-O3', '-o', 'no.js'])
no = os.path.getsize('no.js')
create_test_file('yes.c', '''
int main(int argc, char **argv) {
return argc;
}
''')
run_process([EMCC, 'yes.c', '-O3', '-o', 'yes.js'])
yes = os.path.getsize('yes.js')
# not having to set up argc/argv allows us to avoid including a
# significant amount of JS for string support (which is not needed
# otherwise in such a trivial program).
self.assertLess(no, 0.95 * yes)
@no_fastcomp('not optimized in fastcomp')
def test_INCOMING_MODULE_JS_API(self):
def test(args):
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-O3', '--closure', '1'] + args)
for engine in JS_ENGINES:
self.assertContained('hello, world!', run_js('a.out.js', engine=engine))
with open('a.out.js') as f:
# ignore \r which on windows can increase the size
return len(f.read().replace('\r', ''))
normal = test([])
changed = test(['-s', 'INCOMING_MODULE_JS_API=[]'])
print('sizes', normal, changed)
# Changing this option to [] should decrease code size.
self.assertLess(changed, normal)
# Check an absolute code size as well, with some slack.
self.assertLess(abs(changed - 5795), 150)
def test_llvm_includes(self):
self.build('#include <stdatomic.h>', self.get_dir(), 'atomics.c')
def test_mmap_and_munmap(self):
emcc_args = []
for f in ['data_ro.dat', 'data_rw.dat']:
create_test_file(f, 'Test file')
emcc_args.extend(['--embed-file', f])
self.do_other_test('mmap_and_munmap', emcc_args)
def test_mmap_and_munmap_anonymous(self):
self.do_other_test('mmap_and_munmap_anonymous', emcc_args=['-s', 'NO_FILESYSTEM'])
def test_mmap_memorygrowth(self):
self.do_other_test('mmap_memorygrowth', ['-s', 'ALLOW_MEMORY_GROWTH=1'])
def test_files_and_module_assignment(self):
# A pre-js can set Module to a new object or otherwise undo file preloading/
# embedding changes to Module.preRun. We show an error to avoid confusion.
create_test_file('pre.js', 'Module = {};')
create_test_file('src.cpp', r'''
#include <stdio.h>
int main() {
printf("file exists: %d\n", !!fopen("src.cpp", "rb"));
}
''')
run_process([EMCC, 'src.cpp', '--pre-js', 'pre.js', '--embed-file', 'src.cpp'])
result = run_js('a.out.js', assert_returncode=None, stderr=PIPE, full_output=True)
self.assertContained('Module.preRun should exist because file support used it; did a pre-js delete it?', result)
def test_error(pre):
create_test_file('pre.js', pre)
run_process([EMCC, 'src.cpp', '--pre-js', 'pre.js', '--embed-file', 'src.cpp'])
result = run_js('a.out.js', assert_returncode=None, stderr=PIPE, full_output=True)
self.assertContained('All preRun tasks that exist before user pre-js code should remain after; did you replace Module or modify Module.preRun?', result)
# error if the user replaces Module or Module.preRun
test_error('Module = { preRun: [] };')
test_error('Module.preRun = [];')
@no_fastcomp('fastcomp defines this in the backend itself, so it is always on there')
def test_EMSCRIPTEN_and_STRICT(self):
# __EMSCRIPTEN__ is the proper define; we support EMSCRIPTEN for legacy
# code, unless STRICT is enabled.
create_test_file('src.c', '''
#ifndef EMSCRIPTEN
#error "not defined"
#endif
''')
run_process([EMCC, 'src.c', '-c'])
self.expect_fail([EMCC, 'src.c', '-s', 'STRICT', '-c'])
def test_exception_settings(self):
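# Exercise every combination of exception throwing support, catching support, and -O0/-O1.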
for catching, throwing, opts in itertools.product([0, 1], repeat=3):
cmd = [EMCC, path_from_root('tests', 'other', 'exceptions_modes_symbols_defined.cpp'), '-s', 'DISABLE_EXCEPTION_THROWING=%d' % (1 - throwing), '-s', 'DISABLE_EXCEPTION_CATCHING=%d' % (1 - catching), '-O%d' % opts]
print(cmd)
if not throwing and not catching:
self.assertContained('DISABLE_EXCEPTION_THROWING was set (likely due to -fno-exceptions), which means no C++ exception throwing support code is linked in, but such support is required', self.expect_fail(cmd))
elif not throwing and catching:
self.assertContained('DISABLE_EXCEPTION_THROWING was set (probably from -fno-exceptions) but is not compatible with enabling exception catching (DISABLE_EXCEPTION_CATCHING=0)', self.expect_fail(cmd))
else:
run_process(cmd)
@no_fastcomp('new clang feature')
def test_fignore_exceptions(self):
# the new clang flag -fignore-exceptions basically is the same as -s DISABLE_EXCEPTION_CATCHING=1,
# that is, it allows throwing, but emits no support code for catching.
run_process([EMCC, path_from_root('tests', 'other', 'exceptions_modes_symbols_defined.cpp'), '-s', 'DISABLE_EXCEPTION_CATCHING=0'])
enable_size = os.path.getsize('a.out.wasm')
run_process([EMCC, path_from_root('tests', 'other', 'exceptions_modes_symbols_defined.cpp'), '-s', 'DISABLE_EXCEPTION_CATCHING=1'])
disable_size = os.path.getsize('a.out.wasm')
run_process([EMCC, path_from_root('tests', 'other', 'exceptions_modes_symbols_defined.cpp'), '-s', '-fignore-exceptions'])
ignore_size = os.path.getsize('a.out.wasm')
self.assertGreater(enable_size, disable_size)
self.assertEqual(disable_size, ignore_size)
@no_fastcomp('assumes wasm object files')
def test_f_exception(self):
create_test_file('src.cpp', r'''
#include <stdio.h>
int main () {
try {
throw 42;
} catch (int e) {
printf("CAUGHT: %d\n", e);
}
return 0;
}
''')
for compile_flags, link_flags, expect_caught in [
# exceptions are off by default
([], [], False),
# enabling exceptions at link and compile works
(['-fexceptions'], ['-fexceptions'], True),
# just compile isn't enough as the JS runtime lacks support
(['-fexceptions'], [], False),
# just link isn't enough as codegen didn't emit exceptions support
([], ['-fexceptions'], False),
]:
print(compile_flags, link_flags, expect_caught)
run_process([EMCC, 'src.cpp', '-c', '-o', 'src.o'] + compile_flags)
run_process([EMCC, 'src.o'] + link_flags)
result = run_js('a.out.js', assert_returncode=None, stderr=PIPE)
self.assertContainedIf('CAUGHT', result, expect_caught)
def test_assertions_on_internal_api_changes(self):
create_test_file('src.c', r'''
#include <emscripten.h>
int main(int argc, char **argv) {
EM_ASM({
try {
Module['read'];
out('it should not be there');
} catch(e) {
out('error: ' + e);
}
});
}
''')
run_process([EMCC, 'src.c', '-s', 'ASSERTIONS'])
self.assertContained('Module.read has been replaced with plain read', run_js('a.out.js'))
def test_assertions_on_incoming_module_api_changes(self):
create_test_file('pre.js', r'''
var Module = {
read: function() {}
}
''')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'ASSERTIONS', '--pre-js', 'pre.js'])
self.assertContained('Module.read option was removed', run_js('a.out.js', assert_returncode=None, stderr=PIPE))
def test_assertions_on_outgoing_module_api_changes(self):
create_test_file('src.cpp', r'''
#include <emscripten.h>
int main() {
EM_ASM({
console.log();
function check(name) {
try {
Module[name];
console.log("success: " + name);
} catch(e) {
}
}
check("read");
// TODO check("setWindowTitle");
check("wasmBinary");
check("arguments");
});
}
''')
run_process([EMCC, 'src.cpp', '-s', 'ASSERTIONS'])
self.assertContained('''
Module.read has been replaced with plain read_ (the initial value can be provided on Module, but after startup the value is only looked for on a local variable of that name)
Module.wasmBinary has been replaced with plain wasmBinary (the initial value can be provided on Module, but after startup the value is only looked for on a local variable of that name)
Module.arguments has been replaced with plain arguments_ (the initial value can be provided on Module, but after startup the value is only looked for on a local variable of that name)
''', run_js('a.out.js', assert_returncode=None, stderr=PIPE))
def test_assertions_on_ready_promise(self):
# check that when assertions are on we give useful error messages for
# mistakenly thinking the Promise is an instance. I.e., once you could do
# Module()._main to get an instance and the main function, but after
# the breaking change in #10697 Module() now returns a promise, and to get
# the instance you must use .then() to get a callback with the instance.
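# (i.e. the working pattern is along the lines of Module().then(function(instance) { instance._main(); }))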
create_test_file('test.js', r'''
try {
Module()._main;
} catch(e) {
console.log(e);
}
try {
Module().onRuntimeInitialized = 42;
} catch(e) {
console.log(e);
}
''')
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'MODULARIZE', '-s', 'ASSERTIONS', '--extern-post-js', 'test.js'])
out = run_js('a.out.js')
self.assertContained('You are getting _main on the Promise object, instead of the instance. Use .then() to get called back with the instance, see the MODULARIZE docs in src/settings.js', out)
self.assertContained('You are setting onRuntimeInitialized on the Promise object, instead of the instance. Use .then() to get called back with the instance, see the MODULARIZE docs in src/settings.js', out)
def test_em_asm_duplicate_strings(self):
# We had a regression where two different EM_ASM strings from two different
# object files were de-duplicated in wasm-emscripten-finalize. This used to
# work when we used a zero-based index to store the JS strings, but once we
# switched to absolute addresses the string needs to exist twice in the JS
# file.
create_test_file('foo.c', '''
#include <emscripten.h>
void foo() {
EM_ASM({ console.log('Hello, world!'); });
}
''')
create_test_file('main.c', '''
#include <emscripten.h>
void foo();
int main() {
foo();
EM_ASM({ console.log('Hello, world!'); });
return 0;
}
''')
run_process([EMCC, '-c', 'foo.c'])
run_process([EMCC, '-c', 'main.c'])
run_process([EMCC, 'foo.o', 'main.o'])
self.assertContained('Hello, world!\nHello, world!\n', run_js('a.out.js'))
def test_em_asm_strict_c(self):
create_test_file('src.c', '''
#include <emscripten/em_asm.h>
int main() {
EM_ASM({ console.log('Hello, world!'); });
}
''')
result = run_process([EMCC, '-std=c11', 'src.c'], stderr=PIPE, check=False)
self.assertNotEqual(result.returncode, 0)
self.assertIn('EM_ASM does not work in -std=c* modes, use -std=gnu* modes instead', result.stderr)
def test_boost_graph(self):
self.do_smart_test(path_from_root('tests', 'test_boost_graph.cpp'),
emcc_args=['-s', 'USE_BOOST_HEADERS=1'],
assert_returncode=0)
@no_fastcomp('EM_ASM and setjmp works fine on fastcomp')
def test_setjmp_em_asm(self):
create_test_file('src.c', '''
#include <emscripten.h>
#include <setjmp.h>
int main() {
jmp_buf buf;
setjmp(buf);
EM_ASM({
console.log("hello world");
});
}
''')
result = run_process([EMCC, 'src.c'], stderr=PIPE, check=False)
self.assertNotEqual(result.returncode, 0)
self.assertIn('Cannot use EM_ASM* alongside setjmp/longjmp', result.stderr)
self.assertIn('Please consider using EM_JS, or move the EM_ASM into another function.', result.stderr)
def test_missing_stdlibs(self):
# Certain standard libraries are expected to be useable via -l flags but
# don't actually exist in our standard library path. Make sure we don't
# error out when linking with these flags.
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-lm', '-ldl', '-lrt', '-lpthread'])
@no_fastcomp('lld-specific')
def test_supported_linker_flags(self):
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,--print-map'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `--print-map`', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Xlinker', '--print-map'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `--print-map`', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,-rpath=foo'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `-rpath=foo`', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,-rpath-link,foo'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `-rpath-link`', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'),
'-Wl,--no-check-features,-mllvm,-debug'], stderr=PIPE).stderr
self.assertNotContained('warning: ignoring unsupported linker flag', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,-allow-shlib-undefined'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `-allow-shlib-undefined`', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,--allow-shlib-undefined'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `--allow-shlib-undefined`', out)
out = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,-version-script,foo'], stderr=PIPE).stderr
self.assertContained('warning: ignoring unsupported linker flag: `-version-script`', out)
@no_fastcomp('lld-specific')
def test_linker_flags_pass_through(self):
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Wl,--waka'])
self.assertContained('wasm-ld: error: unknown argument: --waka', err)
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Xlinker', '--waka'])
self.assertContained('wasm-ld: error: unknown argument: --waka', err)
def test_linker_flags_unused(self):
err = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-c', '-lbar'], stderr=PIPE).stderr
self.assertContained("warning: argument unused during compilation: '-lbar' [-Wunused-command-line-argument]", err)
def test_non_wasm_without_wasm_in_vm(self):
# Test that our non-wasm output does not depend on wasm support in the vm.
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-s', 'WASM=0'])
with open('a.out.js') as f:
js = f.read()
with open('a.out.js', 'w') as f:
f.write('var WebAssembly = null;\n' + js)
for engine in JS_ENGINES:
self.assertContained('hello, world!', run_js('a.out.js', engine=engine))
def test_compile_only_with_object_extension(self):
# Emscripten supports compiling to an object file when the output has an
# object extension.
# Most compilers require the `-c` to be explicit.
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-c', '-o', 'hello1.o'])
err = run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-o', 'hello2.o'], stderr=PIPE).stderr
self.assertContained('warning: Assuming object file output in the absence of `-c`', err)
self.assertBinaryEqual('hello1.o', 'hello2.o')
def test_empty_output_extension(self):
# Default to JS output when no extension is present
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-Werror', '-o', 'hello'])
self.assertContained('hello, world!', run_js('hello'))
def test_backwards_deps_in_archive(self):
# Test that JS dependencies from deps_info.json work for code linked via
# static archives using -l<name>
run_process([EMCC, path_from_root('tests', 'sockets', 'test_gethostbyname.c'), '-o', 'a.o'])
run_process([LLVM_AR, 'cr', 'liba.a', 'a.o'])
create_test_file('empty.c', 'static int foo = 0;')
run_process([EMCC, 'empty.c', '-la', '-L.'])
self.assertContained('success', run_js('a.out.js'))
def test_warning_flags(self):
create_test_file('not_object.bc', 'some text')
run_process([EMCC, '-c', '-o', 'hello.o', path_from_root('tests', 'hello_world.c')])
cmd = [EMCC, 'hello.o', 'not_object.bc', '-o', 'a.wasm']
# warning that is enabled by default
stderr = run_process(cmd, stderr=PIPE).stderr
self.assertContained('emcc: warning: not_object.bc is not a valid input file [-Winvalid-input]', stderr)
# -w to suppress warnings
stderr = run_process(cmd + ['-w'], stderr=PIPE).stderr
self.assertNotContained('warning', stderr)
# -Wno-invalid-input to suppress just this one warning
stderr = run_process(cmd + ['-Wno-invalid-input'], stderr=PIPE).stderr
self.assertNotContained('warning', stderr)
# with -Werror should fail
stderr = self.expect_fail(cmd + ['-Werror'])
self.assertContained('emcc: error: not_object.bc is not a valid input file [-Winvalid-input] [-Werror]', stderr)
# with -Werror + -Wno-error=<type> should only warn
stderr = run_process(cmd + ['-Werror', '-Wno-error=invalid-input'], stderr=PIPE).stderr
self.assertContained('emcc: warning: not_object.bc is not a valid input file [-Winvalid-input]', stderr)
# check that `-Werror=foo` also enables foo
stderr = self.expect_fail(cmd + ['-Werror=legacy-settings', '-s', 'TOTAL_MEMORY=1'])
self.assertContained('error: use of legacy setting: TOTAL_MEMORY (setting renamed to INITIAL_MEMORY) [-Wlegacy-settings] [-Werror]', stderr)
def test_emranlib(self):
create_test_file('foo.c', 'int foo = 1;')
create_test_file('bar.c', 'int bar = 2;')
run_process([EMCC, '-c', 'foo.c', 'bar.c'])
# Create a library with no archive map
run_process([EMAR, 'crS', 'liba.a', 'foo.o', 'bar.o'])
output = run_process([shared.LLVM_NM, '--print-armap', 'liba.a'], stdout=PIPE).stdout
self.assertNotContained('Archive map', output)
# Add an archive map
run_process([EMRANLIB, 'liba.a'])
output = run_process([shared.LLVM_NM, '--print-armap', 'liba.a'], stdout=PIPE).stdout
self.assertContained('Archive map', output)
def test_pthread_stub(self):
# Verify that programs containing pthread code can still be compiled even
# without enabling threads. This is possible because we link in
# libpthread_stub.a
create_test_file('pthread.c', '''
#include <pthread.h>
int main() {
pthread_atfork(NULL, NULL, NULL);
return 0;
}
''')
run_process([EMCC, 'pthread.c'])
def test_stdin_preprocess(self):
create_test_file('temp.h', '#include <string>')
outputStdin = run_process([EMCC, '-x', 'c++', '-dM', '-E', '-'], input="#include <string>", stdout=PIPE).stdout
outputFile = run_process([EMCC, '-x', 'c++', '-dM', '-E', 'temp.h'], stdout=PIPE).stdout
self.assertTextDataIdentical(outputStdin, outputFile)
def test_stdin_compile_only(self):
# Should fail without -x lang specifier
with open(path_from_root('tests', 'hello_world.cpp')) as f:
err = self.expect_fail([EMCC, '-c', '-'], input=f.read())
self.assertContained('error: -E or -x required when input is from standard input', err)
with open(path_from_root('tests', 'hello_world.cpp')) as f:
run_process([EMCC, '-c', '-o', 'out.o', '-x', 'c++', '-'], input=f.read())
self.assertExists('out.o')
# Same again but without an explicit output filename
with open(path_from_root('tests', 'hello_world.cpp')) as f:
run_process([EMCC, '-c', '-x', 'c++', '-'], input=f.read())
self.assertExists('-.o')
def test_stdin_compile_and_link(self):
with open(path_from_root('tests', 'hello_world.cpp')) as f:
run_process([EMCC, '-x', 'c++', '-'], input=f.read())
self.assertContained('hello, world!', run_js('a.out.js'))
def is_object_file(self, filename):
# Check the given file against the object format of the current backend.
if self.is_wasm_backend():
return building.is_wasm(filename)
else:
return building.is_bitcode(filename)
def test_stdout_link(self):
# linking to stdout `-` doesn't work; we have no way to pass such an output
# filename through post-link tools such as binaryen.
err = self.expect_fail([EMCC, '-o', '-', path_from_root('tests', 'hello_world.cpp')])
self.assertContained('invalid output filename: `-`', err)
self.assertNotExists('-')
err = self.expect_fail([EMCC, '-o', '-foo', path_from_root('tests', 'hello_world.cpp')])
self.assertContained('invalid output filename: `-foo`', err)
self.assertNotExists('-foo')
def test_output_to_nowhere(self):
nowhere = 'NULL' if WINDOWS else '/dev/null'
run_process([EMCC, path_from_root('tests', 'hello_world.cpp'), '-o', nowhere, '-c'])
# Test that passing -s MIN_X_VERSION=-1 on the command line results in browser X not being supported at all.
# I.e. -s MIN_X_VERSION=-1 is equivalent to -s MIN_X_VERSION=Infinity.
def test_drop_support_for_browser(self):
# Test that -1 means "not supported"
run_process([EMCC, path_from_root('tests', 'test_html5.c'), '-s', 'MIN_IE_VERSION=-1'])
self.assertContained('allowsDeferredCalls: true', open('a.out.js').read())
self.assertNotContained('allowsDeferredCalls: JSEvents.isInternetExplorer()', open('a.out.js').read())
def test_errno_type(self):
create_test_file('errno_type.c', '''
#include <errno.h>
// Use of these constants in C preprocessor comparisons should work.
#if EPERM > 0
#define DAV1D_ERR(e) (-(e))
#else
#define DAV1D_ERR(e) (e)
#endif
''')
run_process([EMCC, 'errno_type.c'])
@no_fastcomp("uses standalone mode")
def test_standalone_syscalls(self):
run_process([EMCC, path_from_root('tests', 'other', 'standalone_syscalls', 'test.cpp'), '-o', 'test.wasm'])
with open(path_from_root('tests', 'other', 'standalone_syscalls', 'test.out')) as f:
expected = f.read()
for engine in WASM_ENGINES:
self.assertContained(expected, run_js('test.wasm', engine))
@no_windows('TODO: fix setjmp.h on clang on windows on ci')
@no_fastcomp("uses standalone mode")
def test_wasm2c_reactor(self):
# test compiling an unsafe library using wasm2c, then using it from a
# main program. this shows it is easy to use wasm2c as a sandboxing
# mechanism.
# first compile the library with emcc, getting a .c and .h
run_process([EMCC,
path_from_root('tests', 'other', 'wasm2c', 'unsafe-library.c'),
'-O3', '-o', 'lib.wasm', '-s', 'WASM2C', '--no-entry'])
# compile that .c to a native object
run_process([CLANG_CC, 'lib.wasm.c', '-c', '-O3', '-o', 'lib.o'])
# compile the main program natively normally, and link with the
# unsafe library
run_process([CLANG_CC,
path_from_root('tests', 'other', 'wasm2c', 'my-code.c'),
'-O3', 'lib.o', '-o', 'program.exe'])
output = run_process([os.path.abspath('program.exe')], stdout=PIPE).stdout
with open(path_from_root('tests', 'other', 'wasm2c', 'output.txt')) as f:
self.assertEqual(output, f.read())
@parameterized({
'wasm2js': (['-s', 'WASM=0'], ''),
'modularize': (['-s', 'MODULARIZE'], 'Module()'),
})
@no_fastcomp('wasm2js only')
def test_promise_polyfill(self, constant_args, extern_post_js):
def test(args):
# legacy browsers may lack Promise, which wasm2js depends on. see what
# happens when we kill the global Promise function.
create_test_file('extern-post.js', extern_post_js)
run_process([EMCC, path_from_root('tests', 'hello_world.cpp')] + constant_args + args + ['--extern-post-js', 'extern-post.js'])
with open('a.out.js') as f:
js = f.read()
with open('a.out.js', 'w') as f:
f.write('Promise = undefined;\n' + js)
return run_js('a.out.js', stderr=PIPE, full_output=True, assert_returncode=None)
# we fail without legacy support
self.assertNotContained('hello, world!', test([]))
# but work with it
self.assertContained('hello, world!', test(['-s', 'LEGACY_VM_SUPPORT']))
# Compile-test for -s USE_WEBGPU=1 and library_webgpu.js.
def test_webgpu_compiletest(self):
for args in [[], ['-s', 'ASSERTIONS=1']]:
run_process([EMCC, path_from_root('tests', 'webgpu_dummy.cpp'), '-s', 'USE_WEBGPU=1'] + args)
@no_fastcomp('lld only')
def test_signature_mismatch(self):
create_test_file('a.c', 'void foo(); int main() { foo(); return 0; }')
create_test_file('b.c', 'int foo() { return 1; }')
stderr = run_process([EMCC, 'a.c', 'b.c'], stderr=PIPE).stderr
self.assertContained('function signature mismatch: foo', stderr)
self.expect_fail([EMCC, '-Wl,--fatal-warnings', 'a.c', 'b.c'])
self.expect_fail([EMCC, '-s', 'STRICT', 'a.c', 'b.c'])
@no_fastcomp('lld only')
def test_lld_report_undefined(self):
create_test_file('main.c', 'void foo(); int main() { foo(); return 0; }')
stderr = self.expect_fail([EMCC, '-s', 'LLD_REPORT_UNDEFINED', 'main.c'])
self.assertContained('wasm-ld: error:', stderr)
self.assertContained('main_0.o: undefined symbol: foo', stderr)
@no_fastcomp('wasm backend only')
def test_4GB(self):
stderr = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '-s', 'INITIAL_MEMORY=2GB'])
self.assertContained('INITIAL_MEMORY must be less than 2GB due to current spec limitations', stderr)
# Verifies that warning messages that Closure outputs are recorded to console
def test_closure_warnings(self):
proc = run_process([EMCC, path_from_root('tests', 'test_closure_warning.c'), '-O3', '--closure', '1', '-s', 'CLOSURE_WARNINGS=quiet'], stderr=PIPE)
self.assertNotContained('WARNING', proc.stderr)
proc = run_process([EMCC, path_from_root('tests', 'test_closure_warning.c'), '-O3', '--closure', '1', '-s', 'CLOSURE_WARNINGS=warn'], stderr=PIPE)
self.assertContained('WARNING - [JSC_REFERENCE_BEFORE_DECLARE] Variable referenced before declaration', proc.stderr)
self.expect_fail([EMCC, path_from_root('tests', 'test_closure_warning.c'), '-O3', '--closure', '1', '-s', 'CLOSURE_WARNINGS=error'])
@no_fastcomp('test wasm object files')
def test_bitcode_input(self):
# Verify that bitcode files are accepted as input
create_test_file('main.c', 'void foo(); int main() { return 0; }')
run_process([EMCC, '-emit-llvm', '-c', '-o', 'main.bc', 'main.c'])
self.assertTrue(building.is_bitcode('main.bc'))
run_process([EMCC, '-c', '-o', 'main.o', 'main.bc'])
self.assertTrue(building.is_wasm('main.o'))
def test_nostdlib(self):
# First ensure all the system libs are built
run_process([EMCC, path_from_root('tests', 'unistd', 'close.c')])
self.assertContained('undefined symbol:', self.expect_fail([EMCC, path_from_root('tests', 'unistd', 'close.c'), '-nostdlib']))
self.assertContained('undefined symbol:', self.expect_fail([EMCC, path_from_root('tests', 'unistd', 'close.c'), '-nodefaultlibs']))
# Build again but with explicit system libraries
libs = ['-lc', '-lcompiler_rt']
if self.is_wasm_backend():
libs.append('-lc_rt_wasm')
run_process([EMCC, path_from_root('tests', 'unistd', 'close.c'), '-nostdlib'] + libs)
run_process([EMCC, path_from_root('tests', 'unistd', 'close.c'), '-nodefaultlibs'] + libs)
def test_argument_match(self):
# Verify that emcc arguments match precisely. We had a bug where only the prefix
# was matched
run_process([EMCC, path_from_root('tests', 'hello_world.c'), '--js-opts', '10'])
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '--js-optsXX'])
self.assertContained("error: unsupported option '--js-optsXX'", err)
def test_missing_argument(self):
err = self.expect_fail([EMCC, path_from_root('tests', 'hello_world.c'), '--js-opts'])
self.assertContained("error: option '--js-opts' requires an argument", err)
def test_default_to_cxx(self):
create_test_file('foo.h', '#include <string.h>')
create_test_file('cxxfoo.h', '#include <string>')
# The default behaviour is to default to C++, which means the C++ header can be compiled even
# with emcc.
run_process([EMCC, '-c', 'cxxfoo.h'])
# But this means that C flags can't be passed (since we are assuming C++)
err = self.expect_fail([EMCC, '-std=gnu11', '-c', 'foo.h'])
self.assertContained("'-std=gnu11' not allowed with 'C++'", err)
# If we disable DEFAULT_TO_CXX the emcc can be used with cflags, but can't be used to build
# C++ headers
run_process([EMCC, '-std=gnu11', '-c', 'foo.h', '-s', 'DEFAULT_TO_CXX=0'])
err = self.expect_fail([EMCC, '-c', 'cxxfoo.h', '-s', 'DEFAULT_TO_CXX=0'])
self.assertContained("'string' file not found", err)
# Using em++ should always work for C++ headers
run_process([EMXX, '-c', 'cxxfoo.h', '-s', 'DEFAULT_TO_CXX=0'])
# Or using emcc with `-x c++`
run_process([EMCC, '-c', 'cxxfoo.h', '-s', 'DEFAULT_TO_CXX=0', '-x', 'c++-header'])
@parameterized({
'': ([],),
'minimal': (['-s', 'MINIMAL_RUNTIME'],),
})
def test_support_errno(self, args):
self.emcc_args += args
src = path_from_root('tests', 'core', 'test_support_errno.c')
output = path_from_root('tests', 'core', 'test_support_errno.out')
self.do_run_from_file(src, output)
size_default = os.path.getsize('src.c.o.js')
# Run the same test again but with SUPPORT_ERRNO disabled. This time we don't expect errno
# to be set after the failing syscall.
self.set_setting('SUPPORT_ERRNO', 0)
output = path_from_root('tests', 'core', 'test_support_errno_disabled.out')
self.do_run_from_file(src, output)
# Verify the JS output was smaller
self.assertLess(os.path.getsize('src.c.o.js'), size_default)
@no_fastcomp('no .s file support')
def test_assembly(self):
run_process([EMCC, '-c', path_from_root('tests', 'other', 'test_asm.s'), '-o', 'foo.o'])
src = path_from_root('tests', 'other', 'test_asm.c')
output = path_from_root('tests', 'other', 'test_asm.out')
self.emcc_args.append('foo.o')
self.do_run_from_file(src, output)
@no_fastcomp('no .s file support')
def test_assembly_preprocessed(self):
run_process([EMCC, '-c', path_from_root('tests', 'other', 'test_asm_cpp.S'), '-o', 'foo.o'])
src = path_from_root('tests', 'other', 'test_asm.c')
output = path_from_root('tests', 'other', 'test_asm.out')
self.emcc_args.append('foo.o')
self.do_run_from_file(src, output)
def test_export_global_address(self):
src = path_from_root('tests', 'other', 'test_export_global_address.c')
output = path_from_root('tests', 'other', 'test_export_global_address.out')
self.do_run_from_file(src, output)
@no_fastcomp('wasm-ld only')
def test_linker_version(self):
out = run_process([EMCC, '-Wl,--version'], stdout=PIPE).stdout
self.assertContained('LLD ', out)
# Tests that if a JS library function is missing, the linker will print out which function
# depended on the missing function.
def test_chained_js_error_diagnostics(self):
err = self.expect_fail([EMCC, path_from_root('tests', 'test_chained_js_error_diagnostics.c'), '--js-library', path_from_root('tests', 'test_chained_js_error_diagnostics.js')])
self.assertContained("error: undefined symbol: nonexistent_function (referenced by bar__deps: ['nonexistent_function'], referenced by foo__deps: ['bar'], referenced by top-level compiled C/C++ code)", err)
def test_xclang_flag(self):
create_test_file('foo.h', ' ')
run_process([EMCC, '-c', '-o', 'out.o', '-Xclang', '-include', '-Xclang', 'foo.h', path_from_root('tests', 'hello_world.c')])
def test_emcc_size_parsing(self):
create_test_file('foo.h', ' ')
err = self.expect_fail([EMCC, '-s', 'TOTAL_MEMORY=X'])
self.assertContained('error: invalid byte size `X`. Valid suffixes are: kb, mb, gb, tb', err)
err = self.expect_fail([EMCC, '-s', 'TOTAL_MEMORY=11PB'])
self.assertContained('error: invalid byte size `11PB`. Valid suffixes are: kb, mb, gb, tb', err)
def test_native_call_before_init(self):
self.set_setting('ASSERTIONS')
self.set_setting('EXPORTED_FUNCTIONS', ['_foo'])
self.add_pre_run('console.log("calling foo"); Module["_foo"]();')
self.build('#include <stdio.h>\nint foo() { puts("foo called"); return 3; }', self.get_dir(), 'foo.c')
err = self.expect_fail(NODE_JS + ['foo.c.o.js'], stdout=PIPE)
self.assertContained('native function `foo` called before runtime initialization', err)
def test_native_call_after_exit(self):
self.set_setting('ASSERTIONS')
self.set_setting('EXIT_RUNTIME')
self.add_on_exit('console.log("calling main again"); Module["_main"]();')
self.build('#include <stdio.h>\nint main() { puts("foo called"); return 0; }', self.get_dir(), 'foo.c')
err = self.expect_fail(NODE_JS + ['foo.c.o.js'], stdout=PIPE)
self.assertContained('native function `main` called after runtime exit', err)
|
py | 1a4d57049b9afb84069331719a50ba8069607ca1 | #!/usr/bin/env python
# Run this test like so:
# vtkpython TestLinePlot.py -D $VTK_DATA_ROOT \
# -B $VTK_DATA_ROOT/Baseline/Charts/
import os
import vtk
import vtk.test.Testing
import math
class TestLinePlot(vtk.test.Testing.vtkTest):
def testLinePlot(self):
"Test if line plots can be built with python"
# Set up a 2D scene, add an XY chart to it
view = vtk.vtkContextView()
view.GetRenderer().SetBackground(1.0,1.0,1.0)
view.GetRenderWindow().SetSize(400,300)
chart = vtk.vtkChartXY()
view.GetScene().AddItem(chart)
# Create a table with some points in it
table = vtk.vtkTable()
arrX = vtk.vtkFloatArray()
arrX.SetName("X Axis")
arrC = vtk.vtkFloatArray()
arrC.SetName("Cosine")
arrS = vtk.vtkFloatArray()
arrS.SetName("Sine")
arrS2 = vtk.vtkFloatArray()
arrS2.SetName("Sine2")
numPoints = 69
inc = 7.5 / (numPoints - 1)
for i in range(0,numPoints):
arrX.InsertNextValue(i*inc)
arrC.InsertNextValue(math.cos(i * inc) + 0.0)
arrS.InsertNextValue(math.sin(i * inc) + 0.0)
arrS2.InsertNextValue(math.sin(i * inc) + 0.5)
table.AddColumn(arrX)
table.AddColumn(arrC)
table.AddColumn(arrS)
table.AddColumn(arrS2)
# Now add the line plots with appropriate colors
line = chart.AddPlot(0)
line.SetInput(table,0,1)
line.SetColor(0,255,0,255)
line.SetWidth(1.0)
line = chart.AddPlot(0)
line.SetInput(table,0,2)
line.SetColor(255,0,0,255)
line.SetWidth(5.0)
line = chart.AddPlot(0)
line.SetInput(table,0,3)
line.SetColor(0,0,255,255)
line.SetWidth(4.0)
view.GetRenderWindow().SetMultiSamples(0)
img_file = "TestLinePlot.png"
vtk.test.Testing.compareImage(view.GetRenderWindow(),vtk.test.Testing.getAbsImagePath(img_file),threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestLinePlot, 'test')])
|
py | 1a4d574badee67d534d44d9e1a3369f15dd87ea3 | from kashgari.corpus import ChineseDailyNerCorpus
from kashgari.embeddings import BERTEmbedding
import kashgari
from kashgari.tasks.labeling import BiLSTM_CRF_Model
"""
pip install tensorflow==1.15.3
pip install 'kashgari>=1.0.0,<2.0.0'
"""
"""
https://eliyar.biz/nlp_chinese_bert_ner/
"""
def main():
# train_x, train_y = ChineseDailyNerCorpus.load_data("train")
# valid_x, valid_y = ChineseDailyNerCorpus.load_data("validate")
# ChineseDailyNerCorpus.__zip_file__name  # no-op attribute access, left commented out
test_x, test_y = ChineseDailyNerCorpus.load_data("test")
# print(f"train data count: {len(train_x)}")
# print(f"validate data count: {len(valid_x)}")
print(f"test data count: {len(test_x)}")
bert_embed = BERTEmbedding(
"models/chinese_L-12_H-768_A-12", task=kashgari.LABELING, sequence_length=100
)
model = BiLSTM_CRF_Model(bert_embed)
# model.fit(
# train_x,
# train_y,
# x_validate=valid_x,
# y_validate=valid_y,
# epochs=1,
# batch_size=512,
# )
model.save("models/ner.h5")
model.evaluate(test_x, test_y)
predictions = model.predict_classes(test_x)
print(predictions)
if "__main__" == __name__:
main()
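# --- Added illustration (not from the original script) ---
# With kashgari 1.x the saved model can be reloaded for inference roughly like
# this (the sample sentence is an assumption):
#   from kashgari.utils import load_model
#   loaded = load_model("models/ner.h5")
#   print(loaded.predict([list("我爱北京天安门")]))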
|
py | 1a4d57e33d7467e63aa3d0c8865754d3c3b21aed | # Generated by Django 4.0.1 on 2022-01-31 21:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tareas', '0004_alter_tarea_expire_date'),
]
operations = [
migrations.AlterField(
model_name='tarea',
name='comments',
field=models.TextField(max_length=250, verbose_name='Comentarios'),
),
migrations.AlterField(
model_name='tarea',
name='description',
field=models.TextField(max_length=50, verbose_name='Descripción'),
),
]
|
py | 1a4d591e801b61e4311b0f776d8ad3e71661413e | class TextureNodeMath:
operation = None
use_clamp = None
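# (Added note) Stub mirroring Blender's TextureNodeMath node: "operation" selects
# the math operation (e.g. 'ADD', 'MULTIPLY') and "use_clamp" clamps the output
# to [0, 1]; both are left as None placeholders here.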
|
py | 1a4d597fce35d4948a19dd8bcdefbc2d4ee98715 | import ConfigParser
import csv
import matplotlib.pyplot as plt
def write_configs(model, train_percent = 0.3, scale = 'true', normalize = 'true', k_best = 'false'):
config = ConfigParser.RawConfigParser()
config.add_section('TrainingData')
config.set('TrainingData', 'labelColumn', 'label')
config.set('TrainingData', 'textColumn', 'extracted_text')
config.set('TrainingData', 'separateTrainingTesting', 'false')
config.set('TrainingData', 'possibleLabels', '2,3,4')
config.set('TrainingData', 'cross_validation', 'false')
config.add_section('Classifier')
config.set('Classifier', 'model', model)
config.set('Classifier', 'scale', scale)
config.set('Classifier', 'normalize', normalize)
config.set('Classifier', 'k_best', k_best)
config.set('Classifier', 'trainPercent', train_percent)
with open('ad_classification.ini', 'wb') as configfile:
config.write(configfile)
def read_results():
p_r_fscore_support = dict()
with open('results.csv', 'r') as csvfile:
reader = csv.reader(csvfile)
for index, row in enumerate(reader):
if(index != 0):
if(row[0] == 'Weighted'):
p_r_fscore_support['precision'] = float(row[1])
p_r_fscore_support['recall'] = float(row[2])
p_r_fscore_support['fscore'] = float(row[3])
else:
print row
print p_r_fscore_support
return p_r_fscore_support
def plot_graph(train_results, train_percents, model, is_multiple = True, type= 'Precision'):
plt.clf()
if(is_multiple):
plt.title(type+'-Train Percentage curve')
for index, result in enumerate(train_results):
plt.plot(train_percents, result, lw=2 , label=model[index])
plt.legend(loc = 'lower right')
else:
plt.title(model + ' '+type+'-Train Percentage curve')
plt.plot(train_percents, train_results, lw=2, color='navy', label=model)
plt.xlabel('Train Percent')
plt.ylabel(type)
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.05])
if(is_multiple):
plt.savefig(type+'all.png', dpi=100)
else:
plt.savefig(type+'-'+model +'.png', dpi=100)
#plt.show()
classifiers = ['random_forest', 'knn', 'logistic_regression', 'svm', 'sgd', 'nn', 'dtree', 'gaussianNB']
#classifiers = ['random_forest','nn']
train_percents = [0.1, 0.3, 0.5, 0.7, 0.9]
#train_percents = [0.1, 0.3]
model_results_prec = [0] * len(classifiers)
model_results_rec = [0] * len(classifiers)
for i, model in enumerate(classifiers):
train_results_prec = [0] * len(train_percents)
train_results_rec = [0] * len(train_percents)
for index, percent in enumerate(train_percents):
write_configs(model, percent)
execfile('ad_classification.py')
stats = read_results()
train_results_prec[index] = stats['precision']
train_results_rec[index] = stats['recall']
plot_graph(train_results_prec, train_percents, model, False, 'Precision')
plot_graph(train_results_rec, train_percents, model, False, 'Recall')
model_results_prec[i] = train_results_prec
model_results_rec[i] = train_results_rec
plot_graph(model_results_prec, train_percents, classifiers, True, 'Precision')
plot_graph(model_results_rec, train_percents, classifiers, True, 'Recall') |
py | 1a4d59fca91b2932a95fbebd4f2108c488826e62 | from core.himesis import Himesis
import uuid
class HMother2Woman(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule Mother2Woman.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMother2Woman, self).__init__(name='HMother2Woman', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """Mother2Woman"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Mother2Woman')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class Parent() node
self.add_node()
self.vs[3]["mm__"] = """Parent"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class Parent()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class Family() node
self.add_node()
self.vs[5]["mm__"] = """Family"""
self.vs[5]["attr1"] = """1"""
# match_contains node for class Family()
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# apply class Woman() node
self.add_node()
self.vs[7]["mm__"] = """Woman"""
self.vs[7]["attr1"] = """1"""
# apply_contains node for class Woman()
self.add_node()
self.vs[8]["mm__"] = """apply_contains"""
# match association Parent--family-->Family node
self.add_node()
self.vs[9]["attr1"] = """family"""
self.vs[9]["mm__"] = """directLink_S"""
# match association Family--mothers-->Parent node
self.add_node()
self.vs[10]["attr1"] = """mothers"""
self.vs[10]["mm__"] = """directLink_S"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class Parent()
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class Family()
(1,8), # applymodel -> apply_contains
(8,7), # apply_contains -> apply_class Woman()
(3,9), # match_class Parent() -> association family
(9,5), # association family -> match_class Family()
(5,10), # match_class Family() -> association mothers
(10,3), # association mothers -> match_class Parent()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((7,'fullName'),('concat',((3,'firstName'),(5,'lastName')))), ((7,'ApplyAttribute'),('constant','solveRef')), ]
|
py | 1a4d5a16fa6f6a31d244441a5c1143245e1771b8 | ##############################################################################
# Copyright (c) 2016 ZTE Corporation
# [email protected]
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# 09/06/2016: change for migration after refactoring
# 16/06/2016: Alignment of test name (JIRA: FUNCTEST-304)
##############################################################################
collections_old2New = {
# 'pod': 'pods',
# 'test_projects': 'projects',
# 'test_testcases': 'testcases',
# 'test_results': 'results'
}
fields_old2New = {
# 'test_results': [({}, {'creation_date': 'start_date'})]
}
docs_old2New = {
# 'test_results': [
# ({'criteria': 'failed'}, {'criteria': 'FAILED'}),
# ({'criteria': 'passed'}, {'criteria': 'PASS'})
# ]
# 'testcases': [
# ({'name': 'vPing'}, {'name': 'vping_ssh'}),
# ({'name': 'Tempest'}, {'name': 'tempest_smoke_serial'}),
# ({'name': 'Rally'}, {'name': 'rally_sanity'}),
# ({'name': 'ODL'}, {'name': 'odl'}),
# ({'name': 'vIMS'}, {'name': 'vims'}),
# ({'name': 'ONOS'}, {'name': 'onos'}),
# ({'name': 'vPing_userdata'}, {'name': 'vping_userdata'}),
# ({'name': 'ovno'}, {'name': 'ocl'})
# ],
# 'results': [
# ({'case_name': 'vPing'}, {'case_name': 'vping_ssh'}),
# ({'case_name': 'Tempest'}, {'case_name': 'tempest_smoke_serial'}),
# ({'case_name': 'Rally'}, {'case_name': 'rally_sanity'}),
# ({'case_name': 'ODL'}, {'case_name': 'odl'}),
# ({'case_name': 'vIMS'}, {'case_name': 'vims'}),
# ({'case_name': 'ONOS'}, {'case_name': 'onos'}),
# ({'case_name': 'vPing_userdata'}, {'case_name': 'vping_userdata'}),
# ({'case_name': 'ovno'}, {'case_name': 'ocl'})
# ]
'results': [
({'trust_indicator': 0},
{'trust_indicator': {'current': 0, 'histories': []}})
]
}
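# (Added note) Each entry above is a (filter, replacement) pair applied per
# collection: documents matching the first dict are updated with the second, e.g.
# results whose scalar trust_indicator == 0 get the nested dict form.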
|
py | 1a4d5a20ad8d8593fb4f67143f3ec55014d2addf | # coding=utf-8
# This file was created for the SGDI course
# Assignment 2: Data mining and information retrieval, Exercise 3
# Authors: Antonio Calvo Morata and Carlos Congosto Sandoval
# We, Antonio Calvo Morata and Carlos Congosto Sandoval, declare that this solution is exclusively the product of our
# own work. We have not been helped by any other person, nor have we obtained the solution from external sources,
# and we have not shared our solution with anyone. We further declare that we have not dishonestly carried out any
# other activity that could improve our results or harm the results of others.
import string, os, sys, math, collections
from operator import itemgetter
# Given a line of text, returns a list of non-empty words,
# converting to lowercase and stripping punctuation from both ends
# Ejemplo:
# > extrae_palabras("Hi! What is your name? John.")
# ['hi', 'what', 'is', 'your', 'name', 'john']
def extrae_palabras(linea):
return filter(lambda x: len(x) > 0,
map(lambda x: x.lower().strip(string.punctuation), linea.split()))
class VectorialIndex(object):
def __init__(self, directorio, stop=[]):
self.filesList = []
self.invertedIndex = dict()
# for doc in os.listdir(directorio):
# file = open(doc, 'r')
for path, subdirs, files in os.walk(directorio):
# Build the inverted index
for name in files:
idDoc = len(self.filesList)
self.filesList.append(os.path.join(path, name))
file = open(os.path.join(path, name),'r')
# Read every word in the file
for l in file:
line = extrae_palabras(l)
for word in line:
if word not in stop: # If the word is not a stop ("censored") word
# If there is no entry in the index yet, create it with a list containing the tuple
# (doc id, 1) (the word has appeared once)
if word not in self.invertedIndex:
self.invertedIndex[word] = [(idDoc, 1)]
# If the word has already appeared in this file, add one to its occurrence counter
else :
lastTuple = self.invertedIndex[word][-1]
if idDoc == lastTuple[0]:
self.invertedIndex[word][-1] = (lastTuple[0], lastTuple[1]+1)
# Otherwise start the count at one
else :
self.invertedIndex[word].append((idDoc, 1))
# Replace the occurrence counts with the TF-IDF weight of each word in each document
totalFiles = len(self.filesList)
self.norma = [0] * totalFiles
for key, values in self.invertedIndex.items():
tuplelist = []
for tuple in values:
# TF-IDF(i,j) = TF(i,j) * IDF(i,j) = (1 + log(f(i,j))) * log(N / n(i))
tdidf = (1 + math.log(tuple[1], 2)) * math.log(float(totalFiles)/len(values), 2)
tuplelist.append((tuple[0], tdidf))
self.norma[tuple[0]] += math.pow(tdidf, 2)
self.invertedIndex[key] = tuplelist
i = 0
for value in self.norma:
self.norma[i] = math.sqrt(value)
i += 1
###########################################
# #
# VECTOR QUERY #
# #
###########################################
def consulta_vectorial(self, consulta, n=3):
scores = [(0, 0)] * len(self.filesList) # Initialize the partial dot product associated with each document d (there are N in total)
# Get the list [(doc, weight), ..., (doc, weight)] associated with each query term t
words = consulta.split()
for word in words:
p = self.invertedIndex[word] # Weights of the term "word" in each document
# Add each weight to the corresponding partial dot product
for (id, peso) in p:
scores[id] = (id, scores[id][1]+peso) # Query term weights are implicitly 1, so no multiplication is needed
# Divide by the norm |d|
for id in range(len(self.filesList)):
scores[id] = (id, scores[id][1]/self.norma[id])
# Take the k most relevant documents
scores.sort(key=itemgetter(1), reverse=True)
# Map the ids back to file names
result = []
for tuple in scores[0:n] :
result.append((self.filesList[tuple[0]], tuple[1]))
return result
###########################################
# #
# CONJUNCTION QUERY #
# #
###########################################
def consulta_conjuncion(self, consulta):
words = consulta.split()
result = []
if len(words) > 1 :
appearances = []
# Get, for each query word, the list of ids of the files where it appears
for word in words:
appearances.append(self.invertedIndex[word])
# Intersection algorithm over a set of lists
terms = sorted(appearances, key=len)
answer = terms[0]
terms = terms[1:]
while terms is not None and answer is not None:
e = terms[0]
answer = self.intersect(answer, e)
if len(terms) > 1:
terms = terms[1:]
else :
terms = None
# Map the ids back to file names
for tuple in answer:
result.append(self.filesList[tuple[0]])
else :
# If the query has a single word, directly return the files where it appears
for tuple in self.invertedIndex[words[0]] :
result.append(self.filesList[tuple[0]])
return result
# Function for intersecting two lists
def intersect(self,list1, list2):
answer = []
it1 = iter(list1)
it2 = iter(list2)
p1 = it1.next()
p2 = it2.next()
while True:
try:
if p1[0] == p2[0]:
answer.append(p1)
p1 = it1.next()
p2 = it2.next()
elif p1[0] < p2[0]:
p1 = it1.next()
else:
p2 = it2.next()
except StopIteration:
break
return answer
# Command-line arguments:
# argv[1] = Directory of the collection (REQUIRED)
# argv[2:n] = Words for the query (OPTIONAL)
# * If argv[2] is a number, it is used as the number of documents to show in the vector query
if __name__ == '__main__':
if len(sys.argv) < 2:
print "No se ha especificado directorio!"
exit(-1)
root = sys.argv[1]
request = ""
n = 2
if(len(sys.argv) > 2) :
k = 2
if(sys.argv[2].isdigit()):
n = int(sys.argv[2])
k += 1
request = sys.argv[k]
for i in range(k+1, len(sys.argv)):
request += " "+str(sys.argv[i])
# Default query value if none is passed as an argument
else :
request = "civil war"
v = VectorialIndex(root,[])
print(v.consulta_conjuncion(request))
print(v.consulta_vectorial(request, n))
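# (Added note) Illustrative output shape -- values depend on the collection, this
# is not a captured run: the conjunction query yields a list of file paths, and
# the vector query yields [(path, cosine_score), ...] sorted by descending score.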
|
py | 1a4d5a30e596c03fd0458c25e763054736580c6d | """
Robocopy backup script
In "execute_robocopy" replace the string "drive" with the actual drive letter.
Also, replace the path-placeholders with the actual paths.
Author: Fred Snyder
"""
# modules
import string
from glob import glob
from sys import argv
from sys import exit
from subprocess import call
# variable for the script name
script_name = "robocopy_backup.py"
# variable to turn on the robocopy /L (list files only) flag
if len(argv) > 1 and argv[1].lower() == 'test': # argv[0] is always the name of the script
test_run_robocopy = True
else:
test_run_robocopy = False
# variable to include/exclude certain folders
exclude_folders = False
# Get the drive letter as user input
drive_letter = input("External backup drive letter: ")
print('Is this correct? Drive: ' + drive_letter)
confirm_drive_letter = input("Y/N ")
if confirm_drive_letter.lower() == 'n':
exit('Wrong drive letter.')
# function that runs the robocopy command
def execute_robocopy():
# robocopy variables
source = "drive:\\path\\to\\folder"
destination = drive_letter + ":\\path\\to\\folder"
logPath = drive_letter + ":\\backup.log"
log = "/LOG:" + logPath
# folders which are excluded from backup
# WARNING: no trailing slashes behind directories
if exclude_folders == True:
excludeDirs = ['drive:\\foldername', 'drive:\\$RECYCLE.BIN', 'drive:\\System Volume Information']
else:
excludeDirs = []
# files which are excluded from backup
excludeFiles = ["pagefile.sys", "thumbs.db", ".DS_Store", ".Spotlight-V100", ".Trashes" ]
# check if certain file exists
if len(glob("drive:\\filename*")) > 0:
excludeTEMP = glob("drive:\\filename*")[0]
excludeFiles.append(excludeTEMP[3:])
# create command list for subprocess.call
command = ["robocopy", source, destination, "/MIR"]
# check if script is running in test mode
if test_run_robocopy == True:
command.extend(["/L"])
command.extend([log])
command.extend(["/XD"] + excludeDirs)
command.extend(["/XF"] + excludeFiles)
# call the subprocess
call(command)
execute_robocopy()
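# --- Added illustration (not output from a real run) ---
# With the placeholders above, the constructed command roughly expands to:
#   robocopy drive:\path\to\folder <letter>:\path\to\folder /MIR [/L]
#            /LOG:<letter>:\backup.log /XD <excluded dirs> /XF pagefile.sys thumbs.db ...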
|
py | 1a4d5a6b0edd4a2c778534d532cb2c88e45f4ddc | import math
import torch
import torch.fft
import torch.nn as nn
class AutoCorrelation(nn.Module):
"""AutoCorrelation Mechanism with the following two phases:
(1) period-based dependencies discovery; (2) time delay aggregation.
This block can replace the self-attention family mechanism seamlessly.
"""
def __init__(self,
mask_flag=True,
factor=1,
scale=None,
attention_dropout=0.1,
output_attention=False):
super(AutoCorrelation, self).__init__()
self.factor = factor
self.scale = scale
self.mask_flag = mask_flag
self.output_attention = output_attention
self.dropout = nn.Dropout(attention_dropout)
def time_delay_agg_training(self, values, corr):
"""SpeedUp version of Autocorrelation (a batch-normalization style
design) This is for the training phase."""
head = values.shape[1]
channel = values.shape[2]
length = values.shape[3]
# find top k
top_k = int(self.factor * math.log(length))
mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)
index = torch.topk(torch.mean(mean_value, dim=0), top_k, dim=-1)[1]
weights = torch.stack([mean_value[:, index[i]] for i in range(top_k)],
dim=-1)
# update corr
tmp_corr = torch.softmax(weights, dim=-1)
# aggregation
tmp_values = values
delays_agg = torch.zeros_like(values).float()
for i in range(top_k):
pattern = torch.roll(tmp_values, -int(index[i]), -1)
delays_agg = delays_agg + pattern * (
tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(
1, head, channel, length))
return delays_agg
def time_delay_agg_inference(self, values, corr):
"""SpeedUp version of Autocorrelation (a batch-normalization style
design) This is for the inference phase."""
batch = values.shape[0]
head = values.shape[1]
channel = values.shape[2]
length = values.shape[3]
# index init
init_index = torch.arange(length).unsqueeze(0).unsqueeze(0).unsqueeze(
0).repeat(batch, head, channel, 1).cuda()
# find top k
top_k = int(self.factor * math.log(length))
mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)
weights = torch.topk(mean_value, top_k, dim=-1)[0]
delay = torch.topk(mean_value, top_k, dim=-1)[1]
# update corr
tmp_corr = torch.softmax(weights, dim=-1)
# aggregation
tmp_values = values.repeat(1, 1, 1, 2)
delays_agg = torch.zeros_like(values).float()
for i in range(top_k):
tmp_delay = init_index + delay[:, i].unsqueeze(1).unsqueeze(
1).unsqueeze(1).repeat(1, head, channel, length)
pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)
delays_agg = delays_agg + pattern * (
tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(
1, head, channel, length))
return delays_agg
def time_delay_agg_full(self, values, corr):
"""Standard version of Autocorrelation."""
batch = values.shape[0]
head = values.shape[1]
channel = values.shape[2]
length = values.shape[3]
# index init
init_index = torch.arange(length).unsqueeze(0).unsqueeze(0).unsqueeze(
0).repeat(batch, head, channel, 1).cuda()
# find top k
top_k = int(self.factor * math.log(length))
weights = torch.topk(corr, top_k, dim=-1)[0]
delay = torch.topk(corr, top_k, dim=-1)[1]
# update corr
tmp_corr = torch.softmax(weights, dim=-1)
# aggregation
tmp_values = values.repeat(1, 1, 1, 2)
delays_agg = torch.zeros_like(values).float()
for i in range(top_k):
tmp_delay = init_index + delay[..., i].unsqueeze(-1)
pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)
delays_agg = delays_agg + pattern * (
tmp_corr[..., i].unsqueeze(-1))
return delays_agg
def forward(self, queries, keys, values, attn_mask):
B, L, H, E = queries.shape
_, S, _, D = values.shape
if L > S:
zeros = torch.zeros_like(queries[:, :(L - S), :]).float()
values = torch.cat([values, zeros], dim=1)
keys = torch.cat([keys, zeros], dim=1)
else:
values = values[:, :L, :, :]
keys = keys[:, :L, :, :]
# period-based dependencies
q_fft = torch.fft.rfft(
queries.permute(0, 2, 3, 1).contiguous(), dim=-1)
k_fft = torch.fft.rfft(keys.permute(0, 2, 3, 1).contiguous(), dim=-1)
res = q_fft * torch.conj(k_fft)
corr = torch.fft.irfft(res, dim=-1)
# time delay agg
if self.training:
V = self.time_delay_agg_training(
values.permute(0, 2, 3, 1).contiguous(),
corr).permute(0, 3, 1, 2)
else:
V = self.time_delay_agg_inference(
values.permute(0, 2, 3, 1).contiguous(),
corr).permute(0, 3, 1, 2)
if self.output_attention:
return (V.contiguous(), corr.permute(0, 3, 1, 2))
else:
return (V.contiguous(), None)
class AutoCorrelationLayer(nn.Module):
def __init__(self,
correlation,
d_model,
n_heads,
d_keys=None,
d_values=None):
super(AutoCorrelationLayer, self).__init__()
d_keys = d_keys or (d_model // n_heads)
d_values = d_values or (d_model // n_heads)
self.inner_correlation = correlation
self.query_projection = nn.Linear(d_model, d_keys * n_heads)
self.key_projection = nn.Linear(d_model, d_keys * n_heads)
self.value_projection = nn.Linear(d_model, d_values * n_heads)
self.out_projection = nn.Linear(d_values * n_heads, d_model)
self.n_heads = n_heads
def forward(self, queries, keys, values, attn_mask):
B, L, _ = queries.shape
_, S, _ = keys.shape
H = self.n_heads
queries = self.query_projection(queries).view(B, L, H, -1)
keys = self.key_projection(keys).view(B, S, H, -1)
values = self.value_projection(values).view(B, S, H, -1)
out, attn = self.inner_correlation(queries, keys, values, attn_mask)
out = out.view(B, L, -1)
return self.out_projection(out), attn
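# --- Added illustration (not part of the original module) ---
# Minimal usage sketch for the two blocks above; the shapes and hyper-parameters
# below are assumptions chosen for a quick smoke test, not values from the source.
if __name__ == "__main__":
    layer = AutoCorrelationLayer(
        AutoCorrelation(factor=1, attention_dropout=0.1),
        d_model=64, n_heads=4)
    x = torch.randn(2, 96, 64)  # (batch, sequence length, d_model)
    # The module defaults to training mode, so the CPU-friendly
    # time_delay_agg_training path is exercised.
    out, attn = layer(x, x, x, attn_mask=None)
    print(out.shape)  # torch.Size([2, 96, 64])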
|
py | 1a4d5a812d2cefe9feba6b456c493a8c70f1694e | import datetime
from rich.padding import Padding
from rich.panel import Panel
from rich.text import Text
from rich.console import Group
from .config import console_print, console, key_at_index
def toggle_timer(log, labels) -> None:
label_name = key_at_index(labels, log.cur_index)
if not log.active_label:
log.start()
log.active_label = True
log_msg = f"[b green_yellow]Tracking for [b cyan1]'{label_name}'[/] initiated[/]"
log_msg = Padding(log_msg, (1, 0))
console.print(Padding(
Panel.fit(log_msg, style='green_yellow', title='Tracker Update'), (1, 0)))
else:
log.stop()
log.active_label = False
elapsed_time = log.stop_time - log.start_time
latest_log = [log.start_time, log.stop_time, elapsed_time]
if not log.timestamp:
latest_log[0] = latest_log[1] = -1
labels[label_name].append(latest_log)
log_msg = f"[b deep_pink2]Tracking for [b cyan1]'{label_name}'[/] terminated[/]"
elapsed_time = str(datetime.timedelta(seconds=round(elapsed_time)))
session_len = Text(justify='center')
session_len.append('Session lasted for ', style='b bright_white')
session_len.append(f'{elapsed_time}', style='b orange1')
message_group = Padding(Group(log_msg, session_len, fit=True), (1, 0))
console.print(Padding(
Panel.fit(message_group, style='deep_pink2', title='Tracker Update'), (1, 0)))
def toggle_label(log, labels, num) -> None:
if log.active_label:
label_name = key_at_index(labels, log.cur_index)
console_print(
f'[b cyan1]{label_name}[/] is currently active.', style='info')
toggle_timer(log, labels)
log.cur_index = (log.cur_index + num) % len(labels)
label_name = key_at_index(labels, log.cur_index)
console_print(
f'Active label changed to [b cyan1]{label_name}[/]', style='info')
|
py | 1a4d5bec685234b2b26c6ad58012fe56c99b4e8e | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake (in-memory) hypervisor+api.
Allows nova testing w/o a hypervisor. This module also documents the
semantics of real hypervisor connections.
"""
import collections
import contextlib
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova.compute import power_state
from nova.compute import task_states
import nova.conf
from nova.console import type as ctype
from nova import exception
from nova.i18n import _LW
from nova.objects import fields as obj_fields
from nova.virt import diagnostics
from nova.virt import driver
from nova.virt import hardware
from nova.virt import virtapi
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
_FAKE_NODES = None
def set_nodes(nodes):
"""Sets FakeDriver's node.list.
It has effect on the following methods:
get_available_nodes()
get_available_resource
To restore the change, call restore_nodes()
"""
global _FAKE_NODES
_FAKE_NODES = nodes
def restore_nodes():
"""Resets FakeDriver's node list modified by set_nodes().
Usually called from tearDown().
"""
global _FAKE_NODES
_FAKE_NODES = [CONF.host]
class FakeInstance(object):
def __init__(self, name, state, uuid):
self.name = name
self.state = state
self.uuid = uuid
def __getitem__(self, key):
return getattr(self, key)
class Resources(object):
vcpus = 0
memory_mb = 0
local_gb = 0
vcpus_used = 0
memory_mb_used = 0
local_gb_used = 0
def __init__(self, vcpus=8, memory_mb=8000, local_gb=500):
self.vcpus = vcpus
self.memory_mb = memory_mb
self.local_gb = local_gb
def claim(self, vcpus=0, mem=0, disk=0):
self.vcpus_used += vcpus
self.memory_mb_used += mem
self.local_gb_used += disk
def release(self, vcpus=0, mem=0, disk=0):
self.vcpus_used -= vcpus
self.memory_mb_used -= mem
self.local_gb_used -= disk
def dump(self):
return {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': self.vcpus_used,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used
}
class FakeDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
"supports_migrate_to_same_host": True
}
# Since we don't have a real hypervisor, pretend we have lots of
# disk and ram so this driver can be used to test large instances.
vcpus = 1000
memory_mb = 800000
local_gb = 600000
"""Fake hypervisor driver."""
def __init__(self, virtapi, read_only=False):
super(FakeDriver, self).__init__(virtapi)
self.instances = {}
self.resources = Resources(
vcpus=self.vcpus,
memory_mb=self.memory_mb,
local_gb=self.local_gb)
self.host_status_base = {
'hypervisor_type': 'fake',
'hypervisor_version': versionutils.convert_version_to_int('1.0'),
'hypervisor_hostname': CONF.host,
'cpu_info': {},
'disk_available_least': 0,
'supported_instances': [(
obj_fields.Architecture.X86_64,
obj_fields.HVType.FAKE,
obj_fields.VMMode.HVM)],
'numa_topology': None,
}
self._mounts = {}
self._interfaces = {}
self.active_migrations = {}
if not _FAKE_NODES:
set_nodes([CONF.host])
def init_host(self, host):
return
def list_instances(self):
return [self.instances[uuid].name for uuid in self.instances.keys()]
def list_instance_uuids(self):
return list(self.instances.keys())
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
pass
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
pass
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
uuid = instance.uuid
state = power_state.RUNNING
flavor = instance.flavor
self.resources.claim(
vcpus=flavor.vcpus,
mem=flavor.memory_mb,
disk=flavor.root_gb)
fake_instance = FakeInstance(instance.name, state, uuid)
self.instances[uuid] = fake_instance
def snapshot(self, context, instance, image_id, update_task_state):
if instance.uuid not in self.instances:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
update_task_state(task_state=task_states.IMAGE_UPLOADING)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
pass
def get_host_ip_addr(self):
return '192.168.0.1'
def set_admin_password(self, instance, new_pass):
pass
def inject_file(self, instance, b64_path, b64_contents):
pass
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
pass
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
pass
def unrescue(self, instance, network_info):
pass
def poll_rebooting_instances(self, timeout, instances):
pass
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
pass
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
self.instances[instance.uuid] = FakeInstance(
instance.name, power_state.RUNNING, instance.uuid)
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
pass
def power_off(self, instance, timeout=0, retry_interval=0):
pass
def power_on(self, context, instance, network_info,
block_device_info=None):
pass
def trigger_crash_dump(self, instance):
pass
def soft_delete(self, instance):
pass
def restore(self, instance):
pass
def pause(self, instance):
pass
def unpause(self, instance):
pass
def suspend(self, context, instance):
pass
def resume(self, context, instance, network_info, block_device_info=None):
pass
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
key = instance.uuid
if key in self.instances:
flavor = instance.flavor
self.resources.release(
vcpus=flavor.vcpus,
mem=flavor.memory_mb,
disk=flavor.root_gb)
del self.instances[key]
else:
LOG.warning(_LW("Key '%(key)s' not in instances '%(inst)s'"),
{'key': key,
'inst': self.instances}, instance=instance)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
pass
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach the disk to the instance at mountpoint using info."""
instance_name = instance.name
if instance_name not in self._mounts:
self._mounts[instance_name] = {}
self._mounts[instance_name][mountpoint] = connection_info
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach the disk attached to the instance."""
try:
del self._mounts[instance.name][mountpoint]
except KeyError:
pass
def swap_volume(self, old_connection_info, new_connection_info,
instance, mountpoint, resize_to):
"""Replace the disk attached to the instance."""
instance_name = instance.name
if instance_name not in self._mounts:
self._mounts[instance_name] = {}
self._mounts[instance_name][mountpoint] = new_connection_info
def attach_interface(self, context, instance, image_meta, vif):
if vif['id'] in self._interfaces:
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
self._interfaces[vif['id']] = vif
def detach_interface(self, context, instance, vif):
try:
del self._interfaces[vif['id']]
except KeyError:
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
def get_info(self, instance):
if instance.uuid not in self.instances:
raise exception.InstanceNotFound(instance_id=instance.uuid)
i = self.instances[instance.uuid]
return hardware.InstanceInfo(state=i.state,
max_mem_kb=0,
mem_kb=0,
num_cpu=2,
cpu_time_ns=0)
def get_diagnostics(self, instance):
return {'cpu0_time': 17300000000,
'memory': 524288,
'vda_errors': -1,
'vda_read': 262144,
'vda_read_req': 112,
'vda_write': 5778432,
'vda_write_req': 488,
'vnet1_rx': 2070139,
'vnet1_rx_drop': 0,
'vnet1_rx_errors': 0,
'vnet1_rx_packets': 26701,
'vnet1_tx': 140208,
'vnet1_tx_drop': 0,
'vnet1_tx_errors': 0,
'vnet1_tx_packets': 662,
}
def get_instance_diagnostics(self, instance):
diags = diagnostics.Diagnostics(state='running', driver='fake',
hypervisor_os='fake-os', uptime=46664, config_drive=True)
diags.add_cpu(time=17300000000)
diags.add_nic(mac_address='01:23:45:67:89:ab',
rx_packets=26701,
rx_octets=2070139,
tx_octets=140208,
tx_packets=662)
diags.add_disk(id='fake-disk-id',
read_bytes=262144,
read_requests=112,
write_bytes=5778432,
write_requests=488)
diags.memory_details.maximum = 524288
return diags
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM.
"""
bw = []
for instance in instances:
bw.append({'uuid': instance.uuid,
'mac_address': 'fa:16:3e:4c:2c:30',
'bw_in': 0,
'bw_out': 0})
return bw
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
volusage = []
return volusage
def get_host_cpu_stats(self):
stats = {'kernel': 5664160000000,
'idle': 1592705190000000,
'user': 26728850000000,
'iowait': 6121490000000}
stats['frequency'] = 800
return stats
def block_stats(self, instance, disk_id):
return [0, 0, 0, 0, None]
def get_console_output(self, context, instance):
return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'
def get_vnc_console(self, context, instance):
return ctype.ConsoleVNC(internal_access_path='FAKE',
host='fakevncconsole.com',
port=6969)
def get_spice_console(self, context, instance):
return ctype.ConsoleSpice(internal_access_path='FAKE',
host='fakespiceconsole.com',
port=6969,
tlsPort=6970)
def get_rdp_console(self, context, instance):
return ctype.ConsoleRDP(internal_access_path='FAKE',
host='fakerdpconsole.com',
port=6969)
def get_serial_console(self, context, instance):
return ctype.ConsoleSerial(internal_access_path='FAKE',
host='fakerdpconsole.com',
port=6969)
def get_mks_console(self, context, instance):
return ctype.ConsoleMKS(internal_access_path='FAKE',
host='fakemksconsole.com',
port=6969)
def get_console_pool_info(self, console_type):
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
return True
def refresh_instance_security_rules(self, instance):
return True
def get_available_resource(self, nodename):
"""Updates compute manager resource info on ComputeNode table.
Since we don't have a real hypervisor, pretend we have lots of
disk and ram.
"""
cpu_info = collections.OrderedDict([
('arch', 'x86_64'),
('model', 'Nehalem'),
('vendor', 'Intel'),
('features', ['pge', 'clflush']),
('topology', {
'cores': 1,
'threads': 1,
'sockets': 4,
}),
])
if nodename not in _FAKE_NODES:
return {}
host_status = self.host_status_base.copy()
host_status.update(self.resources.dump())
host_status['hypervisor_hostname'] = nodename
host_status['host_hostname'] = nodename
host_status['host_name_label'] = nodename
host_status['cpu_info'] = jsonutils.dumps(cpu_info)
return host_status
def ensure_filtering_rules_for_instance(self, instance, network_info):
return
def get_instance_disk_info(self, instance, block_device_info=None):
return
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
post_method(context, instance, dest, block_migration,
migrate_data)
return
def live_migration_force_complete(self, instance):
return
def live_migration_abort(self, instance):
return
def cleanup_live_migration_destination_check(self, context,
dest_check_data):
return
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
return {}
def check_can_live_migrate_source(self, context, instance,
dest_check_data, block_device_info=None):
return
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
return
def confirm_migration(self, context, migration, instance, network_info):
return
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data):
return
def unfilter_instance(self, instance, network_info):
return
def _test_remove_vm(self, instance_uuid):
"""Removes the named VM, as if it crashed. For testing."""
self.instances.pop(instance_uuid)
def host_power_action(self, action):
"""Reboots, shuts down or powers up the host."""
return action
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
if not mode:
return 'off_maintenance'
return 'on_maintenance'
def set_host_enabled(self, enabled):
"""Sets the specified host's ability to accept new instances."""
if enabled:
return 'enabled'
return 'disabled'
def get_volume_connector(self, instance):
return {'ip': CONF.my_block_storage_ip,
'initiator': 'fake',
'host': 'fakehost'}
def get_available_nodes(self, refresh=False):
return _FAKE_NODES
def instance_on_disk(self, instance):
return False
def quiesce(self, context, instance, image_meta):
pass
def unquiesce(self, context, instance, image_meta):
pass
class FakeVirtAPI(virtapi.VirtAPI):
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
error_callback=None):
# NOTE(danms): Don't actually wait for any events, just
# fall through
yield
class SmallFakeDriver(FakeDriver):
# The api samples expect specific cpu memory and disk sizes. In order to
# allow the FakeVirt driver to be used outside of the unit tests, provide
# a separate class that has the values expected by the api samples. So
# instead of requiring new samples every time those
# values are adjusted allow them to be overwritten here.
vcpus = 1
memory_mb = 8192
local_gb = 1028
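# --- Added illustration (not part of the original module) ---
# Rough usage sketch; assumes nova's oslo.config is already initialized (as it
# is inside the nova test suite):
#   driver = FakeDriver(FakeVirtAPI())
#   driver.get_available_resource(CONF.host)  # fake CPU/RAM/disk inventory dict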
|
py | 1a4d5c7af9773baa6c29f5e0b903eb33a35bcdaf | # Pedestrian traffic light (semáforo peatonal)
# Ernesto Tolocka 2021
# www.profetolocka.com.ar/pytrainer
# Normally stays green until a pedestrian presses teclaVerde; then it changes to yellow and then red.
# After some time on red, it returns to the initial condition.
from PyTrainer import *
from time import sleep
# Start with green on and the rest off
ledVerde.on ()
ledRojo.off ()
ledAmarillo.off ()
# On-time for each LED
tiempoRojo = 4
tiempoVerde = 2
tiempoAmarillo = 0.5
#Repite por siempre
while (True):
if (teclaVerde.value () == False):
ledVerde.off ()
#Hace un "bip"
buzzer.on ()
sleep (0.1)
buzzer.off ()
#Prende led amarillo
ledAmarillo.on ()
sleep (tiempoAmarillo)
ledAmarillo.off ()
# Turn on the red LED
ledRojo.on ()
sleep (tiempoRojo)
ledRojo.off ()
# Go back to normal mode
ledVerde.on ()
|
py | 1a4d5d1d50ca3549911bf045766a79512162dcba | from typing import Dict, List, Optional, Set
from chia.types.coin_record import CoinRecord
from chia.types.condition_with_args import ConditionWithArgs
from chia.util.clvm import int_from_bytes
from chia.util.condition_tools import ConditionOpcode
from chia.util.errors import Err
from chia.util.ints import uint32, uint64
from chia.types.blockchain_format.sized_bytes import bytes32
def blockchain_assert_my_coin_id(condition: ConditionWithArgs, unspent: CoinRecord) -> Optional[Err]:
"""
Checks if CoinID matches the id from the condition
"""
if unspent.coin.name() != condition.vars[0]:
return Err.ASSERT_MY_COIN_ID_FAILED
return None
def blockchain_assert_absolute_block_height_exceeds(
condition: ConditionWithArgs, prev_transaction_block_height: uint32
) -> Optional[Err]:
"""
Checks if the next block index exceeds the block index from the condition
"""
try:
expected_block_index = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
if prev_transaction_block_height < expected_block_index:
return Err.ASSERT_HEIGHT_ABSOLUTE_FAILED
return None
def blockchain_assert_relative_block_height_exceeds(
condition: ConditionWithArgs, unspent: CoinRecord, prev_transaction_block_height: uint32
) -> Optional[Err]:
"""
Checks if the coin age exceeds the age from the condition
"""
try:
expected_block_age = int_from_bytes(condition.vars[0])
expected_block_index = expected_block_age + unspent.confirmed_block_index
except ValueError:
return Err.INVALID_CONDITION
if prev_transaction_block_height < expected_block_index:
return Err.ASSERT_HEIGHT_RELATIVE_FAILED
return None
def blockchain_assert_absolute_time_exceeds(condition: ConditionWithArgs, timestamp):
"""
Checks if the current time, in seconds, exceeds the time specified in the condition
"""
try:
expected_mili_time = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
current_time = timestamp
if current_time <= expected_mili_time:
return Err.ASSERT_SECONDS_ABSOLUTE_FAILED
return None
def blockchain_assert_relative_time_exceeds(condition: ConditionWithArgs, unspent: CoinRecord, timestamp):
"""
Checks if the time since the unspent coin's creation, in seconds, exceeds the time specified in the condition
"""
try:
expected_mili_time = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
current_time = timestamp
if current_time <= expected_mili_time + unspent.timestamp:
return Err.ASSERT_SECONDS_RELATIVE_FAILED
return None
def blockchain_assert_announcement(condition: ConditionWithArgs, announcements: Set[bytes32]) -> Optional[Err]:
"""
Check if an announcement is included in the list of announcements
"""
announcement_hash = condition.vars[0]
if announcement_hash not in announcements:
return Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
return None
def blockchain_check_conditions_dict(
unspent: CoinRecord,
coin_announcement_names: Set[bytes32],
puzzle_announcement_names: Set[bytes32],
conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]],
prev_transaction_block_height: uint32,
timestamp: uint64,
) -> Optional[Err]:
"""
Check all conditions against current state.
"""
for con_list in conditions_dict.values():
cvp: ConditionWithArgs
for cvp in con_list:
error = None
if cvp.opcode is ConditionOpcode.ASSERT_MY_COIN_ID:
error = blockchain_assert_my_coin_id(cvp, unspent)
elif cvp.opcode is ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT:
error = blockchain_assert_announcement(cvp, coin_announcement_names)
elif cvp.opcode is ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT:
error = blockchain_assert_announcement(cvp, puzzle_announcement_names)
elif cvp.opcode is ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE:
error = blockchain_assert_absolute_block_height_exceeds(cvp, prev_transaction_block_height)
elif cvp.opcode is ConditionOpcode.ASSERT_HEIGHT_RELATIVE:
error = blockchain_assert_relative_block_height_exceeds(cvp, unspent, prev_transaction_block_height)
elif cvp.opcode is ConditionOpcode.ASSERT_SECONDS_ABSOLUTE:
error = blockchain_assert_absolute_time_exceeds(cvp, timestamp)
elif cvp.opcode is ConditionOpcode.ASSERT_SECONDS_RELATIVE:
error = blockchain_assert_relative_time_exceeds(cvp, unspent, timestamp)
if error:
return error
return None
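# --- Added illustration (not from the original module) ---
# Each ConditionWithArgs pairs an opcode with raw byte arguments; a sketch such as
#   ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [some_coin.name()])
# would be verified by blockchain_assert_my_coin_id against the spent coin's id.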
|
py | 1a4d5d2735c484e6b1958d762c4390f3a8c45508 | #! /usr/bin/python3
"""
pre-receive/dummy-echo.py
Stupidly echos incoming refs.
Code from https://www.atlassian.com/git/tutorials/git-hooks/server-side-hooks
"""
import fileinput
for line in fileinput.input():
print('pre-receive: Trying to push ref %s' % line)
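# Note (added for clarity): git writes one line per updated ref to the hook's
# stdin, in the form "<old-sha> <new-sha> <ref-name>".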
|
py | 1a4d5de397f2338482195b9f1170625b464b0aef | from time import sleep
from pkg_resources import parse_version as v
import pytest
from qbittorrentapi.rss import RSSitemsDictionary
from tests.conftest import check, get_func
folder_one = "testFolderOne"
folder_two = "testFolderTwo"
item_one = "YTS1080p"
item_two = "YTS1080pNew"
url = "https://yts.mx/rss/"
def test_refresh_item(client, api_version, rss_feed):
if v(api_version) < v("2.2"):
with pytest.raises(NotImplementedError):
client.rss_refresh_item(item_path=rss_feed)
else:
client.rss_refresh_item(item_path=rss_feed)
check(
lambda: client.rss_items(include_feed_data=True)[rss_feed]["lastBuildDate"],
"",
negate=True,
check_limit=20,
)
last_refresh = client.rss_items(include_feed_data=True)[rss_feed][
"lastBuildDate"
]
sleep(1)
client.rss_refresh_item(item_path=rss_feed)
check(
lambda: client.rss_items(include_feed_data=True)[rss_feed]["lastBuildDate"],
last_refresh,
negate=True,
check_limit=20,
)
if v(api_version) < v("2.2"):
with pytest.raises(NotImplementedError):
client.rss.refresh_item(item_path=rss_feed)
else:
client.rss.refresh_item(item_path=rss_feed)
check(
lambda: client.rss_items(include_feed_data=True)[rss_feed]["lastBuildDate"],
"",
negate=True,
check_limit=20,
)
last_refresh = client.rss_items(include_feed_data=True)[rss_feed][
"lastBuildDate"
]
sleep(1)
client.rss.refresh_item(item_path=rss_feed)
check(
lambda: client.rss_items(include_feed_data=True)[rss_feed]["lastBuildDate"],
last_refresh,
negate=True,
check_limit=20,
)
def test_items(client, rss_feed):
check(lambda: client.rss_items(), rss_feed, reverse=True)
check(lambda: client.rss_items(include_feed_data=True), rss_feed, reverse=True)
check(
lambda: client.rss_items(include_feed_data=True)[rss_feed],
"articles",
reverse=True,
)
check(lambda: client.rss.items(), rss_feed, reverse=True)
check(lambda: client.rss.items.without_data, rss_feed, reverse=True)
check(lambda: client.rss.items.with_data[rss_feed], "articles", reverse=True)
def test_add_feed(client, rss_feed):
if rss_feed not in client.rss_items():
raise Exception("rss feed not found", client.rss_items())
def test_remove_feed1(client, rss_feed):
client.rss_remove_item(item_path=rss_feed)
check(lambda: client.rss_items(), rss_feed, reverse=True, negate=True)
def test_remove_feed2(client, rss_feed):
client.rss.remove_item(item_path=rss_feed)
check(lambda: client.rss_items(), rss_feed, reverse=True, negate=True)
def test_add_remove_folder(client):
name = "test_isos"
client.rss_add_folder(folder_path=name)
check(lambda: client.rss_items(), name, reverse=True)
client.rss_remove_item(item_path=name)
check(lambda: client.rss_items(), name, reverse=True, negate=True)
client.rss.add_folder(folder_path=name)
check(lambda: client.rss.items(), name, reverse=True)
client.rss.remove_item(item_path=name)
check(lambda: client.rss.items(), name, reverse=True, negate=True)
def test_move(client, rss_feed):
new_name = "new_loc"
client.rss_move_item(orig_item_path=rss_feed, new_item_path=new_name)
check(lambda: client.rss_items(), new_name, reverse=True)
client.rss.move_item(orig_item_path=new_name, new_item_path=rss_feed)
check(lambda: client.rss.items(), rss_feed, reverse=True)
def test_mark_as_read(client, api_version, rss_feed):
item_id = client.rss.items.with_data[rss_feed]["articles"][0]["id"]
if v(api_version) < v("2.5.1"):
with pytest.raises(NotImplementedError):
client.rss_mark_as_read(item_path=rss_feed, article_id=item_id)
else:
client.rss_mark_as_read(item_path=rss_feed, article_id=item_id)
check(
lambda: client.rss.items.with_data[rss_feed]["articles"][0],
"isRead",
reverse=True,
)
item_id = client.rss.items.with_data[rss_feed]["articles"][1]["id"]
if v(api_version) < v("2.5.1"):
with pytest.raises(NotImplementedError):
client.rss.mark_as_read(item_path=rss_feed, article_id=item_id)
else:
client.rss.mark_as_read(item_path=rss_feed, article_id=item_id)
check(
lambda: client.rss.items.with_data[rss_feed]["articles"][1],
"isRead",
reverse=True,
)
@pytest.mark.parametrize(
"client_func",
(
(
"rss_add_feed",
"rss_set_rule",
"rss_rules",
"rss_rename_rule",
"rss_matching_articles",
"rss_remove_rule",
"rss_remove_item",
),
(
"rss.add_feed",
"rss.set_rule",
"rss.rules",
"rss.rename_rule",
"rss.matching_articles",
"rss.remove_rule",
"rss.remove_item",
),
),
)
def test_rules(client, api_version, client_func, rss_feed):
def check_for_rule(name):
try:
get_func(client, client_func[2])() # rss_rules
check(
lambda: get_func(client, client_func[2])(), name, reverse=True
) # rss_rules
except TypeError:
check(
lambda: get_func(client, client_func[2]), name, reverse=True
) # rss_rules
_ = rss_feed # reference to avoid errors; needed to load RSS feed into qbt
rule_name = item_one + "Rule"
rule_name_new = rule_name + "New"
rule_def = {"enabled": True, "affectedFeeds": url, "addPaused": True}
try:
get_func(client, client_func[1])(
rule_name=rule_name, rule_def=rule_def
) # rss_set_rule
check_for_rule(rule_name)
if v(api_version) >= v("2.6"): # rename was broken in qBittorrent for a period
get_func(client, client_func[3])(
orig_rule_name=rule_name, new_rule_name=rule_name_new
) # rss_rename_rule
check_for_rule(rule_name_new)
if v(api_version) < v("2.5.1"):
with pytest.raises(NotImplementedError):
get_func(client, client_func[4])(
rule_name=rule_name
) # rss_matching_articles
else:
assert isinstance(
get_func(client, client_func[4])(rule_name=rule_name),
RSSitemsDictionary,
) # rss_matching_articles
finally:
get_func(client, client_func[5])(rule_name=rule_name) # rss_remove_rule
get_func(client, client_func[5])(rule_name=rule_name_new) # rss_remove_rule
check(lambda: client.rss_rules(), rule_name, reverse=True, negate=True)
get_func(client, client_func[6])(item_path=item_one) # rss_remove_item
assert item_two not in client.rss_items()
check(lambda: client.rss_items(), item_two, reverse=True, negate=True)
|
py | 1a4d614bd3d7d544dbf9a70b799b179ff6bf41f7 | """Elasticsearch document model for django-elasticsearch-dsl
"""
from elasticsearch_dsl import analyzer
from django_elasticsearch_dsl import Document, fields, Keyword
from django_elasticsearch_dsl.registries import registry
from .models import ChildPage
booksearch_analyzer = analyzer(
"booksearch_analyzer",
tokenizer="standard",
filter=['lowercase', 'asciifolding', 'porter_stem'],
char_filter=['html_strip']
)
@registry.register_document
class ChildPageDocument(Document):
content = fields.TextField(attr='html_content',
analyzer=booksearch_analyzer)
title = fields.TextField(fields={'keyword': Keyword()})
author = fields.KeywordField(attr='author')
class Index:
name = 'booksearch'
settings = {'number_of_shards': 1,
'number_of_replicas': 1}
class Django:
model = ChildPage
fields = [
'page_number',
'parent_doc_id'
]
# Ignore auto updating of Elasticsearch when a model is saved
# or deleted:
# ignore_signals = True
# Don't perform an index refresh after every update (overrides global setting):
# auto_refresh = False
# Paginate the django queryset used to populate the index with the specified size
# (by default it uses the database driver's default setting)
# queryset_pagination = 5000
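# --- Added illustration (not part of the original module) ---
# Typical query sketch with django-elasticsearch-dsl (the search text is an
# assumption):
#   hits = ChildPageDocument.search().query("match", content="whale").execute()
#   for hit in hits:
#       print(hit.title, hit.page_number)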
|
py | 1a4d6284d02dfbc90c93fe82fd672f1fa1a2b835 | from __future__ import absolute_import, division, print_function
import os
import pytest
from hypothesis import settings
@pytest.fixture(scope="session")
def C():
"""
Return a simple but fully featured attrs class with an x and a y attribute.
"""
from attr import attributes, attr
@attributes
class C(object):
x = attr()
y = attr()
return C
# PyPy on Travis appears to be too slow.
settings.register_profile("travis_pypy", settings(perform_health_check=False))
settings.load_profile(os.getenv(u'HYPOTHESIS_PROFILE', 'default'))
|
py | 1a4d62ec69fe952711444bee35e2c2e97a59b349 | from flask_table import Table, Col, DatetimeCol, BoolCol
class BlockchainTable(Table):
# Class to format the table
classes = ['table', 'table-striped']
from_user = Col('From')
to_user = Col('To')
amount = Col('Amount')
timestamp = DatetimeCol('Timestamp', datetime_format='full')
nonce = Col('Nonce')
miner_verify = BoolCol('Transaction Verified')
|
py | 1a4d6317992523090752152d127a8b6be50cba5d | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
import merlin.models.tf as ml
from merlin.models.data.synthetic import SyntheticData
targets = {"target": tf.cast(tf.random.uniform((100,), maxval=2, dtype=tf.int32), tf.float32)}
def test_binary_classification_head(testing_data: SyntheticData):
from merlin.models.tf.utils import testing_utils
body = ml.InputBlock(testing_data.schema).connect(ml.MLPBlock([64]))
model = body.connect(ml.BinaryClassificationTask("target"))
testing_utils.assert_loss_and_metrics_are_valid(model, (testing_data.tf_tensor_dict, targets))
def test_serialization_binary_classification_head(testing_data: SyntheticData):
from merlin.models.tf.utils import testing_utils
body = ml.InputBlock(testing_data.schema).connect(ml.MLPBlock([64]))
model = body.connect(ml.BinaryClassificationTask("target"))
copy_model = testing_utils.assert_serialization(model)
testing_utils.assert_loss_and_metrics_are_valid(
copy_model, (testing_data.tf_tensor_dict, targets)
)
|
py | 1a4d631f1a43889a45b0eb5c80c7140fdd9bfa78 | from django.apps import AppConfig
class RunningboxConfig(AppConfig):
name = 'runningbox'
|
py | 1a4d63a9b268bc9c74bccea773095d56efa6dd52 | """
Tests for relationship detection.
"""
from itertools import chain
from typing import List, Optional, Tuple
import geopandas as gpd
import numpy as np
import pandas as pd
import pytest
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from shapely.geometry import MultiLineString
from shapely.prepared import PreparedGeometry
from fractopo.analysis.relationships import (
determine_crosscut_abutting_relationships,
determine_intersects,
determine_nodes_intersecting_sets,
plot_crosscut_abutting_relationships_plot,
)
from fractopo.general import prepare_geometry_traces
from tests import Helpers
@pytest.mark.parametrize(
"trace_series_two_sets, set_array, set_names_two_sets,"
"node_series_xy, buffer_value, assumed_intersections",
Helpers.test_determine_nodes_intersecting_sets_params,
)
def test_determine_nodes_intersecting_sets(
trace_series_two_sets: Tuple[gpd.GeoSeries, gpd.GeoSeries],
set_array: np.ndarray,
set_names_two_sets: Tuple[str, str],
node_series_xy: gpd.GeoSeries,
buffer_value: float,
assumed_intersections: Optional[List[bool]],
):
"""
Test determine_nodes_intersecting_sets.
"""
intersects_both_sets = determine_nodes_intersecting_sets(
trace_series_two_sets,
set_names_two_sets,
node_series_xy,
buffer_value,
)
assert isinstance(intersects_both_sets, list)
if assumed_intersections is not None:
assert len(assumed_intersections) == len(intersects_both_sets)
assert sum(assumed_intersections) == sum(intersects_both_sets)
assert assumed_intersections == intersects_both_sets
@pytest.mark.parametrize(
"trace_series",
Helpers.test_prepare_geometry_traces_params,
)
def test_prepare_geometry_traces(trace_series: gpd.GeoSeries):
"""
Test prepare_geometry_traces.
"""
prepared_traces = prepare_geometry_traces(trace_series)
assert isinstance(prepared_traces, PreparedGeometry)
assert isinstance(prepared_traces.context, MultiLineString)
assert all(
[prepared_traces.intersects(trace) for trace in trace_series.geometry.values]
)
@pytest.mark.parametrize(
"trace_series_two_sets, set_names_two_sets,"
"node_series_xy_intersects, node_types_xy_intersects, buffer_value",
Helpers.test_determine_intersects_params,
)
def test_determine_intersects(
trace_series_two_sets: Tuple[gpd.GeoSeries, gpd.GeoSeries],
set_names_two_sets: Tuple[str, str],
node_series_xy_intersects: gpd.GeoSeries,
node_types_xy_intersects: np.ndarray,
buffer_value: float,
):
"""
Test determine_intersects.
"""
assert isinstance(trace_series_two_sets, tuple)
assert isinstance(set_names_two_sets, tuple)
assert isinstance(node_series_xy_intersects, gpd.GeoSeries)
assert isinstance(node_types_xy_intersects, np.ndarray)
assert isinstance(buffer_value, float)
intersectframe = determine_intersects(
trace_series_two_sets=trace_series_two_sets,
set_names_two_sets=set_names_two_sets,
node_series_xy_intersects=node_series_xy_intersects,
node_types_xy_intersects=node_types_xy_intersects,
buffer_value=buffer_value,
)
assert isinstance(intersectframe, pd.DataFrame)
expected_cols = ["node", "nodeclass", "sets", "error"]
assert all(col in intersectframe.columns for col in expected_cols)
@pytest.mark.parametrize(
"trace_series, node_series, node_types, set_array, set_names, buffer_value, label",
Helpers.test_determine_crosscut_abutting_relationships_params,
)
def test_determine_crosscut_abutting_relationships(
trace_series: gpd.GeoSeries,
node_series: gpd.GeoSeries,
node_types: np.ndarray,
set_array: np.ndarray,
set_names: Tuple[str, ...],
buffer_value: float,
label: str,
):
"""
Test determine_crosscut_abutting_relationships.
"""
assert isinstance(trace_series, gpd.GeoSeries)
assert isinstance(node_series, gpd.GeoSeries)
assert isinstance(node_types, np.ndarray)
assert isinstance(set_array, np.ndarray)
assert isinstance(set_names, tuple)
assert isinstance(buffer_value, float)
assert isinstance(label, str)
relations_df = determine_crosscut_abutting_relationships(
trace_series=trace_series,
node_series=node_series,
node_types=node_types,
set_array=set_array,
set_names=set_names,
buffer_value=buffer_value,
label=label,
)
assert isinstance(relations_df, pd.DataFrame)
expected_cols = ["name", "sets", "x", "y", "y-reverse", "error-count"]
assert all(col in relations_df.columns for col in expected_cols)
def test_plot_crosscut_abutting_relationships_plot():
"""
Test plot_crosscut_abutting_relationships_plot.
"""
params = Helpers.test_determine_crosscut_abutting_relationships_params[0]
relations_df = determine_crosscut_abutting_relationships(*params)
set_array = params[3]
set_names = params[4]
assert isinstance(set_array, np.ndarray)
assert isinstance(set_names, tuple)
figs, fig_axes = plot_crosscut_abutting_relationships_plot(
relations_df=relations_df, set_array=set_array, set_names=set_names
)
assert all(isinstance(fig, Figure) for fig in figs)
assert all(isinstance(ax, Axes) for ax in chain(*fig_axes))
plt.close()
|
py | 1a4d644b05380eca9028ca6eb9a2aa483ee2ec92 | from __future__ import unicode_literals # at top of module
import datetime
import json
import arrow
import pytest
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from marshmallow import ValidationError
from freezegun import freeze_time
from mock import patch
from lemur.certificates.service import create_csr
from lemur.certificates.views import * # noqa
from lemur.common import utils
from lemur.domains.models import Domain
from lemur.tests.vectors import VALID_ADMIN_API_TOKEN, VALID_ADMIN_HEADER_TOKEN, VALID_USER_HEADER_TOKEN, CSR_STR, \
INTERMEDIATE_CERT_STR, SAN_CERT_STR, SAN_CERT_KEY
def test_get_or_increase_name(session, certificate):
from lemur.certificates.models import get_or_increase_name
from lemur.tests.factories import CertificateFactory
serial = 'AFF2DB4F8D2D4D8E80FA382AE27C2333'
assert get_or_increase_name(certificate.name, certificate.serial) == '{0}-{1}'.format(certificate.name, serial)
certificate.name = 'test-cert-11111111'
assert get_or_increase_name(certificate.name, certificate.serial) == 'test-cert-11111111-' + serial
certificate.name = 'test-cert-11111111-1'
assert get_or_increase_name('test-cert-11111111-1', certificate.serial) == 'test-cert-11111111-1-' + serial
cert2 = CertificateFactory(name='certificate1-' + serial)
session.commit()
assert get_or_increase_name('certificate1', int(serial, 16)) == 'certificate1-{}-1'.format(serial)
def test_get_certificate_primitives(certificate):
from lemur.certificates.service import get_certificate_primitives
names = [x509.DNSName(x.name) for x in certificate.domains]
with freeze_time(datetime.date(year=2016, month=10, day=30)):
primitives = get_certificate_primitives(certificate)
assert len(primitives) == 25
def test_certificate_output_schema(session, certificate, issuer_plugin):
from lemur.certificates.schemas import CertificateOutputSchema
# Clear the cached attribute first
if 'parsed_cert' in certificate.__dict__:
del certificate.__dict__['parsed_cert']
# Make sure serialization parses the cert only once (uses cached 'parsed_cert' attribute)
with patch('lemur.common.utils.parse_certificate', side_effect=utils.parse_certificate) as wrapper:
data, errors = CertificateOutputSchema().dump(certificate)
assert data['issuer'] == 'LemurTrustUnittestsClass1CA2018'
assert wrapper.call_count == 1
def test_certificate_edit_schema(session):
from lemur.certificates.schemas import CertificateEditInputSchema
input_data = {'owner': '[email protected]'}
data, errors = CertificateEditInputSchema().load(input_data)
assert len(data['notifications']) == 3
def test_authority_key_identifier_schema():
from lemur.schemas import AuthorityKeyIdentifierSchema
input_data = {
'useKeyIdentifier': True,
'useAuthorityCert': True
}
data, errors = AuthorityKeyIdentifierSchema().load(input_data)
assert sorted(data) == sorted({
'use_key_identifier': True,
'use_authority_cert': True
})
assert not errors
data, errors = AuthorityKeyIdentifierSchema().dumps(data)
assert sorted(data) == sorted(json.dumps(input_data))
assert not errors
def test_certificate_info_access_schema():
from lemur.schemas import CertificateInfoAccessSchema
input_data = {'includeAIA': True}
data, errors = CertificateInfoAccessSchema().load(input_data)
assert not errors
assert data == {'include_aia': True}
data, errors = CertificateInfoAccessSchema().dump(data)
assert not errors
assert data == input_data
def test_subject_key_identifier_schema():
from lemur.schemas import SubjectKeyIdentifierSchema
input_data = {'includeSKI': True}
data, errors = SubjectKeyIdentifierSchema().load(input_data)
assert not errors
assert data == {'include_ski': True}
data, errors = SubjectKeyIdentifierSchema().dump(data)
assert not errors
assert data == input_data
def test_extension_schema(client):
from lemur.certificates.schemas import ExtensionSchema
input_data = {
'keyUsage': {
'useKeyEncipherment': True,
'useDigitalSignature': True
},
'extendedKeyUsage': {
'useServerAuthentication': True
},
'subjectKeyIdentifier': {
'includeSKI': True
}
}
data, errors = ExtensionSchema().load(input_data)
assert not errors
data, errors = ExtensionSchema().dump(data)
assert not errors
def test_certificate_input_schema(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': arrow.get(2018, 11, 9).isoformat(),
'validityEnd': arrow.get(2019, 11, 9).isoformat(),
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
assert data['authority'].id == authority.id
# make sure the defaults got set
assert data['common_name'] == 'test.example.com'
assert data['country'] == 'US'
assert data['location'] == 'Los Gatos'
assert len(data.keys()) == 19
def test_certificate_input_with_extensions(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'extensions': {
'keyUsage': {
'digital_signature': True
},
'extendedKeyUsage': {
'useClientAuthentication': True,
'useServerAuthentication': True
},
'subjectKeyIdentifier': {
'includeSKI': True
},
'subAltNames': {
'names': [
{'nameType': 'DNSName', 'value': 'test.example.com'}
]
}
},
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_certificate_out_of_range_date(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityYears': 100,
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert errors
input_data['validityStart'] = '2017-04-30T00:12:34.513631'
data, errors = CertificateInputSchema().load(input_data)
assert errors
input_data['validityEnd'] = '2018-04-30T00:12:34.513631'
data, errors = CertificateInputSchema().load(input_data)
assert errors
def test_certificate_valid_years(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityYears': 1,
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_certificate_valid_dates(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01',
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_certificate_cn_admin(client, authority, logged_in_admin):
"""Admin is exempt from CN/SAN domain restrictions."""
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': '*.admin-overrides-whitelist.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01',
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_certificate_allowed_names(client, authority, session, logged_in_user):
"""Test for allowed CN and SAN values."""
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'Names with spaces are not checked',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01',
'extensions': {
'subAltNames': {
'names': [
{'nameType': 'DNSName', 'value': 'allowed.example.com'},
{'nameType': 'IPAddress', 'value': '127.0.0.1'},
]
}
},
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_certificate_inactive_authority(client, authority, session, logged_in_user):
"""Cannot issue certificates with an inactive authority."""
from lemur.certificates.schemas import CertificateInputSchema
authority.active = False
session.add(authority)
input_data = {
'commonName': 'foo.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01',
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert errors['authority'][0] == "The authority is inactive."
def test_certificate_disallowed_names(client, authority, session, logged_in_user):
"""The CN and SAN are disallowed by LEMUR_WHITELISTED_DOMAINS."""
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': '*.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01',
'extensions': {
'subAltNames': {
'names': [
{'nameType': 'DNSName', 'value': 'allowed.example.com'},
{'nameType': 'DNSName', 'value': 'evilhacker.org'},
]
}
},
'dnsProvider': None,
}
data, errors = CertificateInputSchema().load(input_data)
assert errors['common_name'][0].startswith("Domain *.example.com does not match whitelisted domain patterns")
assert (errors['extensions']['sub_alt_names']['names'][0]
.startswith("Domain evilhacker.org does not match whitelisted domain patterns"))
def test_certificate_sensitive_name(client, authority, session, logged_in_user):
"""The CN is disallowed by 'sensitive' flag on Domain model."""
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'sensitive.example.com',
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01',
'dnsProvider': None,
}
session.add(Domain(name='sensitive.example.com', sensitive=True))
data, errors = CertificateInputSchema().load(input_data)
assert errors['common_name'][0].startswith("Domain sensitive.example.com has been marked as sensitive")
def test_create_basic_csr(client):
csr_config = dict(
common_name='example.com',
organization='Example, Inc.',
organizational_unit='Operations',
country='US',
state='CA',
location='A place',
owner='[email protected]',
key_type='RSA2048',
extensions=dict(names=dict(sub_alt_names=x509.SubjectAlternativeName([x509.DNSName('test.example.com'), x509.DNSName('test2.example.com')])))
)
csr, pem = create_csr(**csr_config)
csr = x509.load_pem_x509_csr(csr.encode('utf-8'), default_backend())
for name in csr.subject:
assert name.value in csr_config.values()
def test_csr_empty_san(client):
"""Test that an empty "names" list does not produce a CSR with empty SubjectAltNames extension.
The Lemur UI always submits this extension even when no alt names are defined.
"""
csr_text, pkey = create_csr(
common_name='daniel-san.example.com',
owner='[email protected]',
key_type='RSA2048',
extensions={'sub_alt_names': {'names': x509.SubjectAlternativeName([])}}
)
csr = x509.load_pem_x509_csr(csr_text.encode('utf-8'), default_backend())
with pytest.raises(x509.ExtensionNotFound):
csr.extensions.get_extension_for_class(x509.SubjectAlternativeName)
def test_csr_disallowed_cn(client, logged_in_user):
"""Domain name CN is disallowed via LEMUR_WHITELISTED_DOMAINS."""
from lemur.common import validators
request, pkey = create_csr(
common_name='evilhacker.org',
owner='[email protected]',
key_type='RSA2048',
)
with pytest.raises(ValidationError) as err:
validators.csr(request)
assert str(err.value).startswith('Domain evilhacker.org does not match whitelisted domain patterns')
def test_csr_disallowed_san(client, logged_in_user):
"""SAN name is disallowed by LEMUR_WHITELISTED_DOMAINS."""
from lemur.common import validators
request, pkey = create_csr(
common_name="CN with spaces isn't a domain and is thus allowed",
owner='[email protected]',
key_type='RSA2048',
extensions={'sub_alt_names': {'names': x509.SubjectAlternativeName([x509.DNSName('evilhacker.org')])}}
)
with pytest.raises(ValidationError) as err:
validators.csr(request)
assert str(err.value).startswith('Domain evilhacker.org does not match whitelisted domain patterns')
def test_get_name_from_arn(client):
from lemur.certificates.service import get_name_from_arn
arn = 'arn:aws:iam::11111111:server-certificate/mycertificate'
assert get_name_from_arn(arn) == 'mycertificate'
def test_get_account_number(client):
from lemur.certificates.service import get_account_number
arn = 'arn:aws:iam::11111111:server-certificate/mycertificate'
assert get_account_number(arn) == '11111111'
def test_mint_certificate(issuer_plugin, authority):
from lemur.certificates.service import mint
cert_body, private_key, chain, external_id, csr = mint(authority=authority, csr=CSR_STR)
assert cert_body == SAN_CERT_STR
def test_create_certificate(issuer_plugin, authority, user):
from lemur.certificates.service import create
cert = create(authority=authority, csr=CSR_STR, owner='[email protected]', creator=user['user'])
assert str(cert.not_after) == '2047-12-31T22:00:00+00:00'
assert str(cert.not_before) == '2017-12-31T22:00:00+00:00'
assert cert.issuer == 'LemurTrustUnittestsClass1CA2018'
assert cert.name == 'SAN-san.example.org-LemurTrustUnittestsClass1CA2018-20171231-20471231-AFF2DB4F8D2D4D8E80FA382AE27C2333'
cert = create(authority=authority, csr=CSR_STR, owner='[email protected]', name='ACustomName1', creator=user['user'])
assert cert.name == 'ACustomName1'
def test_reissue_certificate(issuer_plugin, authority, certificate):
from lemur.certificates.service import reissue_certificate
new_cert = reissue_certificate(certificate)
assert new_cert
def test_create_csr():
csr, private_key = create_csr(owner='[email protected]', common_name='ACommonName', organization='test', organizational_unit='Meters', country='US',
state='CA', location='Here', key_type='RSA2048')
assert csr
assert private_key
extensions = {'sub_alt_names': {'names': x509.SubjectAlternativeName([x509.DNSName('AnotherCommonName')])}}
csr, private_key = create_csr(owner='[email protected]', common_name='ACommonName', organization='test', organizational_unit='Meters', country='US',
state='CA', location='Here', extensions=extensions, key_type='RSA2048')
assert csr
assert private_key
def test_import(user):
from lemur.certificates.service import import_certificate
cert = import_certificate(body=SAN_CERT_STR, chain=INTERMEDIATE_CERT_STR, private_key=SAN_CERT_KEY, creator=user['user'])
assert str(cert.not_after) == '2047-12-31T22:00:00+00:00'
assert str(cert.not_before) == '2017-12-31T22:00:00+00:00'
assert cert.issuer == 'LemurTrustUnittestsClass1CA2018'
assert cert.name == 'SAN-san.example.org-LemurTrustUnittestsClass1CA2018-20171231-20471231-AFF2DB4F8D2D4D8E80FA382AE27C2333-2'
cert = import_certificate(body=SAN_CERT_STR, chain=INTERMEDIATE_CERT_STR, private_key=SAN_CERT_KEY, owner='[email protected]', name='ACustomName2', creator=user['user'])
assert cert.name == 'ACustomName2'
@pytest.mark.skip
def test_upload(user):
from lemur.certificates.service import upload
cert = upload(body=SAN_CERT_STR, chain=INTERMEDIATE_CERT_STR, private_key=SAN_CERT_KEY, owner='[email protected]', creator=user['user'])
assert str(cert.not_after) == '2040-01-01T20:30:52+00:00'
assert str(cert.not_before) == '2015-06-26T20:30:52+00:00'
assert cert.issuer == 'Example'
assert cert.name == 'long.lived.com-Example-20150626-20400101-3'
cert = upload(body=SAN_CERT_STR, chain=INTERMEDIATE_CERT_STR, private_key=SAN_CERT_KEY, owner='[email protected]', name='ACustomName', creator=user['user'])
assert 'ACustomName' in cert.name
# verify upload with a private key as a str
def test_upload_private_key_str(user):
from lemur.certificates.service import upload
cert = upload(body=SAN_CERT_STR, chain=INTERMEDIATE_CERT_STR, private_key=SAN_CERT_KEY, owner='[email protected]', name='ACustomName', creator=user['user'])
assert cert
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
('', 401)
])
def test_certificate_get_private_key(client, token, status):
assert client.get(api.url_for(Certificates, certificate_id=1), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
('', 401)
])
def test_certificate_get(client, token, status):
assert client.get(api.url_for(Certificates, certificate_id=1), headers=token).status_code == status
def test_certificate_get_body(client):
response_body = client.get(api.url_for(Certificates, certificate_id=1), headers=VALID_USER_HEADER_TOKEN).json
assert response_body['serial'] == '211983098819107449768450703123665283596'
assert response_body['serialHex'] == '9F7A75B39DAE4C3F9524C68B06DA6A0C'
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_post(client, token, status):
assert client.post(api.url_for(Certificates, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
('', 401)
])
def test_certificate_put(client, token, status):
assert client.put(api.url_for(Certificates, certificate_id=1), data={}, headers=token).status_code == status
def test_certificate_put_with_data(client, certificate, issuer_plugin):
resp = client.put(api.url_for(Certificates, certificate_id=certificate.id), data=json.dumps({'owner': '[email protected]', 'description': 'test', 'notify': True}), headers=VALID_ADMIN_HEADER_TOKEN)
assert resp.status_code == 200
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_delete(client, token, status):
assert client.delete(api.url_for(Certificates, certificate_id=1), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_patch(client, token, status):
assert client.patch(api.url_for(Certificates, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
('', 401)
])
def test_certificates_get(client, token, status):
assert client.get(api.url_for(CertificatesList), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
('', 401)
])
def test_certificates_post(client, token, status):
assert client.post(api.url_for(CertificatesList), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_put(client, token, status):
assert client.put(api.url_for(CertificatesList), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_delete(client, token, status):
assert client.delete(api.url_for(CertificatesList), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_patch(client, token, status):
assert client.patch(api.url_for(CertificatesList), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_credentials_post(client, token, status):
assert client.post(api.url_for(CertificatePrivateKey, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_credentials_put(client, token, status):
assert client.put(api.url_for(CertificatePrivateKey, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_credentials_delete(client, token, status):
assert client.delete(api.url_for(CertificatePrivateKey, certificate_id=1), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificate_credentials_patch(client, token, status):
assert client.patch(api.url_for(CertificatePrivateKey, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_upload_get(client, token, status):
assert client.get(api.url_for(CertificatesUpload), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
('', 401)
])
def test_certificates_upload_post(client, token, status):
assert client.post(api.url_for(CertificatesUpload), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_upload_put(client, token, status):
assert client.put(api.url_for(CertificatesUpload), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_upload_delete(client, token, status):
assert client.delete(api.url_for(CertificatesUpload), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
('', 405)
])
def test_certificates_upload_patch(client, token, status):
assert client.patch(api.url_for(CertificatesUpload), data={}, headers=token).status_code == status
def test_sensitive_sort(client):
resp = client.get(api.url_for(CertificatesList) + '?sortBy=private_key&sortDir=asc', headers=VALID_ADMIN_HEADER_TOKEN)
assert "'private_key' is not sortable or filterable" in resp.json['message']
def test_boolean_filter(client):
resp = client.get(api.url_for(CertificatesList) + '?filter=notify;true', headers=VALID_ADMIN_HEADER_TOKEN)
assert resp.status_code == 200
# Also don't crash with invalid input (we currently treat that as false)
resp = client.get(api.url_for(CertificatesList) + '?filter=notify;whatisthis', headers=VALID_ADMIN_HEADER_TOKEN)
assert resp.status_code == 200
|
py | 1a4d64ccedd0ede270e791f92ab22d8751fe3dd0 | from setuptools import setup, find_packages
with open('README.md', 'r', encoding='utf-8') as f:
long_description = f.read()
setup(
name="getpalette",
version="1.0.7",
description="Get color palette from images",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ssiyad/getpalette",
author="Sabu Siyad",
author_email="[email protected]",
license="MIT",
packages=find_packages(),
include_package_data=True,
install_requires=[
'Pillow==8.2.0',
'matplotlib==3.0.3',
'scipy==1.3.1',
'pandas==0.24.2'
],
entry_points={
"console_scripts": [
"getpalette = getpalette.__main__:main",
]
}
)
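# After `pip install .` (illustrative), the console_scripts entry point above
# exposes a CLI, e.g.: getpalette path/to/image.jpg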
|
py | 1a4d654f44ca53c4d56426d3d408fac4e5bdd823 | from .illustration import plot_data_w_fluid, plot_mixing
from .activation import activation_fn_dispatcher
import json
import numpy as np
import sys
def do_measurements(ex, _config, _run, sim_info, pXs, pVs, acc, ms, fXs, fVs, plotting_this_iteration, save_all_data_this_iteration):
if acc is not None:
_run.log_scalar("Ratio activated", sum(acc)/len(acc), sim_info['time_step_index'])
_run.log_scalar("Mass ratio activated", sum(ms*acc)/sum(ms), sim_info['time_step_index'])
_run.log_scalar("Mass activated", sum(ms*acc), sim_info['time_step_index'])
if pVs is not None:
## Avg velocity
pV_avg = np.mean(pVs, axis=0)
_run.log_scalar("pv_avg_x", pV_avg[0], sim_info['time_step_index'])
_run.log_scalar("pv_avg_y", pV_avg[1], sim_info['time_step_index'])
if fVs is not None:
## Max Velocity
ind_fVs_max = np.argmax(np.linalg.norm(fVs,axis=1))
_run.log_scalar("vmax_x", fVs[ind_fVs_max,0], sim_info['time_step_index'])
_run.log_scalar("vmax_y", fVs[ind_fVs_max,1], sim_info['time_step_index'])
_run.log_scalar("x_vmax", fXs[ind_fVs_max,0], sim_info['time_step_index'])
_run.log_scalar("y_vmax", fXs[ind_fVs_max,1], sim_info['time_step_index'])
_run.log_scalar("x_vmax_c", fXs[ind_fVs_max,0]-(sim_info['x_max']+sim_info['x_min'])/2, sim_info['time_step_index'])
_run.log_scalar("y_vmax_c", fXs[ind_fVs_max,1]-(sim_info['x_max']+sim_info['x_min'])/2, sim_info['time_step_index'])
## Avg velocity
fV_avg = np.mean(fVs, axis=0)
_run.log_scalar("fv_avg_x", fV_avg[0], sim_info['time_step_index'])
_run.log_scalar("fv_avg_y", fV_avg[1], sim_info['time_step_index'])
## Avg velocity in activated area
w = activation_fn_dispatcher(_config, sim_info['t'])(fXs)
fV_acc_avg = np.average(fVs ,weights=w, axis=0)
_run.log_scalar("fv_acc_avg_x", fV_acc_avg[0], sim_info['time_step_index'])
_run.log_scalar("fv_acc_avg_y", fV_acc_avg[1], sim_info['time_step_index'])
if save_all_data_this_iteration:
d= {
'pXs' : pXs.tolist() ,
'pVs' : pVs.tolist() ,
'acc' : acc.tolist() ,
'ms' : ms.tolist() ,
'fXs' : fXs.tolist() ,
'fVs' : fVs.tolist() ,
'sim_info' : sim_info
}
dump_file_loc = f"{sim_info['data_dir']}/data_dump-{sim_info['time_step_index']}.json"
with open(dump_file_loc, 'w') as f:
json.dump(d,f, indent=4)
ex.add_artifact(dump_file_loc)
if plotting_this_iteration:
if _config.get('mixing_experiment',False):
plot_mixing(pXs,
sim_info,
image_folder=sim_info['data_dir'],
title=f"t={sim_info['t']:.3f}",
L=_config['L'],
fix_frame=True,
SAVEFIG=_config['SAVEFIG'],
ex=ex)
else:
plot_data_w_fluid(pXs, pVs, fXs, fVs,
sim_info,
image_folder=sim_info['data_dir'],
title=f"t={sim_info['t']:.3f}",
L=_config['L'],
fix_frame=True,
SAVEFIG=_config['SAVEFIG'],
ex=ex,
plot_particles=True,
plot_fluids=True,
side_by_side=True,
fluid_plot_type = 'quiver')
def do_one_timestep_correlation_measurement(ex, _config, _run, sim_info, pXs, pXs_old):
assert(pXs.shape==pXs_old.shape)
p1 = pXs.flatten()
p2 = pXs_old.flatten()
corr = np.dot(p1,p2)/(np.linalg.norm(p1)*np.linalg.norm(p2))
_run.log_scalar("One timestep correlator", corr, sim_info['time_step_index'])
return corr
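# Sanity sketch (illustrative): with pXs == pXs_old the correlator is exactly
# 1.0, since dot(v, v) equals |v|**2 for the flattened position vector.
#   p = np.random.rand(8, 2); v = p.flatten()
#   assert np.isclose(np.dot(v, v) / (np.linalg.norm(v) ** 2), 1.0)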
|
py | 1a4d65f75bd21f0564f2ae5c6ced0ce323e5123e | import math
import torch
from torch.optim.optimizer import Optimizer
from torch.nn.utils import parameters_to_vector, vector_to_parameters
import torch.nn as nn
import torch.nn.functional as F
################################
## PyTorch Optimizer for VOGN ##
################################
required = object()
def update_input(self, input, output):
self.input = input[0].data
self.output = output
class VOGN(Optimizer):
"""Implements the VOGN algorithm. It uses the Generalized Gauss Newton (GGN)
approximation to the Hessian and a mean-field approximation. Note that this
optimizer does **not** support multiple model parameter groups. All model
    parameters must use the same optimizer parameters.
    Arguments:
model (nn.Module): network model
train_set_size (int): number of data points in the full training set
(objective assumed to be on the form (1/M)*sum(-log p))
lr (float, optional): learning rate (default: 1e-3)
        beta1, beta2 (float, optional): coefficients used for computing running
            averages of the gradient and of the GGN (defaults: 0.9, 0.999)
prior_prec (float, optional): prior precision on parameters
(default: 1.0)
prec_init (float, optional): initial precision for variational dist. q
(default: 1.0)
        num_samples (int, optional): number of MC samples
(default: 1)
"""
def __init__(self, model, train_set_size, lr=1e-3, beta1=0.9, beta2=0.999, prior_prec=1.0, prec_init=1.0,
num_samples=1):
if lr <= 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if prior_prec < 0.0:
raise ValueError("Invalid prior precision value: {}".format(prior_prec))
if prec_init < 0.0:
raise ValueError("Invalid initial s value: {}".format(prec_init))
if not 0.0 <= beta1 < 1.0:
raise ValueError("Invalid beta parameter: {}".format(beta1))
if not 0.0 <= beta2 < 1.0:
raise ValueError("Invalid beta parameter: {}".format(beta2))
if num_samples < 1:
raise ValueError("Invalid num_samples parameter: {}".format(num_samples))
if train_set_size < 1:
raise ValueError("Invalid number of training data points: {}".format(train_set_size))
defaults = dict(lr=lr, beta1=beta1, beta2=beta2, prior_prec=prior_prec, prec_init=prec_init,
num_samples=num_samples,
train_set_size=train_set_size)
self.train_modules = []
self.set_train_modules(model)
super(type(self), self).__init__(model.parameters(), defaults)
for module in self.train_modules:
module.register_forward_hook(update_input)
p = parameters_to_vector(self.param_groups[0]['params'])
# mean parameter of variational distribution.
self.state['mu'] = p.clone().detach()
self.state['mu_grad_avg'] = torch.zeros_like(p).detach()
# covariance parameter of variational distribution -- saved as the diagonal precision matrix.
self.state['Precision'] = torch.ones_like(p).mul_(defaults['prec_init']).detach()
def set_train_modules(self, module):
if len(list(module.children())) == 0:
if len(list(module.parameters())) != 0:
self.train_modules.append(module)
else:
for child in list(module.children()):
self.set_train_modules(child)
def step(self, closure):
"""Performs a single optimization step.
Arguments:
closure (callable): A closure that reevaluates the model
and returns the loss without doing the backward pass
"""
if closure is None:
raise RuntimeError(
'For now, VOGN only supports that the model/loss can be reevaluated inside the step function')
defaults = self.defaults
# We only support a single parameter group.
parameters = self.param_groups[0]['params']
Precision = self.state['Precision']
mu = self.state['mu']
GGN_hat = None
mu_grad_hat = None
loss_list = []
pred_list = []
for _ in range(defaults['num_samples']):
# Sample a parameter vector:
raw_noise = torch.normal(mean=torch.zeros_like(mu), std=1.0).detach()
p = torch.addcdiv(mu, 1., raw_noise, torch.sqrt(Precision))
vector_to_parameters(p, parameters)
# Store the loss
loss, preds = closure()
loss_list.append(loss.detach())
pred_list.append(preds.detach())
linear_combinations = []
# Store the pre-activations
for module in self.train_modules:
linear_combinations.append(module.output)
# Do the Backward pass for gradients and square gradients
linear_grad = torch.autograd.grad(loss, linear_combinations)
ggn = []
grad = []
N = 1
for i, module in enumerate(self.train_modules):
G = linear_grad[i].detach()
A = module.input.detach()
M = A.shape[0]
A = torch.cat([A] * N)
G *= M
G2 = torch.mul(G, G)
if isinstance(module, nn.BatchNorm1d):
A2 = torch.mul(A, A)
grad.append(torch.einsum('ij->j', torch.mul(G, A)))
ggn.append(torch.einsum('ij->j', torch.mul(G2, A2)))
if module.bias is not None:
grad.append(torch.einsum('ij->j', G))
ggn.append(torch.einsum('ij->j', G2))
if isinstance(module, nn.BatchNorm2d):
A2 = torch.mul(A, A)
grad.append(torch.einsum('ijkl->j', torch.mul(G, A)))
ggn.append(torch.einsum('ijkl->j', torch.mul(G2, A2)))
if module.bias is not None:
grad.append(torch.einsum('ijkl->j', G))
ggn.append(torch.einsum('ijkl->j', G2))
if isinstance(module, nn.Linear):
A2 = torch.mul(A, A)
grad.append(torch.einsum('ij,ik->jk', G, A))
ggn.append(torch.einsum('ij, ik->jk', G2, A2))
if module.bias is not None:
# A = torch.ones((M * N, 1), device=A.device)
grad.append(torch.einsum('ij->j', G))
ggn.append(torch.einsum('ij->j', G2))
if isinstance(module, nn.Conv2d):
A = F.unfold(A, kernel_size=module.kernel_size, dilation=module.dilation, padding=module.padding,
stride=module.stride)
A2 = torch.mul(A, A)
_, k, hw = A.shape
_, c, _, _ = G.shape
'''G = G.view(M, c, -1)
mean = torch.zeros((c, k), device=A.device)
mean.addbmm_(G, A)'''
_, c, _, _ = G.shape
G = G.view(M * N, c, -1)
G2 = G2.view(M * N, c, -1)
grad.append(torch.einsum('ijl,ikl->jk', G, A))
'''mean = torch.zeros((c, k), device=A.device)
mean.addbmm_(torch.mul(G, G), torch.mul(A, A))'''
ggn.append(torch.einsum('ijl,ikl->jk', G2, A2))
if module.bias is not None:
# A = torch.ones((M * N, 1, hw), device=A.device)
'''mean = torch.zeros((c, 1), device=A.device)
mean.addbmm_(G, A)'''
grad.append(torch.einsum('ijl->j', G).detach())
'''mean = torch.zeros((c, 1), device=A.device)
mean.addbmm_(torch.mul(G, G), torch.mul(A, A))'''
ggn.append(torch.einsum('ijl->j', G2).detach())
grad_vec = parameters_to_vector(grad).div(M).detach()
ggn_vec = parameters_to_vector(ggn).div(M).detach()
if mu_grad_hat is None:
mu_grad_hat = torch.zeros_like(grad_vec)
mu_grad_hat.add_(grad_vec)
if GGN_hat is None:
GGN_hat = torch.zeros_like(ggn_vec)
GGN_hat.add_(ggn_vec)
# Convert the parameter gradient to a single vector.
mu_grad_hat = mu_grad_hat.mul(defaults['train_set_size'] / defaults['num_samples'])
GGN_hat.mul_(defaults['train_set_size'] / defaults['num_samples'])
self.state['mu_grad_avg'].mul_(defaults['beta1']).add_(mu_grad_hat.mul(1 - defaults['beta1']))
# Get the mean loss over the number of samples
loss = torch.mean(torch.stack(loss_list))
#preds = torch.mean(torch.stack(pred_list))
# Update precision matrix
Precision = Precision.mul(defaults['beta2']) + GGN_hat.add(defaults['prior_prec']).mul(1 - defaults['beta2'])
self.state['Precision'] = Precision
# Update mean vector
mu.addcdiv_(-self.param_groups[0]['lr'], self.state['mu_grad_avg'] + torch.mul(mu, defaults['prior_prec']), Precision)
self.state['mu'] = mu
vector_to_parameters(self.state['mu'], self.param_groups[0]['params'])
return loss, pred_list
def get_mc_predictions(self, forward_function, inputs, ret_numpy=False, raw_noises=None, *args, **kwargs):
"""Returns Monte Carlo predictions.
Arguments:
forward_function (callable): The forward function of the model
that takes inputs and returns the outputs.
inputs (FloatTensor): The inputs to the model.
            raw_noises (list): Noise vectors used to draw parameter samples
                (one prediction is returned per entry).
ret_numpy (bool): If true, the returned list contains numpy arrays,
otherwise it contains torch tensors.
"""
# We only support a single parameter group.
parameters = self.param_groups[0]['params']
predictions = []
Precision = self.state['Precision']
mu = self.state['mu']
if raw_noises is None:
raw_noises = [torch.zeros_like(mu)]
for raw_noise in raw_noises:
# Sample a parameter vector:
# raw_noise = torch.normal(mean=torch.zeros_like(mu), std=1.0)
p = torch.addcdiv(mu, 1., raw_noise, torch.sqrt(Precision))
vector_to_parameters(p, parameters)
# Call the forward computation function
outputs = forward_function(inputs, *args, **kwargs)
if ret_numpy:
outputs = outputs.data.cpu().numpy()
predictions.append(outputs)
vector_to_parameters(self.state['mu'], self.param_groups[0]['params'])
return predictions
def _kl_gaussian(self, p_mu, p_sigma, q_mu, q_sigma):
var_ratio = (p_sigma / q_sigma).pow(2)
t1 = ((p_mu - q_mu) / q_sigma).pow(2)
return 0.5 * torch.sum((var_ratio + t1 - 1 - var_ratio.log()))
def kl_divergence(self):
prec0 = self.defaults['prior_prec']
prec = self.state['Precision']
mu = self.state['mu']
sigma = 1. / torch.sqrt(prec)
mu0 = 0.
sigma0 = 1. / math.sqrt(prec0)
kl = self._kl_gaussian(p_mu=mu, p_sigma=sigma, q_mu=mu0, q_sigma=sigma0)
        return kl
|
py | 1a4d6766b5d716b75b1e585b34beb013239d38b3 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GradientDescent for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import constant_op
# pylint: disable=unused-import
from tensorflow.python.ops import math_ops
# pylint: enable=unused-import
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class GradientDescentOptimizer(optimizer.Optimizer):
"""Optimizer that implements the gradient descent algorithm.
@@__init__
"""
def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
"""Construct a new gradient descent optimizer.
Args:
learning_rate: A Tensor or a floating point value. The learning
rate to use.
      use_locking: If True, use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "GradientDescent".
"""
super(GradientDescentOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
def _apply_dense(self, grad, var):
return training_ops.apply_gradient_descent(
var,
self._learning_rate_tensor,
grad,
use_locking=self._use_locking).op
def _apply_sparse(self, grad, var):
delta = ops.IndexedSlices(grad.values * self._learning_rate_tensor,
grad.indices, grad.dense_shape)
return var.scatter_sub(delta, use_locking=self._use_locking)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
name="learning_rate")
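  # Usage sketch (illustrative, graph-mode TF 1.x style):
  #   opt = GradientDescentOptimizer(learning_rate=0.1)
  #   train_op = opt.minimize(loss)  # minimize() is inherited from Optimizer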
|
py | 1a4d677b0b462dc59204812612e4bc25318a2a4b | from django.db import models
# Create your models here.
from django.db import models
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
|
py | 1a4d6901f0a7db7ae09d3a316b07a09c8083b063 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
# https://github.com/pypa/setuptools_scm
use_scm = {"write_to": "napari_subboxer/_version.py"}
setup(use_scm_version=use_scm)
|
py | 1a4d693be9ef3d45402fda3923023aaed1e26afe | from typing import List
SELFID = "0" * 32
def maybe_and(sql, a):
if a:
return sql + " AND "
else:
return sql
def maybe_or(sql, a):
if a:
return sql + " OR "
else:
return sql
# TODO counts
def get_property_value(agent_memory, mem, prop):
# order of precedence:
# 1: main memory table
# 2: table corresponding to the nodes .TABLE
# 3: triple with the nodes memid as subject and prop as predicate
# is it in the main memory table?
cols = [c[1] for c in agent_memory._db_read("PRAGMA table_info(Memories)")]
if prop in cols:
cmd = "SELECT " + prop + " FROM Memories WHERE uuid=?"
r = agent_memory._db_read(cmd, mem.memid)
return r[0][0]
# is it in the mem.TABLE?
T = mem.TABLE
cols = [c[1] for c in agent_memory._db_read("PRAGMA table_info({})".format(T))]
if prop in cols:
cmd = "SELECT " + prop + " FROM " + T + " WHERE uuid=?"
r = agent_memory._db_read(cmd, mem.memid)
return r[0][0]
# is it a triple?
triples = agent_memory.get_triples(subj=mem.memid, pred_text=prop, return_obj_text="always")
if len(triples) > 0:
return triples[0][2]
return None
class MemorySearcher:
def __init__(self, self_memid=SELFID, search_data=None):
self.self_memid = self_memid
self.search_data = search_data
def search(self, memory, search_data=None) -> List["ReferenceObjectNode"]: # noqa T484
raise NotImplementedError
class ReferenceObjectSearcher(MemorySearcher):
def __init__(self, self_memid=SELFID, search_data=None):
super().__init__(self_memid=SELFID, search_data=None)
def is_filter_empty(self, filter_dict):
r = filter_dict.get("special")
if r and len(r) > 0:
return False
r = filter_dict.get("ref_obj_range")
if r and len(r) > 0:
return False
r = filter_dict.get("ref_obj_exact")
if r and len(r) > 0:
return False
r = filter_dict.get("memories_range")
if r and len(r) > 0:
return False
r = filter_dict.get("memories_exact")
if r and len(r) > 0:
return False
t = filter_dict.get("triples")
if t and len(t) > 0:
return False
return True
def range_queries(self, r, table, a=False):
""" this does x, y, z, pitch, yaw, etc.
        input format is
        {"xmin": float, "xmax": float, ... , "yawmin": float, "yawmax": float}
"""
sql = ""
vals = []
for k, v in r.items():
if "min" in k:
sql = maybe_and(sql, len(vals) > 0)
sql += table + "." + k.replace("min", "") + ">? "
vals.append(v)
if "max" in k:
sql = maybe_and(sql, len(vals) > 0)
sql += table + "." + k.replace("max", "") + "<? "
vals.append(v)
return sql, vals
def exact_matches(self, m, table, a=False):
sql = ""
vals = []
for k, v in m.items():
sql = maybe_and(sql, len(vals) > 0)
sql += table + "." + k + "=? "
vals.append(v)
return sql, vals
def triples(self, triples, a=False):
# currently does an "and": the memory needs to satisfy all triples
vals = []
if not triples:
return "", vals
sql = "ReferenceObjects.uuid IN (SELECT subj FROM Triples WHERE "
for t in triples:
sql = maybe_or(sql, len(vals) > 0)
vals.append(t["pred_text"])
if t.get("obj_text"):
sql += "(pred_text, obj_text)=(?, ?)"
vals.append(t["obj_text"])
else:
sql += "(pred_text, obj)=(?, ?)"
vals.append(t["obj"])
sql += " GROUP BY subj HAVING COUNT(subj)=? )"
vals.append(len(triples))
return sql, vals
def get_query(self, filter_dict, ignore_self=True):
if self.is_filter_empty(filter_dict):
query = "SELECT uuid FROM ReferenceObjects"
if ignore_self:
query += " WHERE uuid !=?"
return query, [self.self_memid]
else:
return query, []
query = (
"SELECT ReferenceObjects.uuid FROM ReferenceObjects"
" INNER JOIN Memories as M on M.uuid=ReferenceObjects.uuid"
" WHERE "
)
args = []
fragment, vals = self.range_queries(
filter_dict.get("ref_obj_range", {}), "ReferenceObjects"
)
query = maybe_and(query, len(args) > 0)
args.extend(vals)
query += fragment
fragment, vals = self.exact_matches(
filter_dict.get("ref_obj_exact", {}), "ReferenceObjects"
)
query = maybe_and(query, len(args) > 0 and len(vals) > 0)
args.extend(vals)
query += fragment
fragment, vals = self.range_queries(filter_dict.get("memories_range", {}), "M")
query = maybe_and(query, len(args) > 0 and len(vals) > 0)
args.extend(vals)
query += fragment
fragment, vals = self.exact_matches(filter_dict.get("memories_exact", {}), "M")
query = maybe_and(query, len(args) > 0 and len(vals) > 0)
args.extend(vals)
query += fragment
fragment, vals = self.triples(filter_dict.get("triples", []))
query = maybe_and(query, len(args) > 0 and len(vals) > 0)
args.extend(vals)
query += fragment
if ignore_self:
query += " AND ReferenceObjects.uuid !=?"
args.append(self.self_memid)
return query, args
    # TODO: add a flag (on by default) that copies speaker_look etc., so a
    # searcher called later does not return the updated position of the
    # agent/speaker/speaker_look. How should this distinction be parsed?
def handle_special(self, memory, search_data):
d = search_data.get("special")
if not d:
return []
if d.get("SPEAKER"):
return [memory.get_player_by_eid(d["SPEAKER"])]
if d.get("SPEAKER_LOOK"):
memids = memory._db_read_one(
'SELECT uuid FROM ReferenceObjects WHERE ref_type="attention" AND type_name=?',
d["SPEAKER_LOOK"],
)
if memids:
memid = memids[0]
mem = memory.get_location_by_id(memid)
return [mem]
if d.get("AGENT"):
return [memory.get_player_by_eid(d["AGENT"])]
if d.get("DUMMY"):
return [d["DUMMY"]]
return []
def search(self, memory, search_data=None) -> List["ReferenceObjectNode"]: # noqa T484
"""Find ref_objs matching the given filters
        search_data (the filter dict) has children:
"ref_obj_range", dict, with keys "min<column_name>" or "max<column_name>",
(that is the string "min" prepended to the column name)
and float values vmin and vmax respectively.
<column_name> is any column in the ReferenceObjects table that
is a numerical value. filters on rows satisfying the inequality
<column_entry> > vmin or <column_entry> < vmax
"ref_obj_exact", dict, with keys "<column_name>"
<column_name> is any column in the ReferenceObjects table
checks exact matches to the value
"memories_range" and "memories_exact" are the same, but columns in the Memories table
"triples" list [t0, t1, ...,, tm]. each t in the list is a dict
with form t = {"pred_text": <pred>, "obj_text": <obj>}
or t = {"pred_text": <pred>, "obj": <obj_memid>}
currently returns memories with all triples matched
"""
if not search_data:
search_data = self.search_data
assert search_data
if search_data.get("special"):
return self.handle_special(memory, search_data)
query, args = self.get_query(search_data)
self.search_data = search_data
memids = [m[0] for m in memory._db_read(query, *args)]
return [memory.get_mem_by_id(memid) for memid in memids]
if __name__ == "__main__":
filter_dict = {
"ref_obj_range": {"minx": 3},
"memories_exact": {"create_time": 1},
"triples": [
{"pred_text": "has_tag", "obj_text": "cow"},
{"pred_text": "has_name", "obj_text": "eddie"},
],
}
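    # Illustrative continuation (not in the original): running the searcher
    # requires a real agent memory exposing _db_read/_db_read_one.
    #   searcher = ReferenceObjectSearcher()
    #   matches = searcher.search(agent_memory, search_data=filter_dict)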
|
py | 1a4d69737684dd90366530acfb5a0afc1194adb9 | # Time: O(n * k), n is the number of coins, k is the amount of money
# Space: O(k)
class Solution(object):
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
INF = 0x7fffffff # Using float("inf") would be slower.
dp = [INF] * (amount + 1)
dp[0] = 0
for i in range(amount + 1):
if dp[i] != INF:
for coin in coins:
if i + coin <= amount:
dp[i + coin] = min(dp[i + coin], dp[i] + 1)
return dp[amount] if dp[amount] != INF else -1
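# Quick check (illustrative): 11 = 5 + 5 + 1 needs three coins.
#   print(Solution().coinChange([1, 2, 5], 11))  # -> 3
#   print(Solution().coinChange([2], 3))         # -> -1 (no combination)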
|
py | 1a4d6979953fe62ba50dd567447cd69eb6aeaa26 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: stream_metrics_schema.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from artifact_sdk.model.metadata_center import stream_metrics_schema_field_pb2 as artifact__sdk_dot_model_dot_metadata__center_dot_stream__metrics__schema__field__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='stream_metrics_schema.proto',
package='metadata_center',
syntax='proto3',
serialized_options=_b('ZIgo.easyops.local/contracts/protorepo-models/easyops/model/metadata_center'),
serialized_pb=_b('\n\x1bstream_metrics_schema.proto\x12\x0fmetadata_center\x1a\x44\x61rtifact_sdk/model/metadata_center/stream_metrics_schema_field.proto\"\xc8\x01\n\x13StreamMetricsSchema\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0b\n\x03org\x18\x02 \x01(\x05\x12\x0f\n\x07version\x18\x03 \x01(\x05\x12\x0c\n\x04name\x18\x04 \x01(\t\x12=\n\ndimensions\x18\x05 \x03(\x0b\x32).metadata_center.StreamMetricsSchemaField\x12:\n\x07metrics\x18\x06 \x03(\x0b\x32).metadata_center.StreamMetricsSchemaFieldBKZIgo.easyops.local/contracts/protorepo-models/easyops/model/metadata_centerb\x06proto3')
,
dependencies=[artifact__sdk_dot_model_dot_metadata__center_dot_stream__metrics__schema__field__pb2.DESCRIPTOR,])
_STREAMMETRICSSCHEMA = _descriptor.Descriptor(
name='StreamMetricsSchema',
full_name='metadata_center.StreamMetricsSchema',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='metadata_center.StreamMetricsSchema.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='metadata_center.StreamMetricsSchema.org', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='metadata_center.StreamMetricsSchema.version', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='metadata_center.StreamMetricsSchema.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dimensions', full_name='metadata_center.StreamMetricsSchema.dimensions', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metrics', full_name='metadata_center.StreamMetricsSchema.metrics', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=119,
serialized_end=319,
)
_STREAMMETRICSSCHEMA.fields_by_name['dimensions'].message_type = artifact__sdk_dot_model_dot_metadata__center_dot_stream__metrics__schema__field__pb2._STREAMMETRICSSCHEMAFIELD
_STREAMMETRICSSCHEMA.fields_by_name['metrics'].message_type = artifact__sdk_dot_model_dot_metadata__center_dot_stream__metrics__schema__field__pb2._STREAMMETRICSSCHEMAFIELD
DESCRIPTOR.message_types_by_name['StreamMetricsSchema'] = _STREAMMETRICSSCHEMA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StreamMetricsSchema = _reflection.GeneratedProtocolMessageType('StreamMetricsSchema', (_message.Message,), {
'DESCRIPTOR' : _STREAMMETRICSSCHEMA,
'__module__' : 'stream_metrics_schema_pb2'
# @@protoc_insertion_point(class_scope:metadata_center.StreamMetricsSchema)
})
_sym_db.RegisterMessage(StreamMetricsSchema)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
py | 1a4d69b62b51d1a50eee143987e0cdacb5366ac2 | from django.db import models
# Create your models here.
class Music(models.Model):
class Meta:
db_table = 'music'
title = models.CharField(max_length=200)
seconds = models.IntegerField()
def __str__(self):
        return self.title
|
py | 1a4d6a4d1c59652b1fbdd316411dc01bacc55620 | from django.core.management.base import BaseCommand
from django.core.cache import cache
from redis.exceptions import ResponseError
class Command(BaseCommand):
def handle(self, *args, **kwargs):
try:
cache.clear()
except ResponseError:
cache.clear()
self.stdout.write('Cleared cache\n')
|
py | 1a4d6a6aaebf4f4d862ce9e92bb68fe6463aa45b | import unittest
import six
from pynetbox.core.endpoint import Endpoint
if six.PY3:
from unittest.mock import patch, Mock
else:
from mock import patch, Mock
class EndPointTestCase(unittest.TestCase):
def test_filter(self):
with patch(
"pynetbox.core.query.Request.get", return_value=Mock()
) as mock:
api = Mock(base_url="http://localhost:8000/api")
app = Mock(name="test")
mock.return_value = [{'id': 123}, {'id': 321}]
test_obj = Endpoint(api, app, "test")
test = test_obj.filter(test="test")
self.assertEqual(len(test), 2)
def test_filter_empty_kwargs(self):
api = Mock(base_url="http://localhost:8000/api")
app = Mock(name="test")
test_obj = Endpoint(api, app, "test")
with self.assertRaises(ValueError) as _:
test_obj.filter()
def test_filter_reserved_kwargs(self):
api = Mock(base_url="http://localhost:8000/api")
app = Mock(name="test")
test_obj = Endpoint(api, app, "test")
with self.assertRaises(ValueError) as _:
test_obj.filter(id=1)
|
py | 1a4d6b7f1cbc75902e4b62063867301ab080409d | import sys
import re
import argparse
parser = argparse.ArgumentParser(description='Filter morpho-analyzed data from stdin.')
parser.add_argument('--max', type=int, default=500000, help="How many unique words to include. Default %(default)d.")
parser.add_argument('FILE', default='extension/dict-purelist/fi_FI', action='store', help="FILE.dic and FILE.aff will be created")
args = parser.parse_args()
word_re=re.compile(r"^[a-zA-ZåäöÅÄÖ]([a-zA-ZåäöÅÄÖ-]*[a-zA-ZåäöÅÄÖ])?$")
words_s=set()
words_l=[]
for line in sys.stdin:
line=line.rstrip()
if not line: #empty
continue
wrd,analysis=line.split("\t",1)
if analysis.endswith("+?"): #unrecognized
continue
if wrd in words_s: #already done
continue
if wrd[0]!=analysis[0] and wrd[0].lower()==analysis[0]: #capitalized version of lowercased lemmas
wrd=wrd.lower()
if wrd in words_s: #already done
continue
if word_re.match(wrd): #is word
words_s.add(wrd)
words_l.append(wrd)
else:
continue
if len(words_l)>=args.max:
break
if len(words_l)%10000==0:
print("at word",len(words_l))
with open(args.FILE+".dic","w") as f:
print(len(words_l),file=f)
for w in words_l:
print(w,file=f)
with open(args.FILE+".aff","w") as f:
print("SET UTF-8",file=f)
print("TRY abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZäöåÄÖÅ",file=f)
|