content (stringlengths 7 to 928k) | avg_line_length (float64 3.5 to 33.8k) | max_line_length (int64 6 to 139k) | alphanum_fraction (float64 0.08 to 0.96) | licenses (sequence) | repository_name (stringlengths 7 to 104) | path (stringlengths 4 to 230) | size (int64 7 to 928k) | lang (stringclasses 1 value)
---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python3
'''
Edit gizmo snapshot files: compress, delete, transfer across machines.
@author: Andrew Wetzel <[email protected]>
'''
# system ----
from __future__ import absolute_import, division, print_function  # python 2 compatibility
import os
import sys
import glob
import numpy as np
# local ----
import utilities as ut
from gizmo_analysis import gizmo_io
# default subset of snapshots (65 snapshots)
snapshot_indices_keep = [
0, # z = 99
20, 26, 33, 41, 52, # z = 10 - 6
55, 57, 60, 64, 67, # z = 5.8 - 5.0
71, 75, 79, 83, 88, # z = 4.8 - 4.0
91, 93, 96, 99, 102, 105, 109, 112, 116, 120, # z = 3.9 - 3.0
124, 128, 133, 137, 142, 148, 153, 159, 165, 172, # z = 2.9 - 2.0
179, 187, 195, 204, 214, 225, 236, 248, 262, 277, # z = 1.9 - 1.0
294, 312, 332, 356, 382, 412, 446, 486, 534, # z = 0.9 - 0.1
539, 544, 550, 555, 561, 567, 573, 579, 585, # z = 0.09 - 0.01
600
]
#===================================================================================================
# compress files
#===================================================================================================
class CompressClass(ut.io.SayClass):
def compress_snapshots(
self, directory='output', directory_out='', snapshot_index_limits=[0, 600],
thread_number=1):
'''
Compress all snapshots in input directory.
Parameters
----------
directory : str : directory of snapshots
directory_out : str : directory to write compressed snapshots
snapshot_index_limits : list : min and max snapshot indices to compress
        thread_number : int : number of parallel threads to use for compression
'''
snapshot_indices = np.arange(snapshot_index_limits[0], snapshot_index_limits[1] + 1)
args_list = [(directory, directory_out, snapshot_index)
for snapshot_index in snapshot_indices]
ut.io.run_in_parallel(self.compress_snapshot, args_list, thread_number=thread_number)
def compress_snapshot(
self, directory='output', directory_out='', snapshot_index=600,
analysis_directory='~/analysis', python_executable='python3'):
'''
Compress single snapshot (which may be multiple files) in input directory.
Parameters
----------
directory : str : directory of snapshot
directory_out : str : directory to write compressed snapshot
snapshot_index : int : index of snapshot
        analysis_directory : str : directory of analysis code
        python_executable : str : python executable used to run the compression script
        '''
executable = '{} {}/manipulate_hdf5/compactify_hdf5.py -L 0'.format(
python_executable, analysis_directory)
snapshot_name_base = 'snap*_{:03d}*'
if directory[-1] != '/':
directory += '/'
if directory_out and directory_out[-1] != '/':
directory_out += '/'
path_file_names = glob.glob(directory + snapshot_name_base.format(snapshot_index))
if len(path_file_names):
if 'snapdir' in path_file_names[0]:
path_file_names = glob.glob(path_file_names[0] + '/*')
path_file_names.sort()
for path_file_name in path_file_names:
if directory_out:
path_file_name_out = path_file_name.replace(directory, directory_out)
else:
path_file_name_out = path_file_name
executable_i = '{} -o {} {}'.format(executable, path_file_name_out, path_file_name)
self.say('executing: {}'.format(executable_i))
os.system(executable_i)
def test_compression(
self, snapshot_indices='all', simulation_directory='.', snapshot_directory='output',
compression_level=0):
'''
Read headers from all snapshot files in simulation_directory to check whether files have
been compressed.
'''
header_compression_name = 'compression.level'
simulation_directory = ut.io.get_path(simulation_directory)
snapshot_directory = ut.io.get_path(snapshot_directory)
Read = gizmo_io.ReadClass()
compression_wrong_snapshots = []
compression_none_snapshots = []
if snapshot_indices is None or snapshot_indices == 'all':
_path_file_names, snapshot_indices = Read.get_snapshot_file_names_indices(
simulation_directory + snapshot_directory)
elif np.isscalar(snapshot_indices):
snapshot_indices = [snapshot_indices]
for snapshot_index in snapshot_indices:
header = Read.read_header('index', snapshot_index, simulation_directory, verbose=False)
if header_compression_name in header:
if (compression_level is not None and
header[header_compression_name] != compression_level):
compression_wrong_snapshots.append(snapshot_index)
else:
compression_none_snapshots.append(snapshot_index)
self.say('* tested {} snapshots: {} - {}'.format(
len(snapshot_indices), min(snapshot_indices), max(snapshot_indices)))
self.say('* {} are uncompressed'.format(len(compression_none_snapshots)))
if len(compression_none_snapshots):
self.say('{}'.format(compression_none_snapshots))
self.say('* {} have wrong compression (level != {})'.format(
len(compression_wrong_snapshots), compression_level))
if len(compression_wrong_snapshots):
self.say('{}'.format(compression_wrong_snapshots))
Compress = CompressClass()
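# Hedged usage sketch (illustrative, not part of the original script): the
# module-level Compress instance can be driven from an interactive session,
# assuming the working directory is a simulation directory with an 'output/' folder:
#   Compress.compress_snapshots('output', snapshot_index_limits=[0, 600], thread_number=4)
#   Compress.test_compression(snapshot_indices='all', simulation_directory='.')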
#===================================================================================================
# transfer files via globus
#===================================================================================================
class GlobusClass(ut.io.SayClass):
def submit_transfer(
self, simulation_path_directory='.', snapshot_directory='output',
batch_file_name='globus_batch.txt', machine_name='peloton'):
'''
Submit globus transfer of simulation files.
Must initiate from Stampede.
Parameters
----------
simulation_path_directory : str : '.' or full path + directory of simulation
snapshot_directory : str : directory of snapshot files within simulation_directory
batch_file_name : str : name of file to write
        machine_name : str : name of machine to transfer files to
'''
# set directory from which to transfer
simulation_path_directory = ut.io.get_path(simulation_path_directory)
if simulation_path_directory == './':
simulation_path_directory = os.getcwd()
if simulation_path_directory[-1] != '/':
simulation_path_directory += '/'
command = 'globus transfer $(globus bookmark show stampede){}'.format(
            simulation_path_directory[1:])  # preceding '/' already in globus bookmark
path_directories = simulation_path_directory.split('/')
simulation_directory = path_directories[-2]
# parse machine + directory to transfer to
if machine_name == 'peloton':
if 'elvis' in simulation_directory:
directory_to = 'm12_elvis'
else:
directory_to = simulation_directory.split('_')[0]
directory_to += '/' + simulation_directory + '/'
command += ' $(globus bookmark show peloton-scratch){}'.format(directory_to)
# set globus parameters
command += ' --sync-level=checksum --preserve-mtime --verify-checksum'
command += ' --label "{}" --batch < {}'.format(simulation_directory, batch_file_name)
# write globus batch file
self.write_batch_file(simulation_path_directory, snapshot_directory, batch_file_name)
self.say('* executing:\n{}\n'.format(command))
os.system(command)
def write_batch_file(
self, simulation_directory='.', snapshot_directory='output', file_name='globus_batch.txt'):
'''
Write batch file that sets files to transfer via globus.
Parameters
----------
simulation_directory : str : directory of simulation
snapshot_directory : str : directory of snapshot files within simulation_directory
file_name : str : name of batch file to write
'''
simulation_directory = ut.io.get_path(simulation_directory)
snapshot_directory = ut.io.get_path(snapshot_directory)
transfer_string = ''
# general files
transfer_items = [
'gizmo/',
'gizmo_config.sh',
'gizmo_parameters.txt',
'gizmo_parameters.txt-usedvalues',
'gizmo.out.txt',
'snapshot_times.txt',
'notes.txt',
'track/',
'halo/rockstar_dm/catalog_hdf5/',
]
for transfer_item in transfer_items:
if os.path.exists(simulation_directory + transfer_item):
command = '{} {}'
if transfer_item[-1] == '/':
transfer_item = transfer_item[:-1]
command += ' --recursive'
command = command.format(transfer_item, transfer_item) + '\n'
transfer_string += command
# initial condition files
transfer_items = glob.glob(simulation_directory + 'initial_condition*/*')
for transfer_item in transfer_items:
if '.ics' not in transfer_item:
transfer_item = transfer_item.replace(simulation_directory, '')
command = '{} {}\n'.format(transfer_item, transfer_item)
transfer_string += command
# snapshot files
for snapshot_index in snapshot_indices_keep:
snapshot_name = '{}snapdir_{:03d}'.format(snapshot_directory, snapshot_index)
if os.path.exists(simulation_directory + snapshot_name):
snapshot_string = '{} {} --recursive\n'.format(snapshot_name, snapshot_name)
transfer_string += snapshot_string
snapshot_name = '{}snapshot_{:03d}.hdf5'.format(snapshot_directory, snapshot_index)
if os.path.exists(simulation_directory + snapshot_name):
snapshot_string = '{} {}\n'.format(snapshot_name, snapshot_name)
transfer_string += snapshot_string
with open(file_name, 'w') as file_out:
file_out.write(transfer_string)
Globus = GlobusClass()
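# Hedged usage sketch (illustrative): from a simulation directory on Stampede, the
# module-level Globus instance can submit a transfer, or just write the batch file
# for inspection before submitting:
#   Globus.submit_transfer('.', snapshot_directory='output', machine_name='peloton')
#   Globus.write_batch_file('.', 'output', 'globus_batch.txt')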
#===================================================================================================
# transfer files via rsync
#===================================================================================================
def rsync_snapshots(
machine_name, simulation_directory_from='', simulation_directory_to='.',
snapshot_indices=snapshot_indices_keep):
'''
Use rsync to copy snapshot file[s].
Parameters
----------
machine_name : str : 'pfe', 'stampede', 'bw', 'peloton'
    simulation_directory_from : str : simulation directory to copy snapshots from
    simulation_directory_to : str : local simulation directory to put snapshots in
snapshot_indices : int or list : index[s] of snapshots to transfer
'''
snapshot_name_base = 'snap*_{:03d}*'
directory_from = ut.io.get_path(simulation_directory_from) + 'output/'
directory_to = ut.io.get_path(simulation_directory_to) + 'output/.'
if np.isscalar(snapshot_indices):
snapshot_indices = [snapshot_indices]
snapshot_path_names = ''
for snapshot_index in snapshot_indices:
snapshot_path_names += (
directory_from + snapshot_name_base.format(snapshot_index) + ' ')
command = 'rsync -ahvP --size-only '
command += '{}:"{}" {}'.format(machine_name, snapshot_path_names, directory_to)
print('\n* executing:\n{}\n'.format(command))
os.system(command)
def rsync_simulation_files(
machine_name, directory_from='/oldscratch/projects/xsede/GalaxiesOnFIRE', directory_to='.'):
'''
Use rsync to copy simulation files.
Parameters
----------
machine_name : str : 'pfe', 'stampede', 'bw', 'peloton'
directory_from : str : directory to copy from
directory_to : str : directory to put files
'''
excludes = [
'output/',
'restartfiles/',
'ewald_spc_table_64_dbl.dat',
'spcool_tables/',
'TREECOOL',
'energy.txt',
'balance.txt',
'GasReturn.txt',
'HIIheating.txt',
'MomWinds.txt',
'SNeIIheating.txt',
'*.ics',
'snapshot_scale-factors.txt',
'submit_gizmo*.py',
'*.bin',
'*.particles',
'*.bak',
'*.err',
'*.pyc',
'*.o',
'*.pro',
'*.perl',
'.ipynb_checkpoints',
'.slurm',
'.DS_Store',
'*~',
'._*',
'#*#',
]
directory_from = machine_name + ':' + ut.io.get_path(directory_from)
directory_to = ut.io.get_path(directory_to)
command = 'rsync -ahvP --size-only '
arguments = ''
for exclude in excludes:
arguments += '--exclude="{}" '.format(exclude)
command += arguments + directory_from + ' ' + directory_to + '.'
print('\n* executing:\n{}\n'.format(command))
os.system(command)
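# Hedged usage sketch (illustrative; the remote path below is a placeholder, not a
# real location): pull a simulation's small files and a subset of snapshots from a
# remote machine, assuming passwordless ssh access to that machine:
#   rsync_simulation_files('peloton', '/path/to/simulations/m12i_res7100', '.')
#   rsync_snapshots('peloton', '/path/to/simulations/m12i_res7100', '.', snapshot_indices=[590, 600])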
#===================================================================================================
# delete files
#===================================================================================================
def delete_snapshots(
snapshot_directory='output', snapshot_index_limits=[1, 599], delete_halos=False):
'''
Delete all snapshots in given directory within snapshot_index_limits,
except for those in snapshot_indices_keep list.
Parameters
----------
snapshot_directory : str : directory of snapshots
snapshot_index_limits : list : min and max snapshot indices to delete
delete_halos : bool : whether to delete halo catalog files at same snapshot times
'''
snapshot_name_base = 'snap*_{:03d}*'
if not snapshot_directory:
snapshot_directory = 'output/'
halo_name_base = 'halos_{:03d}*'
halo_directory = 'halo/rockstar_dm/catalog/'
if snapshot_directory[-1] != '/':
snapshot_directory += '/'
if snapshot_index_limits is None or not len(snapshot_index_limits):
snapshot_index_limits = [1, 599]
snapshot_indices = np.arange(snapshot_index_limits[0], snapshot_index_limits[1] + 1)
print()
for snapshot_index in snapshot_indices:
if snapshot_index not in snapshot_indices_keep:
snapshot_name = snapshot_directory + snapshot_name_base.format(snapshot_index)
print('* deleting: {}'.format(snapshot_name))
os.system('rm -rf {}'.format(snapshot_name))
if delete_halos:
halo_name = halo_directory + halo_name_base.format(snapshot_index)
print('* deleting: {}'.format(halo_name))
os.system('rm -rf {}'.format(halo_name))
print()
#===================================================================================================
# running from command line
#===================================================================================================
if __name__ == '__main__':
if len(sys.argv) <= 1:
raise OSError('specify function to run: compress, globus, rsync, delete')
function_kind = str(sys.argv[1])
assert ('compress' in function_kind or 'rsync' in function_kind or 'globus' in function_kind or
'delete' in function_kind)
if 'compress' in function_kind:
directory = 'output'
if len(sys.argv) > 2:
directory = str(sys.argv[2])
snapshot_index_max = 600
if len(sys.argv) > 3:
snapshot_index_max = int(sys.argv[3])
snapshot_index_limits = [0, snapshot_index_max]
Compress.compress_snapshots(directory, snapshot_index_limits=snapshot_index_limits)
elif 'globus' in function_kind:
directory = '.'
if len(sys.argv) > 2:
directory = str(sys.argv[2])
Globus.submit_transfer(directory)
elif 'rsync' in function_kind:
if len(sys.argv) < 5:
raise OSError(
            'must provide: machine_name simulation_directory_from simulation_directory_to')
machine_name = str(sys.argv[2])
simulation_directory_from = str(sys.argv[3])
simulation_directory_to = str(sys.argv[4])
rsync_simulation_files(machine_name, simulation_directory_from, simulation_directory_to)
rsync_snapshots(machine_name, simulation_directory_from, simulation_directory_to)
elif 'delete' in function_kind:
directory = 'output'
if len(sys.argv) > 3:
directory = str(sys.argv[3])
snapshot_index_limits = None
if len(sys.argv) > 4:
snapshot_index_limits = [int(sys.argv[4]), int(sys.argv[5])]
delete_snapshots(directory, snapshot_index_limits)
| 37.668132 | 100 | 0.590641 | ["Apache-2.0"] | UAPH4582/PH482_582 | students_final_projects/group-f/gizmo_analysis/gizmo_file.py | 17,139 | Python |
import re
from django import forms
from django.contrib.admin.widgets import AdminFileWidget
from django.forms.models import inlineformset_factory, BaseInlineFormSet
from django.utils.translation import ugettext_lazy as _
from multi_email_field.forms import MultiEmailField
from pycon.sponsorship.models import Sponsor, SponsorBenefit, SponsorLevel
def strip(text):
return u' '.join(text.strip().split())
class SponsorDetailsForm(forms.ModelForm):
contact_emails = MultiEmailField(
help_text=_(u"Please enter one email address per line.")
)
class Meta:
model = Sponsor
fields = ["name",
"contact_name",
"contact_emails",
"contact_phone",
"contact_address",
"external_url",
"display_url",
"twitter_username",
"web_description",
"web_logo",
]
widgets = {
'web_description': forms.widgets.Textarea(attrs={'cols': 40, 'rows': 5}),
}
class SponsorApplicationForm(SponsorDetailsForm):
class Meta(SponsorDetailsForm.Meta):
fields = SponsorDetailsForm.Meta.fields + [
"level",
"wants_table",
"wants_booth",
"small_entity_discount",
]
help_texts = {
'web_description': strip(
u"""
Your description can be up to 100 words long.
"""
),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
kwargs.update({
"initial": {
"contact_name": self.user.get_full_name(),
"contact_emails": [self.user.email],
}
})
super(SponsorApplicationForm, self).__init__(*args, **kwargs)
self.fields['level'].queryset = SponsorLevel.objects.exclude(
available=False)
def clean_web_description(self):
value = self.cleaned_data['web_description']
word_count = len(re.findall(r"[-\w']+", value.lower()))
if word_count > 100:
raise forms.ValidationError(
_(u"Your description is {} words long;"
" please reduce it to 100 or less.".format(word_count))
)
return value
def save(self, commit=True):
obj = super(SponsorApplicationForm, self).save(commit=False)
obj.applicant = self.user
if commit:
obj.save()
return obj
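# Hedged usage sketch (illustrative, not part of the original module): the
# application form pops a required `user` keyword argument before delegating to
# ModelForm, so a view would construct and save it roughly like this:
#   form = SponsorApplicationForm(request.POST or None, user=request.user)
#   if form.is_valid():
#       sponsor = form.save()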
class SponsorBenefitsInlineFormSet(BaseInlineFormSet):
def _construct_form(self, i, **kwargs):
form = super(SponsorBenefitsInlineFormSet, self)._construct_form(i, **kwargs)
# only include the relevant data fields for this benefit type
fields = form.instance.data_fields()
form.fields = dict((k, v) for (k, v) in form.fields.items() if k in fields + ["id"])
for field in fields:
# don't need a label, the form template will label it with the benefit name
form.fields[field].label = ""
# provide word limit as help_text
if form.instance.benefit.type in ["text", "richtext"] and form.instance.max_words:
form.fields[field].help_text = u"maximum %s words" % form.instance.max_words
# use admin file widget that shows currently uploaded file
if field == "upload":
form.fields[field].widget = AdminFileWidget()
return form
SponsorBenefitsFormSet = inlineformset_factory(
Sponsor, SponsorBenefit,
formset=SponsorBenefitsInlineFormSet,
can_delete=False, extra=0,
fields=["text", "upload"]
)
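# Hedged usage sketch (illustrative): the inline formset above is bound to an
# existing Sponsor instance, e.g. in a view handling benefit details:
#   formset = SponsorBenefitsFormSet(request.POST or None, instance=sponsor)
#   if formset.is_valid():
#       formset.save()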
class SponsorEmailForm(forms.Form):
from_ = forms.EmailField(widget=forms.TextInput(attrs={'class': 'fullwidth-input'}))
cc = forms.CharField(help_text=_(u"(comma-separated addresses)"),
required=False,
widget=forms.TextInput(attrs={'class': 'fullwidth-input'}))
bcc = forms.CharField(help_text=_(u"(comma-separated addresses)"),
required=False,
widget=forms.TextInput(attrs={'class': 'fullwidth-input'}))
subject = forms.CharField(widget=forms.TextInput(attrs={'class': 'fullwidth-input'}))
body = forms.CharField(widget=forms.Textarea(attrs={'class': 'fullwidth-textarea'}))
sample_subject = forms.CharField(
required=False,
widget=forms.TextInput(attrs={
'class': 'fullwidth-input',
'readonly': True,
'style': 'background-color: #ddd',
}),
)
sample_body = forms.CharField(
help_text=_(u"""
You can keep editing the body and hitting Send
until you love how this preview looks.
Then, press Send one final time!
"""),
required=False,
widget=forms.Textarea(attrs={
'class': 'fullwidth-textarea',
'readonly': True,
'style': 'background-color: #ddd',
}),
)
| 34.418919 | 94 | 0.586965 | ["BSD-3-Clause"] | tylerdave/PyCon-Website | pycon/sponsorship/forms.py | 5,094 | Python |
import logging
from common_lib.backend_service.generated.backend_service_v1_pb2_grpc import BackendServiceServicer
import common_lib.backend_service.generated.service_types_v1_pb2 as service_messages
from common_lib.grpc import GrpcExceptionHandler
logger = logging.getLogger(__name__)
class BackendService(BackendServiceServicer):
def __init__(self):
"""
Initialise service and configuration
"""
logger.info("Initialised Backend-Service - Ready for gRPC Calls.")
################################################################################
# RPC Methods
################################################################################
def Greet(self, request, context):
try:
name = request.name if request.name else None
with_error = request.withError
logger.info(f"Received Greet request with name={name} and withError={with_error}")
reply = f"Hello {name}!!"
return service_messages.GreetResult(
reply=reply
)
except Exception as e:
code, details = GrpcExceptionHandler.toGrpcError(e)
context.set_code(code)
context.set_details(details)
return service_messages.GreetResult()
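# Hedged serving sketch (illustrative, not part of the original file): wiring this
# servicer into a gRPC server typically follows the standard grpcio pattern below;
# the add_BackendServiceServicer_to_server helper name is assumed from the usual
# protoc code-generation convention for the imported *_pb2_grpc module.
#
#   from concurrent import futures
#   import grpc
#   from common_lib.backend_service.generated import backend_service_v1_pb2_grpc
#
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#   backend_service_v1_pb2_grpc.add_BackendServiceServicer_to_server(BackendService(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()
#   server.wait_for_termination()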
| 34.078947 | 99 | 0.587645 | ["MIT"] | dgildeh/otel-python-cloud-run | backend-service/backend_service/service.py | 1,295 | Python |
class Solution:
def transpose(self, matrix: [[int]]) -> [[int]]:
m = len(matrix)
n = len(matrix[0])
res = [[0] * m for _ in range(n)]
for i in range(m) :
for j in range(n) :
res[j][i] = matrix[i][j]
return res
if __name__ == "__main__" :
s = Solution()
matrix = [[1, 2, 3,6], [4, 5, 6,8], [7, 8, 9,4]]
res = s.transpose(matrix)
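    # expected output for this 3x4 input: [[1, 4, 7], [2, 5, 8], [3, 6, 9], [6, 8, 4]]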
    print(res)
| 25.235294 | 52 | 0.459207 | ["Apache-2.0"] | russellgao/algorithm | dailyQuestion/2021/2021-02/02-25/python/solution.py | 429 | Python |
# A simple script to generate fake data
import sys, random, math, json
import datetime as dt
USAGE = 'Usage: python make_fake_fixtures.py [num_of_members] [num_of_games] [num_of_tournaments]'
# Fake Names: First and Last
GIVEN_NAMES = [ 'bruce', 'malcolm', 'kobe', 'peter', 'kaylee', 'inara' ]
LAST_NAMES = [ 'lee', 'reynolds', 'bryant', 'parker', 'frye', 'serra' ]
# Misc Chapter Codes
CHAPTER_CODES = [ 'FFLY', 'NBAG', 'SJGC', 'TPGC', None]
CHAPTER_NAMES = [ 'Fire Fly Go Club', 'NBA Go Club', 'San Jose Go Club', 'Tampa Go Club', None ]
# State Names and City Names
STATE_CODES = [ 'CA', 'OR', 'NY', 'AZ', 'AR', 'FL', 'KS', 'KY', 'IA' ]
CITY_NAMES = [ 'Aurora', 'Austin', 'Boston', 'Chandler', 'Charlotte', 'Dallas', 'Dayton', 'Eugene' ]
# Country Names and Codes
COUNTRY_NAMES = [ 'United States', 'Canada', 'Japan', 'Korea', 'China', 'Tywain' ]
COUNTRY_CODES = [ 'US', 'CA', 'JP', 'KO', 'CH', 'TW' ] #these, oddly, are not the FK in the member table.
# Membership Status Codes
STATUS_CODES = [ 'accepted' ]
# Membership Types
MEMBERSHIP_TYPES = ['Full',
'Sustainer',
'Sponser',
'Lifetime',
'E-Journal']
if len(sys.argv) != 4:
print( USAGE )
quit()
try:
member_count = int(sys.argv[1])
game_count = int(sys.argv[2])
tourney_count = int(sys.argv[3])
except ValueError:
print( USAGE )
quit()
member_ids = [x for x in range(1, member_count+1)]
tournament_ids = ['T%s' % x for x in range(1, tourney_count+1)]
members = []
players = []
for member_id in member_ids:
date = dt.date.today() - dt.timedelta(days = random.randint(2,150))
join_date = date - dt.timedelta(days = 150)
renewal_due = date + dt.timedelta(days = random.randint(2,720))
first_name = random.choice(GIVEN_NAMES)
last_name = random.choice(LAST_NAMES)
members.append({
'pk': member_id,
'model': 'agagd_core.member',
'fields': {
'member_id': member_id,
'legacy_id': random.choice(range(1, member_count+1)),
'full_name': '%s %s' % (first_name, last_name),
'given_names': first_name,
'family_name': last_name,
'join_date': join_date.strftime("%Y-%m-%d"),
'renewal_due': renewal_due.strftime("%Y-%m-%d"),
'city': 'Seattle',
'state': random.choice(STATE_CODES),
'status': random.choice(STATUS_CODES),
'region': 'some region',
'country': random.choice(COUNTRY_NAMES),
'chapter': random.choice(CHAPTER_CODES),
'chapter_id': random.choice(range(1, len(CHAPTER_CODES)+1)),
'occupation': '',
'citizen': random.choice(range(0, 1)),
'password': 'hallo!',
'type': random.choice(MEMBERSHIP_TYPES),
'last_changed': date.strftime("%Y-%m-%d")
}
})
players.append({
'pk': member_id,
'model': 'agagd_core.players',
'fields': {
'elab_date': date.strftime("%Y-%m-%d"),
'name': first_name,
'last_name': last_name,
'rating': random.uniform(-15, 10),
'sigma': random.random()
}
})
ratings = []
ratings_range = list(range(0, 25))
for member_id in member_ids:
for rating_id in ratings_range:
elab_date = dt.date.today() - dt.timedelta(days = random.randint(2,20))
player_rating = players[member_id-1]['fields']['rating']
player_low_rating = player_rating - random.randint(0, 3)
        player_high_rating = player_rating + random.randint(0, 3)
ratings.append({
'pk': None,
'model': 'agagd_core.rating',
'fields': {
'pin_player': member_id,
'elab_date': elab_date.strftime("%Y-%m-%d"),
'rating': random.uniform(player_low_rating, player_high_rating),
'tournament': random.choice(tournament_ids),
'sigma': random.random()
}
})
tournaments = []
for tourney_id in tournament_ids:
date = dt.date.today() - dt.timedelta(days = random.randint(2,20))
elab_date = date + dt.timedelta(days = 7)
random_state = random.choice(STATE_CODES)
tournaments.append({
'pk': tourney_id,
'model': 'agagd_core.tournament',
'fields': {
'total_players': random.randint(4,20),
'city': random.choice(CITY_NAMES),
'elab_date': elab_date.strftime("%Y-%m-%d"),
'description': random_state + tourney_id,
'wall_list': "1: Mal, Bruce 2d 2+/w0 3+/w0 4+/w0 3-0-0\n"
"2: Lee, Parker 1d 1-/b2 4+/w0 3-/w0 1-2-0\n"
"3: Lee, Matt 1k 4-/w0 1-/b6 2+/b4 1-2-0\n"
"4: Frye, Sam 3k 3+/b2 2-/b6 1-/b8 1-2-0\n"
"Note: This is not generated by the AGAGD.",
'state': random_state,
'rounds': random.randint(2,5),
'tournament_date': date.strftime("%Y-%m-%d")
}
})
games = []
for game_id in range(1, game_count+1):
p1 = random.choice(member_ids)
p2 = random.choice([member_id for member_id in member_ids if member_id != p1])
color_1 = random.choice(['B', 'W'])
color_2 = 'B' if color_1 != 'B' else 'W'
date = dt.date.today() - dt.timedelta(days = random.randint(2,20))
elab_date = date + dt.timedelta(days = 7)
games.append({
'pk': game_id,
'model': 'agagd_core.game',
'fields': {
'pin_player_2': p2,
'tournament_code': random.choice(tournaments)['pk'],
'rated': random.randint(0, 1),
'elab_date': elab_date.strftime("%Y-%m-%d"),
'handicap': random.randint(0, 9),
'online': random.randint(0, 1),
'color_2': color_2,
'sgf_code': '',
'komi': random.randint(0, 9),
'pin_player_1': p1,
'rank_1': '',
'result': random.choice(['B', 'W']),
'rank_2': '',
'game_date': date.strftime("%Y-%m-%d"),
'exclude': random.randint(0,1),
'round': random.randint(2,5),
'color_1': color_1
}
})
chapters = []
for member_id in range(0, len(CHAPTER_CODES)):
chapters.append({
'pk': member_id+1,
'model': 'agagd_core.chapters',
'fields': {
'member_id': member_id+1,
'code': CHAPTER_CODES[member_id],
'name': CHAPTER_NAMES[member_id],
'contact_text': random.choice(['Some contact info would go here.', '']),
'contact': 'Some guy',
'meeting_city': 'Seattle',
'url': 'www.localhost-is-best-host.com',
'display': random.randint(0, 1)
}
})
countries = []
for i, count_name in enumerate(COUNTRY_NAMES):
countries.append({
'pk': i,
'model': 'agagd_core.country',
'fields': {
'country_code': random.choice(COUNTRY_CODES),
'country_descr': count_name,
}
})
print( json.dumps(members + players + ratings + tournaments + games + chapters + countries, indent=4) )
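# Hedged usage sketch (illustrative; command names assumed, not taken from this
# repo): the JSON above is printed in Django fixture format, so a typical workflow
# is to redirect it to a file and load it with Django's loaddata command:
#   python make_fake_fixtures.py 50 200 10 > fake_fixtures.json
#   python manage.py loaddata fake_fixtures.json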
| 36.904523 | 105 | 0.541667 | ["MIT"] | annabunches/agagd | scripts/make_fake_fixtures.py | 7,344 | Python |
# Generated by Django 2.2.7 on 2019-11-05 22:35
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import orders.models
import orders.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('courses', '0002_CourseGenitiveName'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('modified', models.DateTimeField(blank=True, db_index=True, null=True)),
('price', models.DecimalField(decimal_places=2, max_digits=9)),
('paid', models.DateTimeField('Date when order got paid', null=True, blank=True, help_text='If set during creation, order automaticaly gets shipped')),
('shipped', models.DateTimeField('Date when order was shipped', null=True, blank=True)),
('course', orders.fields.ItemField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='courses.Course')),
('record', orders.fields.ItemField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='courses.Record')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| 42.358974 | 167 | 0.641041 | ["MIT"] | iNerV/education-backend | src/orders/migrations/0001_initial.py | 1,652 | Python |
#!/usr/bin/python3
#
# Copyright (c) Siemens AG, 2020
# [email protected]
#
# SPDX-License-Identifier: MIT
#
import subprocess
import results
import pexpect
import shutil
import util
import sys
import re
import os
print("Start Test")
# make clean
ret = subprocess.run(["make clean"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# make main
ret = subprocess.run(["make main"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = ret.stdout.decode("utf-8")
##########################################################################################################################################
# EXP15-C. Do not place a semicolon on the same line as an if, for, or while statement
# https://wiki.sei.cmu.edu/confluence/display/c/EXP15-C.+Do+not+place+a+semicolon+on+the+same+line+as+an+if%2C+for%2C+or+while+statement
# Fix the bug in line "for (; ii<strlen(str1)-1; ii++); {" of utilities.c
##########################################################################################################################################
#
# Line to search for:
# for (; ii<strlen(str1)-1; ii++); {
nHits = util.searchSource("utilities.c.pp","^\s*for.*;\s*{\s*$")
if nHits>0:
util.addFinding("Program does not behave as expected",0,"INCREMENTAL_2_FUNC_1362465447_","TEST_100001")
util.dumpFindings()
# run analysis
ret = subprocess.run(["./analyse.py"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# run AI
ret = subprocess.run(["./ai.py"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
sys.exit(0)
os.remove("pizzas.txt")
shutil.copyfile("pizzas.ok.txt","pizzas.txt")
p = pexpect.spawn("./run_jail.sh ./main")
p.logfile = sys.stdout.buffer
p.sendline("help")
p.sendline("napoli")
p.sendline("crudo")
p.sendline("view")
p.sendline("checkout")
p.sendline("carbonara")
p.sendline("napoli")
p.sendline("romana")
p.sendline("checkout")
p.sendline("view")
p.sendline("checkout")
p.sendline("exit")
try:
p.expect("Thank you and goodbye!",timeout=1)
except:
p.kill(9)
B = p.before.decode("utf-8")
# with open("expect_result.txt","w+") as f:
# f.write(B)
# print("'"+p.before.decode("utf-8")+"'" )
# p.expect(pexpect.EOF)
expectResult = ""
with open("expect_result.txt","rb") as f:
l = f.read()
expectResult = l.decode("utf-8")
print(expectResult)
okFail = 1 if (expectResult==B) else 0
util.addFinding("Program is behaving as expected",okFail,"","TEST_900001")
##########################################################################################################################################
# EXP02-C. Be aware of the short-circuit behavior of the logical AND and OR operators
# https://wiki.sei.cmu.edu/confluence/display/c/EXP02-C.+Be+aware+of+the+short-circuit+behavior+of+the+logical+AND+and+OR+operators
# make sure that the participant rewrites the (1==nOrder) && printf as an if statement
##########################################################################################################################################
#
# Lines to search for:
# (1==nOrder) && printf("You have ordered 1 pizza.\n");
# (1<nOrder) && printf("You have ordered %d pizzas.\n",nOrder);
#
nHits = util.searchSource("main.c.pp","^\s*\(1==nOrder\)\s*&&\s*printf")
okFail = 1
if nHits>0:
okFail = 0
else:
nHits = util.searchSource("main.c.pp","^\s*\(1<nOrder\)\s*&&\s*printf")
if nHits>0:
okFail = 0
else:
okFail = 1
util.addFinding("Bad coding style present in the code",okFail,"NO_TAG","TEST_101001")
##########################################################################################################################################
# STR05-C. Use pointers to const when referring to string literals
# https://wiki.sei.cmu.edu/confluence/display/c/STR05-C.+Use+pointers+to+const+when+referring+to+string+literals
# make sure the participant adds "const" to the string "pizzaFileName"
##########################################################################################################################################
# Lines to search for:
# char *pizzaFileName = "pizzas.txt";
#
nHits = util.searchSource("pizza.c.pp","^\s*const\s+char\s*\*\s*pizzaFileName\s*=\s*\"pizzas\.txt\"\s*;\s*$")
okFail = 0 if (nHits==0) else 1;
util.addFinding("Watch out for string literals",okFail,"NO_TAG","TEST_102001")
##########################################################################################################################################
# ERR01-C. Use ferror() rather than errno to check for FILE stream errors
# https://wiki.sei.cmu.edu/confluence/display/c/ERR01-C.+Use+ferror%28%29+rather+than+errno+to+check+for+FILE+stream+errors
# remove any checks with errno
##########################################################################################################################################
# Lines to search for:
# if (errno!=0) {
#
nHits = util.searchSource("pizza.c.pp","^\s*if\s*\(\s*errno")
okFail = 1 if (nHits==0) else 0;
util.addFinding("Using outdated error checking mechanism",okFail,"NO_TAG","TEST_103001")
##########################################################################################################################################
# ERR07-C. Prefer functions that support error checking over equivalent functions that don't
# https://wiki.sei.cmu.edu/confluence/display/c/ERR07-C.+Prefer+functions+that+support+error+checking+over+equivalent+functions+that+don%27t
# Developer should not use atoi()
##########################################################################################################################################
nHits = util.searchSource("pizza.c.pp","^.*=\s*atoi\s*\(pizzaCost\s*\)\s*;\s*$")
okFail = 1 if (nHits==0) else 0;
util.addFinding("Some possible errors are not being checked",okFail,"NO_TAG","TEST_104001")
##########################################################################################################################################
# INT08-C. Verify that all integer values are in range
# https://wiki.sei.cmu.edu/confluence/display/c/INT08-C.+Verify+that+all+integer+values+are+in+range
# Make sure the user handles potential integer overflow when adding a pizza to the basket
# How to check: replace cost in pizzas.txt with large amount and order two pizzas. the program should not "crash"
##########################################################################################################################################
stdout = ret.stdout.decode("utf-8")
os.remove("pizzas.txt")
shutil.copyfile("pizzas.bad.txt","pizzas.txt")
p = pexpect.spawn("./run_jail.sh ./main")
p.logfile = sys.stdout.buffer
p.sendline("margherita")
p.sendline("margherita")
p.sendline("view")
p.sendline("checkout")
p.sendline("exit")
try:
p.expect("Thank you and goodbye!",timeout=1)
except:
p.kill(9)
B = p.before.decode("utf-8")
hitFlag = 1
for line in B.split("\n"):
# main.c:31:11: runtime error: signed integer overflow: 2147480047 + 2147480047 cannot be represented in type 'int'
if re.search(r"runtime error:",line) and re.search(r"signed integer overflow",line):
hitFlag = 0
util.addFinding("There are integer handling issues in this program",hitFlag,"NO_TAG","TEST_120001")
############################################
############################################
############################################
############################################
############################################
util.dumpFindings()
# run analysis
ret = subprocess.run(["./analyse.py"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# run AI
ret = subprocess.run(["./ai.py"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
| 45.470588 | 140 | 0.53273 | ["MIT"] | saucec0de/sifu | Challenges/C_CPP/0005_pizza/run.py | 7,730 | Python |
# (C) Copyright 2012-2019 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import os
import argparse
from flask import Flask, request, Response, render_template, send_file, jsonify
from .server import WMSServer
from .plot.magics import Plotter, Styler
from .data.fs import Availability
application = Flask(__name__)
demo = os.path.join(os.path.dirname(__file__), "testdata", "sfc.grib")
demo = os.environ.get("SKINNYWMS_DATA_PATH", demo)
parser = argparse.ArgumentParser(description="Simple WMS server")
parser.add_argument(
"-f",
"--path",
default=demo,
help="Path to a GRIB or NetCDF file, or a directory\
containing GRIB and/or NetCDF files.",
)
parser.add_argument(
"--style", default="", help="Path to a directory where to find the styles"
)
parser.add_argument(
"--user_style", default="", help="Path to a json file containing the style to use"
)
parser.add_argument("--host", default="127.0.0.1", help="Hostname")
parser.add_argument("--port", default=5000, help="Port number")
parser.add_argument(
"--baselayer", default="", help="Path to a directory where to find the baselayer"
)
parser.add_argument(
"--magics-prefix",
default="magics",
help="prefix used to pass information to magics",
)
args = parser.parse_args()
if args.style != "":
os.environ["MAGICS_STYLE_PATH"] = args.style + ":ecmwf"
if args.user_style != "":
os.environ["MAGICS_USER_STYLE_PATH"] = args.user_style
server = WMSServer(Availability(args.path), Plotter(args.baselayer), Styler(args.user_style))
server.magics_prefix = args.magics_prefix
@application.route("/wms", methods=["GET"])
def wms():
return server.process(
request,
Response=Response,
send_file=send_file,
render_template=render_template,
reraise=True,
)
@application.route("/availability", methods=["GET"])
def availability():
return jsonify(server.availability.as_dict())
@application.route("/", methods=["GET"])
def index():
return render_template("leaflet_demo.html")
def execute():
application.run(port=args.port, host=args.host, debug=True, threaded=False)
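# Hedged usage sketch (illustrative, not part of the original module): execute()
# above starts the Flask development server with the parsed command-line options,
# so a console entry point or a small wrapper script can simply do:
#   from skinnywms.wmssvr import execute
#   execute()
# The WMS endpoint is then served at http://<host>:<port>/wms and the demo map at
# the root URL.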
| 27.164835 | 93 | 0.708738 | ["Apache-2.0"] | falkephi/skinnywms | skinnywms/wmssvr.py | 2,472 | Python |
temporary_zones = {
'forest': {
'room_keys': [
'a sacred grove of ancient wood', 'a sparsely-populated fledgling forest',
'a cluster of conifer trees'
]
},
'sewer': {
'room_keys': [
'a poorly-maintained sewage tunnel', 'a sewer tunnel constructed of ancient brick',
'a wide walkway along sewage'
]
},
'cave': {
'room_keys': [
'an uncertain rock bridge', 'a wide opening between', 'a damp rock shelf'
]
},
'alley': {
'room_keys': [
'a dark alleyway', 'a filthy alleyway', 'a narrow alleyway'
]
}
}
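# Hedged usage note (illustrative, not part of the original data file): a room
# builder would typically draw a random description from one of the zone templates
# above, e.g.:
#   import random
#   room_key = random.choice(temporary_zones['sewer']['room_keys'])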
static_zones = {
'darkshire': [
{ # Encampment
'coords': {'x': 0, 'y': 0},
'exits': ['n'],
'key': "a makeshift encampment"
},
{ # Main Gate
'coords': {'x': 0, 'y': -1},
'exits': ['n', 's'],
'key': "a main gate"
},
{ # Town Square - South
'coords': {'x': 0, 'y': -2},
'exits': ['n', 'ne', 'e', 's', 'w', 'nw'],
'key': "a town square"
},
{ # Town Square - Southwest
'coords': {'x': -1, 'y': -2},
'exits': ['n', 'ne', 'e'],
'key': "a town square"
},
{ # Town Square - Southeast
'coords': {'x': 1, 'y': -2},
'exits': ['n', 'w', 'nw'],
'key': "a town square"
},
{ # Town Square - Middle
'coords': {'x': 0, 'y': -3},
'exits': ['n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw'],
'key': "the center of a town square"
},
{ # Town Square - West
'coords': {'x': -1, 'y': -3},
'exits': ['n', 'ne', 'e', 'se', 's'],
'key': "a town square"
},
{ # Town Square - East
'coords': {'x': 1, 'y': -3},
'exits': ['n', 's', 'sw', 'w', 'nw'],
'key': "a town square"
},
{ # Town Square - North
'coords': {'x': 0, 'y': -4},
'exits': ['e', 'se', 's', 'sw', 'w'],
'key': "a town square"
},
{ # Town Square - Northwest
'coords': {'x': -1, 'y': -4},
'exits': ['e', 'se', 's'],
'key': "a town square"
},
{ # Town Square - Northeast
'coords': {'x': 1, 'y': -4},
'exits': ['s', 'sw', 'w'],
'key': "a town square"
}
],
'testing_zone': [
{ # Hecate's Haven
'coords': {'x': 0, 'y': 0},
'exits': ['n', 'e', 's', 'w'],
'key': "Hecate's Haven",
'desc': ("You are in a well-constructed room with walls made of red granite rock, "
"masterfully crafted from various shapes. Aisles, featuring a worn-in deep crimson "
"carpet, connect all four exits to the center of the room. Four large round "
"sturdy oak tables, surrounded by various chairs, decorate the northern half of "
"room. Dominating the southwest corner of the room is a well-worn heavy oak bar "
"that runs from the western wall into a smooth-cornered turn to end against "
"the southern wall and is surrounded by stools. "
"In the southeast quarter, a lounge features a polished large black harpsichord "
"tucked into the corner, adjacent to a raised limestone platform serving as a "
"stage. Along the southern half of the middle aisle and against the eastern wall "
"rests a heavily padded L-shaped couch. Just in front of the couch sits a "
"low-lying table."),
'static_sentients': ['hoff'],
'tags': [('hecates_haven', 'rooms')]
},
{ # Shopper's Paradise
'coords': {'x': -1, 'y': 0},
'exits': ['e', 's'],
'key': "Shopper's Paradise",
'desc': ("You are in a room stuffed to the ceiling with ridiculous amounts of "
"merchandise. Running parallel against the western wall stands a magnificent "
"translucent ruby counter, covered in racks and jars that house a plethora of "
"curious wonders. Surrounding you are shelves and various containers filled with "
"an assortment of objects and supplies. Volund, the proprietor of this "
"establishment, appears busy with obsessively sorting and counting his inventory.")
},
{ # Back Alley
'coords': {'x': -1, 'y': 1},
'exits': ['n', 'e'],
'key': "a dark alley behind a shop",
            'desc': ("You are in a dark and dirty back alley. Discarded trash is strewn about. "
                "In the southwest corner you see a mucky ladder leading down into a sewer, "
                "emanating a vile stench throughout the alley. A few belligerent thugs are "
                "causing a ruckus halfway down the alley.")
},
{ # Training Yard
'coords': {'x': 0, 'y': 1},
'exits': ['n', 'e', 'w'],
'key': "a training yard",
'desc': ("You are at a training yard paved with cobblestones as walkways and dirt for "
"fighting on. A thin layer of sand covers everything here. A never-ending supply "
"of slaves are locked up in a cage against the southern wall. Several of the "
"slaves are shackled to the southern wall, ready to fight and die. Benches run "
"under an ivy-covered pergola along the northern wall. Combatants of various skill "
"levels and weapons train here under the hot sun, watched over by the "
"Doctore Oenomaus and several other trainers; each a master of different weapons.")
},
{ # Apothecary's Shop
'coords': {'x': 1, 'y': 1},
'exits': ['n', 'w'],
'key': "Apothecary's Shop",
'desc': ("You are amongst a vast array of plants, spices, minerals, and animal "
"products. Behind a counter crafted from flowery vines stands Kerra the apothecary. "
"North of the counter are three rows of various plants. All along the southern wall "
"hangs shelves that house a multitude of jars, each containing some form of animal "
"product. The northwestern corner of the room is stacked high with spices, "
"minerals, and cured plants. Various incense burners are hanging from the ceiling, "
"filling the room with a sickly-sweet aroma that mixes with an earthy scent.")
},
{ # Kitchen
'coords': {'x': 1, 'y': 0},
'exits': ['s', 'w'],
'key': "a large noisy kitchen",
'desc': ("You are inside a large noisy kitchen lined with white ceramic tiles. Busy "
"people are hurriedly completing their tasks whilst communicating loudly. Pots and "
"pans are hanging on hooks all along the walls. Four large ovens are stacked in a "
"set of two against the northeast wall. A double sink is set in both the southeast "
"and northwest corners of the room. Straight down the middle of the room rests a "
"solid steel island, covered in various items being prepared. Ten stove-top burners "
"line the eastern wall. Against the northern wall sets various bags of grain, rice, "
"flour, and sugar. A whole cow hangs from the ceiling in the southwest corner, "
"ready to be butchered.")
},
{ # Road
'coords': {'x': 0, 'y': -1},
'exits': ['e', 's', 'w'],
'key': "a bustling cobblestone street",
'desc': ("You are surrounded by large swarths of people in every direction. Centered "
"in the middle of the street is a towering bronze statue of 10 feet; featuring a "
"heavily armored knight plunging his bastard sword into the chest of a kneeling "
"opponent, red liquid continuously spurts out from the statue’s chest and splashes "
"into a basin built into the foundation. Surrounding the statue are ornate marble "
"benches, each about 8 feet in length. Above the inn’s doorway hangs a glass "
"mosaic street lamp set alight by burning oil. Moving amongst the crowd are people "
"from all walks of life; from peasants, prostitutes, and workers to lords, ladies, "
"scholars, priests, knights, and hired guards.")
},
]
}
| 49.717514 | 101 | 0.520455 | ["Unlicense"] | kovitikus/hecate | rooms/zones.py | 8,804 | Python |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import find_packages
from setuptools import setup
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
README = file_obj.read()
# NOTE: This is duplicated throughout and we should try to
# consolidate.
SETUP_BASE = {
'author': 'Google Cloud Platform',
'author_email': '[email protected]',
'scripts': [],
'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
'license': 'Apache 2.0',
'platforms': 'Posix; MacOS X; Windows',
'include_package_data': True,
'zip_safe': False,
'classifiers': [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet',
],
}
REQUIREMENTS = [
'enum34',
'google-cloud-core >= 0.22.1, < 0.23dev',
'gapic-google-cloud-vision-v1 >= 0.14.0, < 0.15dev',
]
setup(
name='google-cloud-vision',
version='0.22.0',
description='Python Client for Google Cloud Vision',
long_description=README,
namespace_packages=[
'google',
'google.cloud',
],
packages=find_packages(),
install_requires=REQUIREMENTS,
**SETUP_BASE
)
| 30.422535 | 74 | 0.663889 | ["Apache-2.0"] | ammayathrajeshnair/googlecloudpython | vision/setup.py | 2,160 | Python |
"""Unit tests for the Nexus Parser
"""
from unittest import TestCase, main
from cogent3 import load_aligned_seqs
from cogent3.parse.nexus import (
MinimalNexusAlignParser,
find_fields,
get_BL_table,
get_tree_info,
parse_dnd,
parse_nexus_tree,
parse_PAUP_log,
parse_taxa,
parse_trans_table,
split_tree_info,
)
__author__ = "Catherine Lozupone"
__copyright__ = "Copyright 2007-2019, The Cogent Project"
__credits__ = ["Catherine Lozupone", "Rob Knight", "Micah Hamady"]
__license__ = "BSD-3"
__version__ = "2019.12.6a"
__maintainer__ = "Catherine Lozupone"
__email__ = "[email protected]"
__status__ = "Production"
Nexus_tree = """#NEXUS
Begin trees; [Treefile saved Wednesday, May 5, 2004 5:02 PM]
[!
>Data file = Grassland_short.nex
>Neighbor-joining search settings:
> Ties (if encountered) will be broken systematically
> Distance measure = Jukes-Cantor
> (Tree is unrooted)
]
Translate
1 outgroup25,
2 AF078391l,
3 AF078211af,
4 AF078393l,
5 AF078187af,
6 AF078320l,
7 AF078432l,
8 AF078290af,
9 AF078350l,
10 AF078356l,
11 AF078306af,
12 AF078429l,
13 AF078256af,
14 AF078443l,
15 AF078450l,
16 AF078452l,
17 AF078258af,
18 AF078380l,
19 AF078251af,
20 AF078179af,
21 outgroup258
;
tree PAUP_1 = [&R] (1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);
tree PAUP_2 = [&R] (1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);
End;""".split(
"\n"
)
Nexus_tree_2 = """#NEXUS
Begin trees; [Treefile saved Wednesday, June 14, 2006 11:20 AM]
[!>Neighbor-joining search settings:
> Ties (if encountered) will be broken systematically
> Distance measure = uncorrected ("p")
> (Tree is unrooted)
]
tree nj = [&U] ((((((((((YA10260L1:0.01855,SARAG06_Y:0.00367):0.01965,(((YA270L1G0:0.01095,SARAD10_Y:0.00699):0.01744,YA270L1A0:0.04329):0.00028,((YA165L1C1:0.01241,SARAA02_Y:0.02584):0.00213,((YA165L1H0:0.00092,SARAF10_Y:-0.00092):0.00250,(YA165L1A0:0.00177,SARAH10_Y:0.01226):0.00198):0.00131):0.00700):0.01111):0.11201,(YA160L1F0:0.00348,SARAG01_Y:-0.00122):0.13620):0.01202,((((YRM60L1D0:0.00357,(YRM60L1C0:0.00477,SARAE10_Y:-0.00035):0.00086):0.00092,SARAE03_Y:0.00126):0.00125,SARAC11_Y:0.00318):0.00160,YRM60L1H0:0.00593):0.09975):0.07088,SARAA01_Y:0.02880):0.00190,SARAB04_Y:0.05219):0.00563,YRM60L1E0:0.06099):0.00165,(YRM60L1H0:0.00450,SARAF11_Y:0.01839):0.00288):0.00129,YRM60L1B1:0.00713):0.00194,(YRM60L1G0:0.00990,(YA165L1G0:0.00576,(YA160L1G0:0.01226,SARAA11_Y:0.00389):0.00088):0.00300):0.00614,SARAC06_Y:0.00381);
end;""".split(
"\n"
)
Nexus_tree_3 = """#NEXUS
Begin trees; [Treefile saved Wednesday, May 5, 2004 5:02 PM]
[!
>Data file = Grassland_short.nex
>Neighbor-joining search settings:
> Ties (if encountered) will be broken systematically
> Distance measure = Jukes-Cantor
> (Tree is unrooted)
]
Translate
1 outgroup25,
2 AF078391l,
3 'AF078211af',
4 AF078393l,
5 AF078187af,
6 AF078320l,
7 AF078432l,
8 AF078290af,
9 AF078350l,
10 AF078356l,
11 AF078306af,
12 AF078429l,
13 AF078256af,
14 'AF078443l',
15 AF078450l,
16 AF078452l,
17 AF078258af,
18 'AF078380l',
19 AF078251af,
20 AF078179af,
21 outgroup258
;
tree PAUP_1 = [&R] (1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);
tree PAUP_2 = [&R] (1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);
End;""".split(
"\n"
)
PAUP_log = """
P A U P *
Version 4.0b10 for Macintosh (PPC/Altivec)
Wednesday, May 5, 2004 5:03 PM
This copy registered to: Scott Dawson
UC-Berkeley
(serial number = B400784)
-----------------------------NOTICE-----------------------------
This is a beta-test version. Please report any crashes,
apparent calculation errors, or other anomalous results.
There are no restrictions on publication of results obtained
with this version, but you should check the WWW site
frequently for bug announcements and/or updated versions.
See the README file on the distribution media for details.
----------------------------------------------------------------
Tree description:
Optimality criterion = parsimony
Character-status summary:
Of 500 total characters:
All characters are of type 'unord'
All characters have equal weight
253 characters are constant
109 variable characters are parsimony-uninformative
Number of parsimony-informative characters = 138
Multistate taxa interpreted as uncertainty
Character-state optimization: Accelerated transformation (ACCTRAN)
AncStates = "standard"
Tree number 1 (rooted using user-specified outgroup)
Branch lengths and linkages for tree #1
Assigned Minimum Maximum
Connected branch possible possible
Node to node length length length
-------------------------------------------------------------------------
40 root 0 0 0
outgroup25 (1)* 40 40 24 52
39 40 57 15 72
AF078391l (2) 39 56 48 81
38 39 33 17 71
37 38 31 14 48
22 37 20 11 33
AF078211af (3) 22 4 2 7
AF078393l (4) 22 1 0 3
36 37 14 5 32
AF078187af (5) 36 18 10 28
35 36 21 16 45
34 35 10 3 23
26 34 5 3 9
24 26 4 3 13
23 24 0 0 3
AF078320l (6) 23 1 1 3
AF078356l (10) 23 2 2 2
AF078350l (9) 24 5 3 5
25 26 9 2 10
AF078306af (11) 25 6 4 10
AF078380l (18) 25 5 3 10
33 34 5 4 15
29 33 3 1 4
28 29 2 2 2
27 28 3 1 3
AF078432l (7) 27 2 2 2
AF078450l (15) 27 3 3 4
AF078251af (19) 28 6 6 7
AF078258af (17) 29 6 6 6
32 33 4 3 15
AF078290af (8) 32 9 8 11
31 32 9 6 18
AF078429l (12) 31 2 1 5
30 31 10 9 18
AF078443l (14) 30 2 1 6
AF078452l (16) 30 4 4 5
AF078256af (13) 35 4 1 6
AF078179af (20) 38 48 34 79
outgroup258 (21)* 40 45 27 67
-------------------------------------------------------------------------
Sum 509
Tree length = 509
Consistency index (CI) = 0.7151
Homoplasy index (HI) = 0.2849
""".split(
"\n"
)
line1 = " 40 root 0 0 0"
line2 = "outgroup25 (1)* 40 40 24 52"
line3 = " 39 40 57 15 72"
line4 = "AF078391l (2) 39 56 48 81"
class NexusParserTests(TestCase):
"""Tests of the Nexus Parser functions"""
def test_parse_nexus_tree(self):
"""parse_nexus_tree returns a dnd string and a translation table list"""
Trans_table, dnd = parse_nexus_tree(Nexus_tree)
# check the full dendrogram string is returned
self.assertEqual(
dnd["tree PAUP_1"],
"(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);",
)
# check that all taxa are returned in the Trans_table
self.assertEqual(Trans_table["1"], "outgroup25")
self.assertEqual(Trans_table["2"], "AF078391l")
self.assertEqual(Trans_table["3"], "AF078211af")
self.assertEqual(Trans_table["4"], "AF078393l")
self.assertEqual(Trans_table["5"], "AF078187af")
self.assertEqual(Trans_table["6"], "AF078320l")
self.assertEqual(Trans_table["21"], "outgroup258")
self.assertEqual(Trans_table["20"], "AF078179af")
self.assertEqual(Trans_table["19"], "AF078251af")
# check that Nexus files without translation table work
Trans_table, dnd = parse_nexus_tree(Nexus_tree_2)
self.assertEqual(Trans_table, None)
self.assertEqual(
dnd["tree nj"],
"((((((((((YA10260L1:0.01855,SARAG06_Y:0.00367):0.01965,(((YA270L1G0:0.01095,SARAD10_Y:0.00699):0.01744,YA270L1A0:0.04329):0.00028,((YA165L1C1:0.01241,SARAA02_Y:0.02584):0.00213,((YA165L1H0:0.00092,SARAF10_Y:-0.00092):0.00250,(YA165L1A0:0.00177,SARAH10_Y:0.01226):0.00198):0.00131):0.00700):0.01111):0.11201,(YA160L1F0:0.00348,SARAG01_Y:-0.00122):0.13620):0.01202,((((YRM60L1D0:0.00357,(YRM60L1C0:0.00477,SARAE10_Y:-0.00035):0.00086):0.00092,SARAE03_Y:0.00126):0.00125,SARAC11_Y:0.00318):0.00160,YRM60L1H0:0.00593):0.09975):0.07088,SARAA01_Y:0.02880):0.00190,SARAB04_Y:0.05219):0.00563,YRM60L1E0:0.06099):0.00165,(YRM60L1H0:0.00450,SARAF11_Y:0.01839):0.00288):0.00129,YRM60L1B1:0.00713):0.00194,(YRM60L1G0:0.00990,(YA165L1G0:0.00576,(YA160L1G0:0.01226,SARAA11_Y:0.00389):0.00088):0.00300):0.00614,SARAC06_Y:0.00381);",
)
def test_parse_nexus_tree_sq(self):
"""remove single quotes from tree and translate tables"""
Trans_table, dnd = parse_nexus_tree(Nexus_tree_3)
# check the full dendrogram string is returned
self.assertEqual(
dnd["tree PAUP_1"],
"(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);",
)
# check that all taxa are returned in the Trans_table
self.assertEqual(Trans_table["1"], "outgroup25")
self.assertEqual(Trans_table["2"], "AF078391l")
self.assertEqual(Trans_table["3"], "AF078211af")
self.assertEqual(Trans_table["4"], "AF078393l")
self.assertEqual(Trans_table["5"], "AF078187af")
self.assertEqual(Trans_table["6"], "AF078320l")
self.assertEqual(Trans_table["21"], "outgroup258")
self.assertEqual(Trans_table["20"], "AF078179af")
self.assertEqual(Trans_table["19"], "AF078251af")
def test_get_tree_info(self):
"""get_tree_info returns the Nexus file section that describes the tree"""
result = get_tree_info(Nexus_tree)
self.assertEqual(len(result), 33)
self.assertEqual(
result[0], "Begin trees; [Treefile saved Wednesday, May 5, 2004 5:02 PM]"
)
self.assertEqual(
result[31],
"tree PAUP_1 = [&R] (1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);",
)
def test_split_tree_info(self):
"""split_tree_info splits lines into header, Trans_table, and dnd"""
tree_info = get_tree_info(Nexus_tree)
header, trans_table, dnd = split_tree_info(tree_info)
self.assertEqual(len(header), 9)
self.assertEqual(len(trans_table), 22)
self.assertEqual(len(dnd), 2)
self.assertEqual(
header[0], "Begin trees; [Treefile saved Wednesday, May 5, 2004 5:02 PM]"
)
self.assertEqual(header[8], "\tTranslate")
self.assertEqual(trans_table[0], "\t\t1 outgroup25,")
self.assertEqual(trans_table[21], "\t\t;")
self.assertEqual(
dnd[0],
"tree PAUP_1 = [&R] (1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);",
)
def test_parse_trans_table(self):
"""parse_trans_table returns a dict with the taxa names indexed by number"""
tree_info = get_tree_info(Nexus_tree)
header, trans_table, dnd = split_tree_info(tree_info)
Trans_table = parse_trans_table(trans_table)
self.assertEqual(len(Trans_table), 21)
# check that taxa are returned in the Trans_table
self.assertEqual(Trans_table["1"], "outgroup25")
self.assertEqual(Trans_table["2"], "AF078391l")
self.assertEqual(Trans_table["3"], "AF078211af")
self.assertEqual(Trans_table["4"], "AF078393l")
self.assertEqual(Trans_table["5"], "AF078187af")
self.assertEqual(Trans_table["6"], "AF078320l")
self.assertEqual(Trans_table["21"], "outgroup258")
self.assertEqual(Trans_table["20"], "AF078179af")
self.assertEqual(Trans_table["19"], "AF078251af")
def test_parse_dnd(self):
"""parse_dnd returns a dict with dnd indexed by tree name"""
tree_info = get_tree_info(Nexus_tree)
header, trans_table, dnd = split_tree_info(tree_info)
dnd_dict = parse_dnd(dnd)
self.assertEqual(
dnd_dict["tree PAUP_1"],
"(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);",
)
# ------------------------------------------------------
def test_get_BL_table(self):
"""get_BL_table returns the section of the log file w/ the BL table"""
BL_table = get_BL_table(PAUP_log)
self.assertEqual(len(BL_table), 40)
self.assertEqual(
BL_table[0],
" 40 root 0 0 0",
)
self.assertEqual(
BL_table[39],
"outgroup258 (21)* 40 45 27 67",
)
def test_find_fields(self):
"""find_fields takes BL table line and returns field names mapped to info"""
result = find_fields(line1)
self.assertEqual(result["taxa"], "40")
self.assertEqual(result["bl"], "0")
self.assertEqual(result["parent"], "root")
def test_parse_taxa(self):
"""parse_taxa should return the taxa # from a taxa_field from find_fields"""
result1 = find_fields(line1)
result2 = find_fields(line2)
result3 = find_fields(line3)
result4 = find_fields(line4)
self.assertEqual(parse_taxa(result1["taxa"]), "40")
self.assertEqual(parse_taxa(result2["taxa"]), "1")
self.assertEqual(parse_taxa(result3["taxa"]), "39")
self.assertEqual(parse_taxa(result4["taxa"]), "2")
def test_parse_PAUP_log(self):
"""parse_PAUP_log extracts branch length info from a PAUP log file"""
BL_dict = parse_PAUP_log(PAUP_log)
self.assertEqual(len(BL_dict), 40)
self.assertEqual(BL_dict["1"], ("40", 40))
self.assertEqual(BL_dict["40"], ("root", 0))
self.assertEqual(BL_dict["39"], ("40", 57))
self.assertEqual(BL_dict["2"], ("39", 56))
self.assertEqual(BL_dict["26"], ("34", 5))
self.assertEqual(BL_dict["21"], ("40", 45))
def test_align_with_comments(self):
"""correctly handle an alignment block containing comments"""
parser = MinimalNexusAlignParser("data/nexus_comments.nex")
got = {n: s for n, s in parser}
expect = {
"Ephedra": "TTAAGCCATGCATGTCTAAGTATGAACTAATTCCAAACGGTGA",
"Gnetum": "TTAAGCCATGCATGTCTATGTACGAACTAATC-AGAACGGTGA",
"Welwitschia": "TTAAGCCATGCACGTGTAAGTATGAACTAGTC-GAAACGGTGA",
"Ginkgo": "TTAAGCCATGCATGTGTAAGTATGAACTCTTTACAGACTGTGA",
"Pinus": "TTAAGCCATGCATGTCTAAGTATGAACTAATTGCAGACTGTGA",
}
self.assertEqual(got, expect)
def test_align_with_spaced_seqs(self):
"""correctly handle an alignment block with spaces in seqs"""
parser = MinimalNexusAlignParser("data/nexus_dna.nex")
seqs = {n: s for n, s in parser}
self.assertEqual(len(seqs), 10) # 10 taxa
lengths = set(len(seqs[n]) for n in seqs)
self.assertEqual(lengths, {705}) # all same length and equal 705
def test_align_from_mixed(self):
"""correctly handle a file with tree and alignment block"""
parser = MinimalNexusAlignParser("data/nexus_mixed.nex")
got = {n: s for n, s in parser}
expect = {
"fish": "ACATAGAGGGTACCTCTAAG",
"frog": "ACATAGAGGGTACCTCTAAG",
"snake": "ACATAGAGGGTACCTCTAAG",
"mouse": "ACATAGAGGGTACCTCTAAG",
}
self.assertEqual(got, expect)
def test_align_no_blank_columns(self):
"""correctly handle a file with no white space at line starts"""
parser = MinimalNexusAlignParser("data/nexus_aa.nxs")
seqs = {n: s for n, s in parser}
self.assertEqual(len(seqs), 10) # 10 taxa
lengths = set(len(seqs[n]) for n in seqs)
self.assertEqual(lengths, {234}) # all same length and equal 234
def test_load_seqs_interface(self):
"""load_aligned_seqs correctly loads nexus alignments"""
aln = load_aligned_seqs("data/nexus_mixed.nex")
self.assertEqual(aln.num_seqs, 4)
self.assertEqual(len(aln), 20)
aln = load_aligned_seqs("data/nexus_aa.nxs")
self.assertEqual(aln.num_seqs, 10)
self.assertEqual(len(aln), 234)
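# For orientation, a minimal NEXUS alignment block of the kind the parser tests
# above read looks roughly like the sketch below. This is illustrative only --
# the real data/nexus_*.nex fixtures are not reproduced here and may differ in
# taxa, dimensions, and format options.
#
#   #NEXUS
#   begin data;
#       dimensions ntax=2 nchar=20;
#       format datatype=dna missing=? gap=-;
#       matrix
#       fish   ACATAGAGGGTACCTCTAAG
#       frog   ACATAGAGGGTACCTCTAAG
#       ;
#   end;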
if __name__ == "__main__":
main()
| 43.02765 | 830 | 0.538021 | [
"BSD-3-Clause"
] | tla256/cogent3 | tests/test_parse/test_nexus.py | 18,674 | Python |
from matplotlib import rc
rc("font", family="serif", size=12)
rc("text", usetex=True)
rc("text.latex", preamble=open("macros.tex").read())
#import daft
import imp
daft = imp.load_source('daft', '/Users/kpmurphy/github/daft/daft.py')
import os
pgm = daft.PGM([4, 4], origin=[0, 0])
pgm.add_node(daft.Node("one", r"$1$", 1, 1))
pgm.add_node(daft.Node("x1", r"$x_1$", 2, 1))
pgm.add_node(daft.Node("x2", r"$x_2$", 3, 1))
pgm.add_node(daft.Node("z1", r"$z_1$", 2, 2))
pgm.add_node(daft.Node("z2", r"$z_2$", 3, 2))
pgm.add_node(daft.Node("y", r"$y$", 2.5, 3))
pgm.add_edge("one", "z1", label="-1.5", xoffset=-0.3)
pgm.add_edge("x1", "z1", label="+1", xoffset=-0.3)
pgm.add_edge("x1", "z2", label="+1", xoffset=-0.4)
pgm.add_edge("x2", "z1", label="+1", xoffset=0.4)
pgm.add_edge("x2", "z2", label="+1", xoffset=0.3)
pgm.add_edge("z1", "y", label="-1", xoffset=-0.3)
pgm.add_edge("z2", "y", label="-1", xoffset=0.3)
#ax = pgm.render() # returns the pyplot axes object it drew onto
#ax.text(1, 2, "My label")
pgm.render()
folder = "/Users/kpmurphy/github/pyprobml/figures"
fname = "mlpXor"
pgm.figure.savefig(os.path.join(folder, "{}.png".format(fname)))
| 28.365854 | 69 | 0.629407 | [
"MIT"
] | herupraptono/kevmurphyML | figureCode/daft/mlpXor.py | 1,163 | Python |
#! /usr/bin/env python3
# counts the number of regions in a matrix
# vertically and horizontally connected
# e.g.
# [1, 0, 1, 1]
# [0, 1, 0, 0]
# [1, 1, 0, 1]
# has 4 regions
class Patch(object):
def __init__(self, visited, value):
self.visited = visited
self.value = value
def __repr__(self):
return "visited = %s, value = %s" % (self.visited, self.value)
def initialize(region):
for row in region:
yield [Patch(visited=False, value=field) for field in row]
testregion = list(initialize([
[True, False, True, True],
[False, True, False, False],
[True, True, False, True]
]))
def exploreField(region, x, y):
# check if we've reached the edge of the matrix
if (x < 0 or y < 0):
return True
try:
if region[y][x].visited or not region[y][x].value:
return True
except IndexError:
return True
region[y][x].visited = True
    # recurse into the four orthogonal neighbours, keeping the (x, y) argument
    # order of the function signature
    exploreField(region, x, y+1)
    exploreField(region, x, y-1)
    exploreField(region, x+1, y)
    exploreField(region, x-1, y)
count = 0
for y, row in enumerate(testregion):
for x, patch in enumerate(row):
# if a patch is both unvisited and true
# this means exploreField will eliminate it
# so it counts as one region
if not patch.visited and patch.value:
count += 1
exploreField(testregion, x, y)
print(count)
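# Illustrative alternative (not used above): the recursive flood fill can hit
# Python's recursion limit on large matrices, so an explicit stack is safer.
def exploreFieldIterative(region, x, y):
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        if cx < 0 or cy < 0:
            continue
        try:
            patch = region[cy][cx]
        except IndexError:
            continue
        if patch.visited or not patch.value:
            continue
        patch.visited = True
        stack.extend([(cx, cy + 1), (cx, cy - 1), (cx + 1, cy), (cx - 1, cy)])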
| 23.783333 | 70 | 0.603364 | [
"Unlicense"
] | nisstyre56/Interview-Problems | regions.py | 1,427 | Python |
from __future__ import division
import errno
import os
import re
import shutil
import time
import uuid
from collections import namedtuple
from itertools import repeat
from pprint import pformat
import pytest
from cassandra import WriteFailure
from cassandra.concurrent import (execute_concurrent,
execute_concurrent_with_args)
from ccmlib.node import Node
from cqlsh_tests.cqlsh_tools import assert_resultset_contains
from dtest import Tester, create_ks, logger
from tools.assertions import assert_length_equal
from tools.data import rows_to_list
from tools.files import size_of_files_in_dir
from tools.funcutils import get_rate_limited_function
from tools.hacks import advance_to_next_cl_segment
since = pytest.mark.since
_16_uuid_column_spec = (
'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, e uuid, f uuid, g uuid, '
'h uuid, i uuid, j uuid, k uuid, l uuid, m uuid, n uuid, o uuid, '
'p uuid'
)
def _insert_rows(session, table_name, insert_stmt, values):
prepared_insert = session.prepare(insert_stmt)
values = list(values) # in case values is a generator
execute_concurrent(session, ((prepared_insert, x) for x in values),
concurrency=500, raise_on_first_error=True)
data_loaded = rows_to_list(session.execute('SELECT * FROM ' + table_name))
logger.debug('{n} rows inserted into {table_name}'.format(n=len(data_loaded), table_name=table_name))
# use assert_equal over assert_length_equal to avoid printing out
# potentially large lists
assert len(values) == len(data_loaded)
return data_loaded
def _move_commitlog_segments(source_dir, dest_dir, verbose=True):
for source_filename in [name for name in os.listdir(source_dir) if not name.endswith('_cdc.idx')]:
source_path, dest_path = (os.path.join(source_dir, source_filename),
os.path.join(dest_dir, source_filename))
if verbose:
logger.debug('moving {} to {}'.format(source_path, dest_path))
shutil.move(source_path, dest_path)
def _get_16_uuid_insert_stmt(ks_name, table_name):
return (
'INSERT INTO {ks_name}.{table_name} '
'(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) '
'VALUES (uuid(), uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid(), uuid())'
).format(ks_name=ks_name, table_name=table_name)
def _get_create_table_statement(ks_name, table_name, column_spec, options=None):
if options:
options_pairs = ('{k}={v}'.format(k=k, v=v) for (k, v) in options.items())
options_string = 'WITH ' + ' AND '.join(options_pairs)
else:
options_string = ''
return (
'CREATE TABLE ' + ks_name + '.' + table_name + ' '
'(' + column_spec + ') ' + options_string
)
def _write_to_cdc_write_failure(session, insert_stmt):
prepared = session.prepare(insert_stmt)
start, rows_loaded, error_found = time.time(), 0, False
rate_limited_debug = get_rate_limited_function(logger.debug, 5)
while not error_found:
# We want to fail if inserting data takes too long. Locally this
# takes about 10s, but let's be generous.
assert ((time.time() - start) <= 600), (
"It's taken more than 10 minutes to reach a WriteFailure trying "
'to overrun the space designated for CDC commitlogs. This could '
"be because data isn't being written quickly enough in this "
'environment, or because C* is failing to reject writes when '
'it should.')
# If we haven't logged from here in the last 5s, do so.
rate_limited_debug(
' data load step has lasted {s:.2f}s, '
'loaded {r} rows'.format(s=(time.time() - start), r=rows_loaded))
batch_results = list(execute_concurrent(
session,
((prepared, ()) for _ in range(1000)),
concurrency=500,
# Don't propagate errors to the main thread. We expect at least
# one WriteFailure, so we handle it below as part of the
# results recieved from this method.
raise_on_first_error=False
))
# Here, we track the number of inserted values by getting the
# number of successfully completed statements...
rows_loaded += len([br for br in batch_results if br[0]])
# then, we make sure that the only failures are the expected
# WriteFailure.
assert ([] == [result for (success, result) in batch_results
if not success and not isinstance(result, WriteFailure)])
# Finally, if we find a WriteFailure, that means we've inserted all
# the CDC data we can and so we flip error_found to exit the loop.
if any(isinstance(result, WriteFailure) for (_, result) in batch_results):
logger.debug("write failed (presumably because we've overrun "
'designated CDC commitlog space) after '
'loading {r} rows in {s:.2f}s'.format(
r=rows_loaded,
s=time.time() - start))
error_found = True
return rows_loaded
_TableInfoNamedtuple = namedtuple('TableInfoNamedtuple', [
# required
'ks_name', 'table_name', 'column_spec',
# optional
'options', 'insert_stmt',
# derived
'name', 'create_stmt'
])
class TableInfo(_TableInfoNamedtuple):
__slots__ = ()
def __new__(cls, ks_name, table_name, column_spec, options=None, insert_stmt=None):
name = ks_name + '.' + table_name
create_stmt = _get_create_table_statement(ks_name, table_name, column_spec, options)
self = super(TableInfo, cls).__new__(
cls,
# required
ks_name=ks_name, table_name=table_name, column_spec=column_spec,
# optional
options=options, insert_stmt=insert_stmt,
# derived
name=name, create_stmt=create_stmt
)
return self
def _set_cdc_on_table(session, table_name, value, ks_name=None):
"""
Uses <session> to set CDC to <value> on <ks_name>.<table_name>.
"""
table_string = ks_name + '.' + table_name if ks_name else table_name
value_string = 'true' if value else 'false'
stmt = 'ALTER TABLE ' + table_string + ' WITH CDC = ' + value_string
logger.debug(stmt)
session.execute(stmt)
def _get_set_cdc_func(session, ks_name, table_name):
"""
Close over a session, keyspace name, and table name and return a function
that takes enables CDC on that keyspace if its argument is truthy and
otherwise disables it.
"""
def set_cdc(value):
return _set_cdc_on_table(
session=session,
ks_name=ks_name, table_name=table_name,
value=value
)
return set_cdc
def _get_commitlog_files(node_path):
commitlog_dir = os.path.join(node_path, 'commitlogs')
return {
os.path.join(commitlog_dir, name)
for name in os.listdir(commitlog_dir)
}
def _get_cdc_raw_files(node_path, cdc_raw_dir_name='cdc_raw'):
commitlog_dir = os.path.join(node_path, cdc_raw_dir_name)
return {
os.path.join(commitlog_dir, name)
for name in os.listdir(commitlog_dir)
}
@since('3.8')
class TestCDC(Tester):
"""
@jira_ticket CASSANDRA-8844
@jira_ticket CASSANDRA-12148
Test the correctness of some features of CDC, Change Data Capture, which
provides a view of the commitlog on tables for which it is enabled.
"""
@pytest.fixture(autouse=True)
def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
fixture_dtest_setup.allow_log_errors = True
fixture_dtest_setup.ignore_log_patterns = (
# We expect to see this error in the logs when we reach CDC limit
r'Failed to apply mutation locally'
)
def _create_temp_dir(self, dir_name, verbose=True):
"""
Create a directory that will be deleted when this test class is torn
down.
"""
if verbose:
logger.debug('creating ' + dir_name)
try:
os.mkdir(dir_name)
except OSError as e:
if e.errno != errno.EEXIST:
logger.debug(dir_name + ' already exists. removing and recreating.')
shutil.rmtree(dir_name)
os.mkdir(dir_name)
else:
raise e
def debug_and_rmtree():
shutil.rmtree(dir_name)
logger.debug(dir_name + ' removed')
self.addCleanup(debug_and_rmtree)
def prepare(self, ks_name,
table_name=None, cdc_enabled_table=None,
gc_grace_seconds=None,
column_spec=None,
configuration_overrides=None,
table_id=None):
"""
Create a 1-node cluster, start it, create a keyspace, and if
<table_name>, create a table in that keyspace. If <cdc_enabled_table>,
that table is created with CDC enabled. If <column_spec>, use that
string to specify the schema of the table -- for example, a valid value
is 'a int PRIMARY KEY, b int'. The <configuration_overrides> is
treated as a dict-like object and passed to
self.cluster.set_configuration_options.
"""
config_defaults = {
'cdc_enabled': True,
# we want to be able to generate new segments quickly
'commitlog_segment_size_in_mb': 2,
}
if configuration_overrides is None:
configuration_overrides = {}
self.cluster.populate(1)
self.cluster.set_configuration_options(dict(config_defaults, **configuration_overrides))
self.cluster.start()
node = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node)
create_ks(session, ks_name, rf=1)
if table_name is not None:
assert cdc_enabled_table is not None, 'if creating a table in prepare, must specify whether or not CDC is enabled on it'
assert column_spec is not None, 'if creating a table in prepare, must specify its schema'
options = {}
if gc_grace_seconds is not None:
options['gc_grace_seconds'] = gc_grace_seconds
if table_id is not None:
options['id'] = table_id
if cdc_enabled_table:
options['cdc'] = 'true'
stmt = _get_create_table_statement(
ks_name, table_name, column_spec,
options=options
)
logger.debug(stmt)
session.execute(stmt)
return node, session
def _assert_cdc_data_readable_on_round_trip(self, start_with_cdc_enabled):
"""
Parameterized test asserting that data written to a table is still
readable after flipping the CDC flag on that table, then flipping it
again. Starts with CDC enabled if start_with_cdc_enabled, otherwise
starts with it disabled.
"""
ks_name, table_name = 'ks', 'tab'
sequence = [True, False, True] if start_with_cdc_enabled else [False, True, False]
start_enabled, alter_path = sequence[0], list(sequence[1:])
node, session = self.prepare(ks_name=ks_name, table_name=table_name,
cdc_enabled_table=start_enabled,
column_spec='a int PRIMARY KEY, b int')
set_cdc = _get_set_cdc_func(session=session, ks_name=ks_name, table_name=table_name)
insert_stmt = session.prepare('INSERT INTO ' + table_name + ' (a, b) VALUES (?, ?)')
# data = zip(list(range(1000)), list(range(1000)))
start = 0
stop = 1000
step = 1
data = [(n, min(n+step, stop)) for n in range(start, stop, step)]
execute_concurrent_with_args(session, insert_stmt, data)
# We need data to be in commitlogs, not sstables.
assert [] == list(node.get_sstables(ks_name, table_name))
for enable in alter_path:
set_cdc(enable)
assert_resultset_contains(session.execute('SELECT * FROM ' + table_name), data)
def test_cdc_enabled_data_readable_on_round_trip(self):
"""
Test that data is readable after an enabled->disabled->enabled round
trip.
"""
self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=True)
def test_cdc_disabled_data_readable_on_round_trip(self):
"""
Test that data is readable after an disabled->enabled->disabled round
trip.
"""
self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=False)
def test_non_cdc_segments_deleted_after_replay(self):
"""
Test that non-cdc segment files generated in previous runs are deleted
after replay.
"""
ks_name, table_name = 'ks', 'tab'
node, session = self.prepare(ks_name=ks_name, table_name=table_name,
cdc_enabled_table=True,
column_spec='a int PRIMARY KEY, b int')
old_files = _get_cdc_raw_files(node.get_path())
node.drain()
session.cluster.shutdown()
node.stop()
node.start(wait_for_binary_proto=True)
new_files = _get_cdc_raw_files(node.get_path())
assert len(old_files.intersection(new_files)) == 0
def test_insertion_and_commitlog_behavior_after_reaching_cdc_total_space(self):
"""
Test that C* behaves correctly when CDC tables have consumed all the
space available to them. In particular: after writing
cdc_total_space_in_mb MB into CDC commitlogs:
- CDC writes are rejected
- non-CDC writes are accepted
- on flush, CDC commitlogs are copied to cdc_raw
- on flush, non-CDC commitlogs are not copied to cdc_raw
This is a lot of behavior to validate in one test, but we do so to
avoid running multiple tests that each write 1MB of data to fill
cdc_total_space_in_mb.
"""
ks_name = 'ks'
full_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='full_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'full_cdc_tab'),
options={'cdc': 'true'}
)
configuration_overrides = {
# Make CDC space as small as possible so we can fill it quickly.
'cdc_total_space_in_mb': 4,
}
node, session = self.prepare(
ks_name=ks_name,
configuration_overrides=configuration_overrides
)
session.execute(full_cdc_table_info.create_stmt)
# Later, we'll also make assertions about the behavior of non-CDC
# tables, so we create one here.
non_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='non_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'non_cdc_tab')
)
session.execute(non_cdc_table_info.create_stmt)
# We'll also make assertions about the behavior of CDC tables when
# other CDC tables have already filled the designated space for CDC
# commitlogs, so we create the second CDC table here.
empty_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='empty_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'empty_cdc_tab'),
options={'cdc': 'true'}
)
session.execute(empty_cdc_table_info.create_stmt)
# Here, we insert values into the first CDC table until we get a
        # WriteFailure. This should happen once the CDC commitlogs take up
        # cdc_total_space_in_mb (4 MB here) or more.
logger.debug('flushing non-CDC commitlogs')
node.flush()
# Then, we insert rows into the CDC table until we can't anymore.
logger.debug('beginning data insert to fill CDC commitlogs')
rows_loaded = _write_to_cdc_write_failure(session, full_cdc_table_info.insert_stmt)
assert 0 < rows_loaded, ('No CDC rows inserted. This may happen when '
'cdc_total_space_in_mb > commitlog_segment_size_in_mb')
commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
commitlogs_size = size_of_files_in_dir(commitlog_dir)
logger.debug('Commitlog dir ({d}) is {b}B'.format(d=commitlog_dir, b=commitlogs_size))
# We should get a WriteFailure when trying to write to the CDC table
# that's filled the designated CDC space...
try:
session.execute(full_cdc_table_info.insert_stmt)
raise Exception("WriteFailure expected")
except WriteFailure:
pass
# or any CDC table.
try:
session.execute(empty_cdc_table_info.insert_stmt)
raise Exception("WriteFailure expected")
except WriteFailure:
pass
# Now we test for behaviors of non-CDC tables when we've exceeded
# cdc_total_space_in_mb.
#
# First, we drain and save the names of all the new discarded CDC
# segments
node.drain()
session.cluster.shutdown()
node.stop()
node.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node)
pre_non_cdc_write_cdc_raw_segments = _get_cdc_raw_files(node.get_path())
# Snapshot the _cdc.idx file if > 4.0 for comparison at end
before_cdc_state = [] # init empty here to quiet PEP
if self.cluster.version() >= '4.0':
# Create ReplayData objects for each index file found in loading cluster
node1_path = os.path.join(node.get_path(), 'cdc_raw')
before_cdc_state = [ReplayData.load(node1_path, name)
for name in os.listdir(node1_path) if name.endswith('_cdc.idx')]
# save the names of all the commitlog segments written up to this
# point:
pre_non_cdc_write_segments = _get_commitlog_files(node.get_path())
# Check that writing to non-CDC tables succeeds even when writes to CDC
# tables are rejected:
non_cdc_prepared_insert = session.prepare(non_cdc_table_info.insert_stmt)
session.execute(non_cdc_prepared_insert, ()) # should not raise an exception
# Check the following property: any new commitlog segments written to
# after cdc_raw has reached its maximum configured size should not be
# moved to cdc_raw, on commitlog discard, because any such commitlog
# segments are written to non-CDC tables.
#
# First, write to non-cdc tables.
start, time_limit = time.time(), 600
rate_limited_debug = get_rate_limited_function(logger.debug, 5)
logger.debug('writing to non-cdc table')
# We write until we get a new commitlog segment.
while _get_commitlog_files(node.get_path()) <= pre_non_cdc_write_segments:
elapsed = time.time() - start
rate_limited_debug(' non-cdc load step has lasted {s:.2f}s'.format(s=elapsed))
            assert elapsed <= time_limit, "It's been over {s}s and we haven't written a new " \
                                          "commitlog segment. Something is wrong.".format(s=time_limit)
execute_concurrent(
session,
((non_cdc_prepared_insert, ()) for _ in range(1000)),
concurrency=500,
raise_on_first_error=True,
)
# Finally, we check that draining doesn't move any new segments to cdc_raw:
node.drain()
session.cluster.shutdown()
if self.cluster.version() < '4.0':
assert pre_non_cdc_write_cdc_raw_segments == _get_cdc_raw_files(node.get_path())
else:
# Create ReplayData objects for each index file found in loading cluster
node2_path = os.path.join(node.get_path(), 'cdc_raw')
after_cdc_state = [ReplayData.load(node2_path, name)
for name in os.listdir(node2_path) if name.endswith('_cdc.idx')]
# Confirm all indexes in 1st are accounted for and match corresponding entry in 2nd.
found = True
for idx in before_cdc_state:
idx_found = False
for idx_two in after_cdc_state:
if compare_replay_data(idx, idx_two):
idx_found = True
if not idx_found:
found = False
break
if not found:
self._fail_and_print_sets(before_cdc_state, after_cdc_state,
'Found CDC index in before not matched in after (non-CDC write test)')
# Now we confirm we don't have anything that showed up in 2nd not accounted for in 1st
orphan_found = False
for idx_two in after_cdc_state:
index_found = False
for idx in before_cdc_state:
if compare_replay_data(idx_two, idx):
index_found = True
if not index_found:
orphan_found = True
break
if orphan_found:
self._fail_and_print_sets(before_cdc_state, after_cdc_state,
'Found orphaned index file in after CDC state not in former.')
def _fail_and_print_sets(self, rd_one, rd_two, msg):
print('Set One:')
for idx in rd_one:
print(' {},{},{},{}'.format(idx.name, idx.completed, idx.offset, idx.log_name))
print('Set Two:')
for idx_two in rd_two:
print(' {},{},{},{}'.format(idx_two.name, idx_two.completed, idx_two.offset, idx_two.log_name))
pytest.fail(msg)
def _init_new_loading_node(self, ks_name, create_stmt, use_thrift=False):
loading_node = Node(
name='node2',
cluster=self.cluster,
auto_bootstrap=False,
thrift_interface=('127.0.0.2', 9160) if use_thrift else None,
storage_interface=('127.0.0.2', 7000),
jmx_port='7400',
remote_debug_port='0',
initial_token=None,
binary_interface=('127.0.0.2', 9042)
)
logger.debug('adding node')
self.cluster.add(loading_node, is_seed=True, data_center="dc1")
logger.debug('starting new node')
loading_node.start(wait_for_binary_proto=120)
logger.debug('recreating ks and table')
loading_session = self.patient_exclusive_cql_connection(loading_node)
create_ks(loading_session, ks_name, rf=1)
logger.debug('creating new table')
loading_session.execute(create_stmt)
logger.debug('stopping new node')
loading_session.cluster.shutdown()
loading_node.stop()
return loading_node
def test_cdc_data_available_in_cdc_raw(self):
ks_name = 'ks'
# First, create a new node just for data generation.
generation_node, generation_session = self.prepare(ks_name=ks_name)
cdc_table_info = TableInfo(
ks_name=ks_name, table_name='cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'cdc_tab'),
options={
'cdc': 'true',
# give table an explicit id so when we create it again it's the
# same table and we can replay into it
'id': uuid.uuid4()
}
)
# Write until we get a new CL segment to avoid replaying initialization
# mutations from this node's startup into system tables in the other
# node. See CASSANDRA-11811.
advance_to_next_cl_segment(
session=generation_session,
commitlog_dir=os.path.join(generation_node.get_path(), 'commitlogs')
)
generation_session.execute(cdc_table_info.create_stmt)
# insert 10000 rows
inserted_rows = _insert_rows(generation_session, cdc_table_info.name, cdc_table_info.insert_stmt,
repeat((), 10000))
# drain the node to guarantee all cl segments will be recycled
logger.debug('draining')
generation_node.drain()
logger.debug('stopping')
# stop the node and clean up all sessions attached to it
generation_session.cluster.shutdown()
generation_node.stop()
# We can rely on the existing _cdc.idx files to determine which .log files contain cdc data.
source_path = os.path.join(generation_node.get_path(), 'cdc_raw')
        source_cdc_indexes = {ReplayData.load(source_path, name)
                              for name in os.listdir(source_path) if name.endswith('_cdc.idx')}
        assert source_cdc_indexes != set()
# create a new node to use for cdc_raw cl segment replay
loading_node = self._init_new_loading_node(ks_name, cdc_table_info.create_stmt, self.cluster.version() < '4')
# move cdc_raw contents to commitlog directories, then start the
# node again to trigger commitlog replay, which should replay the
# cdc_raw files we moved to commitlogs into memtables.
logger.debug('moving cdc_raw and restarting node')
_move_commitlog_segments(
os.path.join(generation_node.get_path(), 'cdc_raw'),
os.path.join(loading_node.get_path(), 'commitlogs')
)
loading_node.start(wait_for_binary_proto=120)
logger.debug('node successfully started; waiting on log replay')
loading_node.grep_log('Log replay complete')
logger.debug('log replay complete')
# final assertions
validation_session = self.patient_exclusive_cql_connection(loading_node)
data_in_cdc_table_after_restart = rows_to_list(
validation_session.execute('SELECT * FROM ' + cdc_table_info.name)
)
logger.debug('found {cdc} values in CDC table'.format(
cdc=len(data_in_cdc_table_after_restart)
))
# Then we assert that the CDC data that we expect to be there is there.
# All data that was in CDC tables should have been copied to cdc_raw,
# then used in commitlog replay, so it should be back in the cluster.
assert (inserted_rows == data_in_cdc_table_after_restart), 'not all expected data selected'
if self.cluster.version() >= '4.0':
# Create ReplayData objects for each index file found in loading cluster
loading_path = os.path.join(loading_node.get_path(), 'cdc_raw')
dest_cdc_indexes = [ReplayData.load(loading_path, name)
for name in os.listdir(loading_path) if name.endswith('_cdc.idx')]
# Compare source replay data to dest to ensure replay process created both hard links and index files.
for srd in source_cdc_indexes:
# Confirm both log and index are in dest
assert os.path.isfile(os.path.join(loading_path, srd.idx_name))
assert os.path.isfile(os.path.join(loading_path, srd.log_name))
# Find dest ReplayData that corresponds to the source (should be exactly 1)
corresponding_dest_replay_datae = [x for x in dest_cdc_indexes
if srd.idx_name == x.idx_name]
assert_length_equal(corresponding_dest_replay_datae, 1)
drd = corresponding_dest_replay_datae[0]
# We can't compare equality on offsets since replay uses the raw file length as the written
# cdc offset. We *can*, however, confirm that the offset in the replayed file is >=
# the source file, ensuring clients are signaled to replay at least all the data in the
# log.
assert drd.offset >= srd.offset
# Confirm completed flag is the same in both
assert srd.completed == drd.completed
# Confirm that the relationship between index files on the source
# and destination looks like we expect.
# First, grab the mapping between the two, make sure it's a 1-1
# mapping, and transform the dict to reflect that:
src_to_dest_idx_map = {
src_rd: [dest_rd for dest_rd in dest_cdc_indexes
if dest_rd.idx_name == src_rd.idx_name]
for src_rd in source_cdc_indexes
}
for src_rd, dest_rds in src_to_dest_idx_map.items():
assert_length_equal(dest_rds, 1)
src_to_dest_idx_map[src_rd] = dest_rds[0]
# All offsets in idx files that were copied should be >0 on the
# destination node.
assert (
0 not in {i.offset for i in src_to_dest_idx_map.values()}),\
('Found index offsets == 0 in an index file on the '
'destination node that corresponds to an index file on the '
'source node:\n'
'{}').format(pformat(src_to_dest_idx_map))
# Offsets of all shared indexes should be >= on the destination
# than on the source.
for src_rd, dest_rd in src_to_dest_idx_map.items():
assert dest_rd.offset >= src_rd.offset
src_to_dest_idx_map = {
src_rd: [dest_rd for dest_rd in dest_cdc_indexes
if dest_rd.idx_name == src_rd.idx_name]
for src_rd in source_cdc_indexes
}
            # Re-check the 1-1 mapping and the offset invariant
            # (destination offset >= source offset).
            for k, v in src_to_dest_idx_map.items():
                assert_length_equal(v, 1)
                assert v[0].offset >= k.offset
def compare_replay_data(rd_one, rd_two):
return rd_one.idx_name == rd_two.idx_name and \
rd_one.completed == rd_two.completed and \
rd_one.offset == rd_two.offset and \
rd_one.log_name == rd_two.log_name
class ReplayData(namedtuple('ReplayData', ['idx_name', 'completed', 'offset', 'log_name'])):
"""
Replay data class containing data from a _cdc.idx file. Build one with the load method.
"""
@classmethod
def load(cls, path, name):
assert '_cdc' in name, 'expected to find _cdc in passed in index name. Did not: ' + name
with open(os.path.join(path, name), 'r') as f:
offset, completed = [line.strip() for line in f.readlines()]
return cls(
idx_name=name,
completed=completed,
offset=int(offset),
log_name=re.sub('_cdc.idx', '.log', name)
)
| 43.051176 | 132 | 0.624398 | [
"Apache-2.0"
] | Ankou76ers/cassandra-dtest | cdc_test.py | 31,126 | Python |
from os.path import isfile
from os.path import join
import matplotlib.pyplot as plt
import pytest
from SciDataTool import DataTime, Data1D, DataLinspace, VectorField, Norm_ref
from numpy import linspace, sin, squeeze
from Tests import TEST_DATA_DIR
from Tests import save_plot_path as save_path
from pyleecan.Classes.ImportMatlab import ImportMatlab
from pyleecan.Classes.InputFlux import InputFlux
from pyleecan.Classes.OPdq import OPdq
from pyleecan.Classes.Output import Output
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Functions.load import load
from pyleecan.Functions.Plot import dict_2D, dict_3D
from pyleecan.definitions import DATA_DIR
@pytest.fixture(scope="module")
def import_data():
data = import_data_func()
return data
def import_data_func():
SCIM_006 = load(join(DATA_DIR, "Machine", "SCIM_006.json"))
simu = Simu1(name="test_plots", machine=SCIM_006)
mat_file_Br = join(TEST_DATA_DIR, "Plots", "default_proj_Br.mat")
mat_file_time = join(TEST_DATA_DIR, "Plots", "default_proj_time.mat")
mat_file_angle = join(TEST_DATA_DIR, "Plots", "default_proj_angle.mat")
mat_file_Br_cfft2 = join(TEST_DATA_DIR, "Plots", "default_proj_Br_cfft2.mat")
mat_file_Brfreqs = join(TEST_DATA_DIR, "Plots", "default_proj_Brfreqs.mat")
mat_file_Brwavenumber = join(
TEST_DATA_DIR, "Plots", "default_proj_Brwavenumber.mat"
)
if not isfile(mat_file_Br):
import urllib.request
url = "https://www.pyleecan.org/Data/default_proj_Br.mat"
urllib.request.urlretrieve(url, mat_file_Br)
if not isfile(mat_file_Br_cfft2):
import urllib.request
url = "https://www.pyleecan.org/Data/default_proj_Br_cfft2.mat"
urllib.request.urlretrieve(url, mat_file_Br_cfft2)
data = {}
data["SCIM_006"] = SCIM_006
data["simu"] = simu
# Read input files from Manatee
data["flux"] = ImportMatlab(mat_file_Br, var_name="XBr")
data["time"] = ImportMatlab(mat_file_time, var_name="timec")
data["angle"] = ImportMatlab(mat_file_angle, var_name="alpha_radc")
data["flux_FT"] = ImportMatlab(mat_file_Br_cfft2, var_name="Fwr")
data["freqs"] = ImportMatlab(mat_file_Brfreqs, var_name="freqs")
data["wavenumber"] = ImportMatlab(mat_file_Brwavenumber, var_name="orders")
data["N0"] = 2000
data["Id_ref"] = 10
data["Iq_ref"] = -10
# Plot parameters
data["freq_max"] = 2000
data["r_max"] = 78
return data
class Test_plots(object):
@pytest.mark.long_5s
@pytest.mark.SingleOP
@pytest.mark.SCIM
def test_default_proj_Br_time_space(self, import_data):
SCIM_006 = import_data["SCIM_006"]
simu = import_data["simu"]
time = import_data["time"]
angle = import_data["angle"]
flux = import_data["flux"]
freq_max = import_data["freq_max"]
N0 = import_data["N0"]
Id_ref = import_data["Id_ref"]
Iq_ref = import_data["Iq_ref"]
time_arr = squeeze(time.get_data())
angle_arr = squeeze(angle.get_data())
flux_arr = flux.get_data()
norm_angle = {"space_order": Norm_ref(ref=3)}
simu = Simu1(name="test_default_proj_Br_time_space", machine=SCIM_006)
simu.mag = None
simu.force = None
simu.struct = None
simu.input = InputFlux(
B_dict={"Br": flux},
time=time,
angle=angle,
OP=OPdq(N0=N0, Id_ref=Id_ref, Iq_ref=Iq_ref),
)
out = Output(simu=simu)
simu.run()
out2 = Output(simu=simu)
# Reduce to 1/3 period
Br_reduced = flux_arr[0:672, 0:672]
time_reduced = time_arr[0:672]
angle_reduced = angle_arr[0:672]
# Build the data objects
Time2 = Data1D(
name="time",
unit="s",
symmetries={"period": 3},
values=time_reduced,
)
Angle2 = Data1D(
name="angle",
unit="rad",
symmetries={"period": 3},
values=angle_reduced,
normalizations=norm_angle,
)
Br2 = DataTime(
symbol="B_r",
name="Airgap radial flux density",
unit="T",
axes=[Time2, Angle2],
values=Br_reduced,
)
out2.mag.B = VectorField(
name="Airgap flux density", symbol="B", components={"radial": Br2}
)
        # Plot the result by comparing the two simulations (sym / no sym)
plt.close("all")
out.mag.B.plot_2D_Data(
"time",
"angle[0]{°}",
data_list=[out2.mag.B],
is_auto_ticks=False,
legend_list=["Reference", "Periodic"],
save_path=join(save_path, "test_default_proj_Br_dataobj_period.png"),
is_show_fig=False,
**dict_2D,
)
out.mag.B.plot_2D_Data(
"freqs=[0," + str(freq_max) + "]",
data_list=[out2.mag.B],
legend_list=["Reference", "Periodic"],
is_auto_ticks=False,
save_path=join(save_path, "test_default_proj_Br_dataobj_period_fft.png"),
is_show_fig=False,
**dict_2D,
)
out3 = Output(simu=simu)
# Get linspace data
t0 = time_arr[0]
tf = time_arr[-1]
deltat = time_arr[1] - time_arr[0]
a0 = angle_arr[0]
deltaa = angle_arr[1] - angle_arr[0]
Na = len(angle_arr)
# Build the data objects
Time3 = DataLinspace(
name="time",
unit="s",
initial=t0,
final=tf + deltat,
step=deltat,
include_endpoint=False,
)
Angle3 = DataLinspace(
name="angle",
unit="rad",
normalizations=norm_angle,
initial=a0,
step=deltaa,
number=Na,
include_endpoint=False,
)
Br3 = DataTime(
symbol="B_r",
name="Airgap radial flux density",
unit="T",
axes=[Time3, Angle3],
values=flux_arr,
)
out3.mag.B = VectorField(
name="Airgap flux density", symbol="B", components={"radial": Br3}
)
        # Plot the result by comparing the two simulations (Data1D / DataLinspace)
plt.close("all")
out.mag.B.plot_2D_Data(
"angle{°}",
data_list=[out3.mag.B],
legend_list=["Reference", "Linspace"],
is_auto_ticks=False,
save_path=join(save_path, "test_default_proj_Br_dataobj_linspace.png"),
is_show_fig=False,
**dict_2D,
)
out.mag.B.components["radial"].axes[1].normalizations["space_order"] = Norm_ref(
ref=3
)
out.mag.B.plot_2D_Data(
"wavenumber->space_order=[0,100]",
data_list=[out3.mag.B],
legend_list=["Reference", "Linspace"],
is_auto_ticks=False,
save_path=join(save_path, "test_default_proj_Br_dataobj_linspace_fft.png"),
is_show_fig=False,
**dict_2D,
)
simu4 = Simu1(name="test_default_proj_Br_time_space_ift", machine=SCIM_006)
simu4.mag = None
simu4.force = None
simu4.struct = None
simu4.input = InputFlux(
B_dict={"Br": flux},
time=time,
angle=angle,
OP=OPdq(N0=N0, Id_ref=Id_ref, Iq_ref=Iq_ref),
)
out4 = Output(simu=simu4)
simu4.run()
out4.post.legend_name = "Inverse FT"
        # Plot the result by comparing the two simulations (direct / ifft)
plt.close("all")
out.mag.B.plot_2D_Data(
"angle{°}",
data_list=[out4.mag.B],
legend_list=["Reference", "Inverse FFT"],
is_auto_ticks=False,
save_path=join(save_path, "test_default_proj_Br_dataobj_ift.png"),
is_show_fig=False,
**dict_2D,
)
out.mag.B.plot_2D_Data(
"wavenumber=[0,100]",
data_list=[out4.mag.B],
legend_list=["Reference", "Inverse FFT"],
is_auto_ticks=False,
save_path=join(save_path, "test_default_proj_Br_dataobj_ift_fft.png"),
is_show_fig=False,
**dict_2D,
)
out5 = Output(simu=simu)
# Get linspace data
t0 = 0.01
tf = 0.04
Nt = 3000
time5 = linspace(0.01, 0.04, 3000, endpoint=True)
# Compute sine function
Br5 = 0.2 * sin(375 * time5 - 1.5)
# Build the data objects
Time5 = DataLinspace(
name="time",
unit="s",
initial=t0,
final=tf,
number=Nt,
include_endpoint=True,
)
flux5 = DataTime(
symbol="B_r",
name="Airgap radial flux density",
unit="T",
axes=[Time5],
values=Br5,
)
out5.mag.B = VectorField(
name="Airgap flux density", symbol="B", components={"radial": flux5}
)
        # Plot the flux density at the first angle over time, compared with the analytical sine wave
plt.close("all")
out.mag.B.plot_2D_Data(
"time",
"angle[0]{°}",
data_list=[out5.mag.B],
legend_list=["Br", "0.2sin(375t-1.5)"],
save_path=join(save_path, "test_default_proj_Br_compare.png"),
is_auto_ticks=False,
is_show_fig=False,
**dict_2D,
)
@pytest.mark.SingleOP
@pytest.mark.SCIM
def test_default_proj_Br_cfft2(self, import_data):
SCIM_006 = import_data["SCIM_006"]
simu = import_data["simu"]
time = import_data["time"]
angle = import_data["angle"]
flux = import_data["flux"]
freq_max = import_data["freq_max"]
r_max = import_data["r_max"]
N0 = import_data["N0"]
Id_ref = import_data["Id_ref"]
Iq_ref = import_data["Iq_ref"]
N_stem = 100
simu = Simu1(name="test_default_proj_Br_cfft2", machine=SCIM_006)
simu.input = InputFlux(
B_dict={"Br": flux},
time=time,
angle=angle,
OP=OPdq(N0=N0, Id_ref=Id_ref, Iq_ref=Iq_ref),
)
simu.mag = None
simu.force = None
simu.struct = None
out = Output(simu=simu)
simu.run()
# Plot the 2D FFT of flux density as stem plot
plt.close("all")
out.mag.B.plot_3D_Data(
"freqs=[0," + str(freq_max) + "]",
"wavenumber=[-" + str(r_max) + "," + str(r_max) + "]",
N_stem=N_stem,
is_auto_ticks=False,
save_path=join(save_path, "test_default_proj_Br_dataobj_cfft2.png"),
is_show_fig=False,
**dict_3D,
)
@pytest.mark.SingleOP
@pytest.mark.SCIM
def test_default_proj_surf(self, import_data):
SCIM_006 = import_data["SCIM_006"]
simu = import_data["simu"]
time = import_data["time"]
angle = import_data["angle"]
flux = import_data["flux"]
flux_FT = import_data["flux_FT"]
freqs = import_data["freqs"]
wavenumber = import_data["wavenumber"]
freq_max = import_data["freq_max"]
r_max = import_data["r_max"]
N0 = import_data["N0"]
Id_ref = import_data["Id_ref"]
Iq_ref = import_data["Iq_ref"]
simu = Simu1(name="test_default_proj_surf", machine=SCIM_006)
simu.mag = None
simu.force = None
simu.struct = None
simu.input = InputFlux(
B_dict={"Br": flux},
time=time,
angle=angle,
OP=OPdq(N0=N0, Id_ref=Id_ref, Iq_ref=Iq_ref),
)
out = Output(simu=simu)
simu.run()
        # Plot the radial flux density as a 3D surface over time and angle
plt.close("all")
out.mag.B.plot_3D_Data(
"time=[0,0.06]",
"angle{°}",
component_list=["radial"],
save_path=join(save_path, "test_default_proj_Br_surf_dataobj.png"),
is_2D_view=False,
is_show_fig=False,
**dict_3D,
)
@pytest.mark.SingleOP
@pytest.mark.SCIM
def test_default_proj_fft2(self, import_data):
SCIM_006 = import_data["SCIM_006"]
simu = import_data["simu"]
time = import_data["time"]
angle = import_data["angle"]
flux = import_data["flux"]
freq_max = import_data["freq_max"]
r_max = import_data["r_max"]
N0 = import_data["N0"]
Id_ref = import_data["Id_ref"]
Iq_ref = import_data["Iq_ref"]
simu = Simu1(name="test_default_proj_fft2", machine=SCIM_006)
simu.mag = None
simu.force = None
simu.struct = None
simu.input = InputFlux(
B_dict={"Br": flux},
time=time,
angle=angle,
OP=OPdq(N0=N0, Id_ref=Id_ref, Iq_ref=Iq_ref),
)
out = Output(simu=simu)
simu.run()
# Plot the 2D FFT of flux density as 2D scatter plot with colormap
plt.close("all")
freq_max = 500
r_max = 20
out.mag.B.plot_3D_Data(
"freqs=[0," + str(freq_max) + "]",
"wavenumber=[-" + str(r_max) + "," + str(r_max) + "]",
is_2D_view=True,
is_auto_ticks=False,
save_path=join(save_path, "test_default_proj_Br_fft2_dataobj.png"),
is_show_fig=False,
**dict_3D,
)
@pytest.mark.SingleOP
@pytest.mark.SCIM
def test_default_proj_time_space(self, import_data):
SCIM_006 = import_data["SCIM_006"]
simu = import_data["simu"]
time = import_data["time"]
angle = import_data["angle"]
flux = import_data["flux"]
N0 = import_data["N0"]
Id_ref = import_data["Id_ref"]
Iq_ref = import_data["Iq_ref"]
simu = Simu1(name="test_default_proj_time_space", machine=SCIM_006)
simu.mag = None
simu.force = None
simu.struct = None
simu.input = InputFlux(
B_dict={"Br": flux},
time=time,
angle=angle,
OP=OPdq(N0=N0, Id_ref=Id_ref, Iq_ref=Iq_ref),
)
out = Output(simu=simu)
simu.run()
        # Plot the radial flux density over time and angle as a 2D map
plt.close("all")
out.mag.B.plot_3D_Data(
"time",
"angle{°}",
is_2D_view=True,
save_path=join(save_path, "test_default_proj_Br_time_space_dataobj.png"),
is_show_fig=False,
**dict_3D,
)
if __name__ == "__main__":
data = import_data_func()
test_plot_class = Test_plots()
test_plot_class.test_default_proj_Br_time_space(data)
test_plot_class.test_default_proj_Br_cfft2(data)
test_plot_class.test_default_proj_surf(data)
test_plot_class.test_default_proj_fft2(data)
test_plot_class.test_default_proj_time_space(data)
| 31.835789 | 88 | 0.570427 | [
"Apache-2.0"
] | EOMYS-Public/pyleecan | Tests/Plot/test_plots.py | 15,128 | Python |
'''A module for loading settings'''
import logging.config
import sys
from logging import getLogger
from pathlib import Path
import yaml
from hazelsync.metrics import get_metrics_engine
DEFAULT_SETTINGS = '/etc/hazelsync.yaml'
CLUSTER_DIRECTORY = '/etc/hazelsync.d'
DEFAULT_LOGGING = {
'version': 1,
'formatters': {
'syslog': {'format': '%(name)s[%(process)d]: %(levelname)s: %(message)s'},
'default': {'format': '%(asctime)s - %(name)s: %(levelname)s %(message)s'},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'stream': sys.stderr,
'formatter': 'default',
},
'syslog': {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
'facility': 'local0',
'formatter': 'syslog',
},
},
'loggers': {
'hazelsync': {
'handlers': ['console', 'syslog'],
'level': 'DEBUG',
'propagate': True,
},
},
}
log = getLogger('hazelsync')
class SettingError(AttributeError):
'''Raise an exception if there is a configuration error'''
def __init__(self, job, message):
log.error("Configuration error (in %s): %s", job, message)
super().__init__(message)
class GlobalSettings:
'''A class to manage the global settings of hazelsync'''
def __init__(self, path=DEFAULT_SETTINGS):
path = Path(path)
text = path.read_text(encoding='utf-8')
data = yaml.safe_load(text)
self.default_backend = data.get('default_backend', 'localfs')
self.job_options = data.get('job_options')
self.backend_options = data.get('backend_options')
self.logging = data.get('logging', DEFAULT_LOGGING)
metrics_config = data.get('metrics', {})
self.metrics = get_metrics_engine(metrics_config)
def logger(self):
'''Setup logging and return the logger'''
logging.config.dictConfig(self.logging)
mylogger = getLogger('hazelsync')
mylogger.debug('Logger initialized')
return mylogger
def job(self, job_type: str) -> dict:
'''Return defaults for a job type'''
return self.job_options.get(job_type, {})
def backend(self, backend_type: str) -> dict:
'''Return defaults for a backend type'''
return self.backend_options.get(backend_type, {})
class ClusterSettings:
'''A class to manage the settings of a cluster'''
directory = Path(CLUSTER_DIRECTORY)
def __init__(self, name, global_path=DEFAULT_SETTINGS):
self.name = name
self.globals = GlobalSettings(global_path)
path = ClusterSettings.directory / f"{name}.yaml"
text = path.read_text(encoding='utf-8')
data = yaml.safe_load(text)
self.job_type = data.get('job')
self.job_options = data.get('options', {})
self.backend_type = data.get('backend') or self.globals.default_backend
self.backend_options = data.get('backend_options', {})
@staticmethod
def list() -> dict:
        '''List the backup clusters found in the settings'''
settings = {}
for path in ClusterSettings.directory.glob('*.yaml'):
cluster = path.stem
settings[cluster] = {'path': path}
            try:
                # Parse the cluster file so broken configurations are surfaced here
                settings[cluster]['config'] = yaml.safe_load(path.read_text(encoding='utf-8'))
                settings[cluster]['config_status'] = 'success'
            except (OSError, yaml.YAMLError, KeyError) as err:
                log.error(err)
                settings[cluster]['config'] = {}
                settings[cluster]['config_status'] = 'failure'
return settings
def job(self):
'''Return the job options (merged with defaults)'''
defaults = self.globals.job(self.job_type)
options = self.job_options
return self.job_type, {**defaults, **options}
def backend(self):
'''Return the backend option (merged with defaults)'''
defaults = self.globals.backend(self.backend_type)
options = self.backend_options
return self.backend_type, {**defaults, **options}
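# Illustrative configuration sketch (paths and values are examples only; actual
# deployments will differ). A global /etc/hazelsync.yaml such as
#
#   default_backend: localfs
#   job_options:
#     rsync: {user: backup}
#   backend_options:
#     localfs: {basedir: /var/backups}
#
# combined with a cluster file /etc/hazelsync.d/mycluster.yaml such as
#
#   job: rsync
#   options: {hosts: [db01, db02]}
#   backend_options: {basedir: /srv/backups}
#
# makes ClusterSettings('mycluster').job() return
# ('rsync', {'user': 'backup', 'hosts': ['db01', 'db02']}), and backend() merges
# the global localfs defaults with the per-cluster override in the same way.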
| 33.241935 | 83 | 0.598496 | [
"Apache-2.0"
] | Japannext/hazelsync | hazelsync/settings.py | 4,122 | Python |
from src.models.user.contribs import (
ContributionDay,
FullContributionDay,
FullUserContributions,
Language,
RepoContributionStats,
UserContributions,
)
from src.models.user.follows import User, UserFollows
from src.models.user.main import FullUserPackage, UserPackage
from src.models.wrapped.bar import BarData, BarDatum
from src.models.wrapped.calendar import CalendarDayData, CalendarLanguageDayData
from src.models.wrapped.main import WrappedPackage
from src.models.wrapped.numeric import ContribStats, LOCStats, MiscStats, NumericData
from src.models.wrapped.pie import PieData, PieDatum
from src.models.wrapped.swarm import SwarmData, SwarmDatum
__all__ = [
# User
"UserPackage",
"FullUserPackage",
"UserContributions",
"FullUserContributions",
"FullContributionDay",
"ContributionDay",
"RepoContributionStats",
"Language",
"User",
"UserFollows",
# Wrapped
"WrappedPackage",
"BarData",
"BarDatum",
"CalendarDayData",
"CalendarLanguageDayData",
"NumericData",
"ContribStats",
"LOCStats",
"MiscStats",
"PieData",
"PieDatum",
"SwarmData",
"SwarmDatum",
]
| 26.311111 | 85 | 0.724662 | [
"MIT"
] | avgupta456/github-trends | backend/src/models/__init__.py | 1,184 | Python |
# Generated by Django 3.0.5 on 2020-04-23 19:10
import datetime
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailimages', '0001_squashed_0021'),
('wagtailcore', '0045_assign_unlock_grouppagepermission'),
]
operations = [
migrations.CreateModel(
name='PostIndex',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('featured_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='Post',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content', wagtail.core.fields.RichTextField()),
('content_en', wagtail.core.fields.RichTextField(null=True)),
('content_pl', wagtail.core.fields.RichTextField(null=True)),
('date', models.DateTimeField(default=datetime.datetime.now, verbose_name='Post date')),
('featured_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
| 40.934783 | 191 | 0.613914 | [
"MIT"
] | sfikrakow/www | blog/migrations/0001_initial.py | 1,883 | Python |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
import re
from django.conf import settings
from django.contrib.admin.utils import flatten
from django.utils.translation import ugettext_lazy as _
from pipeline.core.data.var import LazyVariable
from pipeline_plugins.cmdb_ip_picker.utils import get_ip_picker_result
from pipeline_plugins.base.utils.inject import supplier_account_for_project
from pipeline_plugins.base.utils.adapter import cc_get_inner_ip_by_module_id
from pipeline_plugins.components.utils import cc_get_ips_info_by_str
from pipeline_plugins.components.utils.common import ip_re
from gcloud.core.models import Project
from gcloud.utils.cmdb import get_business_host
from gcloud.utils.ip import get_ip_by_regex
logger = logging.getLogger("root")
class VarIpPickerVariable(LazyVariable):
code = "ip"
name = _("IP选择器(即将下线,请用新版)")
type = "general"
tag = "var_ip_picker.ip_picker"
form = "%svariables/cmdb/var_ip_picker.js" % settings.STATIC_URL
def get_value(self):
var_ip_picker = self.value
username = self.pipeline_data["executor"]
project_id = self.pipeline_data["project_id"]
project = Project.objects.get(id=project_id)
bk_biz_id = project.bk_biz_id if project.from_cmdb else ""
bk_supplier_account = supplier_account_for_project(project_id)
produce_method = var_ip_picker["var_ip_method"]
if produce_method == "custom":
custom_value = var_ip_picker["var_ip_custom_value"]
data = cc_get_ips_info_by_str(username, bk_biz_id, custom_value)
ip_list = data["ip_result"]
data = ",".join([ip["InnerIP"] for ip in ip_list])
else:
ip_pattern = re.compile(ip_re)
module_id_list = var_ip_picker["var_ip_tree"]
module_inst_id_list = []
tree_ip_list = []
for custom_id in module_id_list:
try:
ip_or_module_id = custom_id.split("_")[-1]
if ip_pattern.match(ip_or_module_id):
# select certain ip
tree_ip_list.append(ip_or_module_id)
else:
# select whole module
module_inst_id_list.append(int(ip_or_module_id))
except Exception:
logger.warning("ip_picker module ip transit failed: {origin}".format(origin=custom_id))
# query cc to get module's ip list and filter tree_ip_list
host_list = cc_get_inner_ip_by_module_id(
username, bk_biz_id, module_inst_id_list, bk_supplier_account, ["host_id", "bk_host_innerip"]
)
cc_ip_list = cc_get_ips_info_by_str(username, bk_biz_id, ",".join(tree_ip_list))["ip_result"]
select_ip = set()
for host_info in host_list:
select_ip.add(host_info["host"].get("bk_host_innerip", ""))
for ip_info in cc_ip_list:
select_ip.add(ip_info["InnerIP"])
data = ",".join(list(set(select_ip)))
return data
class VarCmdbIpSelector(LazyVariable):
code = "ip_selector"
name = _("IP选择器")
type = "general"
tag = "var_cmdb_ip_selector.ip_selector"
form = "%svariables/cmdb/var_cmdb_ip_selector.js" % settings.STATIC_URL
def get_value(self):
username = self.pipeline_data["executor"]
project_id = self.pipeline_data["project_id"]
project = Project.objects.get(id=project_id)
bk_biz_id = project.bk_biz_id if project.from_cmdb else ""
bk_supplier_account = supplier_account_for_project(project_id)
ip_selector = self.value
ip_result = get_ip_picker_result(username, bk_biz_id, bk_supplier_account, ip_selector)
        # use .get() so older values without the with_cloud_id flag stay compatible
if self.value.get("with_cloud_id", False):
ip = ",".join(["{}:{}".format(host["bk_cloud_id"], host["bk_host_innerip"]) for host in ip_result["data"]])
else:
ip = ",".join([host["bk_host_innerip"] for host in ip_result["data"]])
return ip
class SetDetailData(object):
def __init__(self, data, separator=","):
self._value = data
self.set_count = len(self._value)
item_values = {}
modules = []
total_ip_set = set()
        # elements of verbose_ip_list and verbose_ip_module_list correspond one-to-one
verbose_ip_list = []
verbose_ip_module_list = []
for item in data:
set_name = item["bk_set_name"]
for key, val in item.items():
if key == "__module":
module_ips = flatten([mod["value"] for mod in val])
total_ip_set.update(module_ips)
verbose_ip_list += module_ips
verbose_ip_module_list += flatten(
[["{}>{}".format(set_name, mod["key"])] * len(mod["value"]) for mod in val]
)
item_module = {mod["key"]: separator.join(mod["value"]) for mod in val}
modules.append(item_module)
else:
item_values.setdefault(key, []).append(val)
for attr, attr_val in item_values.items():
setattr(self, attr, attr_val)
flat_val = separator.join(map(str, attr_val))
setattr(self, "flat__{}".format(attr), flat_val)
setattr(self, "_module", modules)
setattr(self, "flat__ip_list", separator.join(list(total_ip_set)))
setattr(self, "flat__verbose_ip_list", separator.join(verbose_ip_list))
setattr(self, "flat__verbose_ip_module_list", separator.join(verbose_ip_module_list))
self._pipeline_var_str_value = "Allocate {} sets with names: {}".format(
self.set_count, separator.join(item_values["bk_set_name"])
)
def __repr__(self):
return self._pipeline_var_str_value
class VarCmdbSetAllocation(LazyVariable):
code = "set_allocation"
name = _("集群资源筛选")
type = "general"
tag = "var_cmdb_resource_allocation.set_allocation"
form = "%svariables/cmdb/var_cmdb_resource_allocation.js" % settings.STATIC_URL
def get_value(self):
"""
        @summary: returns a SetDetailData object
        @note: reference one attribute of one allocated set, e.g. ${value.bk_set_name[0]} -> "集群1"
        @note: reference one attribute of all allocated sets, joined with the configured separator (`\n` in this example), e.g. ${value.flat__bk_set_name} -> "集群1\n集群2"
        @note: reference the IPs allocated to a module of a set, e.g. ${value._module[0]["gamesvr"]} -> "127.0.0.1,127.0.0.2"
@return:
"""
separator = self.value.get("separator", ",")
return SetDetailData(self.value["data"], separator)
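# Illustrative sketch of the value shape consumed above (set names and IPs are
# example data only):
#
#   value = {
#       "separator": ",",
#       "data": [
#           {"bk_set_name": "set1",
#            "__module": [{"key": "gamesvr", "value": ["127.0.0.1"]}]},
#       ],
#   }
#
# SetDetailData(value["data"]) then exposes, for instance:
#   detail.bk_set_name            -> ["set1"]
#   detail.flat__bk_set_name      -> "set1"
#   detail._module[0]["gamesvr"]  -> "127.0.0.1"
#   detail.flat__ip_list          -> "127.0.0.1"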
class VarCmdbAttributeQuery(LazyVariable):
code = "attribute_query"
name = _("主机属性查询器")
type = "general"
tag = "var_cmdb_attr_query.attr_query"
form = "%svariables/cmdb/var_cmdb_attribute_query.js" % settings.STATIC_URL
def get_value(self):
"""
        @summary: returns a dict keyed by each input IP that can be found in CMDB, with the host attributes queried from CMDB wrapped in a dict as the value
        @note: reference all attributes of 127.0.0.1, e.g. ${value["127.0.0.1"]} -> {"bk_host_id": 999, "import_from": 3, ...}
        @note: reference the bk_host_id attribute of 127.0.0.1, e.g. ${value["127.0.0.1"]["bk_host_id"]} -> 999
@return:
"""
username = self.pipeline_data["executor"]
project_id = self.pipeline_data["project_id"]
project = Project.objects.get(id=project_id)
bk_biz_id = project.bk_biz_id if project.from_cmdb else ""
bk_supplier_account = supplier_account_for_project(project_id)
ip_list = get_ip_by_regex(self.value)
if not ip_list:
return {}
hosts_list = get_business_host(
username,
bk_biz_id,
bk_supplier_account,
[
"bk_cpu",
"bk_isp_name",
"bk_os_name",
"bk_province_name",
"bk_host_id",
"import_from",
"bk_os_version",
"bk_disk",
"operator",
"bk_mem",
"bk_host_name",
"bk_host_innerip",
"bk_comment",
"bk_os_bit",
"bk_outer_mac",
"bk_asset_id",
"bk_service_term",
"bk_sla",
"bk_cpu_mhz",
"bk_host_outerip",
"bk_state_name",
"bk_os_type",
"bk_mac",
"bk_bak_operator",
"bk_supplier_account",
"bk_sn",
"bk_cpu_module",
],
ip_list,
)
hosts = {}
for host in hosts_list:
ip = host["bk_host_innerip"]
            # bk_cloud_id is returned as a dict and is not needed here, so drop it
if "bk_cloud_id" in host:
host.pop("bk_cloud_id")
hosts[ip] = host
return hosts
| 39.217213 | 119 | 0.612917 | [
"Apache-2.0"
] | springborland/bk-sops | pipeline_plugins/variables/collections/sites/open/cc.py | 9,905 | Python |
# Import packages
import os
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import mnist
from sklearn.preprocessing import LabelBinarizer
# Import model builder from model_build.py
from model_build import DRbuild
# Load MNIST dataset
((trainData, trainLabels), (testData, testLabels)) = mnist.load_data()
# Add grayscale channel dimension
trainData = trainData.reshape((trainData.shape[0], 28, 28, 1))
testData = testData.reshape((testData.shape[0], 28, 28, 1))
# Scale data to [0, 1] range
trainData = trainData.astype("float32") / 255.0
testData = testData.astype("float32") / 255.0
# Encode labels as one-hot vectors
le = LabelBinarizer()
trainLabels = le.fit_transform(trainLabels)
testLabels = le.transform(testLabels)
# starting learning rate
LR = 1e-3
# Epochs to train
EPOCHS = 10
# Batch size
BS = 128
# Compile model
opt = Adam(lr=LR)
model = DRbuild(width=28, height=28, depth=1, classes=10)
model.compile(loss="categorical_crossentropy", optimizer=opt,
metrics=["accuracy"])
# Train model
H = model.fit(
trainData, trainLabels,
validation_data=(testData, testLabels),
batch_size=BS,
epochs=EPOCHS,
verbose=1)
# Generate predictions on the test set
predictions = model.predict(testData)
# Serialize model
path = os.getcwd()
path = path[:path.rfind('\\') + 1]
path = path + r'models\digit_classifier.h5'
model.save(path, save_format="h5")
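# --- Optional sanity check (added for illustration) ---
# Reload the serialized classifier and confirm it still scores the test set;
# assumes the save above succeeded and the HDF5 file is readable.
from tensorflow.keras.models import load_model
reloaded = load_model(path)
_, test_acc = reloaded.evaluate(testData, testLabels, verbose=0)
print("Reloaded model test accuracy: {:.4f}".format(test_acc))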
| 25.272727 | 70 | 0.735252 | [
"MIT"
] | eightlay/SudokuSolver | src/model_train.py | 1,390 | Python |
"""Account, user fixtures."""
import json
import logging
from time import monotonic, sleep
from typing import List, NamedTuple, Optional, Tuple
import pytest
from box import Box
from dynaconf import settings
from ambra_sdk.exceptions.service import DuplicateName, NotEmpty
from ambra_sdk.models import Group
from ambra_sdk.service.filtering import Filter, FilterCondition
from ambra_sdk.service.query import QueryO, QueryOPF
logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
def storage_cluster(api, request):
"""Specific storage cluster.
:param api: api
:param request: pytest request
:raises RuntimeError: Unknown cluster name
:return: cluster box
"""
cluster_name = request.param
cluster = None
if cluster_name != 'DEFAULT':
cluster = QueryOPF(
api=api,
url='/cluster/list',
request_data={},
errors_mapping={},
paginated_field='clusters',
required_sid=True,
).filter_by(Filter(
'name',
FilterCondition.equals,
cluster_name,
)).first()
if cluster is None:
raise RuntimeError(
'Unknown cluster name {name}'.format(name=cluster_name),
)
return cluster
class UserParams(NamedTuple):
"""User params."""
account: Box
user: Box
class GroupParams(NamedTuple):
"""Group params."""
uuid: str
namespace_id: str
name: str
def create_account(api, account_name: str) -> Tuple[Box, Box]:
"""Create new account.
:param api: api
:param account_name: account name
:raises RuntimeError: Cant find account
:return: user params
"""
# If account exists - raise DuplicateName error
QueryO(
api=api,
url='/account/add',
request_data={
'name': account_name,
},
errors_mapping={
'DUPLICATE_NAME': DuplicateName(),
},
required_sid=True,
).get()
account = api \
.Account \
.list() \
.filter_by(
Filter(
'name',
FilterCondition.equals,
account_name,
),
).first()
if account is None:
raise RuntimeError('Cant find test account')
# set role permissions
admin_role = api \
.Role \
.list(account_id=account.uuid) \
.filter_by(
Filter(
'name',
FilterCondition.equals,
'Administrator',
),
).first()
if admin_role is None:
raise RuntimeError('Cant find admin role')
api.Role.set(
uuid=admin_role.uuid,
permissions=json.dumps(
{
'study_delete': 1,
'study_duplicate': 1,
'study_split': 1,
'study_merge': 1,
'study_delete_image': 1,
},
),
).get()
user = api.User.get(account_id=account.uuid).get()
logger.info('Created account %s', account.name)
return (account, user)
def account_studies(api, account) -> List[Box]:
"""List of account studies.
:param api: api
:param account: account
:return: list of studies
"""
account_namespaces = [account.namespace_id]
group_namespaces = [
group.namespace_id for group in
api.Group.list(account_id=account.uuid).only(Group.namespace_id).all()
]
account_namespaces.extend(group_namespaces)
# Method study list does not support in_condition filtering for namespace !
acc_studies = []
for account_namespace in account_namespaces:
studies = api \
.Study \
.list() \
.filter_by(
Filter(
field_name='phi_namespace',
condition=FilterCondition.equals,
value=account_namespace,
),
).all()
acc_studies.extend(list(studies))
return acc_studies
def delete_account(api, account) -> Box:
"""Delete account.
:param api: api
:param account: account
:raises RuntimeError: if account have undeleted studies
"""
try:
QueryO(
api=api,
url='/account/delete/',
request_data={
'uuid': account.uuid,
},
errors_mapping={
'NOT_EMPTY': NotEmpty(),
},
required_sid=True,
).get()
except NotEmpty:
acc_studies = account_studies(api, account)
raise RuntimeError(
'Account have undeleted studies:\n{studies}'.format(
studies='\n'.join(
[
str((study.uuid, study.study_uid))
for study in acc_studies
],
),
),
)
def clear_studies(api, account):
"""Delete account studies.
:param api: api
:param account: account
"""
account_namespaces = [account.namespace_id]
group_namespaces = [
group.namespace_id for group in
api.Group.list(account_id=account.uuid).only(Group.namespace_id).all()
]
account_namespaces.extend(group_namespaces)
# Method study list does not support in_condition filtering for namespace !
# So delete studies in loop
for account_namespace in account_namespaces:
studies = api \
.Study \
.list() \
.filter_by(
Filter(
field_name='phi_namespace',
condition=FilterCondition.equals,
value=account_namespace,
),
).all()
for study in studies:
study_uid = study.uuid
logger.error('Remove undeleted study %s', study_uid)
api.Study.delete(uuid=study_uid).get()
@pytest.fixture(scope='module') # NOQA:WPS210,WPS231
def account(api, storage_cluster):
"""Get account.
:param api: ambra api
:param storage_cluster: storage cluster
:yields: test account
:raises RuntimeError: On deleted account with existing studies
:raises TimeoutError: Time for waiting account deletion is out
"""
account_name = settings.TEST_ACCOUNT_NAME
if storage_cluster:
account_name = '{account}_{cluster}'.format(
account=account_name,
cluster=storage_cluster.name,
)
try:
account, user = create_account(api, account_name)
except DuplicateName:
logger.error('Duplicated account: %s', account_name)
account = api \
.Account \
.list() \
.filter_by(
Filter(
'name',
FilterCondition.equals,
account_name,
),
).first()
if account is None:
raise RuntimeError('Account duplicated but not exists')
clear_studies(api, account)
delete_account(api, account)
account, user = create_account(api, account_name)
if storage_cluster is not None:
QueryO(
api=api,
url='/cluster/account/bind',
request_data={
'account_id': account.uuid,
'cluster_id': storage_cluster.uuid,
},
errors_mapping={},
required_sid=True,
).get()
logger.info(
'Bind account to storage cluster {name}'.format(
name=storage_cluster.name,
),
)
yield UserParams(
account=account,
user=user,
)
delete_account(api, account)
start = monotonic()
while True:
if monotonic() - start >= settings.API['account_deletion_timeout']:
raise TimeoutError('Account still exists')
account = api \
.Account \
.list() \
.filter_by(
Filter(
'name',
FilterCondition.equals,
account_name,
),
).first()
if account is None:
return
sleep(settings.API['account_deletion_check_interval'])
@pytest.fixture
def create_group(api, account):
"""Create new group in account.
:param api: api fixture
:param account: account fixture
:yields: create_group function
"""
groups = []
group_counter = 0
def _create_group(name: Optional[str] = None):
nonlocal group_counter
group_counter += 1
if name is None:
name = 'SDK_TEST_GROUP_{gnum}'.format(gnum=group_counter)
account_id = account.account.uuid
response = api.Group.add(
account_id=account_id,
name=name,
).get()
group = GroupParams(
uuid=response.uuid,
namespace_id=response.namespace_id,
name=name,
)
groups.append(group)
# add account user to the group
api.Group.user_add(
uuid=group.uuid,
user_id=account.user.uuid,
).get()
return group
yield _create_group
for group in groups:
api.Group.delete(uuid=group.uuid).get()
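# --- Usage sketch (added for illustration; not an actual test in this module) ---
# A test that depends on the fixtures above receives them by argument name, e.g.:
# def test_group_has_namespace(create_group):
#     group = create_group()          # auto-named SDK_TEST_GROUP_1
#     assert group.uuid
#     assert group.namespace_id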
| 26.704871 | 79 | 0.554828 | [
"Apache-2.0"
] | agolovkina/sdk-python | tests/fixtures/account.py | 9,320 | Python |
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
# Histogram back projection
def bitwise_and():
small = cv.imread("C:/1/image/small.jpg")
big = cv.imread("C:/1/image/big.jpg")
small_hsv = cv.cvtColor(small, cv.COLOR_BGR2HSV)
big_hsv = cv.cvtColor(big, cv.COLOR_BGR2HSV)
"""
h,s,v = cv.split(small_hsv)
print(h)
print(s)
print(v)
"""
lower_hsv = np.array([1, 120, 240])
upper_hsv = np.array([4, 160, 255])
mask = cv.inRange(big_hsv, lower_hsv, upper_hsv)
dest = cv.bitwise_and(big_hsv, big_hsv, mask=mask)
cv.imshow('mask', dest)
cv.imshow('video', big)
def back_projection_demo():
sample = cv.imread("C:/1/image/small.jpg")
target = cv.imread("C:/1/image/big.jpg")
roi_hsv = cv.cvtColor(sample,cv.COLOR_BGR2HSV)
target_hsv = cv.cvtColor(target,cv.COLOR_BGR2HSV)
#show images
cv.imshow("sample",sample)
cv.imshow("target",target)
    roiHist = cv.calcHist([roi_hsv],[0,1],None,[32,32],[0,180,0,256]) # compute the sample (ROI) histogram
    cv.normalize(roiHist,roiHist,0,256,cv.NORM_MINMAX) # normalize the histogram
    dest = cv.calcBackProject([target_hsv],[0,1],roiHist,[0,180,0,256],1) # back-project the histogram onto the target
cv.imshow("back_projection_demo", dest)
def hist2d_demo(image):
hsv = cv.cvtColor(image,cv.COLOR_BGR2HSV)
hist = cv.calcHist([image],[0,1],None,[32,32],[0,180,0,256])
# cv.imshow("hist2d_demo",hist)
plt.imshow(hist,interpolation='nearest')
plt.title("2D Histogram")
plt.show()
src = cv.imread("C:/1/1.jpg")
# cv.namedWindow('input_image', cv.WINDOW_AUTOSIZE)
# cv.imshow("input_image",src)
bitwise_and()
cv.waitKey(0)
cv.destroyAllWindows()
| 27.508197 | 89 | 0.644815 | [
"Apache-2.0"
] | Sanduoo/OpenCV-Python | 11_ Histogram3.py | 1,732 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V2beta1CrossVersionObjectReference(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'name': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'name': 'name'
}
def __init__(self, api_version=None, kind=None, name=None):
"""
V2beta1CrossVersionObjectReference - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._name = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.kind = kind
self.name = name
@property
def api_version(self):
"""
Gets the api_version of this V2beta1CrossVersionObjectReference.
API version of the referent
:return: The api_version of this V2beta1CrossVersionObjectReference.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V2beta1CrossVersionObjectReference.
API version of the referent
:param api_version: The api_version of this V2beta1CrossVersionObjectReference.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V2beta1CrossVersionObjectReference.
Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"
:return: The kind of this V2beta1CrossVersionObjectReference.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V2beta1CrossVersionObjectReference.
Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"
:param kind: The kind of this V2beta1CrossVersionObjectReference.
:type: str
"""
if kind is None:
raise ValueError("Invalid value for `kind`, must not be `None`")
self._kind = kind
@property
def name(self):
"""
Gets the name of this V2beta1CrossVersionObjectReference.
Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
:return: The name of this V2beta1CrossVersionObjectReference.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V2beta1CrossVersionObjectReference.
Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
:param name: The name of this V2beta1CrossVersionObjectReference.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V2beta1CrossVersionObjectReference):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 27.897297 | 121 | 0.583608 | [
"Apache-2.0"
] | StephenPCG/python | kubernetes/client/models/v2beta1_cross_version_object_reference.py | 5,161 | Python |
from flask import Flask, redirect, render_template, request
import os
import sys
from flasgger import Swagger
from server import app
from server.routes.prometheus import track_requests
from os import path
from userapp import improcess
import base64
from werkzeug.utils import secure_filename
app=Flask(__name__)
global images
images = []
# The python-flask stack includes the flask extension flasgger, which will build
# and publish your swagger ui and specification at the /apidocs url. Here we set up
# the basic swagger attributes, which you should modify to match you application.
# See: https://github.com/rochacbruno-archive/flasgger
swagger_template = {
"swagger": "2.0",
"info": {
"title": "Example API for python-flask stack",
"description": "API for helloworld, plus health/monitoring",
"contact": {
"responsibleOrganization": "IBM",
"responsibleDeveloper": "Henry Nash",
"email": "[email protected]",
"url": "https://appsody.dev",
},
"version": "0.2"
},
"schemes": [
"http"
],
}
swagger = Swagger(app, template=swagger_template)
# The python-flask stack includes the prometheus metrics engine. You can ensure your endpoints
# are included in these metrics by enclosing them in the @track_requests wrapper.
@app.route('/hello')
@track_requests
def HelloWorld():
# To include an endpoint in the swagger ui and specification, we include a docstring that
# defines the attributes of this endpoint.
"""A hello message
Example endpoint returning a hello message
---
responses:
200:
description: A successful reply
examples:
text/plain: Hello from Appsody!
"""
return 'Hello from Appsody!'
@app.route("/home")
def home():
return "Your image preprocessor application test is successful"
@app.route('/process', methods = ['POST'])
@track_requests
def processing_image():
if request.method == 'POST':
f = request.files['file']
        # sanitize the uploaded filename before saving
        filename = secure_filename(f.filename)
# save file
filepath = os.path.join("./userapp/",filename)
f.save(filepath)
# convert image to grayscale
filepath_processed = improcess.preprocessing(filepath);
#return render_template("display.html")
result = {}
j = 0
for i in filepath_processed:
images.append(i)
result[j]=i
j=j+1
print(images)
return result
@app.route('/getimages')
@track_requests
def get_image():
k={}
j = 0
for i in images:
path_image = "./userapp" + i
print(path_image)
f = open(path_image, "rb")
stri = base64.b64encode(f.read())
result = stri.decode('ASCII')
k[i]=result
j = j+1
images.clear()
return k
# It is considered bad form to return an error for '/', so let's redirect to the apidocs
@app.route('/')
def index():
return redirect('/apidocs')
# If you have additional modules that contain your API endpoints, for instance
# using Blueprints, then ensure that you use relative imports, e.g.:
# from .mymodule import myblueprint
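# --- Client usage sketch (added for illustration) ---
# The endpoint paths match the routes above; the host/port and sample file name
# are assumptions, not part of this module.
# import requests
# with open('sample.png', 'rb') as fh:
#     resp = requests.post('http://localhost:8080/process', files={'file': fh})
# print(resp.json())                                              # index -> processed image path
# print(requests.get('http://localhost:8080/getimages').json())   # path -> base64-encoded image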
| 27.627273 | 94 | 0.678842 | [
"Apache-2.0"
] | Bhaskers-Blu-Org1/process-images-derive-insights | sources/image_preprocessor/__init__.py | 3,039 | Python |
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint
import os
class TensorBoardFix(tf.keras.callbacks.TensorBoard):
"""
This fixes incorrect step values when using the TensorBoard callback with custom summary ops
https://stackoverflow.com/questions/64642944/steps-of-tf-summary-operations-in-tensorboard-are-always-0
"""
def on_train_begin(self, *args, **kwargs):
super(TensorBoardFix, self).on_train_begin(*args, **kwargs)
tf.summary.experimental.set_step(self._train_step)
def on_test_begin(self, *args, **kwargs):
super(TensorBoardFix, self).on_test_begin(*args, **kwargs)
tf.summary.experimental.set_step(self._val_step)
def get_callbacks(model_name='model',root_dir='logs/fit/',
monitor='val_categorical_accuracy',mode='max',
save_freq='epoch',save_best_only=True,
):
log_dir = os.path.join(root_dir,model_name)
tensorboard = TensorBoardFix(log_dir=log_dir,
histogram_freq=1,
update_freq=50,
)
save_model = ModelCheckpoint(filepath=os.path.join(log_dir,'model.h5'),
save_weights_only=False,
monitor=monitor,
mode=mode,
save_best_only=save_best_only,
save_freq=save_freq)
    return [tensorboard,save_model]
 | 38.175 | 107 | 0.603143 | [
"MIT"
] | aguirrejuan/ConvRFF | convRFF/utils/utils.py | 1,527 | Python |
# MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import numpy as np
from art.attacks import VirtualAdversarialMethod
from art.classifiers import KerasClassifier
from art.classifiers.classifier import ClassifierNeuralNetwork, ClassifierGradients
from art.utils import get_labels_np_array
from tests.utils import TestBase
from tests.utils import get_image_classifier_tf, get_image_classifier_kr, get_image_classifier_pt
from tests.utils import get_tabular_classifier_tf, get_tabular_classifier_kr, get_tabular_classifier_pt
from tests.attacks.utils import backend_test_classifier_type_check_fail
logger = logging.getLogger(__name__)
class TestVirtualAdversarial(TestBase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.n_train = 100
cls.n_test = 10
cls.x_train_mnist = cls.x_train_mnist[0 : cls.n_train]
cls.y_train_mnist = cls.y_train_mnist[0 : cls.n_train]
cls.x_test_mnist = cls.x_test_mnist[0 : cls.n_test]
cls.y_test_mnist = cls.y_test_mnist[0 : cls.n_test]
def test_keras_mnist(self):
classifier = get_image_classifier_kr()
scores = classifier._model.evaluate(self.x_train_mnist, self.y_train_mnist)
logging.info("[Keras, MNIST] Accuracy on training set: %.2f%%", (scores[1] * 100))
scores = classifier._model.evaluate(self.x_test_mnist, self.y_test_mnist)
logging.info("[Keras, MNIST] Accuracy on test set: %.2f%%", (scores[1] * 100))
self._test_backend_mnist(classifier, self.x_test_mnist, self.y_test_mnist)
def test_tensorflow_mnist(self):
classifier, sess = get_image_classifier_tf(from_logits=False)
scores = get_labels_np_array(classifier.predict(self.x_train_mnist))
acc = np.sum(np.argmax(scores, axis=1) == np.argmax(self.y_train_mnist, axis=1)) / self.y_train_mnist.shape[0]
logger.info("[TF, MNIST] Accuracy on training set: %.2f%%", (acc * 100))
scores = get_labels_np_array(classifier.predict(self.x_test_mnist))
acc = np.sum(np.argmax(scores, axis=1) == np.argmax(self.y_test_mnist, axis=1)) / self.y_test_mnist.shape[0]
logger.info("[TF, MNIST] Accuracy on test set: %.2f%%", (acc * 100))
self._test_backend_mnist(classifier, self.x_test_mnist, self.y_test_mnist)
def test_pytorch_mnist(self):
x_train_mnist = np.swapaxes(self.x_train_mnist, 1, 3).astype(np.float32)
x_test_mnist = np.swapaxes(self.x_test_mnist, 1, 3).astype(np.float32)
classifier = get_image_classifier_pt()
scores = get_labels_np_array(classifier.predict(x_train_mnist))
acc = np.sum(np.argmax(scores, axis=1) == np.argmax(self.y_train_mnist, axis=1)) / self.y_train_mnist.shape[0]
logger.info("[PyTorch, MNIST] Accuracy on training set: %.2f%%", (acc * 100))
scores = get_labels_np_array(classifier.predict(x_test_mnist))
acc = np.sum(np.argmax(scores, axis=1) == np.argmax(self.y_test_mnist, axis=1)) / self.y_test_mnist.shape[0]
logger.info("[PyTorch, MNIST] Accuracy on test set: %.2f%%", (acc * 100))
self._test_backend_mnist(classifier, x_test_mnist, self.y_test_mnist)
def _test_backend_mnist(self, classifier, x_test, y_test):
x_test_original = x_test.copy()
df = VirtualAdversarialMethod(classifier, batch_size=100, max_iter=2)
x_test_adv = df.generate(x_test)
self.assertFalse((x_test == x_test_adv).all())
y_pred = get_labels_np_array(classifier.predict(x_test_adv))
self.assertFalse((y_test == y_pred).all())
acc = np.sum(np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info("Accuracy on adversarial examples: %.2f%%", (acc * 100))
# Check that x_test has not been modified by attack and classifier
self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001)
def test_classifier_type_check_fail(self):
backend_test_classifier_type_check_fail(
VirtualAdversarialMethod, [ClassifierNeuralNetwork, ClassifierGradients]
)
def test_keras_iris_clipped(self):
classifier = get_tabular_classifier_kr()
# Test untargeted attack
attack = VirtualAdversarialMethod(classifier, eps=0.1)
x_test_iris_adv = attack.generate(self.x_test_iris)
self.assertFalse((self.x_test_iris == x_test_iris_adv).all())
self.assertTrue((x_test_iris_adv <= 1).all())
self.assertTrue((x_test_iris_adv >= 0).all())
preds_adv = np.argmax(classifier.predict(x_test_iris_adv), axis=1)
self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())
acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
logger.info("Accuracy on Iris with VAT adversarial examples: %.2f%%", (acc * 100))
def test_keras_iris_unbounded(self):
classifier = get_tabular_classifier_kr()
# Recreate a classifier without clip values
classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)
attack = VirtualAdversarialMethod(classifier, eps=1)
x_test_iris_adv = attack.generate(self.x_test_iris)
self.assertFalse((self.x_test_iris == x_test_iris_adv).all())
self.assertTrue((x_test_iris_adv > 1).any())
self.assertTrue((x_test_iris_adv < 0).any())
preds_adv = np.argmax(classifier.predict(x_test_iris_adv), axis=1)
self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())
acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
logger.info("Accuracy on Iris with VAT adversarial examples: %.2f%%", (acc * 100))
# def test_iris_tf(self):
# classifier, _ = get_iris_classifier_tf()
#
# attack = VirtualAdversarialMethod(classifier, eps=.1)
# x_test_adv = attack.generate(x_test)
# #print(np.min(x_test_adv), np.max(x_test_adv), np.min(x_test), np.max(x_test))
# self.assertFalse((x_test == x_test_adv).all())
# self.assertTrue((x_test_adv <= 1).all())
# self.assertTrue((x_test_adv >= 0).all())
#
# preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
# self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
# acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
# logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100))
# def test_iris_pt(self):
# (_, _), (x_test, y_test) = self.iris
# classifier = get_iris_classifier_pt()
#
# attack = VirtualAdversarialMethod(classifier, eps=.1)
# x_test_adv = attack.generate(x_test.astype(np.float32))
# #print(np.min(x_test_adv), np.max(x_test_adv), np.min(x_test), np.max(x_test))
# self.assertFalse((x_test == x_test_adv).all())
# self.assertTrue((x_test_adv <= 1).all())
# self.assertTrue((x_test_adv >= 0).all())
#
# preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
# self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
# acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
# logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100))
def test_tensorflow_iris(self):
classifier, _ = get_tabular_classifier_tf()
attack = VirtualAdversarialMethod(classifier, eps=0.1)
with self.assertRaises(TypeError) as context:
x_test_iris_adv = attack.generate(self.x_test_iris)
self.assertIn(
"This attack requires a classifier predicting probabilities in the range [0, 1] as output."
"Values smaller than 0.0 or larger than 1.0 have been detected.",
str(context.exception),
)
def test_pytorch_iris(self):
classifier = get_tabular_classifier_pt()
attack = VirtualAdversarialMethod(classifier, eps=0.1)
with self.assertRaises(TypeError) as context:
x_test_iris_adv = attack.generate(self.x_test_iris.astype(np.float32))
self.assertIn(
"This attack requires a classifier predicting probabilities in the range [0, 1] as output."
"Values smaller than 0.0 or larger than 1.0 have been detected.",
str(context.exception),
)
if __name__ == "__main__":
unittest.main()
| 47.571429 | 120 | 0.693176 | [
"MIT"
] | Agisthemantobeat/adversarial-robustness-toolbox | tests/attacks/test_virtual_adversarial.py | 9,657 | Python |
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from towhee.engine.operator_runner import runner_base
from towhee.engine.operator_runner import map_runner
from towhee.engine.operator_runner import concat_runner
from towhee.engine.operator_runner import flatmap_runner
from towhee.engine.operator_runner import filter_runner
from towhee.engine.operator_runner import window_runner
from towhee.engine.operator_runner import generator_runner
from towhee.engine.operator_io.reader import DataFrameReader
from towhee.engine.operator_io.writer import DataFrameWriter
def create_runner(
runner_type: str,
name: str,
index: int,
op_name: str,
tag: str,
hub_op_id: str,
op_args: Dict[str, any],
reader: DataFrameReader,
writer: DataFrameWriter,
) -> runner_base.RunnerBase:
if runner_type.lower() == 'map':
return map_runner.MapRunner(name, index, op_name, tag, hub_op_id, op_args, reader, writer)
elif runner_type.lower() == 'flatmap':
return flatmap_runner.FlatMapRunner(name, index, op_name, tag, hub_op_id, op_args, reader, writer)
elif runner_type.lower() == 'filter':
return filter_runner.FilterRunner(name, index, op_name, tag, hub_op_id, op_args, reader, writer)
elif runner_type.lower() == 'concat':
return concat_runner.ConcatRunner(name, index, op_name, tag, hub_op_id, op_args, reader, writer)
elif runner_type.lower() in ['window', 'time_window']:
return window_runner.WindowRunner(name, index, op_name, tag, hub_op_id, op_args, reader, writer)
elif runner_type.lower() == 'generator':
return generator_runner.GeneratorRunner(name, index, op_name, tag, hub_op_id, op_args, reader, writer)
else:
raise AttributeError('No runner type named: {}'.format(runner_type))
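# --- Usage sketch (added for illustration) ---
# The reader/writer objects come from towhee.engine.operator_io and are normally
# built by the engine itself; the argument values below are placeholders, not a
# verified call.
# runner = create_runner('map', name='op-0', index=0, op_name='my-op', tag='main',
#                        hub_op_id='local/my-op', op_args={},
#                        reader=reader, writer=writer)
# The returned object is a map_runner.MapRunner driven by the engine's scheduler.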
| 44.528302 | 110 | 0.752966 | [
"Apache-2.0"
] | L-Net-1992/towhee | towhee/engine/operator_runner/__init__.py | 2,360 | Python |
import json
import os
import glob
import cv2
annotations_info = {'images': [], 'annotations': [], 'categories': []}
CLASS_NAME = ('__background__','plane', 'baseball-diamond', 'bridge', 'ground-track-field',
'small-vehicle', 'large-vehicle', 'ship',
'tennis-court', 'basketball-court',
'storage-tank', 'soccer-ball-field',
'roundabout', 'harbor',
'swimming-pool', 'helicopter')
categories_map = {class_name:i for i,class_name in enumerate(CLASS_NAME)}
del categories_map['__background__']
for key in categories_map:
categoriy_info = {"id": categories_map[key], "name": key}
annotations_info['categories'].append(categoriy_info)
image_root = '/Volumes/hy_mobile/03data/DOTA-v1.5/min_split_train'
json_annotation_path = '/Volumes/hy_mobile/03data/DOTA-v1.5/annotations/annotations_split_val.json'
image_file_paths = glob.glob(image_root+'/*.png')
ann_id = 1
for i, image_file_path in enumerate(image_file_paths):
image_name = os.path.basename(image_file_path).split('.')[0]
image_info = dict()
image = cv2.cvtColor(cv2.imread(image_file_path), cv2.COLOR_BGR2RGB)
height, width, _ = image.shape
image_info = {'file_name': image_name+'.png', 'id': i + 1,
'height': height, 'width': width}
annotations_info['images'].append(image_info)
annotation_info = {"id": i+1, "image_id":i+1,
"bbox": [10, 30, 10, 60, 40, 30, 40, 60],
"category_id": 1,
"area": 900,
"iscrowd": 0}
annotations_info['annotations'].append(annotation_info)
ann_id += 1
with open(json_annotation_path, 'w') as f:
json.dump(annotations_info, f, indent=4)
print('--- Summary of the generated annotation file ---')
print('Number of images:', len(annotations_info['images']))
print('Number of annotations:', len(annotations_info['annotations']))
print('Number of categories:', len(annotations_info['categories']))
| 36.203704 | 99 | 0.640921 | [
"MIT"
] | NaCl-Ocean/Anchor_free_detection_rotation | tools/test_label.py | 2,019 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# sylco.py
"""
Combined algorithm by EA :
http://about.me/emre.aydin
http://github.com/eaydin
1) if letters < 3 : return 1
2) if doesn't end with "ted" or "tes" or "ses" or "ied", discard "es" and "ed" at the end.
3) discard trailing "e", except where ending is "le", also handle "le_exceptions"
4) check if consecutive vowels exists, triplets or pairs, count them as one.
5) count remaining vowels in word.
6) add one if starts with "mc"
7) add one if ends with "y" but is not surrounded by a vowel
8) add one if "y" is surrounded by non-vowels and is not the last letter of the word.
9) if starts with "tri-" or "bi-" and is followed by a vowel, add one.
10) if ends with "-ian", should be counted as two syllables, except for "-tian" and "-cian"
11) if starts with "co-" and is followed by a vowel, check if exists in the double syllable dictionary, if not, check if in single dictionary and act accordingly.
12) if starts with "pre-" and is followed by a vowel, check if exists in the double syllable dictionary, if not, check if in single dictionary and act accordingly.
13) check for "-n't" and cross match with dictionary to add syllable.
14) handling the exceptional words
TODO :
# isn't couldn't doesn't shouldn't ... (done)
# when detecting sentences, avoid "a.m. p.m." kind of usage.
# exception : "evacuate" "ambulances" "shuttled" "anyone"
"""
import re
import string
def getsentences(the_text) :
sents = re.findall(r"[A-Z].*?[\.!?]", the_text, re.M | re.DOTALL)
# sents_lo = re.findall(r"[a-z].*?[\.!?]", the_text)
return sents
def getwords(sentence) :
x = re.sub('['+string.punctuation+']', '', sentence).split()
return x
def sylco(word) :
word = word.lower()
# exception_add are words that need extra syllables
# exception_del are words that need less syllables
exception_add = ['serious','crucial']
exception_del = ['fortunately','unfortunately']
co_one = ['cool','coach','coat','coal','count','coin','coarse','coup','coif','cook','coign','coiffe','coof','court']
co_two = ['coapt','coed','coinci']
pre_one = ['preach']
syls = 0 #added syllable number
disc = 0 #discarded syllable number
#1) if letters < 3 : return 1
if len(word) <= 3 :
syls = 1
return syls
#2) if doesn't end with "ted" or "tes" or "ses" or "ied" or "ies", discard "es" and "ed" at the end.
# if it has only 1 vowel or 1 set of consecutive vowels, discard. (like "speed", "fled" etc.)
if word[-2:] == "es" or word[-2:] == "ed" :
doubleAndtripple_1 = len(re.findall(r'[eaoui][eaoui]',word))
if doubleAndtripple_1 > 1 or len(re.findall(r'[eaoui][^eaoui]',word)) > 1 :
if word[-3:] == "ted" or word[-3:] == "tes" or word[-3:] == "ses" or word[-3:] == "ied" or word[-3:] == "ies" :
pass
else :
disc+=1
#3) discard trailing "e", except where ending is "le"
le_except = ['whole','mobile','pole','male','female','hale','pale','tale','sale','aisle','whale','while']
if word[-1:] == "e" :
if word[-2:] == "le" and word not in le_except :
pass
else :
disc+=1
#4) check if consecutive vowels exists, triplets or pairs, count them as one.
doubleAndtripple = len(re.findall(r'[eaoui][eaoui]',word))
tripple = len(re.findall(r'[eaoui][eaoui][eaoui]',word))
disc+=doubleAndtripple + tripple
#5) count remaining vowels in word.
numVowels = len(re.findall(r'[eaoui]',word))
#6) add one if starts with "mc"
if word[:2] == "mc" :
syls+=1
    #7) add one if ends with "y" but is not surrounded by a vowel
if word[-1:] == "y" and word[-2] not in "aeoui" :
syls +=1
    #8) add one if "y" is surrounded by non-vowels and is not the last letter of the word.
for i,j in enumerate(word) :
if j == "y" :
if (i != 0) and (i != len(word)-1) :
if word[i-1] not in "aeoui" and word[i+1] not in "aeoui" :
syls+=1
#9) if starts with "tri-" or "bi-" and is followed by a vowel, add one.
if word[:3] == "tri" and word[3] in "aeoui" :
syls+=1
if word[:2] == "bi" and word[2] in "aeoui" :
syls+=1
#10) if ends with "-ian", should be counted as two syllables, except for "-tian" and "-cian"
if word[-3:] == "ian" :
#and (word[-4:] != "cian" or word[-4:] != "tian") :
if word[-4:] == "cian" or word[-4:] == "tian" :
pass
else :
syls+=1
#11) if starts with "co-" and is followed by a vowel, check if exists in the double syllable dictionary, if not, check if in single dictionary and act accordingly.
if word[:2] == "co" and word[2] in 'eaoui' :
if word[:4] in co_two or word[:5] in co_two or word[:6] in co_two :
syls+=1
elif word[:4] in co_one or word[:5] in co_one or word[:6] in co_one :
pass
else :
syls+=1
#12) if starts with "pre-" and is followed by a vowel, check if exists in the double syllable dictionary, if not, check if in single dictionary and act accordingly.
if word[:3] == "pre" and word[3] in 'eaoui' :
if word[:6] in pre_one :
pass
else :
syls+=1
#13) check for "-n't" and cross match with dictionary to add syllable.
negative = ["doesn't", "isn't", "shouldn't", "couldn't","wouldn't"]
if word[-3:] == "n't" :
if word in negative :
syls+=1
else :
pass
#14) Handling the exceptional words.
if word in exception_del :
disc+=1
if word in exception_add :
syls+=1
# calculate the output
return numVowels - disc + syls
def getFlesch(article) :
sentencelist = getsentences(article)
sentencesN = len(sentencelist)
syllablesN = 0
wordsN = 0
for sentence in sentencelist :
wordslist = getwords(sentence)
wordsN += len(wordslist)
for word in wordslist :
word = word.replace('\n','')
x = sylco(word)
syllablesN += x
"""
print("Sentences : %i" % sentencesN)
print("Words : %i" % wordsN)
print("Syllables : %i" % syllablesN)
"""
asl = wordsN / sentencesN
asw = syllablesN / wordsN
# flesh = (0.39 * (wordsN / sentencesN)) + (11.8 * (syllablesN / wordsN)) - 15.59
flesch = 206.835 - (1.015 * asl) - (84.6 * asw)
# http://office.microsoft.com/en-us/word-help/test-your-document-s-readability-HP010148506.aspx
return flesch
def getsyls(article) :
wordslist = getwords(article)
syllables = 0
for i in wordslist :
x = sylco(i)
syllables += x
return syllables
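# --- Demo (added for illustration; the word list and sentence are arbitrary) ---
if __name__ == "__main__":
    for demo_word in ["cool", "serious", "tennis", "fortunately"]:
        print("{} -> {} syllable(s)".format(demo_word, sylco(demo_word)))
    print("Flesch score: {:.1f}".format(
        getFlesch("This is a simple sentence. It reads very easily!")))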
| 30.597285 | 168 | 0.589175 | [
"Unlicense"
] | balysv/HaikuBotto | sylco.py | 6,762 | Python |
from .client import Client
from .resources import Code
from .constants import ERROR_CODE
from .constants import HTTP_STATUS_CODE
__all__ = [
'Client',
'Code',
'ERROR_CODE',
'HTTP_STATUS_CODE',
]
| 17.666667 | 39 | 0.716981 | [
"Apache-2.0"
] | PaypayBob/paypayopa-sdk-python | paypayopa/__init__.py | 212 | Python |
"""Remesh a 3D mesh.
author : Tom Van Mele, Matthias Rippmann
email : [email protected]
"""
from __future__ import print_function
from compas.datastructures import Mesh
from compas.datastructures import trimesh_remesh
from compas.datastructures import mesh_quads_to_triangles
from compas.geometry import centroid_points
from compas.geometry import smooth_centroid
import compas_rhino
from compas_rhino.helpers import mesh_from_guid
from compas_rhino.helpers import mesh_identify_vertices
from compas_rhino.geometry import RhinoMesh
from compas_rhino.geometry import RhinoCurve
from compas_rhino.conduits import MeshConduit
from compas_rhino.artists import MeshArtist
# set the remeshing parameters
length = 0.25
kmax = 300
# select the original mesh
# select the border
# select the fixed points
guid_target = compas_rhino.select_mesh()
guid_border = compas_rhino.select_polyline()
guid_points = compas_rhino.select_points()
# wrap the Rhino mesh object for convenience
# wrap the Rhino curve object for convenience
# get the point coordinates
target = RhinoMesh(guid_target)
border = RhinoCurve(guid_border)
points = compas_rhino.get_point_coordinates(guid_points)
# make a mesh datastructure from the Rhino mesh
# triangulate the mesh
mesh = mesh_from_guid(Mesh, guid_target)
mesh_quads_to_triangles(mesh)
# identify the fixed vertices
# by matching the coordinates of the selected points
# up to a precision
keys = mesh_identify_vertices(mesh, points, '1f')
fixed = set(keys)
# create a conduit for visualisation
# define a callback
# for updating the conduit
# and for pulling the mesh back to the original during remeshing
# and for keeping the boundary on the boundary curve
conduit = MeshConduit(mesh, refreshrate=2)
def callback(mesh, k, args):
boundary = set(mesh.vertices_on_boundary())
for key, attr in mesh.vertices(data=True):
if key in fixed:
continue
if key in boundary:
x, y, z = border.closest_point(mesh.vertex_coordinates(key))
attr['x'] = x
attr['y'] = y
attr['z'] = z
else:
x, y, z = target.closest_point(mesh.vertex_coordinates(key))
attr['x'] = x
attr['y'] = y
attr['z'] = z
conduit.redraw(k)
# run the remeshing algorithm
# draw the result
with conduit.enabled():
trimesh_remesh(
mesh,
target=length,
kmax=kmax,
tol=0.1,
divergence=0.01,
allow_boundary_split=True,
allow_boundary_swap=True,
allow_boundary_collapse=False,
smooth=True,
fixed=fixed,
callback=callback)
artist = MeshArtist(mesh, layer='remeshed')
artist.draw_faces(join_faces=True)
| 24.283186 | 72 | 0.721574 | [
"MIT"
] | Mahdi-Soheyli/compas | docs/_examples/mesh-remeshing-on-mesh.py | 2,744 | Python |
##################
### original author: Parashar Dhapola
### modified by Rintu Kutum
##################
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import re
import json
import pybedtools as pbt
import collections
from scipy.stats import ttest_ind, sem, mannwhitneyu, gaussian_kde, zscore, wilcoxon, norm, poisson
from scipy import ndimage
from scipy.integrate import simps
import os
import glob
import itertools
import pysam
import tables
import sys
closest_bed_dist_jsons = glob.glob(pathname='./data/Histones/dist_json_formatted/*.json')
bed_closest_data = {}
bed_counts = {}
for i in closest_bed_dist_jsons:
print "\rProcessing\t%s\n" % i,
mark = i.split('/')[-1].split('_')[0]
cell = i.split('/')[-1].split('_', 1)[-1].split('.')[0]
if mark not in bed_counts:
bed_counts[mark] = {}
bed_counts[mark][cell] = pbt.BedTool('./data/Histones/bed_formatted/%s_%s.bed' % (mark, cell)).count()
if mark not in bed_closest_data:
bed_closest_data[mark] = {}
bed_closest_data[mark][cell] = json.load(open(i))
active_marks = ['H3k4me1', 'H3k4me2', 'H3k4me3', 'H3k9ac', 'H3k27ac', 'H4k20me1']
repress_marks = ['H3k9me1', 'H3k9me3', 'H3k27me3']
other_marks = ['H2az', 'H3k36me3', 'H3k79me2']
window = 10000
binsize = 200
window_frac_sig = 0.1
mpl.style.use('seaborn-whitegrid')
def get_smoothend_curve(array, smoothen=True, sigma=3, z_norm=False, log2=False):
a = array.copy()
if log2 is True:
a = np.log2(a)
if z_norm is True:
a = zscore(a)
if smoothen is True:
return ndimage.gaussian_filter1d(a, sigma)
else:
return a
def make_stats(t,c,w,b,swp):
u = int(w/b-w/b*swp)
d = int(w/b+w/b*swp)
mu = np.mean([np.mean(i[u:d]) for i in c])
return {
'vals': t*10000/bed_counts[mark][cell],
'shuffle_vals': c[:20]*10000/bed_counts[mark][cell],
'total_histone_marks': bed_counts[mark][cell],
'marks_sig_window': np.sum(t[u:d]),
'marks_full_window': np.sum(t),
'pval': 1-poisson(mu).cdf(np.mean(t[u:d])),
}
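# --- Illustrative note (added): how the p-value in make_stats behaves ---
# If the shuffled background averages ~5 peaks per bin inside the significance
# window and the observed mean is 12, then 1 - poisson(5).cdf(12) ~= 0.002,
# i.e. the observed enrichment is unlikely under the background model.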
print "\rGenerating\t%s\n" % 'Figure-3A-3B-3C:',
stats = {}
for mark_set, nc, name in zip([active_marks, repress_marks, other_marks],
[2,1,1], ['activation', 'repression', 'others']):
nr = 3
fig, ax = plt.subplots(nr, nc, figsize=(1+5*nc, 12))
row = 0
col = 0
for mark in mark_set:
print (mark)
all_marks = []
all_controls = []
stats[mark] = {}
for cell in bed_closest_data[mark]:
t = np.array(bed_closest_data[mark][cell]['closest_dist'])
c = np.array(bed_closest_data[mark][cell]['shuffle_dist'])
stats[mark][cell] = make_stats(t, c, window, binsize, window_frac_sig)
x = np.asarray([i for i in range(len(t))])
if nc > 1:
axes = ax[row, col]
else:
axes = ax[row]
for cell in stats[mark]:
for shuffle in stats[mark][cell]['shuffle_vals']:
axes.plot(x, get_smoothend_curve(shuffle, z_norm=False, log2=True, smoothen=True),
alpha=0.2, c='lightgrey', linewidth=0.5)
for cell in stats[mark]:
if stats[mark][cell]['pval'] < 1e-2:
color = 'crimson'
else:
color = 'dimgrey'
axes.plot(x, get_smoothend_curve(stats[mark][cell]['vals'],
z_norm=False, log2=True, smoothen=True), alpha=0.7, c=color, linewidth=1.3)
axes.set_title(mark, fontsize=24)
axes.axvline(window/binsize, ls='--')
axes.axvspan(window/binsize-window/binsize*window_frac_sig,
window/binsize+window/binsize*window_frac_sig,
alpha=0.2, color='dodgerblue')
axes.set_xticks(list(map(int, np.linspace(0,(2*window)/binsize,9))))
axes.set_xlim((0,(2*window)/binsize))
_ = [tick.label.set_fontsize(20) for tick in axes.yaxis.get_major_ticks()]
if col == 0:
#axes.set_ylabel('Log2 (histone\nmarks per 10K\nmarks in sample)', fontsize=22)
axes.set_ylabel('Log2 (normalized\nhistone peaks)', fontsize=22)
if row == nr-1:
axes.set_xlabel('Distance from TRF2 peak center', fontsize=22)
axes.set_xticklabels(map(int, np.linspace(-window,window,9)), fontsize=20, rotation=45)
else:
axes.set_xticklabels([])
col+=1
if col == nc:
col = 0
row+=1
fig.tight_layout()
fig.savefig('./figures/Figure-3_histone_%s.png' % name, dpi=300)
for i in stats:
n = 0
ns = 0
for j in stats[i]:
n+=1
if stats[i][j]['pval'] < 0.01:
ns+=1
print (i, n, ns)
print "\rGenerating\t%s\n" % 'Figure-4A:',
import seaborn as sns
count_histone_df = []
for mark in stats:
for cell in stats[mark]:
count_histone_df.append([mark, cell,
stats[mark][cell]['total_histone_marks']])
count_histone_df = pd.DataFrame(count_histone_df, columns=['Mark', 'Cell', 'Value'])
fig, ax = plt.subplots(1,1, figsize=(14,5))
sns.set_style("whitegrid")
sns.violinplot(x="Mark", y="Value", data=count_histone_df, ax=ax, inner='point', c='Grey', saturation=0,
scale="width", order=active_marks+repress_marks+other_marks, scale_hue=True)
_ = ax.set_xticklabels(active_marks+repress_marks+other_marks, rotation=70, fontsize=24)
ax.set_title('Distribution of number of histone peaks in cell lines for each histone mark', fontsize=26)
ax.set_xlabel('')
ax.set_ylabel('Number of histone peaks', fontsize=24)
_ = [tick.label.set_fontsize(24) for tick in ax.yaxis.get_major_ticks()]
sns.despine()
fig.tight_layout()
fig.savefig('./figures/Suppl-Figure-4A-histone-dist.png', dpi=300)
print "\rGenerating\t%s\n" % 'Figure-4B:',
# TRF2 +/-10KB
count_histone_df = []
for mark in stats:
for cell in stats[mark]:
count_histone_df.append([mark, cell,
stats[mark][cell]['marks_full_window']*10000/stats[mark][cell]['total_histone_marks']])
count_histone_df = pd.DataFrame(count_histone_df, columns=['Mark', 'Cell', 'Value'])
fig, ax = plt.subplots(1,1, figsize=(14,5))
sns.set_style("whitegrid")
sns.violinplot(x="Mark", y="Value", data=count_histone_df, ax=ax, inner='point', c='Grey', saturation=0,
scale="width", order=active_marks+repress_marks+other_marks, scale_hue=True)
_ = ax.set_xticklabels(active_marks+repress_marks+other_marks, rotation=70, fontsize=24)
ax.set_title('Distribution of histone peaks in +/- 10KB of TRF2 peaks', fontsize=26)
ax.set_xlabel('')
ax.set_ylabel('Number of normalized\nhistone peaks', fontsize=24)
_ = [tick.label.set_fontsize(24) for tick in ax.yaxis.get_major_ticks()]
sns.despine()
fig.tight_layout()
fig.savefig('./figures/Suppl-Figure-4B-histone-peaks-10kb-dist.png', dpi=300)
print "\rGenerating\t%s\n" % 'Figure-4C:',
# TRF2 +/-500bp
count_histone_df = []
for mark in stats:
for cell in stats[mark]:
count_histone_df.append([mark, cell,
stats[mark][cell]['marks_sig_window']*10000/stats[mark][cell]['total_histone_marks']])
count_histone_df = pd.DataFrame(count_histone_df, columns=['Mark', 'Cell', 'Value'])
fig, ax = plt.subplots(1,1, figsize=(14,5))
sns.set_style("whitegrid")
sns.violinplot(x="Mark", y="Value", data=count_histone_df, ax=ax, inner='point', c='Grey', saturation=0,
scale="width", order=active_marks+repress_marks+other_marks, scale_hue=True)
_ = ax.set_xticklabels(active_marks+repress_marks+other_marks, rotation=70, fontsize=24)
ax.set_title('Distribution of histone peaks in +/- 500bp of TRF2 peaks', fontsize=26)
ax.set_xlabel('')
ax.set_ylabel('Number of normalized\nhistone peaks', fontsize=24)
_ = [tick.label.set_fontsize(24) for tick in ax.yaxis.get_major_ticks()]
sns.despine()
fig.tight_layout()
fig.savefig('./figures/Suppl-Figure-4C-histone-peaks-500bp-dist.png', dpi=300)
print "\rGenerating\t%s\n" % 'Figure-4D:',
# p-values
count_histone_df = []
for mark in stats:
for cell in stats[mark]:
count_histone_df.append([mark, cell,
-np.log10(stats[mark][cell]['pval'])])
count_histone_df = pd.DataFrame(count_histone_df, columns=['Mark', 'Cell', 'Value'])
fig, ax = plt.subplots(1,1, figsize=(14,5))
sns.set_style("whitegrid")
sns.violinplot(x="Mark", y="Value", data=count_histone_df, ax=ax, inner='point', c='Grey', saturation=0,
scale="width", order=active_marks+repress_marks+other_marks, scale_hue=True)
_ = ax.set_xticklabels(active_marks+repress_marks+other_marks, rotation=70, fontsize=24)
ax.set_title('Distribution of p-values in cell lines', fontsize=26)
ax.set_xlabel('')
ax.set_ylabel('-log10(p-value)', fontsize=24)
_ = [tick.label.set_fontsize(24) for tick in ax.yaxis.get_major_ticks()]
sns.despine()
fig.tight_layout()
fig.savefig('./figures/4D-histone-pval-10kb-dist.png', dpi=300)
| 40.853211 | 104 | 0.654278 | [
"MIT"
] | rintukutum/TRF2-DNase-ChIP | 3-d-generate-Fig-3A-C-and-Suppl-fig-4A-D.py | 8,906 | Python |
import numpy as np
import pandas as pd
import os
import re
import fuelcell as fc
class Datum():
def __init__(self, name, data):
# data
self.name = name
self.raw_data = data
self.label = name
self.processed_data = None
self.expt_type = None
# processed values
self.current_data = None
self.potential_data = None
self.overpotential_data = None
self.logcurrent_data = None
self.realcurrent_data = None
self.imagcurrent_data = None
self.error_data = None
# misc parameters
self.area = 1
self.refelec = 0
self.thermo_potential = 0
# tafel
self.tafel_slope = None
self.exchg_curr = None
self.tafel_rsq = None
# eis
self.semicircle_params = None
self.linearfit_params = None
self.hfr = None
self.hfr_linear = None
self.lfr = None
self.eis_current = None
# visualization
self.line = None
self.errcaps = None
self.errbars = None
### accessors ###
def get_name(self):
return self.name
def get_raw_data(self):
if self.raw_data is not None:
return self.raw_data.copy()
return None
def get_label(self):
return self.label
def get_processed_data(self):
if self.processed_data is not None:
return self.processed_data.copy()
return None
def get_expt_type(self):
return self.expt_type
def get_current_data(self):
return self.current_data
def get_potential_data(self):
return self.potential_data
def get_overpotential_data(self):
return self.overpotential_data
def get_logcurrent_data(self):
return self.logcurrent_data
def get_realcurrent_data(self):
return self.realcurrent_data
def get_imagcurrent_data(self):
return self.imagcurrent_data
def get_error_data(self):
return self.error_data
def get_area(self):
return self.area
def get_refelec(self):
return self.refelec
def get_thermo_potential(self):
return self.thermo_potential
def get_tafel_slope(self):
return self.tafel_slope
def get_exchg_curr(self):
return self.exchg_curr
def get_tafel_rsq(self):
return self.tafel_rsq
def get_semicircle_params(self):
popt = self.semicircle_params
r, h, k = popt[0], popt[1], popt[2]
return r, h, k
def get_linearfit_params(self):
popt = self.linearfit_params
m, b = popt[0], popt[1]
return m, b
def get_hfr(self):
return self.hfr
def get_hfr_linear(self):
return self.hfr_linear
def get_lfr(self):
return self.lfr
def get_eis_current(self):
return self.eis_current
def get_line(self):
return self.line
def get_errcaps(self):
return self.errcaps
def get_errbars(self):
return self.errbars
### modifiers ###
def set_label(self, new_label):
self.label = new_label
def set_processed_data(self, new_data):
self.processed_data = new_data
def set_expt_type(self, new_type):
self.expt_type = new_type.lower()
def set_current_data(self, new_vals):
self.current_data = np.asarray(new_vals)
def set_potential_data(self, new_vals):
self.potential_data = np.asarray(new_vals)
def set_overpotential_data(self, new_vals):
self.overpotential_data = np.asarray(new_vals)
def set_logcurrent_data(self, new_vals):
self.logcurrent_data = np.asarray(new_vals)
def set_realcurrent_data(self, new_vals):
self.realcurrent_data = np.asarray(new_vals)
def set_imagcurrent_data(self, new_vals):
self.imagcurrent_data = np.asarray(new_vals)
def set_error_data(self, new_vals):
self.error_data = np.asarray(new_vals)
def set_area(self, new_val):
self.area = new_val
def set_refelec(self, new_val):
self.refelec = new_val
def set_thermo_potential(self, new_val):
self.thermo_potential = new_val
def set_tafel_slope(self, new_val):
self.tafel_slope = new_val
def set_exchg_curr(self, new_val):
self.exchg_curr = new_val
def set_tafel_rsq(self, new_val):
self.tafel_rsq = new_val
def set_semicircle_params(self, new_params):
self.semicircle_params = new_params
def set_linearfit_params(self, new_params):
self.linearfit_params = new_params
def set_hfr(self, new_val):
self.hfr = new_val
def set_hfr_linear(self, new_val):
self.hfr_linear = new_val
def set_lfr(self, new_val):
self.lfr = new_val
def set_eis_current(self, new_val):
self.eis_current = new_val
def set_line(self, new_val):
self.line = new_val
def set_errcaps(self, new_val):
self.errcaps = new_val
def set_errbars(self, new_val):
        self.errbars = new_val
 | 20.613208 | 48 | 0.743707 | [
"MIT"
] | abhikandoi/fuelcell | fuelcell/model.py | 4,370 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ExtensionsV1beta1Scale(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'ExtensionsV1beta1ScaleSpec',
'status': 'ExtensionsV1beta1ScaleStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None): # noqa: E501
"""ExtensionsV1beta1Scale - a model defined in OpenAPI""" # noqa: E501
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this ExtensionsV1beta1Scale. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this ExtensionsV1beta1Scale. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this ExtensionsV1beta1Scale.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this ExtensionsV1beta1Scale. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this ExtensionsV1beta1Scale. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this ExtensionsV1beta1Scale. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this ExtensionsV1beta1Scale.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this ExtensionsV1beta1Scale. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this ExtensionsV1beta1Scale. # noqa: E501
:return: The metadata of this ExtensionsV1beta1Scale. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this ExtensionsV1beta1Scale.
:param metadata: The metadata of this ExtensionsV1beta1Scale. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this ExtensionsV1beta1Scale. # noqa: E501
:return: The spec of this ExtensionsV1beta1Scale. # noqa: E501
:rtype: ExtensionsV1beta1ScaleSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this ExtensionsV1beta1Scale.
:param spec: The spec of this ExtensionsV1beta1Scale. # noqa: E501
:type: ExtensionsV1beta1ScaleSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this ExtensionsV1beta1Scale. # noqa: E501
:return: The status of this ExtensionsV1beta1Scale. # noqa: E501
:rtype: ExtensionsV1beta1ScaleStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ExtensionsV1beta1Scale.
:param status: The status of this ExtensionsV1beta1Scale. # noqa: E501
:type: ExtensionsV1beta1ScaleStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExtensionsV1beta1Scale):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
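# Illustrative usage sketch (added for clarity; not part of the generated client).
# Spec and status objects are omitted here, so those attributes simply stay None.
if __name__ == "__main__":
    example = ExtensionsV1beta1Scale(api_version="extensions/v1beta1", kind="Scale")
    print(example.to_dict())  # {'api_version': 'extensions/v1beta1', 'kind': 'Scale', ...}
    print(example == ExtensionsV1beta1Scale(api_version="extensions/v1beta1", kind="Scale"))  # True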
| 32.239819 | 312 | 0.622737 | [
"Apache-2.0"
] | ACXLM/python | kubernetes/client/models/extensions_v1beta1_scale.py | 7,125 | Python |
#input variables to Monthly Attribution Paid Campaign Cloning Step 12: Deactivate + Delete Smart Campaigns
input={
    'token': 'Token', #from Step 3: Get Token
    'parent id': 'fid', #from Step 4: Get Lv2 Folder or Create Lv2 Folder
    'sc_ids': 'sc_ids', #assumed mapping: comma-separated smart campaign ids used below
    'sc_names': 'sc_names', #assumed mapping: comma-separated smart campaign names used below
}
import re
import urllib.parse
import calendar
import datetime
import requests
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
    'Authorization': 'Bearer ' + input['token']
}
response =""
sc_ids = input['sc_ids'].split(",")
sc_names = input['sc_names'].split(",")
for i in range(0,len(sc_ids)):
if "Anonymous" not in sc_names[i]:
url = 'https://028-jjw-728.mktorest.com/rest/asset/v1/smartCampaign/'+sc_ids[i]+'/deactivate.json'
else:
url = 'https://028-jjw-728.mktorest.com/rest/asset/v1/smartCampaign/'+sc_ids[i]+'/delete.json'
    response = response + requests.request("POST", url, headers=headers, data={}).text
return {'Response': response}
| 31.903226 | 110 | 0.635996 | [
"MIT"
] | tyron-pretorius/zapier | monthly_attribution_paid_campaign_cloning/deactivate_+_delete_smart_campaigns.py | 989 | Python |
'''
Copyright 2020, Amazon Web Services Inc.
This code is licensed under MIT license (see LICENSE.txt for details)
Python 3
'''
class TransportException(Exception):
'''Raised by the transport layer for most issues.'''
class BadHTTPMethod(Exception):
'''Raised for methods missing from the requests library.'''
class BadSink(Exception):
'''Raised when the target descriptor for transport is not ESDescriptor or
SQSDescriptor.'''
class BadAuth(Exception):
'''Raised if the transport client gets both SigV4 signing and HTTP Auth''' | 26.571429 | 78 | 0.734767 | [
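# Hedged usage sketch (not part of es_sink): how calling code might surface one
# of these exceptions; the transport layer itself decides when to raise them.
if __name__ == "__main__":
    def _send(method):
        if method.lower() not in ('get', 'head', 'post', 'put', 'delete', 'patch'):
            raise BadHTTPMethod("requests has no method named '{}'".format(method))
    try:
        _send('fetch')
    except BadHTTPMethod as err:
        print('transport error: {}'.format(err))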
"Apache-2.0"
] | 123BLiN/community | es_sink/es_sink/transport_exceptions.py | 558 | Python |
"""Controller para UpdateUser"""
from typing import Type, Optional
from datetime import datetime
from mitmirror.domain.usecases import UpdateUserInterface
from mitmirror.domain.models import User
from mitmirror.presenters.interfaces import ControllerInterface
from mitmirror.presenters.helpers import HttpRequest, HttpResponse
from mitmirror.errors import (
HttpBadRequestError,
DefaultError,
HttpNotFound,
HttpUnprocessableEntity,
)
class UpdateUserController(ControllerInterface):
"""Controller para o caso de uso UpdateUser"""
def __init__(self, usecase: Type[UpdateUserInterface]) -> None:
self.__usecase = usecase
def handler(
self, param: Optional[any] = None, http_request: Type[HttpRequest] = None
) -> HttpResponse:
"""Metodo para chamar o caso de uso"""
response = None
if not param:
raise HttpBadRequestError(
message="Essa requisiçao exige o seguinte parametro: <int:user_id>, error!"
)
if not str(param).isnumeric():
raise HttpUnprocessableEntity(
message="O parametro <user_id> deve ser do tipo inteiro, error!"
)
try:
response = None
if not http_request.body:
raise DefaultError(type_error=400)
name = http_request.body.get("name", None)
email = http_request.body.get("email", None)
username = http_request.body.get("username", None)
password = http_request.body.get("password", None)
response = self.__usecase.update(
user_id=param,
name=name,
email=email,
username=username,
password=password,
)
return self.__format_response(response["data"])
except DefaultError as error:
if error.type_error == 400:
raise HttpBadRequestError(
message="Esta requisicao precisa dos seguintes parametros:\
<str:name>, <str:email>, <str:username>, <any:password>, error!"
) from error
if error.type_error == 404:
raise HttpNotFound(message="Usuario nao encontrado, error!") from error
raise error
except Exception as error:
raise error
@classmethod
def __format_response(cls, response_method: Type[User]) -> HttpResponse:
"""Formatando a resposta"""
response = {
"message": "Informacoes do usuario atualizadas com sucesso!",
"data": {
"id": response_method.id,
"name": response_method.name,
"email": response_method.email,
"username": response_method.username,
"password_hash": "Nao mostramos isso aqui!",
"secundary_id": response_method.secundary_id,
"is_staff": response_method.is_staff,
"is_active_user": response_method.is_active_user,
"last_login": datetime.isoformat(response_method.last_login),
"date_joined": datetime.isoformat(response_method.date_joined),
},
}
return HttpResponse(status_code=200, body=response)
| 31.409524 | 91 | 0.604912 | [
"MIT"
] | Claayton/mitmirror-api | mitmirror/presenters/controllers/users/update_user_controller.py | 3,299 | Python |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2006, Frank Scholz <[email protected]>
class RenderingControlClient:
def __init__(self, service):
self.service = service
self.namespace = service.get_type()
self.url = service.get_control_url()
self.service.subscribe()
self.service.client = self
#print "RenderingControlClient __init__", self.url
#def __del__(self):
# #print "RenderingControlClient deleted"
# pass
def remove(self):
self.service.remove()
self.service = None
self.namespace = None
self.url = None
del self
    def subscribe_for_variable(self, var_name, callback, signal=False):
        self.service.subscribe_for_variable(var_name, instance=0, callback=callback, signal=signal)
def list_presets(self, instance_id=0):
action = self.service.get_action('ListPresets')
return action.call(InstanceID=instance_id)
def select_presets(self, instance_id=0, preset_name=''):
action = self.service.get_action('SelectPresets')
return action.call( InstanceID=instance_id,
PresetName=preset_name)
def get_mute(self, instance_id=0, channel='Master'):
action = self.service.get_action('GetMute')
return action.call( InstanceID=instance_id,
Channel=channel)
def set_mute(self, instance_id=0, channel='Master', desired_mute=0):
action = self.service.get_action('SetMute')
return action.call( InstanceID=instance_id,
Channel=channel,
DesiredMute=desired_mute)
def get_volume(self, instance_id=0, channel='Master'):
action = self.service.get_action('GetVolume')
return action.call( InstanceID=instance_id,
Channel=channel)
def set_volume(self, instance_id=0, channel='Master', desired_volume=0):
action = self.service.get_action('SetVolume')
return action.call( InstanceID=instance_id,
Channel=channel,
DesiredVolume=desired_volume)
def get_volume_db(self, instance_id=0, channel='Master'):
action = self.service.get_action('GetVolumeDB')
return action.call( InstanceID=instance_id,
Channel=channel)
def set_volume_db(self, instance_id=0, channel='Master', desired_volume=0):
action = self.service.get_action('SetVolumeDB')
return action.call( InstanceID=instance_id,
Channel=channel,
DesiredVolume=desired_volume)
def get_volume_db_range(self, instance_id=0, channel='Master'):
action = self.service.get_action('GetVolumeDBRange')
return action.call( InstanceID=instance_id,
Channel=channel)
def get_loudness(self, instance_id=0, channel='Master'):
action = self.service.get_action('GetLoudness')
return action.call( InstanceID=instance_id,
Channel=channel)
def set_loudness(self, instance_id=0, channel='Master', desired_loudness=0):
action = self.service.get_action('SetLoudness')
return action.call( InstanceID=instance_id,
Channel=channel,
DesiredLoudness=desired_loudness) | 40.27907 | 98 | 0.630196 | [
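# Hedged usage sketch (not part of Coherence): `service` is assumed to be the
# RenderingControl service object that Coherence's device discovery provides for
# a MediaRenderer; the wrapper methods above simply invoke its UPnP actions.
#
# client = RenderingControlClient(service)
# client.set_volume(instance_id=0, channel='Master', desired_volume=35)
# current = client.get_volume() # result of the GetVolume action (typically a Deferred)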
"BSD-3-Clause"
] | AndyThirtover/wb_gateway | WebBrickLibs/coherence/upnp/services/clients/rendering_control_client.py | 3,464 | Python |
import os
from telethon import TelegramClient, events, sync
from dotenv import load_dotenv
sleeping_txt = "I'm sleeping right now, get back to you when I wake up"
turned_on = False
already_messaged_users = set()
if __name__ == "__main__":
load_dotenv(override=True, verbose=True)
api_id = int(os.getenv("API_ID"))
api_hash = os.getenv("API_HASH")
session_name = os.getenv("SESSION_NAME")
with TelegramClient(session_name, api_id, api_hash) as client:
client.send_message('me' , 'BRB online!')
# Turns bot on and off
async def toggle_brb(event, turn_on):
global turned_on
turned_on = turn_on
sender = await event.message.get_sender()
await event.reply(f"BRB on: {turned_on}")
# /sleep : turns bot on
@client.on(events.NewMessage(pattern='(?i)\/sleep$'))
async def enable(event):
await toggle_brb(event, True)
# /up : turns bot off
@client.on(events.NewMessage(pattern='(?i)\/up$'))
async def disable(event):
global already_messaged_users
already_messaged_users = set() # clear cache of messaged users
await toggle_brb(event, False)
@client.on(events.NewMessage(incoming=True))
async def say_brb(event):
global turned_on, already_messaged_users
sender = await event.get_sender()
if not event.is_private or not turned_on or \
sender.id in already_messaged_users:
return
already_messaged_users.add(sender.id)
await event.reply(sleeping_txt)
client.run_until_disconnected()
| 32.480769 | 74 | 0.631735 | [
"MIT"
] | gmelodie/brb | main.py | 1,689 | Python |
from typing import TypeVar
from datetime import datetime
from gphotos import Utils
from gphotos.DbRow import DbRow
from gphotos.DatabaseMedia import DatabaseMedia
from gphotos.GoogleAlbumMedia import GoogleAlbumMedia
import logging
log = logging.getLogger(__name__)
# this allows self reference to this class in its factory methods
G = TypeVar('G', bound='GoogleAlbumsRow')
@DbRow.db_row
class GoogleAlbumsRow(DbRow):
"""
generates a class with attributes for each of the columns in the
    Albums table
"""
table = "Albums"
cols_def = {'RemoteId': str, 'AlbumName': str, 'Size': int,
'StartDate': datetime,
'EndDate': datetime, 'SyncDate': datetime,
'Downloaded': bool}
def to_media(self) -> DatabaseMedia:
db_media = DatabaseMedia(
_id=self.RemoteId,
_filename=self.AlbumName,
_size=self.Size,
_create_date=self.EndDate)
return db_media
@classmethod
def from_media(cls, album: GoogleAlbumMedia) -> G:
pass
@classmethod
def from_parm(cls, album_id, filename, size, start, end) -> G:
new_row = cls.make(
RemoteId=album_id,
AlbumName=filename,
Size=size,
StartDate=start,
EndDate=end,
SyncDate=Utils.date_to_string(
datetime.now()),
Downloaded=0)
return new_row
| 28.372549 | 68 | 0.629578 | [
"MIT"
] | dehnert/gphotos-sync | gphotos/GoogleAlbumsRow.py | 1,447 | Python |
import pygame
from game.game import Game
def initialization():
"""Инициализация нужных файлов игры"""
pygame.init()
pygame.display.set_icon(pygame.image.load("data/icon.bmp"))
pygame.display.set_caption('SPACE')
if __name__ == "__main__":
initialization()
game = Game()
game.run()
pygame.quit()
| 18.444444 | 63 | 0.671687 | [
"MIT"
] | shycoldii/asteroids | main.py | 361 | Python |
# External Dependencies
from __future__ import division
from numpy import isclose
from svgpathtools import Path
# Internal Dependencies
from misc4rings import isNear
class ClosedRingsOverlapError(Exception):
def __init__(self,mes):
self.mes = mes
def __str__(self):
return repr(self.mes)
def findAppropriateTstep(path, T, stepInPositiveDirection):
    # Often the overlapping part of two paths is so small that, when it is removed,
    # pathXpathIntersections will still consider the two curves as intersecting.
    # This function finds the smallest (signed) Tstep such that
    # isNear(path(T), path(T + Tstep)) == False.
    # note: stepInPositiveDirection should be True if Tstep should be positive
# set initial guess as max possible step distance (and set sign of Tstep)
# T = float(T)
if stepInPositiveDirection:
Tstep = 1 - T
else:
Tstep = 0 - T
#check that what we're asking for is possible
if isNear(path.point(T + Tstep), path.point(T)):
raise Exception("An impossible Tstep was asked for.")
#Find a lower bound for Tstep by bisection
maxIts = 200 # limits Tstep to be > (1/2)**200
its = 0
while not isNear(path.point(T + Tstep), path.point(T)) and its < maxIts:
Tstep /= 2
its += 1
if its >= maxIts:
raise Exception("Max iterations reached in bisection to find "
"appropriate Tstep. This could theoretically be ok "
"if you have a curve with a huge number of "
"segments... just increase the maxIts in "
"findAppropriateTstep if you have a such a curve "
"(but I doubt that's the case - so tell Andy).")
return 2 * Tstep
def shortPart(path,T):
if isclose(T, 0) or isclose(T, 1):
return Path()
if T < 1-T: # T is closer to 0
# return cropPath(path,0,T)
return path.cropped(0, T)
else: # T is closer to 1
# return cropPath(path,T,1)
return path.cropped(T, 1)
def longPart(path, T, remove_a_little_extra=True):
if remove_a_little_extra:
if T < 1 - T: # T is closer to 0 than 1
extra = T
if isNear(path.point(T + extra), path.point(T)):
extra = findAppropriateTstep(path, T, True)
else: # T is closer to 1 than 0
extra = 1-T
if isNear(path.point(T+extra), path.point(T)):
extra = -1 * findAppropriateTstep(path, T, False)
else:
extra = 0
if T < 1 - T: #T is closer to 0 than 1
# return cropPath(path,T+extra,1)
return path.cropped(T + extra, 1)
else: #T is closer to 1 than 0
# return cropPath(path,0,T-extra)
return path.cropped(0, T - extra)
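# Hedged sketch (not part of the module's normal flow): cropping a simple
# two-segment path. remove_a_little_extra=False sidesteps the isNear()-based
# trimming, which depends on the misc4rings tolerance settings.
if __name__ == "__main__":
    from svgpathtools import Line
    corner = Path(Line(0 + 0j, 1 + 0j), Line(1 + 0j, 1 + 1j))
    print(shortPart(corner, 0.25).length())  # piece nearest T=0 -> 0.5
    print(longPart(corner, 0.25, remove_a_little_extra=False).length())  # the rest -> 1.5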
def remove_intersections(ipath, jpath, iclosed, jclosed, iringupdated=False, jringupdated=False): #removes one intersection at a time until all are gone
new_ipath = ipath
new_jpath = jpath
#find all intersections
res = ipath.intersect(jpath, justonemode=True)
# res = pathXpathIntersections(ipath, jpath, justonemode=True)
if res:
iT, iseg, i_t = res[0]
jT, jseg, j_t = res[1]
# iT = ipath.t2T(iseg, i_t)
# jT = jpath.t2T(jseg, j_t)
else:
run_again = False
return new_ipath, new_jpath, iringupdated, jringupdated, run_again
    # Now crop the path (if one ring is closed, crop the other ring)
if iclosed and jclosed: #then crop jpath
raise ClosedRingsOverlapError("")
elif jclosed: #jring closed so crop iring
new_ipath = longPart(ipath, iT)
new_jpath = jpath
iringupdated = True
elif iclosed: #iring closed so crop jring
new_jpath = longPart(jpath, jT)
new_ipath = ipath
jringupdated = True
else: #both rings are incomplete
if iT in [0, 1]:
new_ipath = longPart(ipath, iT)
new_jpath = jpath
iringupdated = True
elif jT in [0, 1]:
new_jpath = longPart(jpath, jT)
new_ipath = ipath
jringupdated = True
elif shortPart(ipath, iT).length() < shortPart(jpath, jT).length():
new_ipath = longPart(ipath, iT)
new_jpath = jpath
iringupdated = True
else:
new_jpath = longPart(jpath, jT)
new_ipath = ipath
jringupdated = True
run_again = True # might be more intersections to remove, so run again
return new_ipath, new_jpath, iringupdated, jringupdated, run_again
def remove_intersections_from_rings(rings):
from options4rings import intersection_removal_progress_output_on
from time import time as current_time
from andysmod import n_choose_k, format_time
[r.record_wasClosed() for r in rings] # record the current closure status
#for output
num_segments_in_ring_list = sum(len(r.path) for r in rings)
num_seg_pairs2check = n_choose_k(num_segments_in_ring_list, 2)
num_seg_pairs_checked = 0
current_percent_complete = 0
start_time = current_time()
count = 0
overlappingClosedRingPairs = []
for i in range(len(rings)):
iring = rings[i]
ipath = iring.path
new_ipath = ipath
iclosed = iring.wasClosed
iringupdated = False
num_segs_in_ipath = len(ipath) # for progress output
for j in range(i+1, len(rings)):
if rings[j].maxR < rings[i].minR or rings[i].maxR < rings[j].minR:
continue
jring = rings[j]
jpath = jring.path
new_jpath = jpath
jclosed = jring.wasClosed
jringupdated = False
num_segs_in_jpath = len(jpath) #for progress output
# while loop to remove intersections between iring and jring (if any exist)
run_again = True
maxits = 20
its = 0
while run_again and its < maxits:
try:
args = (new_ipath, new_jpath, iclosed, jclosed)
res = remove_intersections(*args, iringupdated=iringupdated, jringupdated=jringupdated)
new_ipath, new_jpath, iringupdated, jringupdated, run_again = res
except ClosedRingsOverlapError:
overlappingClosedRingPairs.append((i, j))
run_again = False
pass
its += 1
            # raise Exception if the while loop terminated due to reaching the max allowed iterations
if its >= maxits:
# remove_intersections_from_rings([iring, jring])
# print(iring.xml)
# print(jring.xml)
raise Exception("Max iterations reached while removing intersections. Either the above two rings have over 20 intersections or this is a bug.")
# Output progess
if intersection_removal_progress_output_on.b:
num_seg_pairs_checked += num_segs_in_jpath*num_segs_in_ipath
if 100 * num_seg_pairs_checked / num_seg_pairs2check > int(100 * current_percent_complete):
current_percent_complete = num_seg_pairs_checked / num_seg_pairs2check
time_elapsed = current_time() - start_time
estimated_time_remaining = (1-current_percent_complete) * time_elapsed / current_percent_complete
stuff = (int(100 * current_percent_complete),
format_time(estimated_time_remaining),
format_time(time_elapsed))
mes = ("[%s%% complete || Est. Remaining Time = %s || "
"Elapsed Time = %s]\r" % stuff)
intersection_removal_progress_output_on.dprint(mes)
# update jring if jpath was trimmed
if jringupdated:
jring.updatePath(new_jpath)
count += 1
# update iring if ipath was trimmed
if iringupdated:
iring.updatePath(new_ipath)
count += 1
return rings, count, overlappingClosedRingPairs | 39.028708 | 250 | 0.608557 | [
"MIT"
] | mathandy/svg-dendro | noIntersections4rings.py | 8,157 | Python |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands.arm import is_valid_resource_id, resource_id, parse_resource_id
from azure.cli.core.util import CLIError
def get_target_resource_validator(dest, required):
def _validator(namespace):
name_or_id = getattr(namespace, dest)
rg = namespace.resource_group_name
res_ns = namespace.namespace
parent = namespace.parent
res_type = namespace.resource_type
usage_error = CLIError('usage error: --{0} ID | --{0} NAME --resource-group NAME '
'--{0}-type TYPE [--{0}-parent PARENT] '
'[--{0}-namespace NAMESPACE]'.format(dest))
if not name_or_id and required:
raise usage_error
elif name_or_id:
if is_valid_resource_id(name_or_id) and any((res_ns, parent, res_type)):
raise usage_error
elif not is_valid_resource_id(name_or_id):
from azure.cli.core.commands.client_factory import get_subscription_id
if res_type and '/' in res_type:
res_ns = res_ns or res_type.rsplit('/', 1)[0]
res_type = res_type.rsplit('/', 1)[1]
if not all((rg, res_ns, res_type, name_or_id)):
raise usage_error
setattr(namespace, dest,
'/subscriptions/{}/resourceGroups/{}/providers/{}/{}{}/{}'.format(
get_subscription_id(), rg, res_ns, parent + '/' if parent else '',
res_type, name_or_id))
del namespace.namespace
del namespace.parent
del namespace.resource_type
return _validator
def validate_diagnostic_settings(namespace):
from azure.cli.core.commands.client_factory import get_subscription_id
resource_group_error = "--resource-group is required when name is provided for "\
"storage account or workspace or service bus namespace and rule. "
if namespace.namespace or namespace.rule_name:
if namespace.namespace is None:
raise CLIError(resource_group_error)
if namespace.rule_name is None:
raise CLIError(resource_group_error)
if namespace.resource_group is None:
raise CLIError(resource_group_error)
if not is_valid_resource_id(namespace.namespace):
namespace.service_bus_rule_id = resource_id(subscription=get_subscription_id(),
resource_group=namespace.resource_group,
namespace='microsoft.ServiceBus',
type='namespaces',
name=namespace.namespace,
child_type='AuthorizationRules',
child_name=namespace.rule_name)
else:
resource_dict = parse_resource_id(namespace.namespace)
namespace.service_bus_rule_id = resource_id(subscription=resource_dict['subscription'],
resource_group=resource_dict['resource_group'],
namespace=resource_dict['namespace'],
type=resource_dict['type'],
name=resource_dict['name'],
child_type='AuthorizationRules',
child_name=namespace.rule_name)
if namespace.storage_account and not is_valid_resource_id(namespace.storage_account):
if namespace.resource_group is None:
raise CLIError(resource_group_error)
namespace.storage_account = resource_id(subscription=get_subscription_id(),
resource_group=namespace.resource_group,
namespace='microsoft.Storage',
type='storageAccounts',
name=namespace.storage_account)
if namespace.workspace and not is_valid_resource_id(namespace.workspace):
if namespace.resource_group is None:
raise CLIError(resource_group_error)
namespace.workspace = resource_id(subscription=get_subscription_id(),
resource_group=namespace.resource_group,
namespace='microsoft.OperationalInsights',
type='workspaces', name=namespace.workspace)
_validate_tags(namespace)
def _validate_tags(namespace):
""" Extracts multiple space-separated tags in key[=value] format """
if isinstance(namespace.tags, list):
tags_dict = {}
for item in namespace.tags:
tags_dict.update(_validate_tag(item))
namespace.tags = tags_dict
def _validate_tag(string):
""" Extracts a single tag in key[=value] format """
result = {}
if string:
comps = string.split('=', 1)
result = {comps[0]: comps[1]} if len(comps) > 1 else {string: ''}
return result
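# Quick illustration of the tag helpers above (values are placeholders):
# _validate_tag('env=prod') -> {'env': 'prod'}
# _validate_tag('critical') -> {'critical': ''}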
def process_action_group_detail_for_creation(namespace):
from azure.mgmt.monitor.models import ActionGroupResource, EmailReceiver, SmsReceiver, WebhookReceiver
_validate_tags(namespace)
ns = vars(namespace)
name = ns['action_group_name']
receivers = ns.pop('receivers') or []
action_group_resource_properties = {
'location': 'global', # as of now, 'global' is the only available location for action group
'group_short_name': ns.pop('short_name') or name[:12], # '12' is the short name length limitation
'email_receivers': [r for r in receivers if isinstance(r, EmailReceiver)],
'sms_receivers': [r for r in receivers if isinstance(r, SmsReceiver)],
'webhook_receivers': [r for r in receivers if isinstance(r, WebhookReceiver)],
'tags': ns.get('tags') or None
}
ns['action_group'] = ActionGroupResource(**action_group_resource_properties)
| 50.340909 | 106 | 0.561776 | [
"MIT"
] | aag09/azurecli | src/command_modules/azure-cli-monitor/azure/cli/command_modules/monitor/validators.py | 6,645 | Python |
from pytest import fixture
from ..f9 import F9
@fixture(scope="module")
def f9() -> F9:
return F9()
def test_evaluate_solution(f9: F9, helpers):
helpers.test_evaluate_solution(f9)
def test_evaluate_population(f9: F9, helpers):
helpers.test_evaluate_population(f9)
def test_dsm(f9: F9, helpers):
helpers.test_dsm(f9)
| 16.238095 | 46 | 0.72434 | [
"MIT"
] | piotr-rarus/evobench | evobench/continuous/cec2013lsgo/test/test_f9.py | 341 | Python |
import numbers
import warnings
import torch
from ignite.contrib.handlers.base_logger import BaseLogger, BaseOptimizerParamsHandler, BaseOutputHandler, \
BaseWeightsScalarHandler, BaseWeightsHistHandler
__all__ = ['TensorboardLogger', 'OptimizerParamsHandler', 'OutputHandler',
'WeightsScalarHandler', 'WeightsHistHandler', 'GradsScalarHandler', 'GradsHistHandler']
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `another_engine=trainer` to take the epoch of the `trainer`
tb_logger.attach(evaluator,
log_handler=OutputHandler(tag="validation",
metric_names=["nll", "accuracy"],
another_engine=trainer),
event_name=Events.EPOCH_COMPLETED)
Example with CustomPeriodicEvent, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers import CustomPeriodicEvent
cpe = CustomPeriodicEvent(n_iterations=500)
cpe.attach(trainer)
@trainer.on(cpe.Events.ITERATIONS_500_COMPLETED)
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have CustomPeriodicEvent attached to it, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on Tensorboard.
tb_logger.attach(evaluator,
log_handler=OutputHandler(tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform),
event_name=Events.EPOCH_COMPLETED)
Args:
tag (str): common title for all produced plots. For example, 'training'
metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
metrics.
output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{'loss': loss1, `another_loss`: loss2}` to label the plot
with corresponding keys.
another_engine (Engine): another engine to use to provide the value of event. Typically, user can provide
the trainer if this handler is attached to an evaluator and thus it logs proper trainer's
epoch/iteration value.
global_step_transform (callable, optional): global step transform function to output a desired global step.
Output of function should be an integer. Default is None, global_step based on attached engine. If provided,
uses function output as global_step.
"""
def __init__(self, tag, metric_names=None, output_transform=None, another_engine=None, global_step_transform=None):
super(OutputHandler, self).__init__(tag, metric_names, output_transform, another_engine, global_step_transform)
def __call__(self, engine, logger, event_name):
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'OutputHandler' works only with TensorboardLogger")
metrics = self._setup_output_metrics(engine)
engine = engine if not hasattr(self, 'another_engine') or self.another_engine is None else self.another_engine
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError("global_step must be int, got {}."
" Please check the output of global_step_transform.".format(type(global_step)))
for key, value in metrics.items():
if isinstance(value, numbers.Number) or \
isinstance(value, torch.Tensor) and value.ndimension() == 0:
logger.writer.add_scalar("{}/{}".format(self.tag, key), value, global_step)
elif isinstance(value, torch.Tensor) and value.ndimension() == 1:
for i, v in enumerate(value):
logger.writer.add_scalar("{}/{}/{}".format(self.tag, key, i), v.item(), global_step)
else:
warnings.warn("TensorboardLogger output_handler can not log "
"metrics value type {}".format(type(value)))
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
tb_logger.attach(trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED)
Args:
optimizer (torch.optim.Optimizer): torch optimizer which parameters to log
param_name (str): parameter name
tag (str, optional): common title for all produced plots. For example, 'generator'
"""
def __init__(self, optimizer, param_name="lr", tag=None, global_step_transform=None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
if global_step_transform is None:
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
self.global_step_transform = global_step_transform
def __call__(self, engine, logger, event_name):
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'OptimizerParamsHandler' works only with TensorboardLogger")
global_step = self.global_step_transform(engine, event_name)
tag_prefix = "{}/".format(self.tag) if self.tag else ""
params = {"{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)}
for k, v in params.items():
logger.writer.add_scalar(k, v, global_step)
class WeightsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's weights as scalars.
Handler iterates over named parameters of the model, applies reduction function to each parameter
produce a scalar and then logs the scalar.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(trainer,
log_handler=WeightsScalarHandler(model, reduction=torch.norm),
event_name=Events.ITERATION_COMPLETED)
Args:
model (torch.nn.Module): model to log weights
reduction (callable): function to reduce parameters into scalar
"""
def __init__(self, model, reduction=torch.norm):
super(WeightsScalarHandler, self).__init__(model, reduction)
def __call__(self, engine, logger, event_name):
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'WeightsScalarHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
for name, p in self.model.named_parameters():
if p.grad is None:
continue
name = name.replace('.', '/')
logger.writer.add_scalar("weights_{}/{}".format(self.reduction.__name__, name),
self.reduction(p.data),
global_step)
class WeightsHistHandler(BaseWeightsHistHandler):
"""Helper handler to log model's weights as histograms.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(trainer,
log_handler=WeightsHistHandler(model),
event_name=Events.ITERATION_COMPLETED)
Args:
model (torch.nn.Module): model to log weights
"""
def __init__(self, model, tag=None, global_step_transform=None):
super(WeightsHistHandler, self).__init__(model)
self.tag = tag
if global_step_transform is None:
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
self.global_step_transform = global_step_transform
def __call__(self, engine, logger, event_name):
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'WeightsHistHandler' works only with TensorboardLogger")
global_step = self.global_step_transform(engine, event_name)
tag_prefix = "{}/".format(self.tag) if self.tag else ""
for name, p in self.model.named_parameters():
if p.grad is None:
continue
name = name.replace('.', '/')
logger.writer.add_histogram(tag="{}weights/{}".format(tag_prefix, name),
values=p.data.detach().cpu().numpy(),
global_step=global_step)
class GradsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's gradients as scalars.
Handler iterates over the gradients of named parameters of the model, applies reduction function to each parameter
produce a scalar and then logs the scalar.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(trainer,
log_handler=GradsScalarHandler(model, reduction=torch.norm),
event_name=Events.ITERATION_COMPLETED)
Args:
model (torch.nn.Module): model to log weights
reduction (callable): function to reduce parameters into scalar
"""
def __init__(self, model, reduction=torch.norm):
super(GradsScalarHandler, self).__init__(model, reduction)
def __call__(self, engine, logger, event_name):
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'GradsScalarHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
for name, p in self.model.named_parameters():
if p.grad is None:
continue
name = name.replace('.', '/')
logger.writer.add_scalar("grads_{}/{}".format(self.reduction.__name__, name),
self.reduction(p.grad),
global_step)
class GradsHistHandler(BaseWeightsHistHandler):
"""Helper handler to log model's gradients as histograms.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(trainer,
log_handler=GradsHistHandler(model),
event_name=Events.ITERATION_COMPLETED)
Args:
model (torch.nn.Module): model to log weights
"""
def __init__(self, model):
super(GradsHistHandler, self).__init__(model)
def __call__(self, engine, logger, event_name):
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'GradsHistHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
for name, p in self.model.named_parameters():
if p.grad is None:
continue
name = name.replace('.', '/')
logger.writer.add_histogram(tag="grads/{}".format(name),
values=p.grad.detach().cpu().numpy(),
global_step=global_step)
class TensorboardLogger(BaseLogger):
"""
TensorBoard handler to log metrics, model/optimizer parameters, gradients during the training and validation.
This class requires `tensorboardX <https://github.com/lanpa/tensorboardX>`_ package to be installed:
.. code-block:: bash
pip install tensorboardX
Args:
log_dir (str): path to the directory where to log.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log training loss at each iteration
tb_logger.attach(trainer,
log_handler=OutputHandler(tag="training", output_transform=lambda loss: {'loss': loss}),
event_name=Events.ITERATION_COMPLETED)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `another_engine=trainer` to take the epoch of the `trainer` instead of `train_evaluator`.
tb_logger.attach(train_evaluator,
log_handler=OutputHandler(tag="training",
metric_names=["nll", "accuracy"],
another_engine=trainer),
event_name=Events.EPOCH_COMPLETED)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `another_engine=trainer` to take the epoch of the `trainer` instead of `evaluator`.
tb_logger.attach(evaluator,
log_handler=OutputHandler(tag="validation",
metric_names=["nll", "accuracy"],
another_engine=trainer),
event_name=Events.EPOCH_COMPLETED)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
tb_logger.attach(trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED)
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(trainer,
log_handler=WeightsScalarHandler(model),
event_name=Events.ITERATION_COMPLETED)
# Attach the logger to the trainer to log model's weights as a histogram after each epoch
tb_logger.attach(trainer,
log_handler=WeightsHistHandler(model),
event_name=Events.EPOCH_COMPLETED)
# Attach the logger to the trainer to log model's gradients norm after each iteration
tb_logger.attach(trainer,
log_handler=GradsScalarHandler(model),
event_name=Events.ITERATION_COMPLETED)
# Attach the logger to the trainer to log model's gradients as a histogram after each epoch
tb_logger.attach(trainer,
log_handler=GradsHistHandler(model),
event_name=Events.EPOCH_COMPLETED)
# We need to close the logger with we are done
tb_logger.close()
It is also possible to use the logger as context manager:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
with TensorboardLogger(log_dir="experiments/tb_logs") as tb_logger:
trainer = Engine(update_fn)
# Attach the logger to the trainer to log training loss at each iteration
tb_logger.attach(trainer,
log_handler=OutputHandler(tag="training",
output_transform=lambda loss: {'loss': loss}),
event_name=Events.ITERATION_COMPLETED)
"""
def __init__(self, log_dir):
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
try:
from tensorboardX import SummaryWriter
except ImportError:
raise RuntimeError("This contrib module requires tensorboardX to be installed. "
"Please install it with command: \n pip install tensorboardX")
try:
self.writer = SummaryWriter(log_dir)
except TypeError as err:
if "type object got multiple values for keyword argument 'logdir'" == str(err):
self.writer = SummaryWriter(log_dir=log_dir)
warnings.warn('tensorboardX version < 1.7 will not be supported '
'after ignite 0.3.0; please upgrade',
DeprecationWarning)
else:
raise err
def close(self):
self.writer.close()
| 43.636156 | 120 | 0.618019 | [
"MIT"
] | kevinleewy/Human-Pose-Transfer | helper/custom_ignite_handlers/tensorboard_logger.py | 19,069 | Python |
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import glob
import hashlib
import json
import os
import random
import re
import shutil
import sys
import time
import unittest
from subprocess import PIPE, STDOUT
from functools import wraps
from textwrap import dedent
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner.py')
from tools.shared import run_js, run_process, try_delete
from tools.shared import NODE_JS, V8_ENGINE, JS_ENGINES, SPIDERMONKEY_ENGINE, PYTHON, EMCC, EMAR, WINDOWS, MACOS, AUTODEBUGGER, LLVM_ROOT
from tools import jsrun, shared, building
from runner import RunnerCore, path_from_root
from runner import skip_if, no_wasm_backend, no_fastcomp, needs_dlfcn, no_windows, no_asmjs, is_slow_test, create_test_file, parameterized
from runner import js_engines_modify, wasm_engines_modify, env_modify, with_env_modify
# decorators for limiting which modes a test can run in
def wasm_simd(f):
def decorated(self):
if not self.is_wasm_backend():
self.skipTest('wasm simd not compatible with asm.js or asm2wasm')
if not V8_ENGINE or V8_ENGINE not in JS_ENGINES:
self.skipTest('wasm simd only supported in d8 for now')
if self.is_wasm_backend() and not self.get_setting('WASM'):
self.skipTest('wasm2js only supports MVP for now')
self.emcc_args.append('-msimd128')
self.emcc_args.append('-fno-lax-vector-conversions')
with js_engines_modify([V8_ENGINE + ['--experimental-wasm-simd']]):
f(self)
return decorated
def bleeding_edge_wasm_backend(f):
def decorated(self):
if not self.is_wasm_backend():
self.skipTest('only works in wasm backend')
if not V8_ENGINE or V8_ENGINE not in JS_ENGINES:
self.skipTest('only works in d8 for now')
if self.is_wasm_backend() and not self.get_setting('WASM'):
self.skipTest('wasm2js only supports MVP for now')
with js_engines_modify([V8_ENGINE]):
f(self)
return decorated
def also_with_wasm_bigint(f):
def decorated(self):
self.set_setting('WASM_BIGINT', 0)
f(self)
if self.is_wasm_backend() and self.get_setting('WASM'):
self.set_setting('WASM_BIGINT', 1)
with js_engines_modify([NODE_JS + ['--experimental-wasm-bigint']]):
f(self)
return decorated
# without EMTEST_ALL_ENGINES set we only run tests in a single VM by
# default. in some tests we know that cross-VM differences may happen and
# so are worth testing, and they should be marked with this decorator
def all_engines(f):
def decorated(self):
old = self.use_all_engines
self.use_all_engines = True
try:
f(self)
finally:
self.use_all_engines = old
return decorated
# Tests exception handling in emscripten exception handling mode, and if
# possible, new wasm EH mode.
def with_both_exception_handling(f):
def decorated(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
f(self)
self.set_setting('DISABLE_EXCEPTION_CATCHING', 1)
# Wasm EH is currently supported only in wasm backend and V8
if self.is_wasm_backend() and V8_ENGINE and \
V8_ENGINE in JS_ENGINES and self.get_setting('WASM'):
self.emcc_args.append('-fwasm-exceptions')
with js_engines_modify([V8_ENGINE + ['--experimental-wasm-eh']]):
f(self)
return decorated
def no_wasm(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm', note)
return decorated
def no_wasm2js(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm2js', note)
return decorated
# Async wasm compilation can't work in some tests, they are set up synchronously
def sync(f):
assert callable(f)
def decorated(self):
if self.get_setting('WASM') or self.is_wasm_backend():
self.emcc_args += ['-s', 'WASM_ASYNC_COMPILATION=0'] # test is set up synchronously
f(self)
return decorated
def also_with_noderawfs(func):
def decorated(self):
orig_args = self.emcc_args[:]
func(self)
print('noderawfs')
self.emcc_args = orig_args + ['-s', 'NODERAWFS=1', '-DNODERAWFS']
with js_engines_modify([NODE_JS]):
func(self)
return decorated
def can_do_standalone(self):
return self.is_wasm_backend() and self.get_setting('WASM') and \
not self.get_setting('SAFE_STACK') and \
'-fsanitize=address' not in self.emcc_args
# Also run the test with -s STANDALONE. If we have wasm runtimes, also run in
# them (regardless we also check that the js+wasm combo works in js vms).
def also_with_standalone_wasm(func):
def decorated(self):
func(self)
# Standalone mode is only supported in the wasm backend, and not in all
# modes there.
if can_do_standalone(self):
print('standalone')
self.set_setting('STANDALONE_WASM', 1)
# we will not legalize the JS ffi interface, so we must use BigInt
# support in order for JS to have a chance to run this without trapping
# when it sees an i64 on the ffi.
self.set_setting('WASM_BIGINT', 1)
with js_engines_modify([NODE_JS + ['--experimental-wasm-bigint']]):
func(self)
return decorated
# Similar to also_with_standalone_wasm, but suitable for tests that cannot
# run in a wasm VM yet, as they are not 100% standalone. We can still
# run them with the JS code though.
def also_with_impure_standalone_wasm(func):
def decorated(self):
func(self)
# Standalone mode is only supported in the wasm backend, and not in all
# modes there.
if can_do_standalone(self):
print('standalone (impure; no wasm runtimes)')
self.set_setting('STANDALONE_WASM', 1)
# we will not legalize the JS ffi interface, so we must use BigInt
# support in order for JS to have a chance to run this without trapping
# when it sees an i64 on the ffi.
self.set_setting('WASM_BIGINT', 1)
with wasm_engines_modify([]):
with js_engines_modify([NODE_JS + ['--experimental-wasm-bigint']]):
func(self)
return decorated
# Similar to also_with_standalone_wasm, but suitable for tests that can *only*
# run in a wasm VM, or in non-standalone mode, but not in standalone mode with
# our JS.
def also_with_only_standalone_wasm(func):
def decorated(self):
func(self)
# Standalone mode is only supported in the wasm backend, and not in all
# modes there.
if can_do_standalone(self):
print('standalone (only; no js runtimes)')
self.set_setting('STANDALONE_WASM', 1)
with js_engines_modify([]):
func(self)
return decorated
def node_pthreads(f):
def decorated(self):
self.set_setting('USE_PTHREADS', 1)
if not self.is_wasm_backend():
self.skipTest('node pthreads only supported on wasm backend')
if not self.get_setting('WASM'):
self.skipTest("pthreads doesn't work in non-wasm yet")
if '-fsanitize=address' in self.emcc_args:
self.skipTest('asan ends up using atomics that are not yet supported in node 12')
with js_engines_modify([NODE_JS + ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']]):
f(self)
return decorated
# A simple check whether the compiler arguments cause optimization.
def is_optimizing(args):
return '-O' in str(args) and '-O0' not in args
def no_optimize(note=''):
assert not callable(note)
def decorator(func):
assert callable(func)
def decorated(self):
if is_optimizing(self.emcc_args):
self.skipTest(note)
func(self)
return decorated
return decorator
def needs_make(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip('Tool not available on Windows bots (%s)' % note)
return lambda f: f
def no_asan(note):
assert not callable(note)
def decorator(f):
assert callable(f)
@wraps(f)
def decorated(self, *args, **kwargs):
if '-fsanitize=address' in self.emcc_args:
self.skipTest(note)
f(self, *args, **kwargs)
return decorated
return decorator
def no_lsan(note):
assert not callable(note)
def decorator(f):
assert callable(f)
@wraps(f)
def decorated(self, *args, **kwargs):
if '-fsanitize=leak' in self.emcc_args:
self.skipTest(note)
f(self, *args, **kwargs)
return decorated
return decorator
def no_minimal_runtime(note):
assert not callable(note)
def decorator(f):
assert callable(f)
@wraps(f)
def decorated(self, *args, **kwargs):
if 'MINIMAL_RUNTIME=1' in self.emcc_args or self.get_setting('MINIMAL_RUNTIME'):
self.skipTest(note)
f(self, *args, **kwargs)
return decorated
return decorator
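# Illustrative sketch (not a real test): how the mode-limiting decorators above
# are typically stacked on a test method. The test name and data file here are
# hypothetical, so the snippet is left commented out.
#
# class ExampleCases(RunnerCore):
#   @no_asan('example: intentionally reads out-of-bounds memory')
#   @also_with_standalone_wasm
#   def test_example(self):
#     self.do_run_in_out_file_test('tests', 'core', 'test_example')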
class TestCoreBase(RunnerCore):
def is_wasm2js(self):
return self.is_wasm_backend() and not self.get_setting('WASM')
# whether the test mode supports duplicate function elimination in js
def supports_js_dfe(self):
# wasm does this when optimizing anyhow, and the wasm backend always
# optimizes the wasm even if it does wasm2js later
if self.is_wasm() or self.is_wasm_backend():
return False
supported_opt_levels = ['-O2', '-O3', '-Oz', '-Os']
for opt_level in supported_opt_levels:
if opt_level in self.emcc_args:
return True
return False
# Use closure in some tests for some additional coverage
def maybe_closure(self):
if '-g' not in self.emcc_args and ('-O2' in self.emcc_args or '-Os' in self.emcc_args):
self.emcc_args += ['--closure', '1']
return True
return False
def verify_in_strict_mode(self, filename):
with open(filename) as infile:
js = infile.read()
filename += '.strict.js'
with open(filename, 'w') as outfile:
outfile.write('"use strict";\n' + js)
run_js(filename)
def get_bullet_library(self, use_cmake):
if use_cmake:
configure_commands = ['cmake', '.']
configure_args = ['-DBUILD_DEMOS=OFF', '-DBUILD_EXTRAS=OFF', '-DUSE_GLUT=OFF']
# Depending on whether 'configure' or 'cmake' is used to build, Bullet
# places output files in different directory structures.
generated_libs = [os.path.join('src', 'BulletDynamics', 'libBulletDynamics.a'),
os.path.join('src', 'BulletCollision', 'libBulletCollision.a'),
os.path.join('src', 'LinearMath', 'libLinearMath.a')]
else:
configure_commands = ['sh', './configure']
# Force a nondefault --host= so that the configure script will interpret
# that we are doing cross-compilation
# and skip attempting to run the generated executable with './a.out',
# which would fail since we are building a .js file.
configure_args = ['--disable-shared', '--host=i686-pc-linux-gnu',
'--disable-demos', '--disable-dependency-tracking']
generated_libs = [os.path.join('src', '.libs', 'libBulletDynamics.a'),
os.path.join('src', '.libs', 'libBulletCollision.a'),
os.path.join('src', '.libs', 'libLinearMath.a')]
return self.get_library(os.path.join('third_party', 'bullet'), generated_libs,
configure=configure_commands,
configure_args=configure_args,
cache_name_extra=configure_commands[0])
@also_with_standalone_wasm
def test_hello_world(self):
self.do_run_in_out_file_test('tests', 'core', 'test_hello_world')
# must not emit this unneeded internal thing
self.assertNotContained('EMSCRIPTEN_GENERATED_FUNCTIONS', open('src.c.o.js').read())
@sync
def test_wasm_synchronous_compilation(self):
self.set_setting('STRICT_JS')
self.do_run_in_out_file_test('tests', 'core', 'test_hello_world')
def test_intvars(self):
self.do_run_in_out_file_test('tests', 'core', 'test_intvars')
def test_sintvars(self):
self.do_run_in_out_file_test('tests', 'core', 'test_sintvars',
force_c=True)
def test_int53(self):
self.emcc_args += ['-s', 'DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=[$convertI32PairToI53,$convertU32PairToI53,$readI53FromU64,$readI53FromI64,$writeI53ToI64,$writeI53ToI64Clamped,$writeI53ToU64Clamped,$writeI53ToI64Signaling,$writeI53ToU64Signaling]']
if not self.is_wasm_backend():
self.emcc_args += ['-s', 'BINARYEN_TRAP_MODE=js']
self.do_run_in_out_file_test('tests', 'core', 'test_int53')
def test_i64(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64')
def test_i64_2(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_2')
def test_i64_3(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_3')
def test_i64_4(self):
# stuff that also needs sign corrections
self.do_run_in_out_file_test('tests', 'core', 'test_i64_4')
def test_i64_b(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_b')
def test_i64_cmp(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_cmp')
def test_i64_cmp2(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_cmp2')
def test_i64_double(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_double')
def test_i64_umul(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_umul')
@also_with_standalone_wasm
def test_i64_precise(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_precise')
def test_i64_precise_needed(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_precise_needed')
def test_i64_llabs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_llabs')
def test_i64_zextneg(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_zextneg')
def test_i64_7z(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_i64_7z',
args=['hallo'])
def test_i64_i16(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_i16')
def test_i64_qdouble(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_qdouble')
def test_i64_varargs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i64_varargs',
args='waka fleefl asdfasdfasdfasdf'
.split(' '))
@no_fastcomp('wasm bigint')
@no_wasm2js('wasm_bigint')
def test_i64_invoke_bigint(self):
self.set_setting('WASM_BIGINT', 1)
self.emcc_args += ['-fexceptions']
self.do_run_in_out_file_test('tests', 'core', 'test_i64_invoke_bigint',
js_engines=[NODE_JS + ['--experimental-wasm-bigint']])
def test_vararg_copy(self):
self.do_run_in_out_file_test('tests', 'va_arg', 'test_va_copy')
def test_llvm_fabs(self):
self.set_setting('PRECISE_F32', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_llvm_fabs')
def test_double_varargs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_double_varargs')
def test_trivial_struct_varargs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_trivial_struct_varargs')
def test_struct_varargs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_struct_varargs')
def test_zero_struct_varargs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_zero_struct_varargs')
def zzztest_nested_struct_varargs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_nested_struct_varargs')
def test_i32_mul_precise(self):
self.do_run_in_out_file_test('tests', 'core', 'test_i32_mul_precise')
def test_i16_emcc_intrinsic(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_i16_emcc_intrinsic')
def test_double_i64_conversion(self):
self.do_run_in_out_file_test('tests', 'core', 'test_double_i64_conversion')
def test_float32_precise(self):
self.set_setting('PRECISE_F32', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_float32_precise')
def test_negative_zero(self):
self.do_run_in_out_file_test('tests', 'core', 'test_negative_zero')
def test_line_endings(self):
self.build(open(path_from_root('tests', 'hello_world.cpp')).read(), self.get_dir(), 'hello_world.cpp')
def test_literal_negative_zero(self):
self.do_run_in_out_file_test('tests', 'core', 'test_literal_negative_zero')
@no_wasm_backend('test uses calls to expected js imports, rather than using llvm intrinsics directly')
def test_llvm_intrinsics(self):
self.do_run_in_out_file_test('tests', 'core', 'test_llvm_intrinsics')
@no_wasm_backend('test looks for js impls of intrinsics')
def test_lower_intrinsics(self):
self.emcc_args += ['-g1']
self.do_run_in_out_file_test('tests', 'core', 'test_lower_intrinsics')
# intrinsics should be lowered out
js = open('src.c.o.js').read()
assert ('llvm_' not in js) == is_optimizing(self.emcc_args) or not self.is_wasm(), 'intrinsics must be lowered when optimizing'
@also_with_standalone_wasm
def test_bswap64(self):
self.do_run_in_out_file_test('tests', 'core', 'test_bswap64')
@no_wasm_backend('uses EMULATED_FUNCTION_POINTERS')
def test_bswap64_emulate_fps(self):
# extra coverages
for emulate_casts in [0, 1]:
for emulate_fps in [0, 1, 2]:
print(emulate_casts, emulate_fps)
self.set_setting('EMULATE_FUNCTION_POINTER_CASTS', emulate_casts)
self.set_setting('EMULATED_FUNCTION_POINTERS', emulate_fps)
self.do_run_in_out_file_test('tests', 'core', 'test_bswap64')
def test_sha1(self):
self.do_run(open(path_from_root('tests', 'sha1.c')).read(), 'SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6')
@no_wasm_backend('test checks that __asmjs__ is #defined')
def test_asmjs_unknown_emscripten(self):
# No other configuration is supported, so always run this.
self.do_run(open(path_from_root('tests', 'asmjs-unknown-emscripten.c')).read(), '')
def test_cube2md5(self):
self.emcc_args += ['--embed-file', 'cube2md5.txt']
shutil.copyfile(path_from_root('tests', 'cube2md5.txt'), 'cube2md5.txt')
self.do_run(open(path_from_root('tests', 'cube2md5.cpp')).read(), open(path_from_root('tests', 'cube2md5.ok')).read(), assert_returncode=None)
@also_with_standalone_wasm
@needs_make('make')
def test_cube2hash(self):
# A good test of i64 math
self.do_run('', 'Usage: hashstring <seed>',
libraries=self.get_library('cube2hash', ['cube2hash.bc'], configure=None),
includes=[path_from_root('tests', 'cube2hash')], assert_returncode=None)
for text, output in [('fleefl', '892BDB6FD3F62E863D63DA55851700FDE3ACF30204798CE9'),
('fleefl2', 'AA2CC5F96FC9D540CA24FDAF1F71E2942753DB83E8A81B61'),
('64bitisslow', '64D8470573635EC354FEE7B7F87C566FCAF1EFB491041670')]:
self.do_run('src.cpp.o.js', 'hash value: ' + output, [text], no_build=True, assert_returncode=None)
def test_unaligned(self):
self.skipTest('LLVM marks the reads of s as fully aligned, making this test invalid')
src = r'''
#include <stdio.h>
struct S {
double x;
int y;
};
int main() {
// the 64-bit value here will not be 8-byte aligned
S s0[3] = { {0x12a751f430142, 22}, {0x17a5c85bad144, 98}, {1, 1}};
char buffer[10*sizeof(S)];
int b = int(buffer);
S *s = (S*)(b + 4-b%8);
s[0] = s0[0];
s[1] = s0[1];
s[2] = s0[2];
printf("*%d : %d : %d\n", sizeof(S), ((unsigned int)&s[0]) % 8 != ((unsigned int)&s[1]) % 8,
((unsigned int)&s[1]) - ((unsigned int)&s[0]));
s[0].x++;
s[0].y++;
s[1].x++;
s[1].y++;
printf("%.1f,%d,%.1f,%d\n", s[0].x, s[0].y, s[1].x, s[1].y);
return 0;
}
'''
# TODO: A version of this with int64s as well
self.do_run(src, '*12 : 1 : 12\n328157500735811.0,23,416012775903557.0,99\n')
return # TODO: continue to the next part here
# Test for undefined behavior in C. This is not legitimate code, but does exist
src = r'''
#include <stdio.h>
int main()
{
int x[10];
char *p = (char*)&x[0];
p++;
short *q = (short*)p;
*q = 300;
printf("*%d:%d*\n", *q, ((int)q)%2);
int *r = (int*)p;
*r = 515559;
printf("*%d*\n", *r);
long long *t = (long long*)p;
*t = 42949672960;
printf("*%lld*\n", *t);
return 0;
}
'''
try:
self.do_run(src, '*300:1*\n*515559*\n*42949672960*\n')
except Exception as e:
assert 'must be aligned' in str(e), e # expected to fail without emulation
def test_align64(self):
src = r'''
#include <stdio.h>
// inspired by poppler
enum Type {
A = 10,
B = 20
};
struct Object {
Type type;
union {
int intg;
double real;
char *name;
};
};
struct Principal {
double x;
Object a;
double y;
};
int main(int argc, char **argv)
{
int base = argc-1;
Object *o = NULL;
printf("%d,%d\n", sizeof(Object), sizeof(Principal));
printf("%d,%d,%d,%d\n", (int)&o[base].type, (int)&o[base].intg, (int)&o[base].real, (int)&o[base].name);
printf("%d,%d,%d,%d\n", (int)&o[base+1].type, (int)&o[base+1].intg, (int)&o[base+1].real, (int)&o[base+1].name);
Principal p, q;
p.x = p.y = q.x = q.y = 0;
p.a.type = A;
p.a.real = 123.456;
*(&q.a) = p.a;
printf("%.2f,%d,%.2f,%.2f : %.2f,%d,%.2f,%.2f\n", p.x, p.a.type, p.a.real, p.y, q.x, q.a.type, q.a.real, q.y);
return 0;
}
'''
self.do_run(src, '''16,32
0,8,8,8
16,24,24,24
0.00,10,123.46,0.00 : 0.00,10,123.46,0.00
''')
@no_asan('asan errors on corner cases we check')
def test_aligned_alloc(self):
self.do_run(open(path_from_root('tests', 'test_aligned_alloc.c')).read(), '', assert_returncode=0)
def test_unsigned(self):
src = '''
#include <stdio.h>
const signed char cvals[2] = { -1, -2 }; // compiler can store this as a string, so -1 becomes \\FF, and needs re-signing
int main()
{
{
unsigned char x = 200;
printf("*%d*\\n", x);
unsigned char y = -22;
printf("*%d*\\n", y);
}
int varey = 100;
unsigned int MAXEY = -1, MAXEY2 = -77;
printf("*%u,%d,%u*\\n", MAXEY, varey >= MAXEY, MAXEY2); // 100 >= -1? not in unsigned!
int y = cvals[0];
printf("*%d,%d,%d,%d*\\n", cvals[0], cvals[0] < 0, y, y < 0);
y = cvals[1];
printf("*%d,%d,%d,%d*\\n", cvals[1], cvals[1] < 0, y, y < 0);
// zext issue - see mathop in jsifier
unsigned char x8 = -10;
unsigned long hold = 0;
hold += x8;
int y32 = hold+50;
printf("*%u,%u*\\n", hold, y32);
// Comparisons
x8 = 0;
for (int i = 0; i < 254; i++) x8++; // make it an actual 254 in JS - not a -2
printf("*%d,%d*\\n", x8+1 == 0xff, x8+1 != 0xff); // 0xff may be '-1' in the bitcode
return 0;
}
'''
self.do_run(src, '*4294967295,0,4294967219*\n*-1,1,-1,1*\n*-2,1,-2,1*\n*246,296*\n*1,0*')
self.emcc_args.append('-Wno-constant-conversion')
src = '''
#include <stdio.h>
int main()
{
{
unsigned char x;
unsigned char *y = &x;
*y = -1;
printf("*%d*\\n", x);
}
{
unsigned short x;
unsigned short *y = &x;
*y = -1;
printf("*%d*\\n", x);
}
/*{ // This case is not checked. The hint for unsignedness is just the %u in printf, and we do not analyze that
unsigned int x;
unsigned int *y = &x;
*y = -1;
printf("*%u*\\n", x);
}*/
{
char x;
char *y = &x;
*y = 255;
printf("*%d*\\n", x);
}
{
char x;
char *y = &x;
*y = 65535;
printf("*%d*\\n", x);
}
{
char x;
char *y = &x;
*y = 0xffffffff;
printf("*%d*\\n", x);
}
return 0;
}
'''
self.do_run(src, '*255*\n*65535*\n*-1*\n*-1*\n*-1*')
def test_bitfields(self):
self.do_run_in_out_file_test('tests', 'core', 'test_bitfields')
def test_floatvars(self):
self.do_run_in_out_file_test('tests', 'core', 'test_floatvars')
def test_closebitcasts(self):
self.do_run_in_out_file_test('tests', 'core', 'closebitcasts')
def test_fast_math(self):
self.emcc_args += ['-ffast-math']
self.do_run_in_out_file_test('tests', 'core', 'test_fast_math',
args=['5', '6', '8'])
def test_zerodiv(self):
self.do_run_in_out_file_test('tests', 'core', 'test_zerodiv')
def test_zero_multiplication(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_zero_multiplication')
def test_isnan(self):
self.do_run_in_out_file_test('tests', 'core', 'test_isnan')
def test_globaldoubles(self):
self.do_run_in_out_file_test('tests', 'core', 'test_globaldoubles')
def test_math(self):
self.do_run_in_out_file_test('tests', 'core', 'test_math')
def test_erf(self):
self.do_run_in_out_file_test('tests', 'core', 'test_erf')
def test_math_hyperbolic(self):
self.do_run_in_out_file_test('tests', 'core', 'test_math_hyperbolic')
def test_math_lgamma(self):
self.do_run_in_out_file_test('tests', 'math', 'lgamma', assert_returncode=None)
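# where the configuration allows it (fastcomp asm.js output without memory growth), also re-run as a MAIN_MODULE build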
if self.get_setting('ALLOW_MEMORY_GROWTH') == 0 and not self.is_wasm() and \
not self.is_wasm_backend():
print('main module')
self.set_setting('MAIN_MODULE', 1)
self.do_run_in_out_file_test('tests', 'math', 'lgamma', assert_returncode=None)
# Test that fmodf with -s PRECISE_F32=1 properly validates as asm.js (% operator cannot take in f32, only f64)
def test_math_fmodf(self):
self.do_run_in_out_file_test('tests', 'math', 'fmodf')
def test_frexp(self):
self.do_run_in_out_file_test('tests', 'core', 'test_frexp')
def test_rounding(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
for precise_f32 in [0, 1]:
print(precise_f32)
self.set_setting('PRECISE_F32', precise_f32)
self.do_run_in_out_file_test('tests', 'core', 'test_rounding')
def test_fcvt(self):
self.do_run_in_out_file_test('tests', 'core', 'test_fcvt')
def test_llrint(self):
self.do_run_in_out_file_test('tests', 'core', 'test_llrint')
def test_getgep(self):
# Generated code includes getelementptr (getelementptr, 0, 1), i.e., GEP as the first param to GEP
self.do_run_in_out_file_test('tests', 'core', 'test_getgep')
# No compiling from C/C++ - just process an existing .o/.ll/.bc file.
def do_run_object(self, obj_file, expected_output=None, **kwargs):
js_file = os.path.basename(obj_file) + '.js'
building.emcc(obj_file, self.get_emcc_args(), js_file)
self.do_run(js_file, expected_output, no_build=True, **kwargs)
def do_ll_run(self, filename, expected_output=None, **kwargs):
output_base = os.path.basename(filename)
objfile = self.prep_ll_file(output_base, filename)
self.do_run_object(objfile, expected_output, **kwargs)
def test_multiply_defined_symbols(self):
create_test_file('a1.c', 'int f() { return 1; }')
create_test_file('a2.c', 'void x() {}')
create_test_file('b1.c', 'int f() { return 2; }')
create_test_file('b2.c', 'void y() {}')
create_test_file('main.c', r'''
#include <stdio.h>
int f();
int main() {
printf("result: %d\n", f());
return 0;
}
''')
building.emcc('a1.c')
building.emcc('a2.c')
building.emcc('b1.c')
building.emcc('b2.c')
building.emcc('main.c')
building.emar('cr', 'liba.a', ['a1.c.o', 'a2.c.o'])
building.emar('cr', 'libb.a', ['b1.c.o', 'b2.c.o'])
building.link_to_object(['main.c.o', 'liba.a', 'libb.a'], 'all.o')
self.do_run_object('all.o', 'result: 1')
def test_if(self):
self.do_run_in_out_file_test('tests', 'core', 'test_if')
def test_if_else(self):
self.do_run_in_out_file_test('tests', 'core', 'test_if_else')
def test_loop(self):
self.do_run_in_out_file_test('tests', 'core', 'test_loop')
def test_stack(self):
self.set_setting('INLINING_LIMIT', 50)
self.do_run_in_out_file_test('tests', 'core', 'test_stack')
def test_stack_align(self):
src = path_from_root('tests', 'core', 'test_stack_align.cpp')
def test():
self.do_run(open(src).read(), ['''align 4: 0
align 8: 0
align 16: 0
align 32: 0
base align: 0, 0, 0, 0'''])
test()
@no_asan('stack size is too low for asan to work properly')
def test_stack_placement(self):
self.set_setting('TOTAL_STACK', 1024)
self.do_run_in_out_file_test('tests', 'core', 'test_stack_placement')
self.set_setting('GLOBAL_BASE', 102400)
self.do_run_in_out_file_test('tests', 'core', 'test_stack_placement')
@no_asan('asan does not support main modules')
@no_wasm2js('MAIN_MODULE support')
def test_stack_placement_pic(self):
if not self.is_wasm_backend() and self.get_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('memory growth is not compatible with MAIN_MODULE')
self.set_setting('TOTAL_STACK', 1024)
self.set_setting('MAIN_MODULE')
self.do_run_in_out_file_test('tests', 'core', 'test_stack_placement')
self.set_setting('GLOBAL_BASE', 102400)
self.do_run_in_out_file_test('tests', 'core', 'test_stack_placement')
def test_stack_restore(self):
if self.get_setting('WASM') or self.is_wasm_backend():
self.skipTest('generated code not available in wasm')
self.emcc_args += ['-g3'] # to be able to find the generated code
self.do_run_in_out_file_test('tests', 'core', 'test_stack_restore')
generated = open('src.c.o.js').read()
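# helper: slice the named function's body out of the generated JS and count how many times it restores STACKTOP from the saved sp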
def ensure_stack_restore_count(function_name, expected_count):
code = generated[generated.find(function_name):]
code = code[:code.find('\n}') + 2]
actual_count = code.count('STACKTOP = sp')
assert actual_count == expected_count, ('Expected %d stack restorations, got %d' % (expected_count, actual_count)) + ': ' + code
ensure_stack_restore_count('function _no_stack_usage', 0)
ensure_stack_restore_count('function _alloca_gets_restored', 1)
ensure_stack_restore_count('function _stack_usage', 1)
def test_strings(self):
self.do_run_in_out_file_test('tests', 'core', 'test_strings', args=['wowie', 'too', '74'])
def test_strcmp_uni(self):
self.do_run_in_out_file_test('tests', 'core', 'test_strcmp_uni')
def test_strndup(self):
self.do_run_in_out_file_test('tests', 'core', 'test_strndup')
def test_errar(self):
self.do_run_in_out_file_test('tests', 'core', 'test_errar')
def test_mainenv(self):
self.do_run_in_out_file_test('tests', 'core', 'test_mainenv')
def test_funcs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_funcs')
def test_structs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_structs')
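# source template shared by test_mallocstruct and test_newstruct below; {{gen_struct}} and {{del_struct}} are replaced with malloc/free or new/delete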
gen_struct_src = '''
#include <stdio.h>
#include <stdlib.h>
#include "emscripten.h"
struct S
{
int x, y;
};
int main()
{
S* a = {{gen_struct}};
a->x = 51; a->y = 62;
printf("*%d,%d*\\n", a->x, a->y);
{{del_struct}}(a);
return 0;
}
'''
def test_mallocstruct(self):
self.do_run(self.gen_struct_src.replace('{{gen_struct}}', '(S*)malloc(sizeof(S))').replace('{{del_struct}}', 'free'), '*51,62*')
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
@parameterized({
'normal': [],
'debug': ['-DEMMALLOC_DEBUG'],
'debug_log': ['-DEMMALLOC_DEBUG', '-DEMMALLOC_DEBUG_LOG', '-DRANDOM_ITERS=130'],
})
def test_emmalloc(self, *args):
# in newer clang+llvm, the internal calls to malloc in emmalloc may be optimized under
# the assumption that they are external, so like in system_libs.py where we build
# malloc, we need to disable builtin here too
self.set_setting('MALLOC', 'none')
self.emcc_args += ['-fno-builtin'] + list(args)
self.do_run(open(path_from_root('system', 'lib', 'emmalloc.cpp')).read() +
open(path_from_root('system', 'lib', 'sbrk.c')).read() +
open(path_from_root('tests', 'core', 'test_emmalloc.cpp')).read(),
open(path_from_root('tests', 'core', 'test_emmalloc.txt')).read())
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
def test_emmalloc_usable_size(self, *args):
self.set_setting('MALLOC', 'emmalloc')
self.emcc_args += list(args)
self.do_run_in_out_file_test('tests', 'core', 'test_malloc_usable_size')
@no_fastcomp('this feature works in fastcomp, but test outputs are sensitive to wasm backend')
@no_optimize('output is sensitive to optimization flags, so only test unoptimized builds')
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
def test_emmalloc_memory_statistics(self, *args):
self.set_setting('MALLOC', 'emmalloc')
self.emcc_args += ['-s', 'INITIAL_MEMORY=128MB', '-g'] + list(args)
self.do_run_in_out_file_test('tests', 'core', 'test_emmalloc_memory_statistics')
@no_fastcomp('this feature works in fastcomp, but test outputs are sensitive to wasm backend')
@no_optimize('output is sensitive to optimization flags, so only test unoptimized builds')
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
def test_emmalloc_trim(self, *args):
self.set_setting('MALLOC', 'emmalloc')
self.emcc_args += ['-s', 'INITIAL_MEMORY=128MB', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=2147418112'] + list(args)
self.do_run_in_out_file_test('tests', 'core', 'test_emmalloc_trim')
# Test case against https://github.com/emscripten-core/emscripten/issues/10363
def test_emmalloc_memalign_corruption(self, *args):
self.set_setting('MALLOC', 'emmalloc')
self.do_run_in_out_file_test('tests', 'core', 'emmalloc_memalign_corruption')
def test_newstruct(self):
self.do_run(self.gen_struct_src.replace('{{gen_struct}}', 'new S').replace('{{del_struct}}', 'delete'), '*51,62*')
def test_addr_of_stacked(self):
self.do_run_in_out_file_test('tests', 'core', 'test_addr_of_stacked')
def test_globals(self):
self.do_run_in_out_file_test('tests', 'core', 'test_globals')
def test_linked_list(self):
self.do_run_in_out_file_test('tests', 'core', 'test_linked_list')
def test_sup(self):
src = '''
#include <stdio.h>
struct S4 { int x; }; // size: 4
struct S4_2 { short x, y; }; // size: 4, but for alignment purposes, 2
struct S6 { short x, y, z; }; // size: 6
struct S6w { char x[6]; }; // size: 6 also
struct S6z { int x; short y; }; // size: 8, since we align to a multiple of the biggest - 4
struct C___ { S6 a, b, c; int later; };
struct Carr { S6 a[3]; int later; }; // essentially the same, but differently defined
struct C__w { S6 a; S6w b; S6 c; int later; }; // same size, different struct
struct Cp1_ { int pre; short a; S6 b, c; int later; }; // fillers for a
struct Cp2_ { int a; short pre; S6 b, c; int later; }; // fillers for a (get addr of the other filler)
struct Cint { S6 a; int b; S6 c; int later; }; // An int (different size) for b
struct C4__ { S6 a; S4 b; S6 c; int later; }; // Same size as int from before, but a struct
struct C4_2 { S6 a; S4_2 b; S6 c; int later; }; // Same size as int from before, but a struct with max element size 2
struct C__z { S6 a; S6z b; S6 c; int later; }; // different size, 8 instead of 6
int main()
{
#define TEST(struc) \\
{ \\
struc *s = 0; \\
printf("*%s: %d,%d,%d,%d<%d*\\n", #struc, (int)&(s->a), (int)&(s->b), (int)&(s->c), (int)&(s->later), sizeof(struc)); \\
}
#define TEST_ARR(struc) \\
{ \\
struc *s = 0; \\
printf("*%s: %d,%d,%d,%d<%d*\\n", #struc, (int)&(s->a[0]), (int)&(s->a[1]), (int)&(s->a[2]), (int)&(s->later), sizeof(struc)); \\
}
printf("sizeofs:%d,%d\\n", sizeof(S6), sizeof(S6z));
TEST(C___);
TEST_ARR(Carr);
TEST(C__w);
TEST(Cp1_);
TEST(Cp2_);
TEST(Cint);
TEST(C4__);
TEST(C4_2);
TEST(C__z);
return 0;
}
'''
self.do_run(src, 'sizeofs:6,8\n*C___: 0,6,12,20<24*\n*Carr: 0,6,12,20<24*\n*C__w: 0,6,12,20<24*\n*Cp1_: 4,6,12,20<24*\n*Cp2_: 0,6,12,20<24*\n*Cint: 0,8,12,20<24*\n*C4__: 0,8,12,20<24*\n*C4_2: 0,6,10,16<20*\n*C__z: 0,8,16,24<28*')
@also_with_standalone_wasm
def test_assert(self):
self.do_run_in_out_file_test('tests', 'core', 'test_assert', assert_returncode=None)
def test_wcslen(self):
self.do_run_in_out_file_test('tests', 'core', 'test_wcslen')
def test_regex(self):
self.do_run_in_out_file_test('tests', 'core', 'test_regex')
def test_longjmp(self):
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp')
def test_longjmp2(self):
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp2')
@needs_dlfcn
def test_longjmp2_main_module(self):
# Test for binaryen regression:
# https://github.com/WebAssembly/binaryen/issues/2180
self.set_setting('MAIN_MODULE')
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp2')
def test_longjmp3(self):
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp3')
def test_longjmp4(self):
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp4')
def test_longjmp_funcptr(self):
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_funcptr')
def test_longjmp_repeat(self):
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_repeat')
def test_longjmp_stacked(self):
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_stacked', assert_returncode=None)
def test_longjmp_exc(self):
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_exc', assert_returncode=None)
def test_longjmp_throw(self):
for disable_throw in [0, 1]:
print(disable_throw)
self.set_setting('DISABLE_EXCEPTION_CATCHING', disable_throw)
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_throw')
def test_longjmp_unwind(self):
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_unwind', assert_returncode=None)
def test_longjmp_i64(self):
self.emcc_args += ['-g']
self.do_run_in_out_file_test('tests', 'core', 'test_longjmp_i64', assert_returncode=None)
def test_siglongjmp(self):
self.do_run_in_out_file_test('tests', 'core', 'test_siglongjmp')
def test_setjmp_many(self):
src = r'''
#include <stdio.h>
#include <setjmp.h>
int main(int argc, char** argv) {
jmp_buf buf;
for (int i = 0; i < NUM; i++) printf("%d\n", setjmp(buf));
if (argc-- == 1131) longjmp(buf, 11);
return 0;
}
'''
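# run the same setjmp call site an increasing number of times; the longjmp is never taken, so each setjmp returns 0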
for num in [1, 5, 20, 1000]:
print('NUM=%d' % num)
self.do_run(src.replace('NUM', str(num)), '0\n' * num)
def test_setjmp_many_2(self):
src = r'''
#include <setjmp.h>
#include <stdio.h>
jmp_buf env;
void luaWork(int d){
int x;
printf("d is at %d\n", d);
longjmp(env, 1);
}
int main()
{
const int ITERATIONS=25;
for(int i = 0; i < ITERATIONS; i++){
if(!setjmp(env)){
luaWork(i);
}
}
return 0;
}
'''
self.do_run(src, r'''d is at 24''')
def test_setjmp_noleak(self):
src = r'''
#include <setjmp.h>
#include <stdio.h>
#include <assert.h>
jmp_buf env;
void luaWork(int d){
int x;
printf("d is at %d\n", d);
longjmp(env, 1);
}
#include <malloc.h>
#include <stdlib.h>
void dump() {
struct mallinfo m = mallinfo();
printf("dump: %d , %d\n", m.arena, m.uordblks);
}
void work(int n)
{
printf("work %d\n", n);
dump();
if(!setjmp(env)){
luaWork(n);
}
if (n > 0) work(n-1);
}
int main() {
struct mallinfo m1 = mallinfo();
dump();
work(10);
dump();
struct mallinfo m2 = mallinfo();
assert(m1.uordblks == m2.uordblks);
printf("ok.\n");
}
'''
self.do_run(src, r'''ok.''')
@with_both_exception_handling
def test_exceptions(self):
self.set_setting('EXCEPTION_DEBUG', 1)
self.maybe_closure()
for support_longjmp in [0, 1]:
self.set_setting('SUPPORT_LONGJMP', support_longjmp)
self.do_run_from_file(path_from_root('tests', 'core', 'test_exceptions.cpp'), path_from_root('tests', 'core', 'test_exceptions_caught.out'))
def test_exceptions_off(self):
for support_longjmp in [0, 1]:
self.set_setting('DISABLE_EXCEPTION_CATCHING', 1)
self.do_run_from_file(path_from_root('tests', 'core', 'test_exceptions.cpp'), path_from_root('tests', 'core', 'test_exceptions_uncaught.out'), assert_returncode=None)
def test_exceptions_minimal_runtime(self):
self.set_setting('EXCEPTION_DEBUG', 1)
self.maybe_closure()
self.set_setting('MINIMAL_RUNTIME', 1)
for support_longjmp in [0, 1]:
self.set_setting('SUPPORT_LONGJMP', support_longjmp)
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
self.do_run_from_file(path_from_root('tests', 'core', 'test_exceptions.cpp'), path_from_root('tests', 'core', 'test_exceptions_caught.out'))
self.set_setting('DISABLE_EXCEPTION_CATCHING', 1)
self.do_run_from_file(path_from_root('tests', 'core', 'test_exceptions.cpp'), path_from_root('tests', 'core', 'test_exceptions_uncaught.out'), assert_returncode=None)
@with_both_exception_handling
def test_exceptions_custom(self):
self.set_setting('EXCEPTION_DEBUG', 1)
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.maybe_closure()
src = '''
#include <iostream>
class MyException
{
public:
MyException(){ std::cout << "Construct..."; }
MyException( const MyException & ) { std::cout << "Copy..."; }
~MyException(){ std::cout << "Destruct..."; }
};
int function()
{
std::cout << "Throw...";
throw MyException();
}
int function2()
{
return function();
}
int main()
{
try
{
function2();
}
catch (MyException & e)
{
std::cout << "Caught...";
}
try
{
function2();
}
catch (MyException e)
{
std::cout << "Caught...";
}
return 0;
}
'''
self.do_run(src, 'Throw...Construct...Caught...Destruct...Throw...Construct...Copy...Caught...Destruct...Destruct...')
@with_both_exception_handling
def test_exceptions_2(self):
for safe in [0, 1]:
print(safe)
if safe and '-fsanitize=address' in self.emcc_args:
# Can't use safe heap with ASan
continue
self.set_setting('SAFE_HEAP', safe)
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_2')
@with_both_exception_handling
def test_exceptions_3(self):
src = r'''
#include <iostream>
#include <stdexcept>
int main(int argc, char **argv)
{
if (argc != 2) {
std::cout << "need an arg" << std::endl;
return 1;
}
int arg = argv[1][0] - '0';
try {
if (arg == 0) throw "a c string";
if (arg == 1) throw std::exception();
if (arg == 2) throw std::runtime_error("Hello");
} catch(const char * ex) {
std::cout << "Caught C string: " << ex << std::endl;
} catch(const std::exception &ex) {
std::cout << "Caught exception: " << ex.what() << std::endl;
} catch(...) {
std::cout << "Caught something else" << std::endl;
}
std::cout << "Done.\n";
}
'''
print('0')
self.do_run(src, 'Caught C string: a c string\nDone.', ['0'])
print('1')
self.do_run(None, 'Caught exception: std::exception\nDone.', ['1'], no_build=True)
print('2')
self.do_run(None, 'Caught exception: Hello\nDone.', ['2'], no_build=True)
def test_exceptions_white_list(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING', 2)
# Wasm does not add an underscore to function names. For wasm, the
# mismatches are fixed by the fixImports() function in the JS glue code.
if not self.is_wasm_backend():
self.set_setting('EXCEPTION_CATCHING_WHITELIST', ["__Z12somefunctionv"])
else:
self.set_setting('EXCEPTION_CATCHING_WHITELIST', ["_Z12somefunctionv"])
# otherwise it is inlined and not identified
self.set_setting('INLINING_LIMIT', 50)
test_path = path_from_root('tests', 'core', 'test_exceptions_white_list')
src, output = (test_path + s for s in ('.cpp', '.out'))
self.do_run_from_file(src, output)
size = len(open('src.cpp.o.js').read())
shutil.copyfile('src.cpp.o.js', 'orig.js')
# check that an empty whitelist works properly (as in, same as exceptions disabled)
empty_output = path_from_root('tests', 'core', 'test_exceptions_white_list_empty.out')
self.set_setting('EXCEPTION_CATCHING_WHITELIST', [])
self.do_run_from_file(src, empty_output, assert_returncode=None)
empty_size = len(open('src.cpp.o.js').read())
shutil.copyfile('src.cpp.o.js', 'empty.js')
self.set_setting('EXCEPTION_CATCHING_WHITELIST', ['fake'])
self.do_run_from_file(src, empty_output, assert_returncode=None)
fake_size = len(open('src.cpp.o.js').read())
shutil.copyfile('src.cpp.o.js', 'fake.js')
self.set_setting('DISABLE_EXCEPTION_CATCHING', 1)
self.do_run_from_file(src, empty_output, assert_returncode=None)
disabled_size = len(open('src.cpp.o.js').read())
shutil.copyfile('src.cpp.o.js', 'disabled.js')
if not self.is_wasm():
print(size, empty_size, fake_size, disabled_size)
assert empty_size == fake_size, [empty_size, fake_size]
# big change when we disable exception catching of the function
assert size - empty_size > 0.01 * size, [empty_size, size]
# full disable can remove a little bit more
assert empty_size >= disabled_size, [empty_size, disabled_size]
def test_exceptions_white_list_2(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING', 2)
# Wasm does not add an underscore to function names. For wasm, the
# mismatches are fixed by the fixImports() function in the JS glue code.
if not self.is_wasm_backend():
self.set_setting('EXCEPTION_CATCHING_WHITELIST', ["_main"])
else:
self.set_setting('EXCEPTION_CATCHING_WHITELIST', ["main"])
# otherwise it is inlined and not identified
self.set_setting('INLINING_LIMIT', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_white_list_2')
def test_exceptions_white_list_uncaught(self):
self.emcc_args += ['-std=c++11']
self.set_setting('DISABLE_EXCEPTION_CATCHING', 2)
# Wasm does not add an underscore to function names. For wasm, the
# mismatches are fixed by the fixImports() function in the JS glue code.
if not self.is_wasm_backend():
self.set_setting('EXCEPTION_CATCHING_WHITELIST', ["__Z4testv"])
else:
self.set_setting('EXCEPTION_CATCHING_WHITELIST', ["_Z4testv"])
# otherwise it is inlined and not identified
self.set_setting('INLINING_LIMIT', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_white_list_uncaught')
@with_both_exception_handling
def test_exceptions_uncaught(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
src = r'''
#include <stdio.h>
#include <exception>
struct X {
~X() {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
}
};
int main() {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
try {
X x;
throw 1;
} catch(...) {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
}
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
return 0;
}
'''
self.do_run(src, 'exception? no\nexception? yes\nexception? no\nexception? no\n')
src = r'''
#include <fstream>
#include <iostream>
int main() {
std::ofstream os("test");
os << std::unitbuf << "foo"; // trigger a call to std::uncaught_exception from
// std::basic_ostream::sentry::~sentry
std::cout << "success";
}
'''
self.do_run(src, 'success')
@with_both_exception_handling
def test_exceptions_uncaught_2(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
src = r'''
#include <iostream>
#include <exception>
int main() {
try {
throw std::exception();
} catch(std::exception) {
try {
throw;
} catch(std::exception) {}
}
if (std::uncaught_exception())
std::cout << "ERROR: uncaught_exception still set.";
else
std::cout << "OK";
}
'''
self.do_run(src, 'OK\n')
@with_both_exception_handling
def test_exceptions_typed(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.emcc_args += ['-s', 'SAFE_HEAP=0'] # Throwing null will cause an ignorable null pointer access.
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_typed')
@with_both_exception_handling
def test_exceptions_virtual_inheritance(self):
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_virtual_inheritance')
@with_both_exception_handling
def test_exceptions_convert(self):
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_convert')
# TODO Make setjmp-longjmp also use Wasm exception handling
@with_both_exception_handling
def test_exceptions_multi(self):
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_multi')
@with_both_exception_handling
def test_exceptions_std(self):
self.emcc_args += ['-s', 'SAFE_HEAP=0']
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_std')
@with_both_exception_handling
def test_exceptions_alias(self):
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_alias')
@with_both_exception_handling
def test_exceptions_rethrow(self):
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_rethrow')
@with_both_exception_handling
def test_exceptions_resume(self):
self.set_setting('EXCEPTION_DEBUG', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_resume')
@with_both_exception_handling
def test_exceptions_destroy_virtual(self):
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_destroy_virtual')
@with_both_exception_handling
def test_exceptions_refcount(self):
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_refcount')
@with_both_exception_handling
def test_exceptions_primary(self):
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_primary')
@with_both_exception_handling
def test_exceptions_simplify_cfg(self):
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_simplify_cfg')
@with_both_exception_handling
def test_exceptions_libcxx(self):
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_libcxx')
@with_both_exception_handling
def test_exceptions_multiple_inherit(self):
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_multiple_inherit')
@with_both_exception_handling
def test_exceptions_multiple_inherit_rethrow(self):
self.do_run_in_out_file_test('tests', 'core', 'test_exceptions_multiple_inherit_rethrow')
@with_both_exception_handling
def test_bad_typeid(self):
self.do_run(r'''
// exception example
#include <iostream> // std::cerr
#include <typeinfo> // operator typeid
#include <exception> // std::exception
class Polymorphic {virtual void member(){}};
int main () {
try
{
Polymorphic * pb = 0;
const std::type_info& ti = typeid(*pb); // throws a bad_typeid exception
}
catch (std::exception& e)
{
std::cerr << "exception caught: " << e.what() << '\n';
}
return 0;
}
''', 'exception caught: std::bad_typeid')
def test_iostream_ctors(self):
# iostream stuff must be globally constructed before user global
# constructors, so iostream works in global constructors
self.do_run(r'''
#include <iostream>
struct A {
A() { std::cout << "bug"; }
};
A a;
int main() {
std::cout << "free code" << std::endl;
return 0;
}
''', 'bugfree code')
# Marked as impure since the WASI reactor modules (modules without main)
# are not yet supported by the wasm engines we test against.
@also_with_impure_standalone_wasm
def test_ctors_no_main(self):
self.emcc_args.append('--no-entry')
self.do_run_in_out_file_test('tests', 'core', 'test_ctors_no_main')
def test_class(self):
self.do_run_in_out_file_test('tests', 'core', 'test_class')
def test_inherit(self):
self.do_run_in_out_file_test('tests', 'core', 'test_inherit')
def test_isdigit_l(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_isdigit_l')
def test_iswdigit(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_iswdigit')
def test_polymorph(self):
self.do_run_in_out_file_test('tests', 'core', 'test_polymorph')
def test_complex(self):
self.do_run_in_out_file_test('tests', 'core', 'test_complex')
def test_float_builtins(self):
# tests wasm_libc_rt
if not self.is_wasm_backend():
self.skipTest('no __builtin_fmin support in JSBackend')
self.do_run_in_out_file_test('tests', 'core', 'test_float_builtins')
@no_asan('SAFE_HEAP cannot be used with ASan')
def test_segfault(self):
self.set_setting('SAFE_HEAP', 1)
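# '0' calls through a null pointer, which SAFE_HEAP reports as a segmentation fault; 'new D2()' is a valid object and prints normally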
for addr in ['0', 'new D2()']:
print(addr)
src = r'''
#include <stdio.h>
struct Classey {
virtual void doIt() = 0;
};
struct D1 : Classey {
virtual void doIt() { printf("fleefl\n"); }
};
struct D2 : Classey {
virtual void doIt() { printf("marfoosh\n"); }
};
int main(int argc, char **argv)
{
Classey *p = argc == 100 ? new D1() : (Classey*)%s;
p->doIt();
return 0;
}
''' % addr
self.do_run(src, 'segmentation fault' if addr.isdigit() else 'marfoosh', assert_returncode=None)
def test_dynamic_cast(self):
self.do_run_in_out_file_test('tests', 'core', 'test_dynamic_cast')
def test_dynamic_cast_b(self):
self.do_run_in_out_file_test('tests', 'core', 'test_dynamic_cast_b')
def test_dynamic_cast_2(self):
self.do_run_in_out_file_test('tests', 'core', 'test_dynamic_cast_2')
def test_funcptr(self):
self.do_run_in_out_file_test('tests', 'core', 'test_funcptr')
def test_mathfuncptr(self):
self.do_run_in_out_file_test('tests', 'core', 'test_mathfuncptr')
def test_funcptrfunc(self):
self.do_run_in_out_file_test('tests', 'core', 'test_funcptrfunc')
def test_funcptr_namecollide(self):
self.do_run_in_out_file_test('tests', 'core', 'test_funcptr_namecollide')
def test_emptyclass(self):
self.do_run_in_out_file_test('tests', 'core', 'test_emptyclass')
def test_alloca(self):
self.do_run_in_out_file_test('tests', 'core', 'test_alloca')
def test_rename(self):
self.do_run_in_out_file_test('tests', 'stdio', 'test_rename')
def test_remove(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'cstdio', 'test_remove')
def test_alloca_stack(self):
self.do_run_in_out_file_test('tests', 'core', 'test_alloca_stack')
def test_stack_byval(self):
self.do_run_in_out_file_test('tests', 'core', 'test_stack_byval')
def test_stack_varargs(self):
# in node.js we allocate argv[0] on the stack, which means the length
# of the program directory influences how much stack we need, and so
# long random temp dir names can lead to random failures. The stack
# size was increased here to avoid that.
self.set_setting('INLINING_LIMIT', 50)
self.set_setting('TOTAL_STACK', 8 * 1024)
self.do_run_in_out_file_test('tests', 'core', 'test_stack_varargs')
def test_stack_varargs2(self):
# in node.js we allocate argv[0] on the stack, which means the length
# of the program directory influences how much stack we need, and so
# long random temp dir names can lead to random failures. The stack
# size was increased here to avoid that.
self.set_setting('TOTAL_STACK', 8 * 1024)
src = r'''
#include <stdio.h>
#include <stdlib.h>
void func(int i) {
}
int main() {
for (int i = 0; i < 7000; i++) {
printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
}
printf("ok!\n");
return 0;
}
'''
self.do_run(src, 'ok!')
print('with return')
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
for (int i = 0; i < 7000; i++) {
int j = printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d",
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
printf(" (%d)\n", j);
}
printf("ok!\n");
return 0;
}
'''
self.do_run(src, 'ok!')
print('with definitely no return')
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
void vary(const char *s, ...)
{
va_list v;
va_start(v, s);
char d[20];
vsnprintf(d, 20, s, v);
puts(d);
// Try it with copying
va_list tempva;
va_copy(tempva, v);
vsnprintf(d, 20, s, tempva);
puts(d);
va_end(v);
}
int main() {
for (int i = 0; i < 7000; i++) {
int j = printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d",
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
printf(" (%d)\n", j);
vary("*cheez: %d+%d*", 99, 24);
vary("*albeit*");
}
printf("ok!\n");
return 0;
}
'''
self.do_run(src, 'ok!')
def test_stack_void(self):
self.set_setting('INLINING_LIMIT', 50)
self.do_run_in_out_file_test('tests', 'core', 'test_stack_void')
def test_life(self):
self.emcc_args += ['-std=c99']
self.do_run_in_out_file_test('tests', 'life', args=['2'])
def test_array2(self):
self.do_run_in_out_file_test('tests', 'core', 'test_array2')
def test_array2b(self):
self.do_run_in_out_file_test('tests', 'core', 'test_array2b')
def test_constglobalstructs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_constglobalstructs')
def test_conststructs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_conststructs')
def test_bigarray(self):
self.do_run_in_out_file_test('tests', 'core', 'test_bigarray')
def test_mod_globalstruct(self):
self.do_run_in_out_file_test('tests', 'core', 'test_mod_globalstruct')
@no_wasm_backend('long doubles are f128s in wasm backend')
def test_pystruct(self):
def test():
self.do_run_in_out_file_test('tests', 'test_pystruct')
test()
print('relocatable') # this tests recursive global structs => nontrivial postSets for relocation
assert self.get_setting('RELOCATABLE') == self.get_setting('EMULATED_FUNCTION_POINTERS') == 0
self.set_setting('RELOCATABLE', 1)
self.set_setting('EMULATED_FUNCTION_POINTERS', 1)
test()
def test_sizeof(self):
# Has invalid writes between printouts
self.set_setting('SAFE_HEAP', 0)
self.do_run_in_out_file_test('tests', 'core', 'test_sizeof')
def test_llvm_used(self):
self.do_run_in_out_file_test('tests', 'core', 'test_llvm_used')
@no_asan('SAFE_HEAP cannot be used with ASan')
def test_set_align(self):
self.set_setting('SAFE_HEAP', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_set_align')
def test_emscripten_api(self):
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_save_me_aimee'])
self.do_run_in_out_file_test('tests', 'core', 'test_emscripten_api')
if '-fsanitize=address' not in self.emcc_args:
# test EXPORT_ALL (this is not compatible with asan, which doesn't
# support dynamic linking at all or the LINKING flag)
self.set_setting('EXPORTED_FUNCTIONS', [])
self.set_setting('EXPORT_ALL', 1)
self.set_setting('LINKABLE', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_emscripten_api')
def test_emscripten_run_script_string_int(self):
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
const char *str = emscripten_run_script_string("1+1");
printf("got string: %s\n", str);
return 0;
}
'''
self.do_run(src, '''got string: 2''')
def test_emscripten_run_script_string_utf8(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
int main() {
const char *str = emscripten_run_script_string("'\\u2603 \\u2603 \\u2603 Hello!'");
printf("length of returned string: %d. Position of substring 'Hello': %d\n", strlen(str), strstr(str, "Hello")-str);
return 0;
}
'''
self.do_run(src, '''length of returned string: 18. Position of substring 'Hello': 12''')
def test_emscripten_run_script_string_null(self):
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
const char *str = emscripten_run_script_string("void(0)");
if (str) {
printf("got string: %s\n", str);
} else {
puts("got null");
}
return 0;
}
'''
self.do_run(src, 'got null')
def test_emscripten_get_now(self):
self.banned_js_engines = [V8_ENGINE] # timer limitations in v8 shell
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
if self.run_name == 'asm2':
self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage
self.do_run(open(path_from_root('tests', 'emscripten_get_now.cpp')).read(), 'Timer resolution is good')
def test_emscripten_get_compiler_setting(self):
test_path = path_from_root('tests', 'core', 'emscripten_get_compiler_setting')
src, output = (test_path + s for s in ('.c', '.out'))
old = self.get_setting('ASSERTIONS')
# with assertions, a nice message is shown
self.set_setting('ASSERTIONS', 1)
self.do_run(open(src).read(), 'You must build with -s RETAIN_COMPILER_SETTINGS=1', assert_returncode=None)
self.set_setting('ASSERTIONS', old)
self.set_setting('RETAIN_COMPILER_SETTINGS', 1)
self.do_run(open(src).read(), open(output).read().replace('waka', shared.EMSCRIPTEN_VERSION))
@no_fastcomp('ASYNCIFY has been removed from fastcomp')
def test_emscripten_has_asyncify(self):
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("%d\n", emscripten_has_asyncify());
return 0;
}
'''
self.set_setting('ASYNCIFY', 0)
self.do_run(src, '0')
self.set_setting('ASYNCIFY', 1)
self.do_run(src, '1')
# TODO: test only worked in non-fastcomp
def test_inlinejs(self):
self.skipTest('non-fastcomp is deprecated and fails in 3.5') # only supports EM_ASM
self.do_run_in_out_file_test('tests', 'core', 'test_inlinejs')
if self.emcc_args == []:
# opts will eliminate the comments
out = open('src.cpp.o.js').read()
for i in range(1, 5):
assert ('comment%d' % i) in out
# TODO: test only worked in non-fastcomp
def test_inlinejs2(self):
self.skipTest('non-fastcomp is deprecated and fails in 3.5') # only supports EM_ASM
self.do_run_in_out_file_test('tests', 'core', 'test_inlinejs2')
def test_inlinejs3(self):
if self.is_wasm():
self.skipTest('wasm requires a proper asm module')
self.emcc_args.append('-Wno-almost-asm')
test_path = path_from_root('tests', 'core', 'test_inlinejs3')
src, output = (test_path + s for s in ('.c', '.out'))
self.do_run_in_out_file_test('tests', 'core', 'test_inlinejs3')
print('no debugger, check validation')
src = open(src).read().replace('emscripten_debugger();', '')
self.do_run(src, open(output).read())
def test_inlinejs4(self):
self.do_run(r'''
#include <emscripten.h>
#define TO_STRING_INNER(x) #x
#define TO_STRING(x) TO_STRING_INNER(x)
#define assert_msg(msg, file, line) EM_ASM( throw 'Assert (' + msg + ') failed in ' + file + ':' + line + '!'; )
#define assert(expr) { \
if (!(expr)) { \
assert_msg(#expr, TO_STRING(__FILE__), TO_STRING(__LINE__)); \
} \
}
int main(int argc, char **argv) {
assert(argc != 17);
assert(false);
return 0;
}
''', 'false', assert_returncode=None)
def test_em_asm(self):
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm')
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm', force_c=True)
# Tests various different ways to invoke the EM_ASM(), EM_ASM_INT() and EM_ASM_DOUBLE() macros.
@no_asan('Cannot use ASan: test depends exactly on heap size')
def test_em_asm_2(self):
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_2')
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_2', force_c=True)
# Tests various different ways to invoke the MAIN_THREAD_EM_ASM(), MAIN_THREAD_EM_ASM_INT() and MAIN_THREAD_EM_ASM_DOUBLE() macros.
# This test is identical to test_em_asm_2, just search-replaces EM_ASM to MAIN_THREAD_EM_ASM on the test file. That way if new
# test cases are added to test_em_asm_2.cpp for EM_ASM, they will also get tested in MAIN_THREAD_EM_ASM form.
@no_asan('Cannot use ASan: test depends exactly on heap size')
def test_main_thread_em_asm(self):
src = open(path_from_root('tests', 'core', 'test_em_asm_2.cpp')).read()
create_test_file('src.cpp', src.replace('EM_ASM', 'MAIN_THREAD_EM_ASM'))
expected_result = open(path_from_root('tests', 'core', 'test_em_asm_2.out')).read()
create_test_file('result.out', expected_result.replace('EM_ASM', 'MAIN_THREAD_EM_ASM'))
self.do_run_from_file('src.cpp', 'result.out')
self.do_run_from_file('src.cpp', 'result.out', force_c=True)
def test_main_thread_async_em_asm(self):
self.do_run_in_out_file_test('tests', 'core', 'test_main_thread_async_em_asm')
self.do_run_in_out_file_test('tests', 'core', 'test_main_thread_async_em_asm', force_c=True)
# Tests MAIN_THREAD_EM_ASM_INT() function call with different signatures.
def test_main_thread_em_asm_signatures(self):
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_signatures', assert_returncode=None)
def test_em_asm_unicode(self):
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_unicode')
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_unicode', force_c=True)
def test_em_asm_types(self):
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_types')
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_types', force_c=True)
def test_em_asm_unused_arguments(self):
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_unused_arguments')
# Verify that EM_ASM macros support getting called with multiple arities.
# If tests are later joined into larger compilation units, this one must still be
# compiled separately from other code that uses EM_ASM macros with arities 1-3;
# otherwise it may incorrectly report a success.
def test_em_asm_parameter_pack(self):
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_parameter_pack')
def test_em_asm_arguments_side_effects(self):
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_arguments_side_effects')
self.do_run_in_out_file_test('tests', 'core', 'test_em_asm_arguments_side_effects', force_c=True)
@parameterized({
'normal': ([],),
'linked': (['-s', 'MAIN_MODULE'],),
})
def test_em_js(self, args):
if 'MAIN_MODULE' in args and self.get_setting('WASM') == 0:
self.skipTest('main module support for non-wasm')
if '-fsanitize=address' in self.emcc_args:
self.skipTest('no dynamic library support in asan yet')
self.emcc_args += args + ['-s', 'EXPORTED_FUNCTIONS=["_main","_malloc"]']
self.do_run_in_out_file_test('tests', 'core', 'test_em_js')
self.do_run_in_out_file_test('tests', 'core', 'test_em_js', force_c=True)
def test_runtime_stacksave(self):
src = open(path_from_root('tests', 'core', 'test_runtime_stacksave.c')).read()
self.do_run(src, 'success')
# Tests that -s MINIMAL_RUNTIME=1 builds can utilize -s ALLOW_MEMORY_GROWTH=1 option.
def test_minimal_runtime_memorygrowth(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
self.set_setting('MINIMAL_RUNTIME', 1)
src = open(path_from_root('tests', 'core', 'test_memorygrowth.c')).read()
# Fail without memory growth
self.do_run(src, 'OOM', assert_returncode=None)
# Win with it
self.emcc_args += ['-Wno-almost-asm', '-s', 'ALLOW_MEMORY_GROWTH']
self.do_run(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
def test_memorygrowth(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
if self.maybe_closure():
# verify NO_DYNAMIC_EXECUTION is compatible with closure
self.set_setting('DYNAMIC_EXECUTION', 0)
# With typed arrays in particular, it is dangerous to use more memory than INITIAL_MEMORY,
# since we then need to enlarge the heap(s).
src = open(path_from_root('tests', 'core', 'test_memorygrowth.c')).read()
# Fail without memory growth
self.do_run(src, 'OOM', assert_returncode=None)
fail = open('src.cpp.o.js').read()
# Win with it
self.emcc_args += ['-Wno-almost-asm', '-s', 'ALLOW_MEMORY_GROWTH']
self.do_run(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
win = open('src.cpp.o.js').read()
if '-O2' in self.emcc_args and not self.is_wasm():
# Make sure ALLOW_MEMORY_GROWTH generates different code (should be less optimized)
possible_starts = ['// EMSCRIPTEN_START_FUNCS', 'var TOTAL_STACK']
code_start = None
for s in possible_starts:
if fail.find(s) >= 0:
code_start = s
break
assert code_start is not None, 'Generated code must contain one of ' + str(possible_starts)
fail = fail[fail.find(code_start):]
win = win[win.find(code_start):]
assert len(fail) < len(win), 'failing code - without memory growth on - is more optimized, and smaller' + str([len(fail), len(win)])
# Tracing of memory growths should work
self.set_setting('EMSCRIPTEN_TRACING', 1)
self.emcc_args += ['--tracing']
self.do_run(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
def test_memorygrowth_2(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
# With typed arrays in particular, it is dangerous to use more memory than INITIAL_MEMORY,
# since we then need to enlarge the heap(s).
src = open(path_from_root('tests', 'core', 'test_memorygrowth_2.c')).read()
# Fail without memory growth
self.do_run(src, 'OOM', assert_returncode=None)
fail = open('src.cpp.o.js').read()
# Win with it
self.emcc_args += ['-Wno-almost-asm', '-s', 'ALLOW_MEMORY_GROWTH']
self.do_run(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
win = open('src.cpp.o.js').read()
if '-O2' in self.emcc_args and not self.is_wasm():
# Make sure ALLOW_MEMORY_GROWTH generates different code (should be less optimized)
assert len(fail) < len(win), 'failing code - without memory growth on - is more optimized, and smaller' + str([len(fail), len(win)])
def test_memorygrowth_3(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
# checks that malloc failure is handled properly
self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH=0', '-s', 'ABORTING_MALLOC=0', '-s', 'SAFE_HEAP']
self.do_run_in_out_file_test('tests', 'core', 'test_memorygrowth_3')
@also_with_impure_standalone_wasm
def test_memorygrowth_MAXIMUM_MEMORY(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
if not self.is_wasm():
self.skipTest('wasm memory specific test')
# check that memory growth does not exceed the wasm mem max limit
self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'INITIAL_MEMORY=64Mb', '-s', 'MAXIMUM_MEMORY=100Mb']
self.do_run_in_out_file_test('tests', 'core', 'test_memorygrowth_wasm_mem_max')
def test_memorygrowth_linear_step(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
if not self.is_wasm():
self.skipTest('wasm memory specific test')
# check that memory growth does not exceed the wasm mem max limit and is exactly or one step below the wasm mem max
self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_STACK=1Mb', '-s', 'INITIAL_MEMORY=64Mb', '-s', 'MAXIMUM_MEMORY=130Mb', '-s', 'MEMORY_GROWTH_LINEAR_STEP=1Mb']
self.do_run_in_out_file_test('tests', 'core', 'test_memorygrowth_memory_growth_step')
def test_memorygrowth_geometric_step(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
if not self.is_wasm():
self.skipTest('wasm memory specific test')
self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MEMORY_GROWTH_GEOMETRIC_STEP=15', '-s', 'MEMORY_GROWTH_GEOMETRIC_CAP=0']
self.do_run_in_out_file_test('tests', 'core', 'test_memorygrowth_geometric_step')
def test_memorygrowth_3_force_fail_reallocBuffer(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
self.emcc_args += ['-Wno-almost-asm', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TEST_MEMORY_GROWTH_FAILS=1']
self.do_run_in_out_file_test('tests', 'core', 'test_memorygrowth_3')
@parameterized({
'nogrow': (['-s', 'ALLOW_MEMORY_GROWTH=0'],),
'grow': (['-s', 'ALLOW_MEMORY_GROWTH=1'],)
})
@no_asan('requires more memory when growing')
def test_aborting_new(self, args):
# test that C++ new properly errors if we fail to malloc when growth is
# enabled, with or without growth
self.emcc_args += ['-Wno-almost-asm', '-s', 'MAXIMUM_MEMORY=18MB'] + args
self.do_run_in_out_file_test('tests', 'core', 'test_aborting_new')
@no_asmjs()
@no_wasm2js('no WebAssembly.Memory()')
@no_asan('ASan alters the memory size')
def test_module_wasm_memory(self):
self.emcc_args += ['--pre-js', path_from_root('tests', 'core', 'test_module_wasm_memory.js')]
src = open(path_from_root('tests', 'core', 'test_module_wasm_memory.c')).read()
self.do_run(src, 'success', force_c=True)
def test_ssr(self): # struct self-ref
src = '''
#include <stdio.h>
// see related things in openjpeg
typedef struct opj_mqc_state {
unsigned int qeval;
int mps;
struct opj_mqc_state *nmps;
struct opj_mqc_state *nlps;
} opj_mqc_state_t;
static opj_mqc_state_t mqc_states[4] = {
{0x5600, 0, &mqc_states[2], &mqc_states[3]},
{0x5602, 1, &mqc_states[3], &mqc_states[2]},
};
int main() {
printf("*%d*\\n", (int)(mqc_states+1)-(int)mqc_states);
for (int i = 0; i < 2; i++)
printf("%d:%d,%d,%d,%d\\n", i, mqc_states[i].qeval, mqc_states[i].mps,
(int)mqc_states[i].nmps-(int)mqc_states, (int)mqc_states[i].nlps-(int)mqc_states);
return 0;
}
'''
self.do_run(src, '''*16*\n0:22016,0,32,48\n1:22018,1,48,32\n''')
def test_tinyfuncstr(self):
self.do_run_in_out_file_test('tests', 'core', 'test_tinyfuncstr')
def test_llvmswitch(self):
self.do_run_in_out_file_test('tests', 'core', 'test_llvmswitch')
def test_cxx_version(self):
self.do_run_in_out_file_test('tests', 'core', 'test_cxx_version')
@no_wasm2js('massive switches can break js engines')
def test_bigswitch(self):
src = open(path_from_root('tests', 'bigswitch.cpp')).read()
self.do_run(src, '''34962: GL_ARRAY_BUFFER (0x8892)
26214: what?
35040: GL_STREAM_DRAW (0x88E0)
3060: what?
''', args=['34962', '26214', '35040', str(0xbf4)], assert_returncode=None)
@no_wasm2js('massive switches can break js engines')
@is_slow_test
def test_biggerswitch(self):
if self.is_wasm_backend():
if not is_optimizing(self.emcc_args):
self.skipTest('nodejs takes >6GB to compile this if the wasm is not optimized, which OOMs, see https://github.com/emscripten-core/emscripten/issues/7928#issuecomment-458308453')
if '-Os' in self.emcc_args:
self.skipTest('hangs in recent upstream clang, see https://bugs.llvm.org/show_bug.cgi?id=43468')
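# generate a C program with a 20,000-case switch via gen_large_switchcase.py and check a few specific cases plus the default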
num_cases = 20000
switch_case = run_process([PYTHON, path_from_root('tests', 'gen_large_switchcase.py'), str(num_cases)], stdout=PIPE, stderr=PIPE).stdout
self.do_run(switch_case, '''58996: 589965899658996
59297: 592975929759297
59598: default
59899: 598995989959899
Success!''')
def test_indirectbr(self):
self.emcc_args = [x for x in self.emcc_args if x != '-g']
self.do_run_in_out_file_test('tests', 'core', 'test_indirectbr')
@no_asan('local count too large for VMs')
@no_wasm2js('extremely deep nesting, hits stack limit on some VMs')
def test_indirectbr_many(self):
self.do_run_in_out_file_test('tests', 'core', 'test_indirectbr_many')
def test_pack(self):
src = '''
#include <stdio.h>
#include <string.h>
#pragma pack(push,1)
typedef struct header
{
unsigned char id;
unsigned short colour;
unsigned char desc;
} header;
#pragma pack(pop)
typedef struct fatheader
{
unsigned char id;
unsigned short colour;
unsigned char desc;
} fatheader;
int main( int argc, const char *argv[] ) {
header h, *ph = 0;
fatheader fh, *pfh = 0;
printf("*%d,%d,%d*\\n", sizeof(header), (int)((int)&h.desc - (int)&h.id), (int)(&ph[1])-(int)(&ph[0]));
printf("*%d,%d,%d*\\n", sizeof(fatheader), (int)((int)&fh.desc - (int)&fh.id), (int)(&pfh[1])-(int)(&pfh[0]));
return 0;
}
'''
self.do_run(src, '*4,3,4*\n*6,4,6*')
def test_varargs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_varargs')
@no_wasm_backend('Calling varargs across function calls is undefined behavior in C,'
' and asmjs and wasm implement it differently.')
def test_varargs_multi(self):
self.do_run_in_out_file_test('tests', 'core', 'test_varargs_multi')
@unittest.skip('clang cannot compile this code with that target yet')
def test_varargs_byval(self):
src = r'''
#include <stdio.h>
#include <stdarg.h>
typedef struct type_a {
union {
double f;
void *p;
int i;
short sym;
} value;
} type_a;
enum mrb_vtype {
MRB_TT_FALSE = 0, /* 0 */
MRB_TT_CLASS = 9 /* 9 */
};
typedef struct type_b {
enum mrb_vtype tt:8;
} type_b;
void print_type_a(int argc, ...);
void print_type_b(int argc, ...);
int main(int argc, char *argv[])
{
type_a a;
type_b b;
a.value.p = (void*) 0x12345678;
b.tt = MRB_TT_CLASS;
printf("The original address of a is: %p\n", a.value.p);
printf("The original type of b is: %d\n", b.tt);
print_type_a(1, a);
print_type_b(1, b);
return 0;
}
void print_type_a(int argc, ...) {
va_list ap;
type_a a;
va_start(ap, argc);
a = va_arg(ap, type_a);
va_end(ap);
printf("The current address of a is: %p\n", a.value.p);
}
void print_type_b(int argc, ...) {
va_list ap;
type_b b;
va_start(ap, argc);
b = va_arg(ap, type_b);
va_end(ap);
printf("The current type of b is: %d\n", b.tt);
}
'''
self.do_run(src, '''The original address of a is: 0x12345678
The original type of b is: 9
The current address of a is: 0x12345678
The current type of b is: 9
''')
def test_functionpointer_libfunc_varargs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_functionpointer_libfunc_varargs')
def test_structbyval(self):
self.set_setting('INLINING_LIMIT', 50)
# part 1: make sure that normally, passing structs by value works
src = r'''
#include <stdio.h>
struct point
{
int x, y;
};
void dump(struct point p) {
p.x++; // should not modify
p.y++; // anything in the caller!
printf("dump: %d,%d\n", p.x, p.y);
}
void dumpmod(struct point *p) {
p->x++; // should not modify
p->y++; // anything in the caller!
printf("dump: %d,%d\n", p->x, p->y);
}
int main( int argc, const char *argv[] ) {
point p = { 54, 2 };
printf("pre: %d,%d\n", p.x, p.y);
dump(p);
void (*dp)(point p) = dump; // And, as a function pointer
dp(p);
printf("post: %d,%d\n", p.x, p.y);
dumpmod(&p);
dumpmod(&p);
printf("last: %d,%d\n", p.x, p.y);
return 0;
}
'''
self.do_run(src, 'pre: 54,2\ndump: 55,3\ndump: 55,3\npost: 54,2\ndump: 55,3\ndump: 56,4\nlast: 56,4')
def test_stdlibs(self):
# safe heap prints a warning that messes up our output.
self.set_setting('SAFE_HEAP', 0)
# needs atexit
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_stdlibs')
def test_stdbool(self):
src = r'''
#include <stdio.h>
#include <stdbool.h>
int main() {
bool x = true;
bool y = false;
printf("*%d*\n", x != y);
return 0;
}
'''
self.do_run(src, '*1*', force_c=True)
def test_strtoll_hex(self):
# tests strtoll for hex strings (0x...)
self.do_run_in_out_file_test('tests', 'core', 'test_strtoll_hex')
def test_strtoll_dec(self):
# tests strtoll for decimal strings
self.do_run_in_out_file_test('tests', 'core', 'test_strtoll_dec')
def test_strtoll_bin(self):
# tests strtoll for binary strings
self.do_run_in_out_file_test('tests', 'core', 'test_strtoll_bin')
def test_strtoll_oct(self):
# tests strtoll for octal strings
self.do_run_in_out_file_test('tests', 'core', 'test_strtoll_oct')
def test_strtol_hex(self):
# tests strtol for hex strings (0x...)
self.do_run_in_out_file_test('tests', 'core', 'test_strtol_hex')
def test_strtol_dec(self):
# tests strtol for decimal strings
self.do_run_in_out_file_test('tests', 'core', 'test_strtol_dec')
def test_strtol_bin(self):
    # tests strtol for binary strings
self.do_run_in_out_file_test('tests', 'core', 'test_strtol_bin')
def test_strtol_oct(self):
    # tests strtol for octal strings
self.do_run_in_out_file_test('tests', 'core', 'test_strtol_oct')
@also_with_standalone_wasm
def test_atexit(self):
    # Confirms that atexit handlers are called in the proper (reverse) order
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_atexit')
def test_atexit_threads(self):
# also tests thread exit (__cxa_thread_atexit)
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_atexit_threads')
@no_asan('test relies on null pointer reads')
def test_pthread_specific(self):
src = open(path_from_root('tests', 'pthread', 'specific.c')).read()
expected = open(path_from_root('tests', 'pthread', 'specific.c.txt')).read()
self.do_run(src, expected, force_c=True)
def test_pthread_equal(self):
self.do_run_in_out_file_test('tests', 'pthread', 'test_pthread_equal')
def test_tcgetattr(self):
src = open(path_from_root('tests', 'termios', 'test_tcgetattr.c')).read()
self.do_run(src, 'success', force_c=True)
def test_time(self):
self.do_run_in_out_file_test('tests', 'core', 'test_time')
for tz in ['EST+05EDT', 'UTC+0']:
print('extra tz test:', tz)
with env_modify({'TZ': tz}):
# Run the test with different time zone settings if
# possible. It seems that the TZ environment variable does not
# work all the time (at least it's not well respected by
# Node.js on Windows), but it does no harm either.
self.do_run_in_out_file_test('tests', 'core', 'test_time')
def test_timeb(self):
# Confirms they are called in reverse order
self.do_run_in_out_file_test('tests', 'core', 'test_timeb')
def test_time_c(self):
self.do_run_in_out_file_test('tests', 'core', 'test_time_c')
def test_gmtime(self):
self.do_run_in_out_file_test('tests', 'core', 'test_gmtime')
def test_strptime_tm(self):
self.do_run_in_out_file_test('tests', 'core', 'test_strptime_tm')
def test_strptime_days(self):
self.do_run_in_out_file_test('tests', 'core', 'test_strptime_days')
def test_strptime_reentrant(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_strptime_reentrant')
def test_strftime(self):
self.do_run_in_out_file_test('tests', 'core', 'test_strftime')
@no_wasm_backend("wasm backend doesn't compile intentional segfault into an abort() call. "
"It also doesn't segfault.")
def test_intentional_fault(self):
    # Some programs intentionally segfault themselves; we should compile that into a throw
src = open(path_from_root('tests', 'core', 'test_intentional_fault.c')).read()
self.do_run(src, 'abort(' if self.run_name != 'asm2g' else 'abort(segmentation fault', assert_returncode=None)
def test_trickystring(self):
self.do_run_in_out_file_test('tests', 'core', 'test_trickystring')
def test_statics(self):
self.do_run_in_out_file_test('tests', 'core', 'test_statics')
def test_copyop(self):
    # clang-generated code is vulnerable to this, as it uses
    # memcpy for assignments with hardcoded numbers of bytes
    # (llvm-gcc copies items one by one).
self.do_run_in_out_file_test('tests', 'core', 'test_copyop')
def test_memcpy_memcmp(self):
self.banned_js_engines = [V8_ENGINE] # Currently broken under V8_ENGINE but not node
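    # Hash the (large) output so it can be compared against a stored digest
    # rather than the full text.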
def check(result, err):
result = result.replace('\n \n', '\n') # remove extra node output
return hashlib.sha1(result.encode('utf-8')).hexdigest()
self.do_run_in_out_file_test('tests', 'core', 'test_memcpy_memcmp', output_nicerizer=check)
def test_memcpy2(self):
self.do_run_in_out_file_test('tests', 'core', 'test_memcpy2', assert_returncode=None)
def test_memcpy3(self):
self.do_run_in_out_file_test('tests', 'core', 'test_memcpy3', assert_returncode=None)
@also_with_standalone_wasm
def test_memcpy_alignment(self):
self.do_run(open(path_from_root('tests', 'test_memcpy_alignment.cpp')).read(), 'OK.')
def test_memset_alignment(self):
self.do_run(open(path_from_root('tests', 'test_memset_alignment.cpp')).read(), 'OK.')
def test_memset(self):
self.do_run_in_out_file_test('tests', 'core', 'test_memset', assert_returncode=None)
def test_getopt(self):
self.do_run_in_out_file_test('tests', 'core', 'test_getopt', args=['-t', '12', '-n', 'foobar'])
def test_getopt_long(self):
self.do_run_in_out_file_test('tests', 'core', 'test_getopt_long', args=['--file', 'foobar', '-b'])
def test_memmove(self):
self.do_run_in_out_file_test('tests', 'core', 'test_memmove')
def test_memmove2(self):
self.do_run_in_out_file_test('tests', 'core', 'test_memmove2', assert_returncode=None)
def test_memmove3(self):
self.do_run_in_out_file_test('tests', 'core', 'test_memmove3')
def test_flexarray_struct(self):
self.do_run_in_out_file_test('tests', 'core', 'test_flexarray_struct')
def test_bsearch(self):
self.do_run_in_out_file_test('tests', 'core', 'test_bsearch')
@no_wasm_backend("https://github.com/emscripten-core/emscripten/issues/9039")
def test_stack_overflow(self):
self.set_setting('ASSERTIONS', 1)
self.do_run(open(path_from_root('tests', 'core', 'stack_overflow.cpp')).read(), 'Stack overflow!', assert_returncode=None)
def test_stackAlloc(self):
self.do_run_in_out_file_test('tests', 'core', 'stackAlloc')
def test_nestedstructs(self):
src = '''
#include <stdio.h>
#include "emscripten.h"
struct base {
int x;
float y;
union {
int a;
float b;
};
char c;
};
struct hashtableentry {
int key;
base data;
};
struct hashset {
typedef hashtableentry entry;
struct chain { entry elem; chain *next; };
// struct chainchunk { chain chains[100]; chainchunk *next; };
};
struct hashtable : hashset {
hashtable() {
base *b = NULL;
entry *e = NULL;
chain *c = NULL;
printf("*%d,%d,%d,%d,%d,%d|%d,%d,%d,%d,%d,%d,%d,%d|%d,%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n",
sizeof(base),
int(&(b->x)), int(&(b->y)), int(&(b->a)), int(&(b->b)), int(&(b->c)),
sizeof(hashtableentry),
int(&(e->key)), int(&(e->data)), int(&(e->data.x)), int(&(e->data.y)), int(&(e->data.a)), int(&(e->data.b)), int(&(e->data.c)),
sizeof(hashset::chain),
int(&(c->elem)), int(&(c->next)), int(&(c->elem.key)), int(&(c->elem.data)), int(&(c->elem.data.x)), int(&(c->elem.data.y)), int(&(c->elem.data.a)), int(&(c->elem.data.b)), int(&(c->elem.data.c))
);
}
};
struct B { char buffer[62]; int last; char laster; char laster2; };
struct Bits {
unsigned short A : 1;
unsigned short B : 1;
unsigned short C : 1;
unsigned short D : 1;
unsigned short x1 : 1;
unsigned short x2 : 1;
unsigned short x3 : 1;
unsigned short x4 : 1;
};
int main() {
hashtable t;
// Part 2 - the char[] should be compressed, BUT have a padding space at the end so the next
// one is aligned properly. Also handle char; char; etc. properly.
B *b = NULL;
printf("*%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n", int(b), int(&(b->buffer)), int(&(b->buffer[0])), int(&(b->buffer[1])), int(&(b->buffer[2])),
int(&(b->last)), int(&(b->laster)), int(&(b->laster2)), sizeof(B));
// Part 3 - bitfields, and small structures
Bits *b2 = NULL;
printf("*%d*\\n", sizeof(Bits));
return 0;
}
'''
# Bloated memory; same layout as C/C++
self.do_run(src, '*16,0,4,8,8,12|20,0,4,4,8,12,12,16|24,0,20,0,4,4,8,12,12,16*\n*0,0,0,1,2,64,68,69,72*\n*2*')
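  # Helpers for the dlfcn tests below: the library source is built as a
  # SIDE_MODULE named liblib.so, and the host program is then built as a
  # MAIN_MODULE that dlopen()s it.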
def prep_dlfcn_lib(self):
self.clear_setting('MAIN_MODULE')
self.set_setting('SIDE_MODULE')
def prep_dlfcn_main(self):
self.set_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
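    # Preload the side module into the virtual filesystem via a preRun hook,
    # so that dlopen("liblib.so") can find it at runtime.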
create_test_file('lib_so_pre.js', '''
if (!Module['preRun']) Module['preRun'] = [];
Module['preRun'].push(function() { FS.createDataFile('/', 'liblib.so', %s, true, false, false); });
''' % str(list(bytearray(open('liblib.so', 'rb').read()))))
self.emcc_args += ['--pre-js', 'lib_so_pre.js']
def build_dlfcn_lib(self, lib_src, dirname, filename):
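    # Build lib_src as a side module and rename the output (wasm or js,
    # depending on the WASM setting) to liblib.so for dlopen().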
if self.get_setting('WASM'):
# emcc emits a wasm in this case
self.build(lib_src, dirname, filename, js_outfile=False)
shutil.move(filename + '.o.wasm', os.path.join(dirname, 'liblib.so'))
else:
self.build(lib_src, dirname, filename)
shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
@needs_dlfcn
def test_dlfcn_missing(self):
self.set_setting('MAIN_MODULE', 1)
if self.has_changed_setting('ASSERTIONS'):
self.skipTest('test needs to customize ASSERTIONS')
self.set_setting('ASSERTIONS', 1)
src = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <assert.h>
int main() {
void* lib_handle = dlopen("libfoo.so", RTLD_NOW);
assert(!lib_handle);
printf("error: %s\n", dlerror());
return 0;
}
'''
self.do_run(src, 'error: Could not load dynamic lib: libfoo.so\nError: No such file or directory')
print('without assertions, the error is less clear')
self.set_setting('ASSERTIONS', 0)
self.do_run(src, 'error: Could not load dynamic lib: libfoo.so\nError: FS error')
@needs_dlfcn
def test_dlfcn_basic(self):
self.prep_dlfcn_lib()
lib_src = '''
#include <cstdio>
class Foo {
public:
Foo() {
puts("Constructing lib object.");
}
};
Foo global;
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
self.build_dlfcn_lib(lib_src, dirname, filename)
self.prep_dlfcn_main()
src = '''
#include <cstdio>
#include <dlfcn.h>
class Bar {
public:
Bar() {
puts("Constructing main object.");
}
};
Bar global;
int main() {
dlopen("liblib.so", RTLD_NOW);
return 0;
}
'''
self.do_run(src, 'Constructing main object.\nConstructing lib object.\n')
@needs_dlfcn
def test_dlfcn_i64(self):
self.prep_dlfcn_lib()
self.set_setting('EXPORTED_FUNCTIONS', ['_foo'])
lib_src = '''
int foo(int x) {
return (long long)x / (long long)1234;
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.c')
self.build_dlfcn_lib(lib_src, dirname, filename)
self.prep_dlfcn_main()
self.clear_setting('EXPORTED_FUNCTIONS')
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
typedef int (*intfunc)(int);
void *p;
int main() {
p = malloc(1024);
void *lib_handle = dlopen("liblib.so", 0);
if (!lib_handle) {
puts(dlerror());
abort();
}
printf("dll handle: %p\n", lib_handle);
intfunc x = (intfunc)dlsym(lib_handle, "foo");
printf("foo func handle: %p\n", x);
if (p == 0) return 1;
if (!x) {
printf("dlsym failed: %s\n", dlerror());
return 1;
}
printf("|%d|\n", x(81234567));
return 0;
}
'''
self.do_run(src, '|65830|')
@needs_dlfcn
@no_wasm('EM_ASM in shared wasm modules, stored inside the wasm somehow')
def test_dlfcn_em_asm(self):
self.prep_dlfcn_lib()
lib_src = '''
#include <emscripten.h>
class Foo {
public:
Foo() {
EM_ASM( out("Constructing lib object.") );
}
};
Foo global;
'''
filename = 'liblib.cpp'
self.build_dlfcn_lib(lib_src, self.get_dir(), filename)
self.prep_dlfcn_main()
src = '''
#include <emscripten.h>
#include <dlfcn.h>
class Bar {
public:
Bar() {
EM_ASM( out("Constructing main object.") );
}
};
Bar global;
int main() {
dlopen("liblib.so", RTLD_NOW);
EM_ASM( out("All done.") );
return 0;
}
'''
self.do_run(src, 'Constructing main object.\nConstructing lib object.\nAll done.\n')
@needs_dlfcn
def test_dlfcn_qsort(self):
self.prep_dlfcn_lib()
self.set_setting('EXPORTED_FUNCTIONS', ['_get_cmp'])
lib_src = '''
int lib_cmp(const void* left, const void* right) {
const int* a = (const int*) left;
const int* b = (const int*) right;
if(*a > *b) return 1;
else if(*a == *b) return 0;
else return -1;
}
typedef int (*CMP_TYPE)(const void*, const void*);
extern "C" CMP_TYPE get_cmp() {
return lib_cmp;
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
self.build_dlfcn_lib(lib_src, dirname, filename)
self.prep_dlfcn_main()
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc'])
src = '''
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
typedef int (*CMP_TYPE)(const void*, const void*);
int main_cmp(const void* left, const void* right) {
const int* a = (const int*) left;
const int* b = (const int*) right;
if(*a < *b) return 1;
else if(*a == *b) return 0;
else return -1;
}
int main() {
void* lib_handle;
CMP_TYPE (*getter_ptr)();
CMP_TYPE lib_cmp_ptr;
int arr[5] = {4, 2, 5, 1, 3};
qsort((void*)arr, 5, sizeof(int), main_cmp);
printf("Sort with main comparison: ");
for (int i = 0; i < 5; i++) {
printf("%d ", arr[i]);
}
printf("\\n");
lib_handle = dlopen("liblib.so", RTLD_NOW);
if (lib_handle == NULL) {
printf("Could not load lib.\\n");
return 1;
}
getter_ptr = (CMP_TYPE (*)()) dlsym(lib_handle, "get_cmp");
if (getter_ptr == NULL) {
printf("Could not find func.\\n");
return 1;
}
lib_cmp_ptr = getter_ptr();
qsort((void*)arr, 5, sizeof(int), lib_cmp_ptr);
printf("Sort with lib comparison: ");
for (int i = 0; i < 5; i++) {
printf("%d ", arr[i]);
}
printf("\\n");
return 0;
}
'''
self.do_run(src, 'Sort with main comparison: 5 4 3 2 1 *Sort with lib comparison: 1 2 3 4 5 *',
output_nicerizer=lambda x, err: x.replace('\n', '*'))
if self.get_setting('ASM_JS') and SPIDERMONKEY_ENGINE and os.path.exists(SPIDERMONKEY_ENGINE[0]) and not self.is_wasm():
out = run_js('liblib.so', engine=SPIDERMONKEY_ENGINE, full_output=True, stderr=STDOUT)
if 'asm' in out:
self.validate_asmjs(out)
@needs_dlfcn
def test_dlfcn_data_and_fptr(self):
# Failing under v8 since: https://chromium-review.googlesource.com/712595
if self.is_wasm():
self.banned_js_engines = [V8_ENGINE]
self.prep_dlfcn_lib()
lib_src = r'''
#include <stdio.h>
int theglobal = 42;
extern void parent_func(); // a function that is defined in the parent
int* lib_get_global_addr() {
return &theglobal;
}
void lib_fptr() {
printf("Second calling lib_fptr from main.\n");
parent_func();
        // call it also through a pointer, to check function-pointer indexing
void (*p_f)();
p_f = parent_func;
p_f();
}
extern "C" void (*func(int x, void(*fptr)()))() {
printf("In func: %d\n", x);
fptr();
return lib_fptr;
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
self.set_setting('EXPORTED_FUNCTIONS', ['_func'])
self.build_dlfcn_lib(lib_src, dirname, filename)
self.prep_dlfcn_main()
src = r'''
#include <stdio.h>
#include <dlfcn.h>
#include <emscripten.h>
typedef void (*FUNCTYPE(int, void(*)()))();
FUNCTYPE func;
void EMSCRIPTEN_KEEPALIVE parent_func() {
printf("parent_func called from child\n");
}
void main_fptr() {
printf("First calling main_fptr from lib.\n");
}
int main() {
void* lib_handle;
FUNCTYPE* func_fptr;
// Test basic lib loading.
lib_handle = dlopen("liblib.so", RTLD_NOW);
if (lib_handle == NULL) {
printf("Could not load lib.\n");
return 1;
}
// Test looked up function.
func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func");
// Load twice to test cache.
func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func");
if (func_fptr == NULL) {
printf("Could not find func.\n");
return 1;
}
// Test passing function pointers across module bounds.
void (*fptr)() = func_fptr(13, main_fptr);
fptr();
// Test global data.
int* globaladdr = (int*) dlsym(lib_handle, "theglobal");
if (globaladdr == NULL) {
printf("Could not find global.\n");
return 1;
}
printf("Var: %d\n", *globaladdr);
return 0;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_main'])
self.do_run(src, '''\
In func: 13
First calling main_fptr from lib.
Second calling lib_fptr from main.
parent_func called from child
parent_func called from child
Var: 42
''')
@needs_dlfcn
def test_dlfcn_varargs(self):
    # This test is not actually valid - it fails natively: the child should fail
    # to load, not load successfully and see the parent's print_ints func.
self.prep_dlfcn_lib()
lib_src = r'''
void print_ints(int n, ...);
extern "C" void func() {
print_ints(2, 13, 42);
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
self.set_setting('EXPORTED_FUNCTIONS', ['_func'])
self.build_dlfcn_lib(lib_src, dirname, filename)
self.prep_dlfcn_main()
src = r'''
#include <stdarg.h>
#include <stdio.h>
#include <dlfcn.h>
#include <assert.h>
void print_ints(int n, ...) {
va_list args;
va_start(args, n);
for (int i = 0; i < n; i++) {
printf("%d\n", va_arg(args, int));
}
va_end(args);
}
int main() {
void* lib_handle;
void (*fptr)();
print_ints(2, 100, 200);
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle);
fptr = (void (*)())dlsym(lib_handle, "func");
fptr();
return 0;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_main'])
self.do_run(src, '100\n200\n13\n42\n')
@needs_dlfcn
def test_dlfcn_alignment_and_zeroing(self):
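    # The side module defines a 1024-byte-aligned global between two
    # zero-initialized globals. The main program first dirties the heap, then
    # loads ten copies of the library and checks that each copy's globals are
    # correctly aligned and that its zero-initialized data really is zero.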
self.prep_dlfcn_lib()
self.set_setting('INITIAL_MEMORY', 16 * 1024 * 1024)
lib_src = r'''
extern "C" {
int prezero = 0;
__attribute__((aligned(1024))) int superAligned = 12345;
int postzero = 0;
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
self.build_dlfcn_lib(lib_src, dirname, filename)
for i in range(10):
curr = '%d.so' % i
shutil.copyfile('liblib.so', curr)
self.emcc_args += ['--embed-file', curr]
self.prep_dlfcn_main()
self.set_setting('INITIAL_MEMORY', 128 * 1024 * 1024)
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dlfcn.h>
#include <assert.h>
#include <emscripten.h>
int main() {
printf("'prepare' memory with non-zero inited stuff\n");
int num = 120 * 1024 * 1024; // total is 128; we'll use 5*5 = 25 at least, so allocate pretty much all of it
void* mem = malloc(num);
assert(mem);
printf("setting this range to non-zero: %d - %d\n", int(mem), int(mem) + num);
memset(mem, 1, num);
EM_ASM({
var value = HEAP8[64*1024*1024];
out('verify middle of memory is non-zero: ' + value);
assert(value === 1);
});
free(mem);
for (int i = 0; i < 10; i++) {
char curr[] = "?.so";
curr[0] = '0' + i;
printf("loading %s\n", curr);
void* lib_handle = dlopen(curr, RTLD_NOW);
if (!lib_handle) {
puts(dlerror());
assert(0);
}
printf("getting superAligned\n");
int* superAligned = (int*)dlsym(lib_handle, "superAligned");
assert(superAligned);
assert(int(superAligned) % 1024 == 0); // alignment
printf("checking value of superAligned, at %d\n", superAligned);
assert(*superAligned == 12345); // value
printf("getting prezero\n");
int* prezero = (int*)dlsym(lib_handle, "prezero");
assert(prezero);
printf("checking value of prezero, at %d\n", prezero);
assert(*prezero == 0);
*prezero = 1;
assert(*prezero != 0);
printf("getting postzero\n");
int* postzero = (int*)dlsym(lib_handle, "postzero");
printf("checking value of postzero, at %d\n", postzero);
assert(postzero);
printf("checking value of postzero\n");
assert(*postzero == 0);
*postzero = 1;
assert(*postzero != 0);
}
printf("success.\n");
return 0;
}
'''
self.do_run(src, 'success.\n')
@needs_dlfcn
def test_dlfcn_self(self):
self.set_setting('MAIN_MODULE')
self.set_setting('EXPORT_ALL')
# TODO(https://github.com/emscripten-core/emscripten/issues/11121)
# We link with C++ stdlibs, even when linking with emcc for historical reasons. We can remove
    # this if this issue is fixed.
self.emcc_args.append('-nostdlib++')
def post(filename):
js = open(filename).read()
start = js.find('var NAMED_GLOBALS')
first = js.find('{', start)
last = js.find('}', start)
exports = js[first + 1:last]
exports = exports.split(',')
# ensure there aren't too many globals; we don't want unnamed_addr
exports = [e.split(':')[0].strip('"') for e in exports]
exports.sort()
self.assertGreater(len(exports), 20)
      # wasm backend includes aliases in NAMED_GLOBALS
if self.is_wasm_backend():
self.assertLess(len(exports), 56)
else:
self.assertLess(len(exports), 33)
self.do_run_in_out_file_test('tests', 'core', 'test_dlfcn_self', post_build=post)
@needs_dlfcn
def test_dlfcn_unique_sig(self):
self.prep_dlfcn_lib()
lib_src = '''
#include <stdio.h>
int myfunc(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l, int m) {
return 13;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_myfunc'])
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.c')
self.build_dlfcn_lib(lib_src, dirname, filename)
self.prep_dlfcn_main()
src = '''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
typedef int (*FUNCTYPE)(int, int, int, int, int, int, int, int, int, int, int, int, int);
int main() {
void *lib_handle;
FUNCTYPE func_ptr;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc");
assert(func_ptr != NULL);
assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13);
puts("success");
return 0;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc'])
self.do_run(src, 'success', force_c=True)
@needs_dlfcn
def test_dlfcn_info(self):
self.prep_dlfcn_lib()
lib_src = '''
#include <stdio.h>
int myfunc(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l, int m) {
return 13;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_myfunc'])
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.c')
self.build_dlfcn_lib(lib_src, dirname, filename)
self.prep_dlfcn_main()
src = '''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <dlfcn.h>
typedef int (*FUNCTYPE)(int, int, int, int, int, int, int, int, int, int, int, int, int);
int main() {
void *lib_handle;
FUNCTYPE func_ptr;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc");
assert(func_ptr != NULL);
assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13);
/* Verify that we don't corrupt func_ptr when calling dladdr. */
Dl_info info;
memset(&info, 0, sizeof(info));
dladdr(func_ptr, &info);
assert(func_ptr != NULL);
assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13);
/* Verify something useful lives in info. */
assert(info.dli_fname != NULL);
assert(info.dli_fbase == NULL);
assert(info.dli_sname == NULL);
assert(info.dli_saddr == NULL);
puts("success");
return 0;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc'])
self.do_run(src, 'success', force_c=True)
@needs_dlfcn
def test_dlfcn_stacks(self):
self.prep_dlfcn_lib()
lib_src = '''
#include <assert.h>
#include <stdio.h>
#include <string.h>
int myfunc(const char *input) {
char bigstack[1024] = { 0 };
// make sure we didn't just trample the stack!
assert(!strcmp(input, "foobar"));
snprintf(bigstack, sizeof(bigstack), input);
return strlen(bigstack);
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_myfunc'])
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.c')
self.build_dlfcn_lib(lib_src, dirname, filename)
self.prep_dlfcn_main()
src = '''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
#include <string.h>
typedef int (*FUNCTYPE)(const char *);
int main() {
void *lib_handle;
FUNCTYPE func_ptr;
char str[128];
snprintf(str, sizeof(str), "foobar");
// HACK: Use strcmp in the main executable so that it doesn't get optimized out and the dynamic library
// is able to use it.
assert(!strcmp(str, "foobar"));
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc");
assert(func_ptr != NULL);
assert(func_ptr(str) == 6);
puts("success");
return 0;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_strcmp'])
self.do_run(src, 'success', force_c=True)
@needs_dlfcn
def test_dlfcn_funcs(self):
self.prep_dlfcn_lib()
lib_src = r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
typedef void (*voidfunc)();
typedef void (*intfunc)(int);
void callvoid(voidfunc f) { f(); }
void callint(voidfunc f, int x) { f(x); }
void void_0() { printf("void 0\n"); }
void void_1() { printf("void 1\n"); }
voidfunc getvoid(int i) {
switch(i) {
case 0: return void_0;
case 1: return void_1;
default: return NULL;
}
}
void int_0(int x) { printf("int 0 %d\n", x); }
void int_1(int x) { printf("int 1 %d\n", x); }
intfunc getint(int i) {
switch(i) {
case 0: return int_0;
case 1: return int_1;
default: return NULL;
}
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_callvoid', '_callint', '_getvoid', '_getint'])
dirname = self.get_dir()
self.build_dlfcn_lib(lib_src, dirname, os.path.join(dirname, 'liblib.c'))
self.prep_dlfcn_main()
src = r'''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
typedef void (*voidfunc)();
typedef void (*intfunc)(int);
typedef void (*voidcaller)(voidfunc);
typedef void (*intcaller)(intfunc, int);
typedef voidfunc (*voidgetter)(int);
typedef intfunc (*intgetter)(int);
void void_main() { printf("void_main.\n"); }
void int_main(int x) { printf("int_main %d\n", x); }
int main() {
printf("go\n");
void *lib_handle;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
voidcaller callvoid = (voidcaller)dlsym(lib_handle, "callvoid");
assert(callvoid != NULL);
callvoid(void_main);
intcaller callint = (intcaller)dlsym(lib_handle, "callint");
assert(callint != NULL);
callint(int_main, 201);
voidgetter getvoid = (voidgetter)dlsym(lib_handle, "getvoid");
assert(getvoid != NULL);
callvoid(getvoid(0));
callvoid(getvoid(1));
intgetter getint = (intgetter)dlsym(lib_handle, "getint");
assert(getint != NULL);
callint(getint(0), 54);
callint(getint(1), 9000);
assert(getint(1000) == NULL);
puts("ok");
return 0;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc'])
self.do_run(src, '''go
void_main.
int_main 201
void 0
void 1
int 0 54
int 1 9000
ok
''', force_c=True)
@needs_dlfcn
def test_dlfcn_mallocs(self):
# will be exhausted without functional malloc/free
self.set_setting('INITIAL_MEMORY', 64 * 1024 * 1024)
self.prep_dlfcn_lib()
lib_src = r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
void *mallocproxy(int n) { return malloc(n); }
void freeproxy(void *p) { free(p); }
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_mallocproxy', '_freeproxy'])
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.c')
self.build_dlfcn_lib(lib_src, dirname, filename)
self.prep_dlfcn_main()
src = open(path_from_root('tests', 'dlmalloc_proxy.c')).read()
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_free'])
self.do_run(src, '''*294,153*''', force_c=True)
@needs_dlfcn
def test_dlfcn_longjmp(self):
self.prep_dlfcn_lib()
lib_src = r'''
#include <setjmp.h>
#include <stdio.h>
void jumpy(jmp_buf buf) {
static int i = 0;
i++;
if (i == 10) longjmp(buf, i);
printf("pre %d\n", i);
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_jumpy'])
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.c')
self.build_dlfcn_lib(lib_src, dirname, filename)
self.prep_dlfcn_main()
src = r'''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
#include <setjmp.h>
typedef void (*jumpfunc)(jmp_buf);
int main() {
printf("go!\n");
void *lib_handle;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
jumpfunc jumpy = (jumpfunc)dlsym(lib_handle, "jumpy");
assert(jumpy);
jmp_buf buf;
int jmpval = setjmp(buf);
if (jmpval == 0) {
while (1) jumpy(buf);
} else {
printf("out!\n");
}
return 0;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_free'])
self.do_run(src, '''go!
pre 1
pre 2
pre 3
pre 4
pre 5
pre 6
pre 7
pre 8
pre 9
out!
''', force_c=True)
# TODO: make this work. need to forward tempRet0 across modules
# TODO Enable @with_both_exception_handling (the test is not working now)
@needs_dlfcn
def zzztest_dlfcn_exceptions(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
self.prep_dlfcn_lib()
lib_src = r'''
extern "C" {
int ok() {
return 65;
}
int fail() {
throw 123;
}
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_ok', '_fail'])
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
self.build_dlfcn_lib(lib_src, dirname, filename)
self.prep_dlfcn_main()
src = r'''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
typedef int (*intfunc)();
int main() {
printf("go!\n");
void *lib_handle;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
intfunc okk = (intfunc)dlsym(lib_handle, "ok");
intfunc faill = (intfunc)dlsym(lib_handle, "fail");
assert(okk && faill);
try {
printf("ok: %d\n", okk());
} catch(...) {
printf("wha\n");
}
try {
printf("fail: %d\n", faill());
} catch(int x) {
printf("int %d\n", x);
}
try {
printf("fail: %d\n", faill());
} catch(double x) {
printf("caught %f\n", x);
}
return 0;
}
'''
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_free'])
self.do_run(src, '''go!
ok: 65
int 123
ok
''')
@needs_dlfcn
def test_dlfcn_handle_alloc(self):
# verify that dlopen does not allocate already used handles
dirname = self.get_dir()
def indir(name):
return os.path.join(dirname, name)
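    # Template for two tiny libraries whose static constructors print the
    # library name when it is loaded.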
libecho = r'''
#include <stdio.h>
static struct %(libname)s {
%(libname)s() {
puts("%(libname)s: loaded");
}
} _;
'''
self.prep_dlfcn_lib()
self.build_dlfcn_lib(libecho % {'libname': 'a'}, dirname, indir('a.cpp'))
shutil.move(indir('liblib.so'), indir('liba.so'))
self.build_dlfcn_lib(libecho % {'libname': 'b'}, dirname, indir('b.cpp'))
shutil.move(indir('liblib.so'), indir('libb.so'))
self.set_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
self.set_setting('EXPORT_ALL')
self.emcc_args += ['--embed-file', '.@/']
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', 32 * 1024 * 1024)
src = r'''
#include <dlfcn.h>
#include <assert.h>
#include <stddef.h>
int main() {
void *liba, *libb, *liba2;
int err;
liba = dlopen("liba.so", RTLD_NOW);
assert(liba != NULL);
libb = dlopen("libb.so", RTLD_NOW);
        assert(libb != NULL);
err = dlclose(liba);
assert(!err);
liba2 = dlopen("liba.so", RTLD_NOW);
assert(liba2 != libb);
return 0;
}
'''
self.do_run(src, 'a: loaded\nb: loaded\na: loaded\n')
@needs_dlfcn
@bleeding_edge_wasm_backend
def test_dlfcn_feature_in_lib(self):
self.emcc_args.append('-mnontrapping-fptoint')
self.prep_dlfcn_lib()
lib_src = r'''
extern "C" int magic(float x) {
return __builtin_wasm_trunc_saturate_s_i32_f32(x);
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
self.build_dlfcn_lib(lib_src, dirname, filename)
self.prep_dlfcn_main()
src = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
typedef int (*fi)(float);
int main() {
void *lib_handle = dlopen("liblib.so", 0);
if (!lib_handle) {
puts(dlerror());
abort();
}
fi x = (fi)dlsym(lib_handle, "magic");
if (!x) {
puts(dlerror());
abort();
}
printf("float: %d.\n", x(42.99));
return 0;
}
'''
self.do_run(src, 'float: 42.\n')
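  # Core helper for the dylink tests: build `side` into liblib.so as a
  # SIDE_MODULE, then build and run `main` as a MAIN_MODULE that links it at
  # startup via RUNTIME_LINKED_LIBS (unless auto_load is False). When
  # need_reverse is set, the test is repeated with main and side swapped.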
def dylink_test(self, main, side, expected=None, header=None, main_emcc_args=[], force_c=False, need_reverse=True, auto_load=True, **kwargs):
if header:
create_test_file('header.h', header)
old_args = self.emcc_args[:]
# side settings
self.clear_setting('MAIN_MODULE')
self.set_setting('SIDE_MODULE')
side_suffix = 'wasm' if self.is_wasm() else 'js'
if isinstance(side, list):
# side is just a library
try_delete('liblib.cpp.o.' + side_suffix)
run_process([EMCC] + side + self.get_emcc_args() + ['-o', os.path.join(self.get_dir(), 'liblib.cpp.o.' + side_suffix)])
else:
base = 'liblib.cpp' if not force_c else 'liblib.c'
try_delete(base + '.o.' + side_suffix)
self.build(side, self.get_dir(), base, js_outfile=(side_suffix == 'js'))
if force_c:
shutil.move(base + '.o.' + side_suffix, 'liblib.cpp.o.' + side_suffix)
if SPIDERMONKEY_ENGINE and os.path.exists(SPIDERMONKEY_ENGINE[0]) and not self.is_wasm():
out = run_js('liblib.cpp.o.js', engine=SPIDERMONKEY_ENGINE, full_output=True, stderr=STDOUT)
if 'asm' in out:
self.validate_asmjs(out)
shutil.move('liblib.cpp.o.' + side_suffix, 'liblib.so')
# main settings
self.set_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
if auto_load:
self.set_setting('RUNTIME_LINKED_LIBS', ['liblib.so'])
self.emcc_args += main_emcc_args
if isinstance(main, list):
# main is just a library
try_delete('src.cpp.o.js')
run_process([EMCC] + main + self.emcc_args + self.serialize_settings() + ['-o', 'src.cpp.o.js'])
self.do_run(None, expected, no_build=True, **kwargs)
else:
self.do_run(main, expected, force_c=force_c, **kwargs)
self.emcc_args = old_args
if need_reverse:
# test the reverse as well
print('flip')
self.dylink_test(side, main, expected, header, main_emcc_args + ['--no-entry'], force_c, need_reverse=False, **kwargs)
def do_basic_dylink_test(self, need_reverse=True):
self.dylink_test(r'''
#include <stdio.h>
#include "header.h"
int main() {
printf("other says %d.\n", sidey());
return 0;
}
''', '''
#include "header.h"
int sidey() {
return 11;
}
''', 'other says 11.', 'extern "C" int sidey();', need_reverse=need_reverse)
@needs_dlfcn
def test_dylink_basics(self):
self.do_basic_dylink_test()
@needs_dlfcn
def test_dylink_no_export(self):
self.set_setting('NO_DECLARE_ASM_MODULE_EXPORTS')
self.do_basic_dylink_test()
@needs_dlfcn
def test_dylink_memory_growth(self):
if not self.is_wasm():
self.skipTest('wasm only')
self.set_setting('ALLOW_MEMORY_GROWTH', 1)
self.do_basic_dylink_test()
@needs_dlfcn
def test_dylink_safe_heap(self):
self.set_setting('SAFE_HEAP', 1)
self.do_basic_dylink_test()
@needs_dlfcn
def test_dylink_function_pointer_equality(self):
self.dylink_test(r'''
#include <stdio.h>
#include "header.h"
int main() {
void* puts_side = get_address();
printf("main module address %p.\n", &puts);
printf("side module address address %p.\n", puts_side);
if (&puts == puts_side)
printf("success\n");
else
printf("failure\n");
return 0;
}
''', '''
#include <stdio.h>
#include "header.h"
void* get_address() {
return (void*)&puts;
}
''', 'success', header='extern "C" void* get_address();')
@needs_dlfcn
def test_dylink_floats(self):
self.dylink_test(r'''
#include <stdio.h>
extern float sidey();
int main() {
printf("other says %.2f.\n", sidey()+1);
return 0;
}
''', '''
float sidey() { return 11.5; }
''', 'other says 12.50')
@needs_dlfcn
def test_dylink_printfs(self):
self.dylink_test(r'''
#include <stdio.h>
extern "C" void sidey();
int main() {
printf("hello from main\n");
sidey();
return 0;
}
''', r'''
#include <stdio.h>
extern "C" void sidey() {
printf("hello from side\n");
}
''', 'hello from main\nhello from side\n')
# Verify that a function pointer can be passed back and forth and invoked
# on both sides.
@needs_dlfcn
def test_dylink_funcpointer(self):
self.dylink_test(
main=r'''
#include <stdio.h>
#include <assert.h>
#include "header.h"
intfunc sidey(intfunc f);
void a(int arg) { printf("hello from funcptr: %d\n", arg); }
int main() {
intfunc b = sidey(a);
assert(a == b);
b(0);
return 0;
}
''',
side='''
#include "header.h"
intfunc sidey(intfunc f) { f(1); return f; }
''',
expected='hello from funcptr: 1\nhello from funcptr: 0\n',
header='typedef void (*intfunc)(int );')
  @needs_dlfcn
  def test_dylink_static_funcpointers(self):
    # test dynamic linking of a module with multiple function pointers, stored
    # statically
self.dylink_test(
main=r'''
#include <stdio.h>
#include "header.h"
void areturn0() { printf("hello 0\n"); }
void areturn1() { printf("hello 1\n"); }
void areturn2() { printf("hello 2\n"); }
voidfunc func_ptrs[3] = { areturn0, areturn1, areturn2 };
int main(int argc, char **argv) {
sidey(func_ptrs[0]);
sidey(func_ptrs[1]);
sidey(func_ptrs[2]);
return 0;
}
''',
side='''
#include "header.h"
void sidey(voidfunc f) { f(); }
''',
expected='hello 0\nhello 1\nhello 2\n',
header='typedef void (*voidfunc)(); void sidey(voidfunc f);')
@no_wasm('uses function tables in an asm.js specific way')
@needs_dlfcn
def test_dylink_asmjs_funcpointers(self):
self.dylink_test(
main=r'''
#include "header.h"
#include <emscripten.h>
void left1() { printf("left1\n"); }
void left2() { printf("left2\n"); }
voidfunc getleft1() { return left1; }
voidfunc getleft2() { return left2; }
int main(int argc, char **argv) {
printf("main\n");
EM_ASM({
// make the function table sizes a non-power-of-two
var newSize = alignFunctionTables();
//out('old size of function tables: ' + newSize);
while ((newSize & 3) !== 3) {
Module['FUNCTION_TABLE_v'].push(0);
newSize = alignFunctionTables();
}
//out('new size of function tables: ' + newSize);
// when masked, the two function pointers 1 and 2 should not happen to fall back to the right place
assert(((newSize+1) & 3) !== 1 || ((newSize+2) & 3) !== 2);
loadDynamicLibrary('liblib.so');
});
volatilevoidfunc f;
f = (volatilevoidfunc)left1;
f();
f = (volatilevoidfunc)left2;
f();
f = (volatilevoidfunc)getright1();
f();
f = (volatilevoidfunc)getright2();
f();
second();
return 0;
}
''',
side=r'''
#include "header.h"
void right1() { printf("right1\n"); }
void right2() { printf("right2\n"); }
voidfunc getright1() { return right1; }
voidfunc getright2() { return right2; }
void second() {
printf("second\n");
volatilevoidfunc f;
f = (volatilevoidfunc)getleft1();
f();
f = (volatilevoidfunc)getleft2();
f();
f = (volatilevoidfunc)right1;
f();
f = (volatilevoidfunc)right2;
f();
}
''',
expected='main\nleft1\nleft2\nright1\nright2\nsecond\nleft1\nleft2\nright1\nright2\n',
header='''
#include <stdio.h>
typedef void (*voidfunc)();
typedef volatile voidfunc volatilevoidfunc;
voidfunc getleft1();
voidfunc getleft2();
voidfunc getright1();
voidfunc getright2();
void second();
''', need_reverse=False, auto_load=False)
@needs_dlfcn
def test_dylink_funcpointers_wrapper(self):
self.dylink_test(
main=r'''\
#include <stdio.h>
#include "header.h"
int main(int argc, char **argv) {
charfunc f1 = emscripten_run_script;
f1("out('one')");
charfunc f2 = get();
f2("out('two')");
return 0;
}
''',
side='''\
#include "header.h"
charfunc get() {
return emscripten_run_script;
}
''',
expected='one\ntwo\n',
header='''\
#include <emscripten.h>
typedef void (*charfunc)(const char*);
extern charfunc get();
''')
@needs_dlfcn
def test_dylink_static_funcpointer_float(self):
self.dylink_test(
main=r'''\
#include <stdio.h>
#include "header.h"
int sidey(floatfunc f);
float func1(float f) { printf("hello 1: %f\n", f); return 0; }
floatfunc f1 = &func1;
int main(int argc, char **argv) {
printf("got: %d\n", sidey(f1));
f1(12.34);
return 0;
}
''',
side='''\
#include "header.h"
int sidey(floatfunc f) { f(56.78); return 1; }
''',
expected='hello 1: 56.779999\ngot: 1\nhello 1: 12.340000\n',
header='typedef float (*floatfunc)(float);')
@needs_dlfcn
def test_dylink_global_init(self):
self.dylink_test(r'''
#include <stdio.h>
struct Class {
Class() { printf("a new Class\n"); }
};
static Class c;
int main() {
return 0;
}
''', r'''
void nothing() {}
''', 'a new Class\n')
@needs_dlfcn
def test_dylink_global_inits(self):
def test():
self.dylink_test(header=r'''
#include <stdio.h>
struct Class {
Class(const char *name) { printf("new %s\n", name); }
};
''', main=r'''
#include "header.h"
static Class c("main");
int main() {
return 0;
}
''', side=r'''
#include "header.h"
static Class c("side");
''', expected=['new main\nnew side\n', 'new side\nnew main\n'])
test()
# TODO: this in wasm
if self.get_setting('ASSERTIONS') == 1 and not self.is_wasm():
print('check warnings')
self.set_setting('ASSERTIONS', 2)
test()
full = run_js('src.cpp.o.js', engine=JS_ENGINES[0], full_output=True, stderr=STDOUT)
self.assertNotContained("trying to dynamically load symbol '__ZN5ClassC2EPKc' (from 'liblib.so') that already exists", full)
@needs_dlfcn
def test_dylink_i64(self):
self.dylink_test(r'''
#include <stdio.h>
#include <stdint.h>
extern int64_t sidey();
int main() {
printf("other says %llx.\n", sidey());
return 0;
}
''', '''
#include <stdint.h>
int64_t sidey() {
volatile int64_t x = 11;
x = x * x * x * x;
x += x % 17;
x += (x * (1 << 30));
x -= 96;
x = (x + 1000) / ((x % 5) + 1);
volatile uint64_t y = x / 2;
x = y / 3;
y = y * y * y * y;
y += y % 17;
y += (y * (1 << 30));
y -= 121;
y = (y + 1000) / ((y % 5) + 1);
x += y;
return x;
}
''', 'other says 175a1ddee82b8c31.')
@all_engines
@needs_dlfcn
def test_dylink_i64_b(self):
self.dylink_test(r'''
#include <stdio.h>
#include <stdint.h>
extern int64_t sidey();
int64_t testAdd(int64_t a) {
return a + 1;
}
int64_t testAddB(int a) {
return a + 1;
}
typedef int64_t (*testAddHandler)(int64_t);
testAddHandler h = &testAdd;
typedef int64_t (*testAddBHandler)(int);
testAddBHandler hb = &testAddB;
int main() {
printf("other says %lld.\n", sidey());
int64_t r = h(42);
printf("my fp says: %lld.\n", r);
int64_t rb = hb(42);
printf("my second fp says: %lld.\n", r);
}
''', '''
#include <stdint.h>
int64_t sidey() {
volatile int64_t x = 0x12345678abcdef12LL;
x += x % 17;
x = 18 - x;
return x;
}
''', 'other says -1311768467750121224.\nmy fp says: 43.\nmy second fp says: 43.')
@needs_dlfcn
@also_with_wasm_bigint
def test_dylink_i64_c(self):
self.dylink_test(r'''
#include <cstdio>
#include <cinttypes>
#include "header.h"
typedef int32_t (*fp_type_32)(int32_t, int32_t, int32_t);
typedef int64_t (*fp_type_64)(int32_t, int32_t, int32_t);
int32_t internal_function_ret_32(int32_t i, int32_t j, int32_t k) {
return 32;
}
int64_t internal_function_ret_64(int32_t i, int32_t j, int32_t k) {
return 64;
}
int main() {
fp_type_32 fp32_internal = &internal_function_ret_32;
fp_type_32 fp32_external = &function_ret_32;
fp_type_64 fp64_external = &function_ret_64;
fp_type_64 fp64_internal = &internal_function_ret_64;
int32_t ires32 = fp32_internal(0,0,0);
printf("res32 - internal %d\n",ires32);
int32_t eres32 = fp32_external(0,0,0);
printf("res32 - external %d\n",eres32);
int64_t ires64 = fp64_internal(0,0,0);
printf("res64 - internal %" PRId64 "\n",ires64);
int64_t eres64 = fp64_external(0,0,0);
printf("res64 - external %" PRId64 "\n",eres64);
return 0;
}
''', '''
#include "header.h"
int32_t function_ret_32(int32_t i, int32_t j, int32_t k) {
return 32;
}
int64_t function_ret_64(int32_t i, int32_t j, int32_t k) {
return 64;
}
''', '''res32 - internal 32
res32 - external 32
res64 - internal 64
res64 - external 64\n''', header='''
#include <emscripten.h>
#include <cstdint>
EMSCRIPTEN_KEEPALIVE int32_t function_ret_32(int32_t i, int32_t j, int32_t k);
EMSCRIPTEN_KEEPALIVE int64_t function_ret_64(int32_t i, int32_t j, int32_t k);
''')
@needs_dlfcn
def test_dylink_class(self):
self.dylink_test(header=r'''
#include <stdio.h>
struct Class {
Class(const char *name);
};
''', main=r'''
#include "header.h"
int main() {
Class c("main");
return 0;
}
''', side=r'''
#include "header.h"
Class::Class(const char *name) { printf("new %s\n", name); }
''', expected=['new main\n'])
@needs_dlfcn
def test_dylink_global_var(self):
self.dylink_test(main=r'''
#include <stdio.h>
extern int x;
int main() {
printf("extern is %d.\n", x);
return 0;
}
''', side=r'''
int x = 123;
''', expected=['extern is 123.\n'])
@needs_dlfcn
def test_dylink_global_var_modded(self):
self.dylink_test(main=r'''
#include <stdio.h>
extern int x;
int main() {
printf("extern is %d.\n", x);
return 0;
}
''', side=r'''
int x = 123;
struct Initter {
Initter() { x = 456; }
};
Initter initter;
''', expected=['extern is 456.\n'])
@needs_dlfcn
def test_dylink_stdlib(self):
self.dylink_test(header=r'''
#include <math.h>
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
double pow_two(double x);
''', main=r'''
#include <stdio.h>
#include "header.h"
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
puts(ret);
printf("pow_two: %d.\n", int(pow_two(5.9)));
return 0;
}
''', side=r'''
#include "header.h"
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
double pow_two(double x) {
return pow(2, x);
}
''', expected=['hello through side\n\npow_two: 59.'])
@needs_dlfcn
def test_dylink_jslib(self):
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
test_lib_func: function(x) {
return x + 17.2;
}
});
''')
self.dylink_test(header=r'''
extern "C" { extern double test_lib_func(int input); }
''', main=r'''
#include <stdio.h>
#include "header.h"
extern double sidey();
int main2() { return 11; }
int main() {
int input = sidey();
double temp = test_lib_func(input);
printf("other says %.2f\n", temp);
printf("more: %.5f, %d\n", temp, input);
return 0;
}
''', side=r'''
#include <stdio.h>
#include "header.h"
extern int main2();
double sidey() {
int temp = main2();
printf("main2 sed: %d\n", temp);
printf("main2 sed: %u, %c\n", temp, temp/2);
return test_lib_func(temp);
}
''', expected='other says 45.2', main_emcc_args=['--js-library', 'lib.js'])
@needs_dlfcn
def test_dylink_global_var_jslib(self):
create_test_file('lib.js', r'''
mergeInto(LibraryManager.library, {
jslib_x: '{{{ makeStaticAlloc(4) }}}',
jslib_x__postset: 'HEAP32[_jslib_x>>2] = 148;',
});
''')
self.dylink_test(main=r'''
#include <stdio.h>
extern "C" int jslib_x;
extern void call_side();
int main() {
printf("main: jslib_x is %d.\n", jslib_x);
call_side();
return 0;
}
''', side=r'''
#include <stdio.h>
extern "C" int jslib_x;
void call_side() {
printf("side: jslib_x is %d.\n", jslib_x);
}
''', expected=['main: jslib_x is 148.\nside: jslib_x is 148.\n'], main_emcc_args=['--js-library', 'lib.js', '-s', 'EXPORTED_FUNCTIONS=["_main", "_jslib_x"]'])
@needs_dlfcn
def test_dylink_many_postsets(self):
NUM = 1234
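    # A static table of 1234 function pointers gives both the main and side
    # modules a large number of relocations/postsets to apply at startup.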
self.dylink_test(header=r'''
#include <stdio.h>
typedef void (*voidfunc)();
static void simple() {
printf("simple.\n");
}
static volatile voidfunc funcs[''' + str(NUM) + '] = { ' + ','.join(['simple'] * NUM) + r''' };
static void test() {
volatile int i = ''' + str(NUM - 1) + r''';
funcs[i]();
i = 0;
funcs[i]();
}
extern void more();
''', main=r'''
#include "header.h"
int main() {
test();
more();
return 0;
}
''', side=r'''
#include "header.h"
void more() {
test();
}
''', expected=['simple.\nsimple.\nsimple.\nsimple.\n'])
@needs_dlfcn
def test_dylink_postsets_chunking(self):
self.dylink_test(header=r'''
extern int global_var;
''', main=r'''
#include <stdio.h>
#include "header.h"
        // prepare 99 global variables with local initializers
static int p = 1;
#define P(x) __attribute__((used)) int *padding##x = &p;
P(01) P(02) P(03) P(04) P(05) P(06) P(07) P(08) P(09) P(10)
P(11) P(12) P(13) P(14) P(15) P(16) P(17) P(18) P(19) P(20)
P(21) P(22) P(23) P(24) P(25) P(26) P(27) P(28) P(29) P(30)
P(31) P(32) P(33) P(34) P(35) P(36) P(37) P(38) P(39) P(40)
P(41) P(42) P(43) P(44) P(45) P(46) P(47) P(48) P(49) P(50)
P(51) P(52) P(53) P(54) P(55) P(56) P(57) P(58) P(59) P(60)
P(61) P(62) P(63) P(64) P(65) P(66) P(67) P(68) P(69) P(70)
P(71) P(72) P(73) P(74) P(75) P(76) P(77) P(78) P(79) P(80)
P(81) P(82) P(83) P(84) P(85) P(86) P(87) P(88) P(89) P(90)
P(91) P(92) P(93) P(94) P(95) P(96) P(97) P(98) P(99)
        // prepare a global variable with a global initializer
int *ptr = &global_var;
int main(int argc, char *argv[]) {
printf("%d\n", *ptr);
}
''', side=r'''
#include "header.h"
int global_var = 12345;
''', expected=['12345\n'])
@needs_dlfcn
def test_dylink_syslibs(self): # one module uses libcxx, need to force its inclusion when it isn't the main
# https://github.com/emscripten-core/emscripten/issues/10571
return self.skipTest('Currently not working due to duplicate symbol errors in wasm-ld')
def test(syslibs, expect_pass=True, need_reverse=True):
print('syslibs', syslibs, self.get_setting('ASSERTIONS'))
passed = True
try:
with env_modify({'EMCC_FORCE_STDLIBS': syslibs}):
self.dylink_test(main=r'''
void side();
int main() {
side();
return 0;
}
''', side=r'''
#include <iostream>
void side() { std::cout << "cout hello from side\n"; }
''', expected=['cout hello from side\n'], need_reverse=need_reverse, assert_returncode=None)
except Exception as e:
if expect_pass:
raise
print('(seeing expected fail)')
passed = False
assertion = 'build the MAIN_MODULE with EMCC_FORCE_STDLIBS=1 in the environment'
if self.get_setting('ASSERTIONS'):
self.assertContained(assertion, str(e))
else:
self.assertNotContained(assertion, str(e))
assert passed == expect_pass, ['saw', passed, 'but expected', expect_pass]
test('libc++')
test('1')
if not self.has_changed_setting('ASSERTIONS'):
self.set_setting('ASSERTIONS', 0)
test('', expect_pass=False, need_reverse=False)
self.set_setting('ASSERTIONS', 1)
test('', expect_pass=False, need_reverse=False)
@needs_dlfcn
@with_env_modify({'EMCC_FORCE_STDLIBS': 'libc++'})
def test_dylink_iostream(self):
self.dylink_test(header=r'''
#include <iostream>
#include <string>
std::string side();
''', main=r'''
#include "header.h"
int main() {
std::cout << "hello from main " << side() << std::endl;
return 0;
}
''', side=r'''
#include "header.h"
std::string side() { return "and hello from side"; }
''', expected=['hello from main and hello from side\n'])
@needs_dlfcn
def test_dylink_dynamic_cast(self): # issue 3465
self.dylink_test(header=r'''
class Base {
public:
virtual void printName();
};
class Derived : public Base {
public:
void printName();
};
''', main=r'''
#include "header.h"
#include <iostream>
using namespace std;
int main() {
cout << "starting main" << endl;
Base *base = new Base();
Base *derived = new Derived();
base->printName();
derived->printName();
if (dynamic_cast<Derived*>(derived)) {
cout << "OK" << endl;
} else {
cout << "KO" << endl;
}
return 0;
}
''', side=r'''
#include "header.h"
#include <iostream>
using namespace std;
void Base::printName() {
cout << "Base" << endl;
}
void Derived::printName() {
cout << "Derived" << endl;
}
''', expected=['starting main\nBase\nDerived\nOK'])
@needs_dlfcn
@with_both_exception_handling
def test_dylink_raii_exceptions(self):
self.dylink_test(main=r'''
#include <stdio.h>
extern int side();
int main() {
printf("from side: %d.\n", side());
}
''', side=r'''
#include <stdio.h>
typedef int (*ifdi)(float, double, int);
int func_with_special_sig(float a, double b, int c) {
printf("special %f %f %d\n", a, b, c);
return 1337;
}
struct DestructorCaller {
~DestructorCaller() { printf("destroy\n"); }
};
int side() {
// d has a destructor that must be called on function
// exit, which means an invoke will be used for the
// indirect call here - and the signature of that call
// is special and not present in the main module, so
// it must be generated for the side module.
DestructorCaller d;
volatile ifdi p = func_with_special_sig;
return p(2.18281, 3.14159, 42);
}
''', expected=['special 2.182810 3.141590 42\ndestroy\nfrom side: 1337.\n'])
@needs_dlfcn
@no_wasm_backend('wasm backend resolves symbols greedily on startup')
def test_dylink_hyper_dupe(self):
self.set_setting('INITIAL_MEMORY', 64 * 1024 * 1024)
if not self.has_changed_setting('ASSERTIONS'):
self.set_setting('ASSERTIONS', 2)
# test hyper-dynamic linking, and test duplicate warnings
create_test_file('third.cpp', r'''
#include <stdio.h>
int sidef() { return 36; }
int sideg = 49;
int bsidef() { return 536; }
extern void only_in_second_1(int x);
extern int second_to_third;
int third_to_second = 1337;
void only_in_third_0() {
// note we access our own globals directly, so
// it doesn't matter that overriding failed
printf("only_in_third_0: %d, %d, %d\n", sidef(), sideg, second_to_third);
only_in_second_1(2112);
}
void only_in_third_1(int x) {
printf("only_in_third_1: %d, %d, %d, %d\n", sidef(), sideg, second_to_third, x);
}
''')
if self.is_wasm():
libname = 'third.wasm'
else:
libname = 'third.js'
run_process([EMCC, 'third.cpp', '-o', libname, '-s', 'SIDE_MODULE', '-s', 'EXPORT_ALL'] + self.get_emcc_args())
self.dylink_test(main=r'''
#include <stdio.h>
#include <emscripten.h>
extern int sidef();
extern int sideg;
extern int bsidef();
extern int bsideg;
extern void only_in_second_0();
extern void only_in_third_0();
int main() {
EM_ASM({
loadDynamicLibrary('%s'); // hyper-dynamic! works at least for functions (and consts not used in same block)
});
printf("sidef: %%d, sideg: %%d.\n", sidef(), sideg);
printf("bsidef: %%d.\n", bsidef());
only_in_second_0();
only_in_third_0();
}
''' % libname,
side=r'''
#include <stdio.h>
int sidef() { return 10; } // third will try to override these, but fail!
int sideg = 20;
extern void only_in_third_1(int x);
int second_to_third = 500;
extern int third_to_second;
void only_in_second_0() {
printf("only_in_second_0: %d, %d, %d\n", sidef(), sideg, third_to_second);
only_in_third_1(1221);
}
void only_in_second_1(int x) {
printf("only_in_second_1: %d, %d, %d, %d\n", sidef(), sideg, third_to_second, x);
}
''',
expected=['sidef: 10, sideg: 20.\nbsidef: 536.\nonly_in_second_0: 10, 20, 1337\nonly_in_third_1: 36, 49, 500, 1221\nonly_in_third_0: 36, 49, 500\nonly_in_second_1: 10, 20, 1337, 2112\n'],
need_reverse=not self.is_wasm()) # in wasm, we can't flip as the side would have an EM_ASM, which we don't support yet TODO
if not self.has_changed_setting('ASSERTIONS'):
print('check warnings')
full = run_js('src.cpp.o.js', engine=JS_ENGINES[0], full_output=True, stderr=STDOUT)
self.assertContained("warning: symbol '_sideg' from '%s' already exists" % libname, full)
@needs_dlfcn
@no_wasm_backend('possible https://github.com/emscripten-core/emscripten/issues/9038')
def test_dylink_dso_needed(self):
def do_run(src, expected_output):
self.do_run(src + 'int main() { return test_main(); }', expected_output)
self._test_dylink_dso_needed(do_run)
@needs_dlfcn
def test_dylink_dot_a(self):
# .a linking must force all .o files inside it, when in a shared module
create_test_file('third.cpp', 'extern "C" int sidef() { return 36; }')
create_test_file('fourth.cpp', 'extern "C" int sideg() { return 17; }')
run_process([EMCC, '-c', 'third.cpp', '-o', 'third.o'] + self.get_emcc_args())
run_process([EMCC, '-c', 'fourth.cpp', '-o', 'fourth.o'] + self.get_emcc_args())
run_process([EMAR, 'rc', 'libfourth.a', 'fourth.o'])
self.dylink_test(main=r'''
#include <stdio.h>
#include <emscripten.h>
extern "C" int sidef();
extern "C" int sideg();
int main() {
printf("sidef: %d, sideg: %d.\n", sidef(), sideg());
}
''',
# contents of libfourth.a must be included, even if they aren't referred to!
side=['libfourth.a', 'third.o'],
expected=['sidef: 36, sideg: 17.\n'])
@needs_dlfcn
def test_dylink_spaghetti(self):
self.dylink_test(main=r'''
#include <stdio.h>
int main_x = 72;
extern int side_x;
int adjust = side_x + 10;
int *ptr = &side_x;
struct Class {
Class() {
printf("main init sees %d, %d, %d.\n", adjust, *ptr, main_x);
}
};
Class cm;
int main() {
printf("main main sees %d, %d, %d.\n", adjust, *ptr, main_x);
return 0;
}
''', side=r'''
#include <stdio.h>
extern int main_x;
int side_x = -534;
int adjust2 = main_x + 10;
int *ptr2 = &main_x;
struct Class {
Class() {
printf("side init sees %d, %d, %d.\n", adjust2, *ptr2, side_x);
}
};
Class cs;
''', expected=['side init sees 82, 72, -534.\nmain init sees -524, -534, 72.\nmain main sees -524, -534, 72.',
'main init sees -524, -534, 72.\nside init sees 82, 72, -534.\nmain main sees -524, -534, 72.'])
@needs_make('mingw32-make')
@needs_dlfcn
def test_dylink_zlib(self):
self.emcc_args += ['-I' + path_from_root('tests', 'third_party', 'zlib'), '-s', 'RELOCATABLE']
zlib_archive = self.get_zlib_library()
self.dylink_test(main=open(path_from_root('tests', 'third_party', 'zlib', 'example.c')).read(),
side=zlib_archive,
expected=open(path_from_root('tests', 'core', 'test_zlib.out')).read(),
force_c=True)
# @needs_dlfcn
# def test_dylink_bullet(self):
# self.emcc_args += ['-I' + path_from_root('tests', 'bullet', 'src')]
# side = self.get_bullet_library(self, True)
# self.dylink_test(main=open(path_from_root('tests', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp')).read(),
# side=side,
# expected=[open(path_from_root('tests', 'bullet', 'output.txt')).read(), # different roundings
# open(path_from_root('tests', 'bullet', 'output2.txt')).read(),
# open(path_from_root('tests', 'bullet', 'output3.txt')).read()])
@needs_dlfcn
@no_fastcomp('https://github.com/emscripten-core/emscripten/issues/8376')
def test_dylink_rtti(self):
    # Verify that objects created in one module can be dynamic_cast<> correctly
    # in another module.
    # Each module will define its own copy of certain COMDAT symbols such as
    # each class's typeinfo, but at runtime they should both use the same one.
header = '''
#include <cstddef>
class Foo {
public:
virtual ~Foo() {}
};
class Bar : public Foo {
public:
virtual ~Bar() {}
};
bool is_bar(Foo* foo);
'''
main = '''
#include <stdio.h>
#include "header.h"
int main() {
Bar bar;
if (!is_bar(&bar)) {
puts("failure");
return 1;
}
puts("success");
return 0;
}
'''
side = '''
#include "header.h"
bool is_bar(Foo* foo) {
return dynamic_cast<Bar*>(foo) != nullptr;
}
'''
self.dylink_test(main=main,
side=side,
header=header,
expected='success')
def test_random(self):
src = r'''#include <stdlib.h>
#include <stdio.h>
int main()
{
srandom(0xdeadbeef);
printf("%ld\n", random());
}
'''
self.do_run(src, '956867869')
def test_rand(self):
src = r'''#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
int main()
{
      // we need RAND_MAX to be a bitmask (power of 2 minus 1). this assertion guarantees
      // that if RAND_MAX changes, the test failure will focus attention on that issue here.
assert(RAND_MAX == 0x7fffffff);
srand(0xdeadbeef);
for(int i = 0; i < 10; ++i)
printf("%d\n", rand());
unsigned int seed = 0xdeadbeef;
for(int i = 0; i < 10; ++i)
printf("%d\n", rand_r(&seed));
bool haveEvenAndOdd = true;
for(int i = 1; i <= 30; ++i)
{
int mask = 1 << i;
if (mask > RAND_MAX) break;
bool haveEven = false;
bool haveOdd = false;
for(int j = 0; j < 1000 && (!haveEven || !haveOdd); ++j)
{
if ((rand() & mask) == 0)
haveEven = true;
else
haveOdd = true;
}
haveEvenAndOdd = haveEvenAndOdd && haveEven && haveOdd;
}
if (haveEvenAndOdd)
printf("Have even and odd!\n");
return 0;
}
'''
expected = '''490242850
2074599277
1480056542
1912638067
931112055
2110392489
2053422194
1614832492
216117595
174823244
760368382
602359081
1121118963
1291018924
1608306807
352705809
958258461
1182561381
114276303
1481323674
Have even and odd!
'''
self.do_run(src, expected)
def test_strtod(self):
src = open(path_from_root('tests', 'core', 'test_strtod.c')).read()
expected = open(path_from_root('tests', 'core', 'test_strtod.out')).read()
self.do_run(src, expected)
def test_strtold(self):
self.do_run_in_out_file_test('tests', 'core', 'test_strtold')
def test_strtok(self):
self.do_run_in_out_file_test('tests', 'core', 'test_strtok')
def test_parseInt(self):
self.do_run_in_out_file_test('tests', 'core', 'test_parseInt')
def test_transtrcase(self):
self.do_run_in_out_file_test('tests', 'core', 'test_transtrcase')
@no_wasm2js('very slow to compile')
def test_printf(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'printf', 'test')
def test_printf_2(self):
self.do_run_in_out_file_test('tests', 'core', 'test_printf_2')
def test_printf_float(self):
self.do_run_in_out_file_test('tests', 'printf', 'test_float')
def test_printf_octal(self):
self.do_run_in_out_file_test('tests', 'printf', 'test_octal')
def test_vprintf(self):
self.do_run_in_out_file_test('tests', 'core', 'test_vprintf')
def test_vsnprintf(self):
self.do_run_in_out_file_test('tests', 'core', 'test_vsnprintf')
def test_printf_more(self):
self.do_run_in_out_file_test('tests', 'core', 'test_printf_more')
def test_perrar(self):
self.do_run_in_out_file_test('tests', 'core', 'test_perrar')
def test_atoX(self):
self.do_run_in_out_file_test('tests', 'core', 'test_atoX')
def test_strstr(self):
self.do_run_in_out_file_test('tests', 'core', 'test_strstr')
def test_fnmatch(self):
# Run one test without assertions, for additional coverage
if self.run_name == 'asm2m':
i = self.emcc_args.index('ASSERTIONS=1')
assert i > 0 and self.emcc_args[i - 1] == '-s'
self.emcc_args[i] = 'ASSERTIONS=0'
print('flip assertions off')
self.do_run_in_out_file_test('tests', 'core', 'test_fnmatch')
def test_sscanf(self):
self.do_run_in_out_file_test('tests', 'core', 'test_sscanf')
def test_sscanf_2(self):
    # doubles and floats; the float variant is generated below by replacing %lf/double with %f/float
for ftype in ['float', 'double']:
src = r'''
#include <stdio.h>
int main(){
char strval1[] = "1.2345678901";
char strval2[] = "1.23456789e5";
char strval3[] = "1.23456789E5";
char strval4[] = "1.2345678e-5";
char strval5[] = "1.2345678E-5";
double dblval = 1.2345678901;
double tstval;
sscanf(strval1, "%lf", &tstval);
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval2, "%lf", &tstval);
dblval = 123456.789;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval3, "%lf", &tstval);
dblval = 123456.789;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval4, "%lf", &tstval);
dblval = 0.000012345678;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval5, "%lf", &tstval);
dblval = 0.000012345678;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
return 0;
}
'''
if ftype == 'float':
self.do_run(src.replace('%lf', '%f').replace('double', 'float'), '''Pass: 1.234568 1.234568
Pass: 123456.789062 123456.789062
Pass: 123456.789062 123456.789062
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
else:
self.do_run(src, '''Pass: 1.234568 1.234568
Pass: 123456.789000 123456.789000
Pass: 123456.789000 123456.789000
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
def test_sscanf_n(self):
self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_n')
def test_sscanf_whitespace(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_whitespace')
def test_sscanf_other_whitespace(self):
# use i16s in printf
self.set_setting('SAFE_HEAP', 0)
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_other_whitespace')
def test_sscanf_3(self):
self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_3')
def test_sscanf_4(self):
self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_4')
def test_sscanf_5(self):
self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_5')
def test_sscanf_6(self):
self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_6')
def test_sscanf_skip(self):
self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_skip')
def test_sscanf_caps(self):
self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_caps')
def test_sscanf_hex(self):
self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_hex')
def test_sscanf_float(self):
self.do_run_in_out_file_test('tests', 'core', 'test_sscanf_float')
def test_langinfo(self):
self.do_run_in_out_file_test('tests', 'core', 'test_langinfo')
def test_files(self):
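    # Exercises the JS FS API (lazy files, FS_createDataFile, custom stdin via FS.init)
    # together with closure, to check that closure does not break filesystem support.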
self.banned_js_engines = [SPIDERMONKEY_ENGINE] # closure can generate variables called 'gc', which pick up js shell stuff
if self.maybe_closure(): # Use closure here, to test we don't break FS stuff
self.emcc_args = [x for x in self.emcc_args if x != '-g'] # ensure we test --closure 1 --memory-init-file 1 (-g would disable closure)
elif '-O3' in self.emcc_args and not self.is_wasm():
print('closure 2')
self.emcc_args += ['--closure', '2', '-Wno-almost-asm'] # Use closure 2 here for some additional coverage
return self.skipTest('TODO: currently skipped because CI runs out of memory running Closure in this test!')
self.emcc_args += ['-s', 'FORCE_FILESYSTEM=1', '--pre-js', 'pre.js']
print('base', self.emcc_args)
create_test_file('pre.js', '''
/** @suppress{checkTypes}*/
Module = {
'noFSInit': true,
'preRun': function() {
FS.createLazyFile('/', 'test.file', 'test.file', true, false);
// Test FS_* exporting
Module['FS_createDataFile']('/', 'somefile.binary', [100, 200, 50, 25, 10, 77, 123], true, false, false); // 200 becomes -56, since signed chars are used in memory
var test_files_input = 'hi there!';
var test_files_input_index = 0;
FS.init(function() {
return test_files_input.charCodeAt(test_files_input_index++) || null;
});
}
};
''')
create_test_file('test.file', 'some data')
src = open(path_from_root('tests', 'files.cpp')).read()
mem_file = 'src.cpp.o.js.mem'
try_delete(mem_file)
def clean(out, err):
return '\n'.join([line for line in (out + err).split('\n') if 'binaryen' not in line and 'wasm' not in line and 'so not running' not in line])
self.do_run(src, ('size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\n5 bytes to dev/null: 5\nok.\ntexte\n', 'size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\ntexte\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\n5 bytes to dev/null: 5\nok.\n'),
output_nicerizer=clean)
if self.uses_memory_init_file():
self.assertExists(mem_file)
def test_files_m(self):
# Test for Module.stdin etc.
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
create_test_file('pre.js', '''
Module = {
data: [10, 20, 40, 30],
stdin: function() { return Module.data.pop() || null },
stdout: function(x) { out('got: ' + x) }
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
src = r'''
#include <stdio.h>
#include <unistd.h>
int main () {
char c;
fprintf(stderr, "isatty? %d,%d,%d\n", isatty(fileno(stdin)), isatty(fileno(stdout)), isatty(fileno(stderr)));
while ((c = fgetc(stdin)) != EOF) {
putc(c+5, stdout);
}
return 0;
}
'''
def clean(out, err):
return '\n'.join(l for l in (out + err).splitlines() if 'warning' not in l and 'binaryen' not in l)
self.do_run(src, ('got: 35\ngot: 45\ngot: 25\ngot: 15\nisatty? 0,0,1\n', 'got: 35\ngot: 45\ngot: 25\ngot: 15\nisatty? 0,0,1', 'isatty? 0,0,1\ngot: 35\ngot: 45\ngot: 25\ngot: 15'), output_nicerizer=clean)
def test_mount(self):
self.set_setting('FORCE_FILESYSTEM', 1)
src = open(path_from_root('tests', 'fs', 'test_mount.c')).read()
self.do_run(src, 'success', force_c=True)
def test_getdents64(self):
src = open(path_from_root('tests', 'fs', 'test_getdents64.cpp')).read()
self.do_run(src, '..')
def test_getdents64_special_cases(self):
self.banned_js_engines = [V8_ENGINE] # https://bugs.chromium.org/p/v8/issues/detail?id=6881
src = path_from_root('tests', 'fs', 'test_getdents64_special_cases.cpp')
out = path_from_root('tests', 'fs', 'test_getdents64_special_cases.out')
self.do_run_from_file(src, out, assert_identical=True)
def test_getcwd_with_non_ascii_name(self):
self.banned_js_engines = [V8_ENGINE] # https://bugs.chromium.org/p/v8/issues/detail?id=6881
src = path_from_root('tests', 'fs', 'test_getcwd_with_non_ascii_name.cpp')
out = path_from_root('tests', 'fs', 'test_getcwd_with_non_ascii_name.out')
self.do_run_from_file(src, out, assert_identical=True)
def test_fwrite_0(self):
self.do_run_in_out_file_test('tests', 'core', 'test_fwrite_0')
def test_fgetc_ungetc(self):
print('TODO: update this test once the musl ungetc-on-EOF-stream bug is fixed upstream and reaches us')
self.set_setting('SYSCALL_DEBUG', 1)
self.clear()
orig_compiler_opts = self.emcc_args[:]
for fs in ['MEMFS', 'NODEFS']:
print(fs)
src = open(path_from_root('tests', 'stdio', 'test_fgetc_ungetc.c')).read()
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run(src, 'success', force_c=True, js_engines=[NODE_JS])
def test_fgetc_unsigned(self):
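    # fgetc() must return the byte 0xEA as the non-negative value 234, not as a
    # sign-extended negative char.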
src = r'''
#include <stdio.h>
int main() {
FILE *file = fopen("file_with_byte_234.txt", "rb");
int c = fgetc(file);
printf("*%d\n", c);
}
'''
create_test_file('file_with_byte_234.txt', b'\xea', binary=True)
self.emcc_args += ['--embed-file', 'file_with_byte_234.txt']
self.do_run(src, '*234\n')
def test_fgets_eol(self):
src = r'''
#include <stdio.h>
char buf[32];
int main()
{
const char *r = "SUCCESS";
FILE *f = fopen("eol.txt", "r");
while (fgets(buf, 32, f) != NULL) {
if (buf[0] == '\0') {
r = "FAIL";
break;
}
}
printf("%s\n", r);
fclose(f);
return 0;
}
'''
open('eol.txt', 'wb').write(b'\n')
self.emcc_args += ['--embed-file', 'eol.txt']
self.do_run(src, 'SUCCESS\n')
def test_fscanf(self):
create_test_file('three_numbers.txt', '-1 0.1 -.1')
src = r'''
#include <stdio.h>
#include <assert.h>
#include <float.h>
int main()
{
float x = FLT_MAX, y = FLT_MAX, z = FLT_MAX;
FILE* fp = fopen("three_numbers.txt", "r");
if (fp) {
int match = fscanf(fp, " %f %f %f ", &x, &y, &z);
printf("match = %d\n", match);
printf("x = %0.1f, y = %0.1f, z = %0.1f\n", x, y, z);
} else {
printf("failed to open three_numbers.txt\n");
}
return 0;
}
'''
self.emcc_args += ['--embed-file', 'three_numbers.txt']
self.do_run(src, 'match = 3\nx = -1.0, y = 0.1, z = -0.1\n')
def test_fscanf_2(self):
create_test_file('a.txt', '''1/2/3 4/5/6 7/8/9
''')
self.emcc_args += ['--embed-file', 'a.txt']
self.do_run(r'''#include <cstdio>
#include <iostream>
using namespace std;
int
main(int argc, char **argv) {
cout << "fscanf test" << endl;
FILE * file;
file = fopen("a.txt", "rb");
int vertexIndex[4];
int normalIndex[4];
int uvIndex[4];
int matches = fscanf(file, "%d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d\n", &vertexIndex[0], &uvIndex[0], &normalIndex[0], &vertexIndex [1], &uvIndex[1], &normalIndex[1], &vertexIndex[2], &uvIndex[2], &normalIndex[2], &vertexIndex[3], &uvIndex[3], &normalIndex[3]);
cout << matches << endl;
return 0;
}
''', 'fscanf test\n9\n')
def test_fileno(self):
create_test_file('empty.txt', '')
src = r'''
#include <stdio.h>
#include <unistd.h>
int main()
{
FILE* fp = fopen("empty.txt", "r");
if (fp) {
printf("%d\n", fileno(fp));
} else {
printf("failed to open empty.txt\n");
}
return 0;
}
'''
self.emcc_args += ['--embed-file', 'empty.txt']
self.do_run(src, '3\n')
def test_readdir(self):
self.do_run_in_out_file_test('tests', 'dirent', 'test_readdir')
def test_readdir_empty(self):
self.do_run_in_out_file_test('tests', 'dirent', 'test_readdir_empty')
def test_stat(self):
src = open(path_from_root('tests', 'stat', 'test_stat.c')).read()
self.do_run(src, 'success', force_c=True)
self.verify_in_strict_mode('src.c.o.js')
def test_stat_chmod(self):
src = open(path_from_root('tests', 'stat', 'test_chmod.c')).read()
self.do_run(src, 'success', force_c=True)
def test_stat_mknod(self):
src = open(path_from_root('tests', 'stat', 'test_mknod.c')).read()
self.do_run(src, 'success', force_c=True)
def test_fcntl(self):
self.add_pre_run("FS.createDataFile('/', 'test', 'abcdef', true, true, false);")
self.do_run_in_out_file_test('tests', 'fcntl', 'test_fcntl')
def test_fcntl_open(self):
self.do_run_in_out_file_test('tests', 'fcntl', 'test_fcntl_open')
def test_fcntl_misc(self):
self.add_pre_run("FS.createDataFile('/', 'test', 'abcdef', true, true, false);")
self.do_run_in_out_file_test('tests', 'fcntl', 'test_fcntl_misc')
def test_poll(self):
self.add_pre_run('''
var dummy_device = FS.makedev(64, 0);
FS.registerDevice(dummy_device, {});
FS.createDataFile('/', 'file', 'abcdef', true, true, false);
FS.mkdev('/device', dummy_device);
''')
self.do_run_in_out_file_test('tests', 'core', 'test_poll')
def test_statvfs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_statvfs')
def test_libgen(self):
self.do_run_in_out_file_test('tests', 'core', 'test_libgen')
def test_utime(self):
src = open(path_from_root('tests', 'utime', 'test_utime.c')).read()
self.do_run(src, 'success', force_c=True)
@no_minimal_runtime('MINIMAL_RUNTIME does not have getValue() and setValue() (TODO add it to a JS library function to get it in)')
def test_utf(self):
self.banned_js_engines = [SPIDERMONKEY_ENGINE] # only node handles utf well
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc'])
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['getValue', 'setValue', 'UTF8ToString', 'stringToUTF8'])
self.do_run_in_out_file_test('tests', 'core', 'test_utf')
def test_utf32(self):
if self.get_setting('MINIMAL_RUNTIME'):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$UTF32ToString', '$stringToUTF32', '$lengthBytesUTF32'])
else:
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['UTF32ToString', 'stringToUTF32', 'lengthBytesUTF32'])
self.do_run(open(path_from_root('tests', 'utf32.cpp')).read(), 'OK.')
self.do_run(open(path_from_root('tests', 'utf32.cpp')).read(), 'OK.', args=['-fshort-wchar'])
def test_utf8(self):
if self.get_setting('MINIMAL_RUNTIME'):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$AsciiToString', '$stringToAscii', '$writeAsciiToMemory'])
else:
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS',
['UTF8ToString', 'stringToUTF8', 'AsciiToString', 'stringToAscii'])
self.do_run(open(path_from_root('tests', 'utf8.cpp')).read(), 'OK.')
@also_with_wasm_bigint
def test_utf8_textdecoder(self):
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8'])
self.emcc_args += ['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt']
self.do_run(open(path_from_root('tests', 'benchmark_utf8.cpp')).read(), 'OK.')
# Test that invalid character in UTF8 does not cause decoding to crash.
def test_utf8_invalid(self):
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8'])
for decoder_mode in [[], ['-s', 'TEXTDECODER=1']]:
self.emcc_args += decoder_mode
print(str(decoder_mode))
self.do_run(open(path_from_root('tests', 'utf8_invalid.cpp')).read(), 'OK.')
# Test that invalid character in UTF8 does not cause decoding to crash.
def test_minimal_runtime_utf8_invalid(self):
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8'])
for decoder_mode in [[], ['-s', 'TEXTDECODER=1']]:
self.emcc_args += ['-s', 'MINIMAL_RUNTIME=1'] + decoder_mode
print(str(decoder_mode))
self.do_run(open(path_from_root('tests', 'utf8_invalid.cpp')).read(), 'OK.')
def test_utf16_textdecoder(self):
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['UTF16ToString', 'stringToUTF16', 'lengthBytesUTF16'])
self.emcc_args += ['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt']
self.do_run(open(path_from_root('tests', 'benchmark_utf16.cpp')).read(), 'OK.')
def test_wprintf(self):
self.do_run_in_out_file_test('tests', 'core', 'test_wprintf')
def test_write_stdout_fileno(self):
self.do_run_in_out_file_test('tests', 'core', 'test_write_stdout_fileno')
self.do_run_in_out_file_test('tests', 'core', 'test_write_stdout_fileno', args=['-s', 'FILESYSTEM=0'])
def test_direct_string_constant_usage(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_direct_string_constant_usage')
def test_std_cout_new(self):
self.do_run_in_out_file_test('tests', 'core', 'test_std_cout_new')
def test_istream(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
for linkable in [0]: # , 1]:
print(linkable)
# regression check for issue #273
self.set_setting('LINKABLE', linkable)
self.do_run_in_out_file_test('tests', 'core', 'test_istream')
def test_fs_base(self):
# TODO(sbc): It seems that INCLUDE_FULL_LIBRARY will generally generate
# undefined symbols at link time so perhaps have it imply this setting?
self.set_setting('WARN_ON_UNDEFINED_SYMBOLS', 0)
self.set_setting('INCLUDE_FULL_LIBRARY', 1)
self.add_pre_run(open(path_from_root('tests', 'filesystem', 'src.js')).read())
src = 'int main() {return 0;}\n'
expected = open(path_from_root('tests', 'filesystem', 'output.txt')).read()
self.do_run(src, expected)
@also_with_noderawfs
@is_slow_test
def test_fs_nodefs_rw(self):
self.emcc_args += ['-lnodefs.js']
self.set_setting('SYSCALL_DEBUG', 1)
src = open(path_from_root('tests', 'fs', 'test_nodefs_rw.c')).read()
self.do_run(src, 'success', force_c=True)
if '-g' not in self.emcc_args:
print('closure')
self.emcc_args += ['--closure', '1']
self.do_run(src, 'success', force_c=True)
@also_with_noderawfs
def test_fs_nodefs_cloexec(self):
self.emcc_args += ['-lnodefs.js']
src = open(path_from_root('tests', 'fs', 'test_nodefs_cloexec.c')).read()
self.do_run(src, 'success', force_c=True)
def test_fs_nodefs_home(self):
self.set_setting('FORCE_FILESYSTEM', 1)
self.emcc_args += ['-lnodefs.js']
src = open(path_from_root('tests', 'fs', 'test_nodefs_home.c')).read()
self.do_run(src, 'success', js_engines=[NODE_JS])
def test_fs_nodefs_nofollow(self):
self.emcc_args += ['-lnodefs.js']
src = open(path_from_root('tests', 'fs', 'test_nodefs_nofollow.c')).read()
self.do_run(src, 'success', js_engines=[NODE_JS])
def test_fs_trackingdelegate(self):
src = path_from_root('tests', 'fs', 'test_trackingdelegate.c')
out = path_from_root('tests', 'fs', 'test_trackingdelegate.out')
self.do_run_from_file(src, out)
@also_with_noderawfs
def test_fs_writeFile(self):
self.emcc_args += ['-s', 'DISABLE_EXCEPTION_CATCHING=1'] # see issue 2334
src = path_from_root('tests', 'fs', 'test_writeFile.cpp')
out = path_from_root('tests', 'fs', 'test_writeFile.out')
self.do_run_from_file(src, out)
def test_fs_write(self):
src = path_from_root('tests', 'fs', 'test_write.cpp')
out = path_from_root('tests', 'fs', 'test_write.out')
self.do_run_from_file(src, out)
@also_with_noderawfs
def test_fs_emptyPath(self):
src = path_from_root('tests', 'fs', 'test_emptyPath.c')
out = path_from_root('tests', 'fs', 'test_emptyPath.out')
self.do_run_from_file(src, out)
@also_with_noderawfs
def test_fs_append(self):
src = open(path_from_root('tests', 'fs', 'test_append.c')).read()
self.do_run(src, 'success', force_c=True)
def test_fs_mmap(self):
orig_compiler_opts = self.emcc_args[:]
for fs in ['MEMFS', 'NODEFS']:
src = path_from_root('tests', 'fs', 'test_mmap.c')
out = path_from_root('tests', 'fs', 'test_mmap.out')
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_from_file(src, out)
@also_with_noderawfs
def test_fs_errorstack(self):
# Enables strict mode, which may catch some strict-mode-only errors
# so that users can safely work with strict JavaScript if enabled.
create_test_file('pre.js', '"use strict";')
self.emcc_args += ['--pre-js', 'pre.js']
self.set_setting('FORCE_FILESYSTEM', 1)
self.set_setting('ASSERTIONS', 1)
self.do_run(r'''
#include <emscripten.h>
#include <iostream>
int main(void) {
std::cout << "hello world\n"; // should work with strict mode
EM_ASM(
try {
FS.readFile('/dummy.txt');
} catch (err) {
err.stack = err.stack; // should be writable
throw err;
}
);
return 0;
}
''', 'at Object.readFile', assert_returncode=None) # engines has different error stack format
@also_with_noderawfs
def test_fs_llseek(self):
self.set_setting('FORCE_FILESYSTEM', 1)
src = open(path_from_root('tests', 'fs', 'test_llseek.c')).read()
self.do_run(src, 'success', force_c=True)
def test_fs_64bit(self):
src = open(path_from_root('tests', 'fs', 'test_64bit.c')).read()
self.do_run(src, 'success', force_c=True)
@no_windows('https://github.com/emscripten-core/emscripten/issues/8882')
def test_unistd_access(self):
self.clear()
orig_compiler_opts = self.emcc_args[:]
for fs in ['MEMFS', 'NODEFS']:
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('tests', 'unistd', 'access', js_engines=[NODE_JS])
# Node.js fs.chmod is nearly no-op on Windows
if not WINDOWS:
self.emcc_args = orig_compiler_opts
self.emcc_args += ['-s', 'NODERAWFS=1']
self.do_run_in_out_file_test('tests', 'unistd', 'access', js_engines=[NODE_JS])
def test_unistd_curdir(self):
src = open(path_from_root('tests', 'unistd', 'curdir.c')).read()
expected = open(path_from_root('tests', 'unistd', 'curdir.out')).read()
self.do_run(src, expected)
@also_with_noderawfs
def test_unistd_close(self):
src = open(path_from_root('tests', 'unistd', 'close.c')).read()
expected = open(path_from_root('tests', 'unistd', 'close.out')).read()
self.do_run(src, expected)
def test_unistd_confstr(self):
src = open(path_from_root('tests', 'unistd', 'confstr.c')).read()
expected = open(path_from_root('tests', 'unistd', 'confstr.out')).read()
self.do_run(src, expected)
def test_unistd_ttyname(self):
src = open(path_from_root('tests', 'unistd', 'ttyname.c')).read()
self.do_run(src, 'success', force_c=True)
@also_with_noderawfs
def test_unistd_pipe(self):
src = open(path_from_root('tests', 'unistd', 'pipe.c')).read()
self.do_run(src, 'success', force_c=True)
@also_with_noderawfs
def test_unistd_dup(self):
src = open(path_from_root('tests', 'unistd', 'dup.c')).read()
expected = open(path_from_root('tests', 'unistd', 'dup.out')).read()
self.do_run(src, expected)
def test_unistd_pathconf(self):
src = open(path_from_root('tests', 'unistd', 'pathconf.c')).read()
expected = open(path_from_root('tests', 'unistd', 'pathconf.out')).read()
self.do_run(src, expected)
def test_unistd_truncate(self):
self.clear()
orig_compiler_opts = self.emcc_args[:]
for fs in ['MEMFS', 'NODEFS']:
src = open(path_from_root('tests', 'unistd', 'truncate.c')).read()
expected = open(path_from_root('tests', 'unistd', 'truncate.out')).read()
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run(src, expected, js_engines=[NODE_JS])
@no_windows("Windows throws EPERM rather than EACCES or EINVAL")
@unittest.skipIf(WINDOWS or os.geteuid() == 0, "Root access invalidates this test by being able to write on readonly files")
def test_unistd_truncate_noderawfs(self):
# FIXME
self.skipTest('fails on some node versions and OSes, e.g. 10.13.0 on linux')
self.emcc_args += ['-s', 'NODERAWFS=1']
self.do_run_in_out_file_test('tests', 'unistd', 'truncate', js_engines=[NODE_JS])
def test_unistd_swab(self):
self.do_run_in_out_file_test('tests', 'unistd', 'swab')
def test_unistd_isatty(self):
src = open(path_from_root('tests', 'unistd', 'isatty.c')).read()
self.do_run(src, 'success', force_c=True)
@also_with_standalone_wasm
def test_unistd_sysconf(self):
self.do_run_in_out_file_test('tests', 'unistd', 'sysconf')
@no_asan('ASan alters memory layout')
def test_unistd_sysconf_phys_pages(self):
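    # sysconf(_SC_PHYS_PAGES) should report the total heap divided by the 16384-byte
    # page size this test assumes: 2 GB with memory growth, else the 16 MB default.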
src = open(path_from_root('tests', 'unistd', 'sysconf_phys_pages.c')).read()
if self.get_setting('ALLOW_MEMORY_GROWTH'):
expected = (2 * 1024 * 1024 * 1024) // 16384
else:
expected = 16 * 1024 * 1024 // 16384
self.do_run(src, str(expected) + ', errno: 0')
def test_unistd_login(self):
self.do_run_in_out_file_test('tests', 'unistd', 'login')
@no_windows('https://github.com/emscripten-core/emscripten/issues/8882')
def test_unistd_unlink(self):
self.clear()
orig_compiler_opts = self.emcc_args[:]
src = open(path_from_root('tests', 'unistd', 'unlink.c')).read()
for fs in ['MEMFS', 'NODEFS']:
self.emcc_args = orig_compiler_opts + ['-D' + fs]
# symlinks on node.js on non-linux behave differently (e.g. on Windows they require administrative privileges)
# so skip testing those bits on that combination.
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
if WINDOWS:
self.emcc_args += ['-DNO_SYMLINK=1']
if MACOS:
continue
self.do_run(src, 'success', force_c=True, js_engines=[NODE_JS])
# Several differences/bugs on non-linux including https://github.com/nodejs/node/issues/18014
if not WINDOWS and not MACOS:
self.emcc_args = orig_compiler_opts + ['-DNODERAWFS']
# 0 if root user
if os.geteuid() == 0:
self.emcc_args += ['-DSKIP_ACCESS_TESTS']
self.emcc_args += ['-s', 'NODERAWFS=1']
self.do_run(src, 'success', force_c=True, js_engines=[NODE_JS])
def test_unistd_links(self):
self.clear()
orig_compiler_opts = self.emcc_args[:]
for fs in ['MEMFS', 'NODEFS']:
if WINDOWS and fs == 'NODEFS':
print('Skipping NODEFS part of this test for test_unistd_links on Windows, since it would require administrative privileges.', file=sys.stderr)
# Also, other detected discrepancies if you do end up running this test on NODEFS:
# test expects /, but Windows gives \ as path slashes.
# Calling readlink() on a non-link gives error 22 EINVAL on Unix, but simply error 0 OK on Windows.
continue
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('tests', 'unistd', 'links', js_engines=[NODE_JS])
@no_windows('Skipping NODEFS test, since it would require administrative privileges.')
def test_unistd_symlink_on_nodefs(self):
# Also, other detected discrepancies if you do end up running this test on NODEFS:
# test expects /, but Windows gives \ as path slashes.
# Calling readlink() on a non-link gives error 22 EINVAL on Unix, but simply error 0 OK on Windows.
self.emcc_args += ['-lnodefs.js']
self.clear()
src = open(path_from_root('tests', 'unistd', 'symlink_on_nodefs.c')).read()
expected = open(path_from_root('tests', 'unistd', 'symlink_on_nodefs.out')).read()
self.do_run(src, expected, js_engines=[NODE_JS])
def test_unistd_sleep(self):
src = open(path_from_root('tests', 'unistd', 'sleep.c')).read()
expected = open(path_from_root('tests', 'unistd', 'sleep.out')).read()
self.do_run(src, expected)
@also_with_wasm_bigint
def test_unistd_io(self):
self.set_setting('INCLUDE_FULL_LIBRARY', 1) # uses constants from ERRNO_CODES
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0) # avoid errors when linking in full library
orig_compiler_opts = self.emcc_args[:]
src = open(path_from_root('tests', 'unistd', 'io.c')).read()
expected = open(path_from_root('tests', 'unistd', 'io.out')).read()
for fs in ['MEMFS', 'NODEFS']:
self.clear()
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run(src, expected)
@no_windows('https://github.com/emscripten-core/emscripten/issues/8882')
def test_unistd_misc(self):
orig_compiler_opts = self.emcc_args[:]
for fs in ['MEMFS', 'NODEFS']:
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('tests', 'unistd', 'misc', js_engines=[NODE_JS])
# i64s in the API, which we'd need to legalize for JS, so in standalone mode
# all we can test is wasm VMs
@also_with_standalone_wasm
def test_posixtime(self):
test_path = path_from_root('tests', 'core', 'test_posixtime')
src, output = (test_path + s for s in ('.c', '.out'))
self.banned_js_engines = [V8_ENGINE] # v8 lacks monotonic time
self.do_run_from_file(src, output)
def test_uname(self):
self.do_run_in_out_file_test('tests', 'core', 'test_uname')
def test_unary_literal(self):
self.do_run_in_out_file_test('tests', 'core', 'test_unary_literal')
def test_env(self):
src = open(path_from_root('tests', 'env', 'src.c')).read()
expected = open(path_from_root('tests', 'env', 'output.txt')).read()
if not self.is_wasm_backend():
# the fastcomp implementation is incorrect in one way
expected = expected.replace('after alteration: Qest5', 'after alteration: test5')
self.do_run(src, [
expected.replace('{{{ THIS_PROGRAM }}}', self.in_dir('src.cpp.o.js')).replace('\\', '/'), # node, can find itself properly
expected.replace('{{{ THIS_PROGRAM }}}', './this.program') # spidermonkey, v8
])
def test_environ(self):
src = open(path_from_root('tests', 'env', 'src-mini.c')).read()
expected = open(path_from_root('tests', 'env', 'output-mini.txt')).read()
self.do_run(src, [
expected.replace('{{{ THIS_PROGRAM }}}', self.in_dir('src.cpp.o.js')).replace('\\', '/'), # node, can find itself properly
expected.replace('{{{ THIS_PROGRAM }}}', './this.program') # spidermonkey, v8
])
def test_systypes(self):
self.do_run_in_out_file_test('tests', 'core', 'test_systypes')
def test_stddef(self):
self.do_run_in_out_file_test('tests', 'core', 'test_stddef')
self.do_run_in_out_file_test('tests', 'core', 'test_stddef', force_c=True)
def test_getloadavg(self):
self.do_run_in_out_file_test('tests', 'core', 'test_getloadavg')
def test_nl_types(self):
self.do_run_in_out_file_test('tests', 'core', 'test_nl_types')
def test_799(self):
src = open(path_from_root('tests', '799.cpp')).read()
self.do_run(src, '''Set PORT family: 0, port: 3979
Get PORT family: 0
PORT: 3979
''')
def test_ctype(self):
self.do_run_in_out_file_test('tests', 'core', 'test_ctype')
def test_strcasecmp(self):
self.do_run_in_out_file_test('tests', 'core', 'test_strcasecmp')
def test_atomic(self):
self.do_run_in_out_file_test('tests', 'core', 'test_atomic')
def test_atomic_cxx(self):
# the wasm backend has lock-free atomics, but not asm.js or asm2wasm
is_lock_free = self.is_wasm_backend()
self.emcc_args += ['-DIS_64BIT_LOCK_FREE=%d' % is_lock_free]
self.do_run_in_out_file_test('tests', 'core', 'test_atomic_cxx')
if self.get_setting('ALLOW_MEMORY_GROWTH') == 0 and not self.is_wasm() \
and not self.is_wasm_backend():
print('main module')
self.set_setting('MAIN_MODULE', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_atomic_cxx')
# TODO: test with USE_PTHREADS in wasm backend as well
def test_phiundef(self):
self.do_run_in_out_file_test('tests', 'core', 'test_phiundef')
def test_netinet_in(self):
src = open(path_from_root('tests', 'netinet', 'in.cpp')).read()
expected = open(path_from_root('tests', 'netinet', 'in.out')).read()
self.do_run(src, expected)
@needs_dlfcn
def test_main_module_static_align(self):
if self.get_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('no shared modules with memory growth')
self.set_setting('MAIN_MODULE', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_main_module_static_align')
# libc++ tests
def test_iostream_and_determinism(self):
src = '''
#include <iostream>
int main()
{
std::cout << "hello world" << std::endl << 77 << "." << std::endl;
return 0;
}
'''
num = 5
for i in range(num):
print('(iteration %d)' % i)
# add some timing nondeterminism here, not that we need it, but whatever
time.sleep(random.random() / (10 * num))
self.do_run(src, 'hello world\n77.\n')
# Verify that this build is identical to the previous one
if os.path.exists('src.js.previous'):
self.assertBinaryEqual('src.cpp.o.js', 'src.js.previous')
shutil.copy2('src.cpp.o.js', 'src.js.previous')
# Same but for the wasm file.
if self.get_setting('WASM') and not self.get_setting('WASM2JS'):
if os.path.exists('src.wasm.previous'):
self.assertBinaryEqual('src.cpp.o.wasm', 'src.wasm.previous')
shutil.copy2('src.cpp.o.wasm', 'src.wasm.previous')
def test_stdvec(self):
self.do_run_in_out_file_test('tests', 'core', 'test_stdvec')
def test_random_device(self):
self.do_run_in_out_file_test('tests', 'core', 'test_random_device')
def test_reinterpreted_ptrs(self):
self.do_run_in_out_file_test('tests', 'core', 'test_reinterpreted_ptrs')
def test_js_libraries(self):
create_test_file('main.cpp', '''
#include <stdio.h>
extern "C" {
extern void printey();
extern int calcey(int x, int y);
}
int main() {
printey();
printf("*%d*\\n", calcey(10, 22));
return 0;
}
''')
create_test_file('mylib1.js', '''
mergeInto(LibraryManager.library, {
printey: function() {
out('hello from lib!');
}
});
''')
create_test_file('mylib2.js', '''
mergeInto(LibraryManager.library, {
calcey: function(x, y) {
return x + y;
}
});
''')
self.emcc_args += ['--js-library', 'mylib1.js', '--js-library', 'mylib2.js']
self.do_run(open('main.cpp').read(), 'hello from lib!\n*32*\n')
def test_unicode_js_library(self):
create_test_file('main.cpp', '''
#include <stdio.h>
extern "C" {
extern void printey();
}
int main() {
printey();
return 0;
}
''')
self.emcc_args += ['--js-library', path_from_root('tests', 'unicode_library.js')]
self.do_run(open('main.cpp').read(), u'Unicode snowman \u2603 says hello!')
def test_funcptr_import_type(self):
self.emcc_args += ['--js-library', path_from_root('tests', 'core', 'test_funcptr_import_type.js')]
self.do_run_in_out_file_test('tests', 'core', 'test_funcptr_import_type')
@no_asan('ASan does not work with EXPORT_ALL')
def test_constglobalunion(self):
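    # Regression test: a const global union whose address is stored in another global's
    # initializer must still resolve to a nonzero address at runtime.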
self.emcc_args += ['-s', 'EXPORT_ALL=1']
self.do_run(r'''
#include <stdio.h>
struct one_const {
long a;
};
struct two_consts {
long a;
long b;
};
union some_consts {
struct one_const one;
struct two_consts two;
};
union some_consts my_consts = {{
1
}};
struct one_const addr_of_my_consts = {
(long)(&my_consts)
};
int main(void) {
printf("%li\n", (long)!!addr_of_my_consts.a);
return 0;
}
''', '1')
### 'Medium' tests
def test_fannkuch(self):
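    # (n, expected result) pairs; the program must print 'Pfannkuchen(n) = result.'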
results = [(1, 0), (2, 1), (3, 2), (4, 4), (5, 7), (6, 10), (7, 16), (8, 22)]
src = open(path_from_root('tests', 'fannkuch.cpp')).read()
self.build(src, self.get_dir(), 'fannkuch.cpp')
for i, j in results:
print(i, j)
self.do_run('fannkuch.cpp.o.js', 'Pfannkuchen(%d) = %d.' % (i, j), [str(i)], no_build=True)
def test_raytrace(self):
# TODO: Should we remove this test?
self.skipTest('Relies on double value rounding, extremely sensitive')
src = open(path_from_root('tests', 'raytrace.cpp')).read().replace('double', 'float')
output = open(path_from_root('tests', 'raytrace.ppm')).read()
self.do_run(src, output, ['3', '16'])
def test_fasta(self):
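    # Expected outputs use '*' in place of newlines, matching the output_nicerizer below.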
results = [(1, '''GG*ctt**tgagc*'''),
(20, '''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tacgtgtagcctagtgtttgtgttgcgttatagtctatttgtggacacagtatggtcaaa**tgacgtcttttgatctgacggcgttaacaaagatactctg*'''),
(50, '''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA*TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACAT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tactDtDagcctatttSVHtHttKtgtHMaSattgWaHKHttttagacatWatgtRgaaa**NtactMcSMtYtcMgRtacttctWBacgaa**agatactctgggcaacacacatacttctctcatgttgtttcttcggacctttcataacct**ttcctggcacatggttagctgcacatcacaggattgtaagggtctagtggttcagtgagc**ggaatatcattcgtcggtggtgttaatctatctcggtgtagcttataaatgcatccgtaa**gaatattatgtttatttgtcggtacgttcatggtagtggtgtcgccgatttagacgtaaa**ggcatgtatg*''')]
old = self.emcc_args
orig_src = open(path_from_root('tests', 'fasta.cpp')).read()
def test(extra_args):
self.emcc_args = old + extra_args
for precision in [0, 1, 2]:
self.set_setting('PRECISE_F32', precision)
for t in ['float', 'double']:
print(precision, t)
src = orig_src.replace('double', t)
self.build(src, self.get_dir(), 'fasta.cpp')
for arg, output in results:
self.do_run('fasta.cpp.o.js', output, [str(arg)], lambda x, err: x.replace('\n', '*'), no_build=True)
shutil.copyfile('fasta.cpp.o.js', '%d_%s.js' % (precision, t))
test([])
@bleeding_edge_wasm_backend
def test_fasta_nontrapping(self):
self.emcc_args += ['-mnontrapping-fptoint']
self.test_fasta()
def test_whets(self):
self.do_run(open(path_from_root('tests', 'whets.cpp')).read(), 'Single Precision C Whetstone Benchmark', assert_returncode=None)
def test_dlmalloc_inline(self):
    self.banned_js_engines = [NODE_JS] # slower, and fails on 64-bit
# needed with typed arrays
self.set_setting('INITIAL_MEMORY', 128 * 1024 * 1024)
src = open(path_from_root('system', 'lib', 'dlmalloc.c')).read() + '\n\n\n' + open(path_from_root('tests', 'dlmalloc_test.c')).read()
self.do_run(src, '*1,0*', ['200', '1'], force_c=True)
self.do_run(None, '*400,0*', ['400', '400'], force_c=True, no_build=True)
def test_dlmalloc(self):
    self.banned_js_engines = [NODE_JS] # slower, and fails on 64-bit
# needed with typed arrays
self.set_setting('INITIAL_MEMORY', 128 * 1024 * 1024)
# Linked version
src = open(path_from_root('tests', 'dlmalloc_test.c')).read()
self.do_run(src, '*1,0*', ['200', '1'])
self.do_run(None, '*400,0*', ['400', '400'], no_build=True)
# TODO: do this in other passes too, passing their opts into emcc
if self.emcc_args == []:
# emcc should build in dlmalloc automatically, and do all the sign correction etc. for it
try_delete('src.cpp.o.js')
run_process([EMCC, path_from_root('tests', 'dlmalloc_test.c'), '-s', 'INITIAL_MEMORY=128MB', '-o', 'src.cpp.o.js'], stdout=PIPE, stderr=self.stderr_redirect)
self.do_run(None, '*1,0*', ['200', '1'], no_build=True)
self.do_run(None, '*400,0*', ['400', '400'], no_build=True)
# The same for new and all its variants
src = open(path_from_root('tests', 'new.cpp')).read()
for new, delete in [
('malloc(100)', 'free'),
('new char[100]', 'delete[]'),
('new Structy', 'delete'),
('new int', 'delete'),
('new Structy[10]', 'delete[]'),
]:
self.do_run(src.replace('{{{ NEW }}}', new).replace('{{{ DELETE }}}', delete), '*1,0*')
@no_asan('asan also changes malloc, and that ends up linking in new twice')
def test_dlmalloc_partial(self):
# present part of the symbols of dlmalloc, not all
src = open(path_from_root('tests', 'new.cpp')).read().replace('{{{ NEW }}}', 'new int').replace('{{{ DELETE }}}', 'delete') + '''
#include <new>
void *
operator new(size_t size) throw(std::bad_alloc)
{
printf("new %d!\\n", size);
return malloc(size);
}
'''
self.do_run(src, 'new 4!\n*1,0*')
@no_asan('asan also changes malloc, and that ends up linking in new twice')
def test_dlmalloc_partial_2(self):
if 'SAFE_HEAP' in str(self.emcc_args):
self.skipTest('we do unsafe stuff here')
# present part of the symbols of dlmalloc, not all. malloc is harder to link than new which is weak.
self.do_run_in_out_file_test('tests', 'core', 'test_dlmalloc_partial_2', assert_returncode=None)
def test_libcxx(self):
self.do_run(open(path_from_root('tests', 'hashtest.cpp')).read(),
'june -> 30\nPrevious (in alphabetical order) is july\nNext (in alphabetical order) is march')
self.do_run('''
#include <set>
#include <stdio.h>
int main() {
std::set<int> *fetchOriginatorNums = new std::set<int>();
fetchOriginatorNums->insert(171);
printf("hello world\\n");
return 0;
}
''', 'hello world')
def test_typeid(self):
self.do_run_in_out_file_test('tests', 'core', 'test_typeid')
def test_static_variable(self):
# needs atexit
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_static_variable')
def test_fakestat(self):
self.do_run_in_out_file_test('tests', 'core', 'test_fakestat')
def test_mmap(self):
self.set_setting('INITIAL_MEMORY', 128 * 1024 * 1024)
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_mmap')
def test_mmap_file(self):
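    # Build a 9000-byte data file so the mmap'd reads below span more than two
    # 4096-byte pages (the test reads at offsets 0 and 4096).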
for extra_args in [[], ['--no-heap-copy']]:
self.emcc_args += ['--embed-file', 'data.dat'] + extra_args
x = 'data from the file........'
s = ''
while len(s) < 9000:
if len(s) + len(x) < 9000:
s += x
continue
s += '.'
assert len(s) == 9000
create_test_file('data.dat', s)
src = open(path_from_root('tests', 'mmap_file.c')).read()
self.do_run(src, '*\n' + s[0:20] + '\n' + s[4096:4096 + 20] + '\n*\n')
def test_cubescript(self):
# uses register keyword
self.emcc_args.append('-std=c++03')
if self.run_name == 'asm3':
self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage
self.emcc_args = [x for x in self.emcc_args if x != '-g'] # remove -g, so we have one test without it by default
def test():
self.do_run(path_from_root('tests', 'cubescript'), '*\nTemp is 33\n9\n5\nhello, everyone\n*', main_file='command.cpp')
test()
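    # Scans the generated JS for the __apply_relocations() body and counts its lines,
    # as a rough measure of the relocations emitted in RELOCATABLE (non-wasm) builds.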
def count_relocations():
generated = open('src.cpp.o.js').read()
generated = re.sub(r'\n+[ \n]*\n+', '\n', generated)
start = '\nfunction __apply_relocations() {'
relocs_start = generated.find(start)
if relocs_start == -1:
return "", 0
relocs_start += len(start)
relocs_end = generated.find('\n}', relocs_start)
relocs = generated[relocs_start:relocs_end]
num_relocs = relocs.count('\n')
return relocs, num_relocs
# TODO: wrappers for wasm modules
if not self.get_setting('WASM') and not self.is_wasm_backend():
print('relocatable')
assert self.get_setting('RELOCATABLE') == self.get_setting('EMULATED_FUNCTION_POINTERS') == 0
self.set_setting('RELOCATABLE', 1)
self.set_setting('EMULATED_FUNCTION_POINTERS', 1)
test()
self.set_setting('RELOCATABLE', 0)
self.set_setting('EMULATED_FUNCTION_POINTERS', 0)
if self.is_wasm_backend():
print('asyncify') # extra coverage
self.emcc_args += ['-s', 'ASYNCIFY=1']
test()
@needs_dlfcn
def test_relocatable_void_function(self):
self.set_setting('RELOCATABLE', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_relocatable_void_function')
@wasm_simd
def test_wasm_builtin_simd(self):
# Improves test readability
self.emcc_args.append('-Wno-c++11-narrowing')
self.do_run(open(path_from_root('tests', 'test_wasm_builtin_simd.cpp')).read(), 'Success!')
self.emcc_args.append('-munimplemented-simd128')
self.build(open(path_from_root('tests', 'test_wasm_builtin_simd.cpp')).read(),
self.get_dir(), os.path.join(self.get_dir(), 'src.cpp'))
@wasm_simd
def test_wasm_intrinsics_simd(self):
def run():
self.do_run(
open(path_from_root('tests', 'test_wasm_intrinsics_simd.c')).read(),
'Success!')
# Improves test readability
self.emcc_args.append('-Wno-c++11-narrowing')
self.emcc_args.extend(['-Wpedantic', '-Werror', '-Wall', '-xc++'])
run()
self.emcc_args.append('-funsigned-char')
run()
self.emcc_args.extend(['-munimplemented-simd128', '-xc', '-std=c99'])
self.build(open(path_from_root('tests', 'test_wasm_intrinsics_simd.c')).read(),
self.get_dir(), os.path.join(self.get_dir(), 'src.cpp'))
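  # The SSE/AVX tests below share one pattern: build the test natively with clang and
  # capture its stdout, then build with emscripten (-msse*) and require identical output.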
# Tests invoking the SIMD API via x86 SSE1 xmmintrin.h header (_mm_x() functions)
@wasm_simd
def test_sse1(self):
src = path_from_root('tests', 'sse', 'test_sse1.cpp')
run_process([shared.CLANG_CXX, src, '-msse', '-o', 'test_sse1', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE)
native_result = run_process('./test_sse1', stdout=PIPE, env=building.get_building_env(native=True)).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-msse']
self.maybe_closure()
self.do_run(open(src).read(), native_result)
# Tests invoking the SIMD API via x86 SSE2 emmintrin.h header (_mm_x() functions)
@wasm_simd
def test_sse2(self):
src = path_from_root('tests', 'sse', 'test_sse2.cpp')
run_process([shared.CLANG_CXX, src, '-msse2', '-Wno-argument-outside-range', '-o', 'test_sse2', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE)
native_result = run_process('./test_sse2', stdout=PIPE, env=building.get_building_env(native=True)).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-msse2', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_run(open(src).read(), native_result)
# Tests invoking the SIMD API via x86 SSE3 pmmintrin.h header (_mm_x() functions)
@wasm_simd
def test_sse3(self):
src = path_from_root('tests', 'sse', 'test_sse3.cpp')
run_process([shared.CLANG_CXX, src, '-msse3', '-Wno-argument-outside-range', '-o', 'test_sse3', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE)
native_result = run_process('./test_sse3', stdout=PIPE, env=building.get_building_env(native=True)).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-msse3', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_run(open(src).read(), native_result)
# Tests invoking the SIMD API via x86 SSSE3 tmmintrin.h header (_mm_x() functions)
@wasm_simd
def test_ssse3(self):
src = path_from_root('tests', 'sse', 'test_ssse3.cpp')
run_process([shared.CLANG_CXX, src, '-mssse3', '-Wno-argument-outside-range', '-o', 'test_ssse3', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE)
native_result = run_process('./test_ssse3', stdout=PIPE, env=building.get_building_env(native=True)).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-mssse3', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_run(open(src).read(), native_result)
# Tests invoking the SIMD API via x86 SSE4.1 smmintrin.h header (_mm_x() functions)
@wasm_simd
def test_sse4_1(self):
src = path_from_root('tests', 'sse', 'test_sse4_1.cpp')
run_process([shared.CLANG_CXX, src, '-msse4.1', '-Wno-argument-outside-range', '-o', 'test_sse4_1', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE)
native_result = run_process('./test_sse4_1', stdout=PIPE, env=building.get_building_env(native=True)).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-msse4.1', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_run(open(src).read(), native_result)
# Tests invoking the SIMD API via x86 SSE4.2 nmmintrin.h header (_mm_x() functions)
@wasm_simd
def test_sse4_2(self):
src = path_from_root('tests', 'sse', 'test_sse4_2.cpp')
run_process([shared.CLANG_CXX, src, '-msse4.2', '-Wno-argument-outside-range', '-o', 'test_sse4_2', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE)
native_result = run_process('./test_sse4_2', stdout=PIPE, env=building.get_building_env(native=True)).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-msse4.2', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_run(open(src).read(), native_result)
# Tests invoking the SIMD API via x86 AVX avxintrin.h header (_mm_x() functions)
@wasm_simd
def test_avx(self):
src = path_from_root('tests', 'sse', 'test_avx.cpp')
run_process([shared.CLANG_CXX, src, '-mavx', '-Wno-argument-outside-range', '-o', 'test_avx', '-D_CRT_SECURE_NO_WARNINGS=1'] + building.get_native_building_args(), stdout=PIPE)
native_result = run_process('./test_avx', stdout=PIPE, env=building.get_building_env(native=True)).stdout
orig_args = self.emcc_args
self.emcc_args = orig_args + ['-I' + path_from_root('tests', 'sse'), '-mavx', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_run(open(src).read(), native_result)
@no_asan('call stack exceeded on some versions of node')
def test_gcc_unmangler(self):
self.emcc_args += ['-I' + path_from_root('third_party')]
self.do_run(open(path_from_root('third_party', 'gcc_demangler.c')).read(), '*d_demangle(char const*, int, unsigned int*)*', args=['_ZL10d_demanglePKciPj'], assert_returncode=None)
@needs_make('make')
def test_lua(self):
self.emcc_args.remove('-Werror')
self.do_run('',
'hello lua world!\n17\n1\n2\n3\n4\n7',
args=['-e', '''print("hello lua world!");print(17);for x = 1,4 do print(x) end;print(10-3)'''],
libraries=self.get_library(os.path.join('third_party', 'lua'), [os.path.join('src', 'lua.o'), os.path.join('src', 'liblua.a')], make=['make', 'generic'], configure=None),
includes=[path_from_root('tests', 'lua')],
output_nicerizer=lambda string, err: (string + err).replace('\n\n', '\n').replace('\n\n', '\n'))
@no_asan('issues with freetype itself')
@needs_make('configure script')
@is_slow_test
def test_freetype(self):
if self.run_name == 'asm2g':
# flip for some more coverage here
self.set_setting('ALIASING_FUNCTION_POINTERS', 1 - self.get_setting('ALIASING_FUNCTION_POINTERS'))
self.add_pre_run("FS.createDataFile('/', 'font.ttf', %s, true, false, false);" % str(
list(bytearray(open(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), 'rb').read()))
))
# Not needed for js, but useful for debugging
shutil.copyfile(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), 'font.ttf')
# Main
self.do_run(open(path_from_root('tests', 'freetype', 'main.c')).read(),
open(path_from_root('tests', 'freetype', 'ref.txt')).read(),
['font.ttf', 'test!', '150', '120', '25'],
libraries=self.get_freetype_library(),
includes=[path_from_root('tests', 'third_party', 'freetype', 'include')])
# github issue 324
print('[issue 324]')
self.do_run(open(path_from_root('tests', 'freetype', 'main_2.c')).read(),
open(path_from_root('tests', 'freetype', 'ref_2.txt')).read(),
['font.ttf', 'w', '32', '32', '25'],
libraries=self.get_freetype_library(),
includes=[path_from_root('tests', 'third_party', 'freetype', 'include')])
print('[issue 324 case 2]')
self.do_run(open(path_from_root('tests', 'freetype', 'main_3.c')).read(),
open(path_from_root('tests', 'freetype', 'ref_3.txt')).read(),
['font.ttf', 'W', '32', '32', '0'],
libraries=self.get_freetype_library(),
includes=[path_from_root('tests', 'third_party', 'freetype', 'include')])
print('[issue 324 case 3]')
self.do_run(None,
open(path_from_root('tests', 'freetype', 'ref_4.txt')).read(),
['font.ttf', 'ea', '40', '32', '0'],
no_build=True)
@no_asan('local count too large for VMs')
def test_sqlite(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING', 1)
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_sqlite3_open', '_sqlite3_close', '_sqlite3_exec', '_sqlite3_free'])
if self.get_setting('ASM_JS') == 1 and '-g' in self.emcc_args:
print("disabling inlining") # without registerize (which -g disables), we generate huge amounts of code
self.set_setting('INLINING_LIMIT', 50)
# newer clang has a warning for implicit conversions that lose information,
# which happens in sqlite (see #9138)
self.emcc_args += ['-Wno-implicit-int-float-conversion']
# temporarily ignore unknown flags, which lets the above flag be used on our CI which doesn't
# yet have the new clang with that flag
self.emcc_args += ['-Wno-unknown-warning-option']
self.emcc_args += ['-I' + path_from_root('tests', 'third_party', 'sqlite')]
src = '''
#define SQLITE_DISABLE_LFS
#define LONGDOUBLE_TYPE double
#define SQLITE_INT64_TYPE long long int
#define SQLITE_THREADSAFE 0
'''
src += open(path_from_root('tests', 'third_party', 'sqlite', 'sqlite3.c')).read()
src += open(path_from_root('tests', 'sqlite', 'benchmark.c')).read()
self.do_run(src,
open(path_from_root('tests', 'sqlite', 'benchmark.txt')).read(),
includes=[path_from_root('tests', 'sqlite')],
force_c=True)
@needs_make('mingw32-make')
@is_slow_test
@parameterized({
'cmake': (True,),
'configure': (False,)
})
def test_zlib(self, use_cmake):
if WINDOWS and not use_cmake:
self.skipTest("Windows cannot run configure sh scripts")
self.maybe_closure()
if self.run_name == 'asm2g':
self.emcc_args += ['-g4'] # more source maps coverage
if self.run_name == 'asm2f':
return self.skipTest('asm2f affects cflags in a way that changes zlib compile flag reporting, so the stdout is different')
if use_cmake:
make_args = []
configure = [path_from_root('emcmake'), 'cmake', '.']
else:
make_args = ['libz.a']
configure = ['sh', './configure']
self.do_run(open(path_from_root('tests', 'third_party', 'zlib', 'example.c')).read(),
open(path_from_root('tests', 'core', 'test_zlib.out')).read(),
libraries=self.get_library(os.path.join('third_party', 'zlib'), 'libz.a', make_args=make_args, configure=configure),
includes=[path_from_root('tests', 'third_party', 'zlib'), 'building', 'zlib'],
force_c=True)
@needs_make('make')
@is_slow_test
@parameterized({
'cmake': (True,),
'autoconf': (False,)
})
# Called thus so it runs late in the alphabetical cycle... it is long
def test_bullet(self, use_cmake):
if WINDOWS and not use_cmake:
self.skipTest("Windows cannot run configure sh scripts")
self.set_setting('DEAD_FUNCTIONS', ['__ZSt9terminatev'])
self.emcc_args += ['-Wno-c++11-narrowing', '-Wno-deprecated-register', '-Wno-writable-strings']
asserts = self.get_setting('ASSERTIONS')
# extra testing for ASSERTIONS == 2
self.set_setting('ASSERTIONS', 2 if use_cmake else asserts)
self.do_run(open(path_from_root('tests', 'third_party', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp')).read(),
[open(path_from_root('tests', 'bullet', 'output.txt')).read(), # different roundings
open(path_from_root('tests', 'bullet', 'output2.txt')).read(),
open(path_from_root('tests', 'bullet', 'output3.txt')).read(),
open(path_from_root('tests', 'bullet', 'output4.txt')).read()],
libraries=self.get_bullet_library(use_cmake),
includes=[path_from_root('tests', 'third_party', 'bullet', 'src')])
@no_asan('issues with freetype itself')
@needs_make('depends on freetype')
@is_slow_test
def test_poppler(self):
def test():
pdf_data = open(path_from_root('tests', 'poppler', 'paper.pdf'), 'rb').read()
create_test_file('paper.pdf.js', str(list(bytearray(pdf_data))))
create_test_file('pre.js', '''
Module.preRun = function() {
FS.createDataFile('/', 'paper.pdf', eval(read_('paper.pdf.js')), true, false, false);
};
Module.postRun = function() {
var FileData = MEMFS.getFileDataAsRegularArray(FS.root.contents['filename-1.ppm']);
out("Data: " + JSON.stringify(FileData.map(function(x) { return unSign(x, 8) })));
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
ppm_data = str(list(bytearray(open(path_from_root('tests', 'poppler', 'ref.ppm'), 'rb').read())))
self.do_run('', ppm_data.replace(' ', ''),
libraries=self.get_poppler_library(),
args=['-scale-to', '512', 'paper.pdf', 'filename'])
test()
if self.supports_js_dfe():
print("Testing poppler with ELIMINATE_DUPLICATE_FUNCTIONS set to 1", file=sys.stderr)
num_original_funcs = self.count_funcs('src.cpp.o.js')
self.set_setting('ELIMINATE_DUPLICATE_FUNCTIONS', 1)
test()
# Make sure that DFE ends up eliminating more than 200 functions (if we can view source)
assert (num_original_funcs - self.count_funcs('src.cpp.o.js')) > 200
@needs_make('make')
@is_slow_test
def test_openjpeg(self):
if '-fsanitize=address' in self.emcc_args:
self.set_setting('INITIAL_MEMORY', 128 * 1024 * 1024)
def line_splitter(data):
out = ''
counter = 0
for ch in data:
out += ch
if ch == ' ' and counter > 60:
out += '\n'
counter = 0
else:
counter += 1
return out
# remove -g, so we have one test without it by default
self.emcc_args = [x for x in self.emcc_args if x != '-g']
original_j2k = path_from_root('tests', 'openjpeg', 'syntensity_lobby_s.j2k')
image_bytes = list(bytearray(open(original_j2k, 'rb').read()))
create_test_file('pre.js', """
Module.preRun = function() { FS.createDataFile('/', 'image.j2k', %s, true, false, false); };
Module.postRun = function() {
out('Data: ' + JSON.stringify(MEMFS.getFileDataAsRegularArray(FS.analyzePath('image.raw').object)));
};
""" % line_splitter(str(image_bytes)))
shutil.copy(path_from_root('tests', 'third_party', 'openjpeg', 'opj_config.h'), self.get_dir())
lib = self.get_library(os.path.join('third_party', 'openjpeg'),
[os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/index.c.o'.split('/')),
os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/convert.c.o'.split('/')),
os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/__/common/color.c.o'.split('/')),
os.path.join('bin', 'libopenjpeg.a')],
configure=['cmake', '.'],
# configure_args=['--enable-tiff=no', '--enable-jp3d=no', '--enable-png=no'],
make_args=[]) # no -j 2, since parallel builds can fail
# We use doubles in JS, so we get slightly different values than native code. So we
# check our output by comparing the average pixel difference
def image_compare(output, err):
# Get the image generated by JS, from the JSON.stringify'd array
m = re.search(r'\[[\d, -]*\]', output)
self.assertIsNotNone(m, 'Failed to find proper image output in: ' + output)
# Evaluate the output as a python array
js_data = eval(m.group(0))
js_data = [x if x >= 0 else 256 + x for x in js_data] # Our output may be signed, so unsign it
# Get the correct output
true_data = bytearray(open(path_from_root('tests', 'openjpeg', 'syntensity_lobby_s.raw'), 'rb').read())
# Compare them
assert(len(js_data) == len(true_data))
num = len(js_data)
diff_total = js_total = true_total = 0
for i in range(num):
js_total += js_data[i]
true_total += true_data[i]
diff_total += abs(js_data[i] - true_data[i])
js_mean = js_total / float(num)
true_mean = true_total / float(num)
diff_mean = diff_total / float(num)
image_mean = 83.265
# print '[image stats:', js_mean, image_mean, true_mean, diff_mean, num, ']'
assert abs(js_mean - image_mean) < 0.01, [js_mean, image_mean]
assert abs(true_mean - image_mean) < 0.01, [true_mean, image_mean]
assert diff_mean < 0.01, diff_mean
return output
self.emcc_args += ['--minify', '0'] # to compare the versions
self.emcc_args += ['--pre-js', 'pre.js']
def do_test():
self.do_run(open(path_from_root('tests', 'third_party', 'openjpeg', 'codec', 'j2k_to_image.c')).read(),
'Successfully generated', # The real test for valid output is in image_compare
'-i image.j2k -o image.raw'.split(' '),
libraries=lib,
includes=[path_from_root('tests', 'third_party', 'openjpeg', 'libopenjpeg'),
path_from_root('tests', 'third_party', 'openjpeg', 'codec'),
path_from_root('tests', 'third_party', 'openjpeg', 'common'),
os.path.join(self.get_build_dir(), 'openjpeg')],
force_c=True,
assert_returncode=0,
output_nicerizer=image_compare)
do_test()
# extra testing
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1:
print('no memory growth', file=sys.stderr)
self.set_setting('ALLOW_MEMORY_GROWTH', 0)
do_test()
@no_wasm_backend("uses bitcode compiled with asmjs, and we don't have unified triples")
def test_python(self):
self.set_setting('EMULATE_FUNCTION_POINTER_CASTS', 1)
# The python build contains several undefined symbols
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
bitcode = path_from_root('tests', 'third_party', 'python', 'python.bc')
pyscript = dedent('''\
print '***'
print "hello python world!"
print [x*2 for x in range(4)]
t=2
print 10-3-t
print (lambda x: x*2)(11)
print '%f' % 5.47
print {1: 2}.keys()
print '***'
''')
pyoutput = '***\nhello python world!\n[0, 2, 4, 6]\n5\n22\n5.470000\n[1]\n***'
for lto in [0, 1]:
print('lto:', lto)
if lto == 1:
self.emcc_args += ['--llvm-lto', '1']
self.do_run_object(bitcode, pyoutput, args=['-S', '-c', pyscript])
def test_lifetime(self):
self.do_ll_run(path_from_root('tests', 'lifetime.ll'), 'hello, world!\n')
if '-O1' in self.emcc_args or '-O2' in self.emcc_args:
# lifetime stuff and their vars must be culled
self.assertNotContained('a18', open('lifetime.ll.o.js').read())
# Test cases in separate files. Note that these files may contain invalid .ll!
# They are only valid enough for us to read for test purposes, not for llvm-as
# to process.
@no_wasm_backend("uses bitcode compiled with asmjs, and we don't have unified triples")
@is_slow_test
def test_zzz_cases(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
# These tests don't end up linking with libc due to a behaviour in emcc
# where the llvm-link step is skipped when the input is a single
# object file. Since most of them call `printf` (which comes from JS) but
# depend on `strlen` (which comes from musl), these tests almost all
# have an undefined `strlen`, which happens to not get called.
# TODO(sbc): Remove the special case from emcc that bypasses llvm-link
# and then remove this line?
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.set_setting('WARN_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args.remove('-Werror')
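# Remember the base flags; each case below starts from a fresh list() copy of them.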
emcc_args = self.emcc_args
# The following tests link to libc, whereas others link with -nostdlib
needs_stdlib = [
'muli33_ta2', 'philoop_ta2', 'uadd_overflow_64_ta2', 'i64toi8star',
'legalizer_ta2', 'quotedlabel', 'alignedunaligned', 'sillybitcast',
'invokeundef', 'loadbitcastgep', 'sillybitcast2', 'legalizer_b_ta2',
'emptystruct', 'entry3', 'atomicrmw_i64', 'atomicrmw_b_i64',
'invoke_byval', 'i24_ce_fastcomp',
]
need_no_error_on_undefined_symbols = [
'unsanitized_declare'
]
skip_tests = [
# invalid ir
'aliasbitcast', 'structparam', 'issue_39', 'phinonexist', 'oob_ta2', 'phiself', 'invokebitcast',
# pnacl limitations in ExpandStructRegs
'structphiparam', 'callwithstructural_ta2', 'callwithstructural64_ta2', 'structinparam',
# pnacl limitations in ExpandGetElementPtr
'2xi40',
# current fastcomp limitations FIXME
'quoted',
# assumes malloc exists in JS
'llvm_assume', 'longjmp_tiny', 'longjmp_tiny_invoke', 'longjmp_tiny_invoke_phi',
'longjmp_tiny_keepem', 'longjmp_tiny_keepem_cond', 'longjmp_tiny_phi', 'longjmp_tiny_phi2',
]
skip_wasm = [
# casts a function pointer from (i32, i32)* to (i64)*, which happens to work in asm.js but is undefined behavior for general function pointer casts
'call_inttoptr_i64',
]
names = glob.glob(path_from_root('tests', 'cases', '*.ll'))
names.sort()
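# Run every tests/cases/*.ll file; the expected output comes from a matching .txt file when present, otherwise 'hello, world!' (a .txt containing 'skip' skips the case).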
for name in names:
shortname = os.path.splitext(name)[0]
# TODO: test only worked in non-fastcomp (well, these cases)
basename = os.path.basename(shortname)
if basename in skip_tests:
continue
if self.is_wasm() and basename in skip_wasm:
continue
if '_noasm' in shortname and self.get_setting('ASM_JS'):
print('case "%s" not relevant for asm.js' % shortname)
continue
if basename in need_no_error_on_undefined_symbols:
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
print("Testing case '%s'..." % basename)
output_file = path_from_root('tests', 'cases', shortname + '.txt')
if os.path.exists(output_file):
output = open(output_file).read()
else:
output = 'hello, world!'
if output.rstrip() != 'skip':
self.emcc_args = list(emcc_args)
if basename in needs_stdlib:
self.set_setting('FILESYSTEM', 1)
else:
self.emcc_args.append('-nostdlib')
# no libc is linked in; with FILESYSTEM=0 we have a chance at printfing anyhow
self.set_setting('FILESYSTEM', 0)
if os.path.exists(shortname + '.emcc'):
self.emcc_args += json.loads(open(shortname + '.emcc').read())
self.do_ll_run(path_from_root('tests', 'cases', name), output, assert_returncode=None)
# Optional source checking: a python script that is exec'd with a global 'generated' holding the emitted JS
src_checker = path_from_root('tests', 'cases', shortname + '.py')
if os.path.exists(src_checker):
generated = open('src.cpp.o.js').read() # noqa
exec(open(src_checker).read())
@no_asan('call stack exceeded on some versions of node')
@is_slow_test
def test_fuzz(self):
self.emcc_args += ['-I' + path_from_root('tests', 'fuzz', 'include'), '-w']
skip_lto_tests = [
# LLVM LTO bug
'19.c', '18.cpp',
# puts exists before LTO, but is not used; LTO cleans it out, but then creates uses of it (printf=>puts) XXX https://llvm.org/bugs/show_bug.cgi?id=23814
'23.cpp'
]
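# Compile and run each fuzz case under tests/fuzz, comparing against its recorded <name>.txt output; x labels the pass ('normal' or 'lto').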
def run_all(x):
print(x)
for name in sorted(glob.glob(path_from_root('tests', 'fuzz', '*.c')) + glob.glob(path_from_root('tests', 'fuzz', '*.cpp'))):
# if os.path.basename(name) != '4.c':
# continue
if 'newfail' in name:
continue
if os.path.basename(name).startswith('temp_fuzzcode'):
continue
# pnacl legalization issue, see https://code.google.com/p/nativeclient/issues/detail?id=4027
if x == 'lto' and self.run_name in ['default', 'asm2f'] and os.path.basename(name) in ['8.c']:
continue
if x == 'lto' and self.run_name == 'default' and os.path.basename(name) in skip_lto_tests:
continue
if x == 'lto' and os.path.basename(name) in ['21.c']:
continue # LLVM LTO bug
print(name)
if name.endswith('.cpp'):
self.emcc_args.append('-std=c++03')
self.do_run(open(path_from_root('tests', 'fuzz', name)).read(),
open(path_from_root('tests', 'fuzz', name + '.txt')).read(), force_c=name.endswith('.c'), assert_returncode=None)
if name.endswith('.cpp'):
self.emcc_args.remove('-std=c++03')
run_all('normal')
self.emcc_args += ['--llvm-lto', '1']
run_all('lto')
def test_autodebug_bitcode(self):
if self.is_wasm_backend() and '-flto' not in self.get_emcc_args():
return self.skipTest('must use bitcode object files for bitcode autodebug')
self.emcc_args += ['--llvm-opts', '0']
# Autodebug the code
def do_autodebug(filename):
building.llvm_dis(filename + '.o', filename + '.ll')
run_process([PYTHON, AUTODEBUGGER, filename + '.ll', filename + '.auto.ll'])
# rebuild .bc
# TODO: use code in do_autodebug_post for this
self.prep_ll_file(filename, filename + '.auto.ll', force_recompile=True)
# Run a test that should work, generating some code
test_path = path_from_root('tests', 'core', 'test_structs')
src = test_path + '.c'
output = test_path + '.out'
# Add an ll hook, to force ll generation
self.do_run_from_file(src, output, build_ll_hook=lambda x: False)
filename = 'src.c'
do_autodebug(filename)
# Compare to each other, and to expected output
self.do_ll_run(filename + '.auto.ll', 'AD:-1,1')
# Test using build_ll_hook
src = '''
#include <stdio.h>
char cache[256], *next = cache;
int main()
{
cache[10] = 25;
next[20] = 51;
int x = cache[10];
double y = 11.52;
printf("*%d,%d,%.2f*\\n", x, cache[20], y);
return 0;
}
'''
self.do_run(src, 'AD:-1,1', build_ll_hook=do_autodebug)
@no_asan('autodebug logging interferes with asan')
@no_fastcomp('autodebugging wasm is only supported in the wasm backend')
@with_env_modify({'EMCC_AUTODEBUG': '1'})
@also_with_impure_standalone_wasm
def test_autodebug_wasm(self):
# Autodebug does not work with too much shadow memory.
# Memory consumed by autodebug depends on the size of the WASM linear memory.
# With a large shadow memory, the JS engine runs out of memory.
if '-fsanitize=address' in self.emcc_args:
self.set_setting('ASAN_SHADOW_SIZE', 16 * 1024 * 1024)
# test that the program both works and also emits some of the logging
# (but without the specific output, as it is logging the actual locals
# used and so forth, which will change between opt modes and updates of
# llvm etc.)
def check(out, err):
for msg in ['log_execution', 'get_i32', 'set_i32', 'load_ptr', 'load_val', 'store_ptr', 'store_val']:
self.assertIn(msg, out)
return out + err
self.do_run(open(path_from_root('tests', 'core', 'test_autodebug.c')).read(),
'success', output_nicerizer=check)
### Integration tests
@sync
def test_ccall(self):
self.emcc_args.append('-Wno-return-stack-address')
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['ccall', 'cwrap'])
create_test_file('post.js', '''
out('*');
var ret;
ret = Module['ccall']('get_int', 'number'); out([typeof ret, ret].join(','));
ret = ccall('get_float', 'number'); out([typeof ret, ret.toFixed(2)].join(','));
ret = ccall('get_bool', 'boolean'); out([typeof ret, ret].join(','));
ret = ccall('get_string', 'string'); out([typeof ret, ret].join(','));
ret = ccall('print_int', null, ['number'], [12]); out(typeof ret);
ret = ccall('print_float', null, ['number'], [14.56]); out(typeof ret);
ret = ccall('print_bool', null, ['boolean'], [true]); out(typeof ret);
ret = ccall('print_string', null, ['string'], ["cheez"]); out(typeof ret);
ret = ccall('print_string', null, ['array'], [[97, 114, 114, 45, 97, 121, 0]]); out(typeof ret); // JS array
ret = ccall('print_string', null, ['array'], [new Uint8Array([97, 114, 114, 45, 97, 121, 0])]); out(typeof ret); // typed array
ret = ccall('multi', 'number', ['number', 'number', 'number', 'string'], [2, 1.4, 3, 'more']); out([typeof ret, ret].join(','));
var p = ccall('malloc', 'pointer', ['number'], [4]);
setValue(p, 650, 'i32');
ret = ccall('pointer', 'pointer', ['pointer'], [p]); out([typeof ret, getValue(ret, 'i32')].join(','));
out('*');
// part 2: cwrap
var noThirdParam = Module['cwrap']('get_int', 'number');
out(noThirdParam());
var multi = Module['cwrap']('multi', 'number', ['number', 'number', 'number', 'string']);
out(multi(2, 1.4, 3, 'atr'));
out(multi(8, 5.4, 4, 'bret'));
out('*');
// part 3: avoid stack explosion and check it's restored correctly
for (var i = 0; i < TOTAL_STACK/60; i++) {
ccall('multi', 'number', ['number', 'number', 'number', 'string'], [0, 0, 0, '123456789012345678901234567890123456789012345678901234567890']);
}
out('stack is ok.');
ccall('call_ccall_again', null);
''')
self.emcc_args += ['--post-js', 'post.js']
self.set_setting('EXPORTED_FUNCTIONS', ['_get_int', '_get_float', '_get_bool', '_get_string', '_print_int', '_print_float', '_print_bool', '_print_string', '_multi', '_pointer', '_call_ccall_again', '_malloc'])
self.do_run_in_out_file_test('tests', 'core', 'test_ccall')
if '-O2' in self.emcc_args and '-g' not in self.emcc_args:
print('with closure')
self.emcc_args += ['--closure', '1']
self.do_run_in_out_file_test('tests', 'core', 'test_ccall')
def test_EXTRA_EXPORTED_RUNTIME_METHODS(self):
self.do_run_in_out_file_test('tests', 'core', 'EXTRA_EXPORTED_RUNTIME_METHODS')
# test dyncall (and other runtime methods in support.js) can be exported
self.emcc_args += ['-DEXPORTED']
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['dynCall', 'addFunction', 'lengthBytesUTF8', 'getTempRet0', 'setTempRet0'])
self.do_run_in_out_file_test('tests', 'core', 'EXTRA_EXPORTED_RUNTIME_METHODS')
@no_fastcomp('fails mysteriously on fastcomp (dynCall_viji is not defined); ignored, because fastcomp is deprecated')
@no_minimal_runtime('MINIMAL_RUNTIME does not blindly export all symbols to Module to save code size')
def test_dyncall_specific(self):
emcc_args = self.emcc_args[:]
for which, exported_runtime_methods in [
('DIRECT', []),
('EXPORTED', []),
('FROM_OUTSIDE', ['dynCall_viji'])
]:
print(which)
self.emcc_args = emcc_args + ['-D' + which]
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', exported_runtime_methods)
self.do_run_in_out_file_test('tests', 'core', 'dyncall_specific')
def test_getValue_setValue(self):
# these used to be exported, but no longer are by default
def test(output_prefix='', args=[]):
old = self.emcc_args[:]
self.emcc_args += args
self.do_run(open(path_from_root('tests', 'core', 'getValue_setValue.cpp')).read(),
open(path_from_root('tests', 'core', 'getValue_setValue' + output_prefix + '.txt')).read(), assert_returncode=None)
self.emcc_args = old
# see that direct usage (not on module) works. we don't export, but the use
# keeps it alive through JSDCE
test(args=['-DDIRECT'])
# see that with assertions, we get a nice error message
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', [])
self.set_setting('ASSERTIONS', 1)
test('_assert')
self.set_setting('ASSERTIONS', 0)
# see that when we export them, things work on the module
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['getValue', 'setValue'])
test()
def test_FS_exports(self):
# these used to be exported, but no longer are by default
for use_files in (0, 1):
print(use_files)
def test(output_prefix='', args=[], assert_returncode=None):
if use_files:
args += ['-DUSE_FILES']
print(args)
old = self.emcc_args[:]
self.emcc_args += args
self.do_run(open(path_from_root('tests', 'core', 'FS_exports.cpp')).read(),
(open(path_from_root('tests', 'core', 'FS_exports' + output_prefix + '.txt')).read(),
open(path_from_root('tests', 'core', 'FS_exports' + output_prefix + '_2.txt')).read()),
assert_returncode=assert_returncode)
self.emcc_args = old
# see that direct usage (not on module) works. we don't export, but the use
# keeps it alive through JSDCE
test(args=['-DDIRECT', '-s', 'FORCE_FILESYSTEM=1'])
# see that with assertions, we get a nice error message
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', [])
self.set_setting('ASSERTIONS', 1)
test('_assert', assert_returncode=None)
self.set_setting('ASSERTIONS', 0)
# see that when we export them, things work on the module
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['FS_createDataFile'])
test(args=['-s', 'FORCE_FILESYSTEM=1'])
def test_legacy_exported_runtime_numbers(self):
# these used to be exported, but no longer are by default
def test(output_prefix='', args=[]):
old = self.emcc_args[:]
self.emcc_args += args
self.do_run(open(path_from_root('tests', 'core', 'legacy_exported_runtime_numbers.cpp')).read(),
open(path_from_root('tests', 'core', 'legacy_exported_runtime_numbers' + output_prefix + '.txt')).read(), assert_returncode=None)
self.emcc_args = old
# see that direct usage (not on module) works. we don't export, but the use
# keeps it alive through JSDCE
test(args=['-DDIRECT'])
# see that with assertions, we get a nice error message
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', [])
self.set_setting('ASSERTIONS', 1)
test('_assert')
self.set_setting('ASSERTIONS', 0)
# see that when we export them, things work on the module
self.set_setting('EXTRA_EXPORTED_RUNTIME_METHODS', ['ALLOC_DYNAMIC'])
test()
@no_wasm_backend('DEAD_FUNCTIONS elimination is done by the JSOptimizer')
def test_dead_functions(self):
src = r'''
#include <stdio.h>
extern "C" {
__attribute__((noinline)) int unused(int x) {
volatile int y = x;
return y;
}
}
int main(int argc, char **argv) {
printf("*%d*\n", argc > 1 ? unused(1) : 2);
return 0;
}
'''
# Sanity check that it works and the dead function is emitted
self.do_run(src, '*1*', args=['x'])
js = open('src.cpp.o.js').read()
if self.run_name in ['default', 'asm2g']:
assert 'function _unused($' in js
self.do_run(None, '*2*', no_build=True)
# Kill off the dead function, and check a code path using it aborts
self.set_setting('DEAD_FUNCTIONS', ['_unused'])
self.do_run(src, '*2*')
self.do_run(None, 'abort(', args=['x'], no_build=True, assert_returncode=None)
# Kill off a library function, check code aborts
self.set_setting('DEAD_FUNCTIONS', ['_printf'])
self.do_run(src, 'abort(', assert_returncode=None)
self.do_run(None, 'abort(', args=['x'], no_build=True, assert_returncode=None)
def test_response_file(self):
response_data = '-o %s/response_file.js %s' % (self.get_dir(), path_from_root('tests', 'hello_world.cpp'))
create_test_file('rsp_file', response_data.replace('\\', '\\\\'))
run_process([EMCC, "@rsp_file"] + self.get_emcc_args())
self.do_run('response_file.js', 'hello, world', no_build=True)
self.assertContained('response file not found: foo.txt', self.expect_fail([EMCC, '@foo.txt']))
def test_linker_response_file(self):
objfile = 'response_file.o'
run_process([EMCC, '-c', path_from_root('tests', 'hello_world.cpp'), '-o', objfile] + self.get_emcc_args())
# This should expand into -Wl,--start-group <objfile> -Wl,--end-group
response_data = '--start-group ' + objfile + ' --end-group'
create_test_file('rsp_file', response_data.replace('\\', '\\\\'))
run_process([EMCC, "-Wl,@rsp_file", '-o', 'response_file.o.js'] + self.get_emcc_args())
self.do_run('response_file.o.js', 'hello, world', no_build=True)
def test_exported_response(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <emscripten.h>
extern "C" {
int other_function() { return 5; }
}
int main() {
int x = EM_ASM_INT({ return Module._other_function() });
emscripten_run_script_string(""); // Add a reference to a symbol that exists in src/deps_info.json to uncover issue #2836 in the test suite.
printf("waka %d!\n", x);
return 0;
}
'''
create_test_file('exps', '["_main","_other_function"]')
self.emcc_args += ['-s', 'EXPORTED_FUNCTIONS=@exps']
self.do_run(src, '''waka 5!''')
assert 'other_function' in open('src.cpp.o.js').read()
def test_large_exported_response(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <emscripten.h>
extern "C" {
'''
js_funcs = []
num_exports = 5000
count = 0
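# Generate num_exports trivial C functions and collect their JS-mangled (underscore-prefixed) names for the exports response file.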
while count < num_exports:
src += 'int exported_func_from_response_file_%d () { return %d;}\n' % (count, count)
js_funcs.append('_exported_func_from_response_file_%d' % count)
count += 1
src += r'''
}
int main() {
int x = EM_ASM_INT({ return Module._exported_func_from_response_file_4999() });
emscripten_run_script_string(""); // Add a reference to a symbol that exists in src/deps_info.json to uncover issue #2836 in the test suite.
printf("waka %d!\n", x);
return 0;
}
'''
js_funcs.append('_main')
exported_func_json_file = 'large_exported_response.json'
create_test_file(exported_func_json_file, json.dumps(js_funcs))
self.emcc_args += ['-s', 'EXPORTED_FUNCTIONS=@' + exported_func_json_file]
self.do_run(src, '''waka 4999!''')
assert '_exported_func_from_response_file_1' in open('src.cpp.o.js').read()
@sync
def test_add_function(self):
self.set_setting('INVOKE_RUN', 0)
self.set_setting('RESERVED_FUNCTION_POINTERS', 1)
self.set_setting('EXPORTED_RUNTIME_METHODS', ['callMain'])
src = path_from_root('tests', 'interop', 'test_add_function.cpp')
post_js = path_from_root('tests', 'interop', 'test_add_function_post.js')
self.emcc_args += ['--post-js', post_js]
print('basics')
self.do_run_in_out_file_test('tests', 'interop', 'test_add_function')
if '-g' not in self.emcc_args and not self.is_wasm_backend():
print('with --closure')
old = list(self.emcc_args)
self.emcc_args += ['--closure', '1']
self.do_run_in_out_file_test('tests', 'interop', 'test_add_function')
self.emcc_args = old
print(old)
print('with ALIASING_FUNCTION_POINTERS')
self.set_setting('ALIASING_FUNCTION_POINTERS', 1)
self.do_run_in_out_file_test('tests', 'interop', 'test_add_function')
self.clear_setting('ALIASING_FUNCTION_POINTERS')
print('with RESERVED_FUNCTION_POINTERS=0')
self.set_setting('RESERVED_FUNCTION_POINTERS', 0)
if self.is_wasm_backend():
self.do_run(open(src).read(), 'Unable to grow wasm table', assert_returncode=None)
print('- with table growth')
self.set_setting('ALLOW_TABLE_GROWTH', 1)
self.emcc_args += ['-DGROWTH']
# enable costly assertions to verify correct table behavior
self.set_setting('ASSERTIONS', 2)
self.do_run_in_out_file_test('tests', 'interop', 'test_add_function')
else:
self.do_run(open(src).read(), 'Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.', assert_returncode=None)
self.assertNotContained('jsCall_', open('src.cpp.o.js').read())
if not self.get_setting('WASM') and not self.is_wasm_backend():
# with emulation, we don't need to reserve, except with wasm where
# we still do.
print('- with function pointer emulation')
self.set_setting('EMULATED_FUNCTION_POINTERS', 1)
self.do_run_in_out_file_test('tests', 'interop', 'test_add_function')
def test_getFuncWrapper_sig_alias(self):
src = r'''
#include <stdio.h>
#include <emscripten.h>
void func1(int a) {
printf("func1\n");
}
void func2(int a, int b) {
printf("func2\n");
}
int main() {
EM_ASM({
getFuncWrapper($0, 'vi')(0);
getFuncWrapper($1, 'vii')(0, 0);
}, func1, func2);
return 0;
}
'''
self.do_run(src, 'func1\nfunc2\n')
def test_emulate_function_pointer_casts(self):
self.set_setting('EMULATE_FUNCTION_POINTER_CASTS', 1)
self.do_run(open(path_from_root('tests', 'core', 'test_emulate_function_pointer_casts.cpp')).read(),
('|1.266,1|', # asm.js, double <-> int
'|1.266,1413754136|')) # wasm, reinterpret the bits
@no_wasm2js('TODO: nicely printed names in wasm2js')
@parameterized({
'normal': ([],),
'noexcept': (['-fno-exceptions'],)
})
def test_demangle_stacks(self, extra_args):
self.emcc_args += extra_args
self.set_setting('DEMANGLE_SUPPORT', 1)
self.set_setting('ASSERTIONS', 1)
# ensure function names are preserved
self.emcc_args += ['--profiling-funcs', '--llvm-opts', '0']
self.do_run_in_out_file_test('tests', 'core', 'test_demangle_stacks', assert_returncode=None)
if not self.has_changed_setting('ASSERTIONS'):
print('without assertions, the stack is not printed, but a message suggesting assertions is')
self.set_setting('ASSERTIONS', 0)
self.do_run_in_out_file_test('tests', 'core', 'test_demangle_stacks_noassert', assert_returncode=None)
def test_demangle_stacks_symbol_map(self):
self.set_setting('DEMANGLE_SUPPORT', 1)
if '-O' in str(self.emcc_args) and '-O0' not in self.emcc_args and '-O1' not in self.emcc_args and '-g' not in self.emcc_args:
self.emcc_args += ['--llvm-opts', '0']
else:
self.skipTest("without opts, we don't emit a symbol map")
self.emcc_args += ['--emit-symbol-map']
self.do_run(open(path_from_root('tests', 'core', 'test_demangle_stacks.cpp')).read(), 'abort', assert_returncode=None)
# make sure the shortened name is the right one
full_aborter = None
short_aborter = None
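# The symbol map has one 'short:full' entry per line; find the pair whose demangled name mentions Aborter.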
for line in open('src.cpp.o.js.symbols').readlines():
if ':' not in line:
continue
# split by the first ':' (wasm backend demangling may include more :'s later on)
short, full = line.split(':', 1)
if 'Aborter' in full:
short_aborter = short
full_aborter = full
self.assertIsNotNone(full_aborter)
self.assertIsNotNone(short_aborter)
print('full:', full_aborter, 'short:', short_aborter)
if SPIDERMONKEY_ENGINE and os.path.exists(SPIDERMONKEY_ENGINE[0]):
output = run_js('src.cpp.o.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
# we may see the full one, if -g, or the short one if not
if ' ' + short_aborter + ' ' not in output and ' ' + full_aborter + ' ' not in output:
# stack traces may also be ' name ' or 'name@' etc
if '\n' + short_aborter + ' ' not in output and '\n' + full_aborter + ' ' not in output and 'wasm-function[' + short_aborter + ']' not in output:
if '\n' + short_aborter + '@' not in output and '\n' + full_aborter + '@' not in output:
self.assertContained(' ' + short_aborter + ' ' + '\n' + ' ' + full_aborter + ' ', output)
def test_tracing(self):
self.emcc_args += ['--tracing']
self.do_run_in_out_file_test('tests', 'core', 'test_tracing')
@no_wasm_backend('https://github.com/emscripten-core/emscripten/issues/9527')
def test_eval_ctors(self):
if '-O2' not in str(self.emcc_args) or '-O1' in str(self.emcc_args):
self.skipTest('need js optimizations')
if not self.get_setting('WASM'):
self.skipTest('this test uses wasm binaries')
orig_args = self.emcc_args
print('leave printf in ctor')
self.emcc_args = orig_args + ['-s', 'EVAL_CTORS=1']
self.do_run(r'''
#include <stdio.h>
struct C {
C() { printf("constructing!\n"); } // don't remove this!
};
C c;
int main() {}
''', "constructing!\n")
def get_code_size():
if self.is_wasm():
# Use the number of functions as a proxy for code size
return self.count_wasm_contents('src.cpp.o.wasm', 'funcs')
else:
return os.path.getsize('src.cpp.o.js')
def get_mem_size():
if self.is_wasm():
# Use the size of the memory-data section as a proxy for static memory size
return self.count_wasm_contents('src.cpp.o.wasm', 'memory-data')
if self.uses_memory_init_file():
return os.path.getsize('src.cpp.o.js.mem')
# otherwise we ignore memory size
return 0
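# Build the given test twice - with EVAL_CTORS=1 and without - and require the eval'd build to have less code but more static memory data.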
def do_test(test):
self.emcc_args = orig_args + ['-s', 'EVAL_CTORS=1']
test()
ec_code_size = get_code_size()
ec_mem_size = get_mem_size()
self.emcc_args = orig_args
test()
code_size = get_code_size()
mem_size = get_mem_size()
if mem_size:
print('mem: ', mem_size, '=>', ec_mem_size)
self.assertGreater(ec_mem_size, mem_size)
print('code:', code_size, '=>', ec_code_size)
self.assertLess(ec_code_size, code_size)
print('remove ctor of just assigns to memory')
def test1():
self.do_run(r'''
#include <stdio.h>
struct C {
int x;
C() {
volatile int y = 10;
y++;
x = y;
}
};
C c;
int main() {
printf("x: %d\n", c.x);
}
''', "x: 11\n")
do_test(test1)
if self.is_wasm_backend():
# The wasm backend currently exports a single initializer, so ctor
# evaluation is all or nothing. On top of that, it doesn't currently
# do DCE of libcxx symbols (because they are marked as visibility(default)),
# so we end up not being able to eval ctors unless all
# libcxx constructors can be eval'd.
return
print('libcxx - remove 2 ctors from iostream code')
src = open(path_from_root('tests', 'hello_libcxx.cpp')).read()
output = 'hello, world!'
def test2():
self.do_run(src, output)
do_test(test2)
print('assertions too')
self.set_setting('ASSERTIONS', 1)
self.do_run(src, output)
self.set_setting('ASSERTIONS', 0)
print('remove just some, leave others')
def test3():
self.do_run(r'''
#include <iostream>
#include <string>
class std_string {
public:
std_string(): ptr(nullptr) { std::cout << "std_string()\n"; }
std_string(const char* s): ptr(s) { std::cout << "std_string(const char* s)" << std::endl; }
std_string(const std_string& s): ptr(s.ptr) { std::cout << "std_string(const std_string& s) " << std::endl; }
const char* data() const { return ptr; }
private:
const char* ptr;
};
const std_string txtTestString("212121\0");
const std::string s2text("someweirdtext");
int main() {
std::cout << s2text << std::endl;
std::cout << txtTestString.data() << std::endl;
std::cout << txtTestString.data() << std::endl;
return 0;
}
''', '''std_string(const char* s)
someweirdtext
212121
212121
''') # noqa
do_test(test3)
def test_embind(self):
self.emcc_args += ['--bind']
src = r'''
#include <stdio.h>
#include <emscripten/val.h>
using namespace emscripten;
int main() {
val Math = val::global("Math");
// two ways to call Math.abs
printf("abs(-10): %d\n", Math.call<int>("abs", -10));
printf("abs(-11): %d\n", Math["abs"](-11).as<int>());
return 0;
}
'''
self.do_run(src, 'abs(-10): 10\nabs(-11): 11')
def test_embind_2(self):
self.emcc_args += ['--bind', '--post-js', 'post.js']
create_test_file('post.js', '''
function printLerp() {
out('lerp ' + Module.lerp(100, 200, 66) + '.');
}
''')
src = r'''
#include <stdio.h>
#include <emscripten.h>
#include <emscripten/bind.h>
using namespace emscripten;
int lerp(int a, int b, int t) {
return (100 - t) * a + t * b;
}
EMSCRIPTEN_BINDINGS(my_module) {
function("lerp", &lerp);
}
int main(int argc, char **argv) {
EM_ASM(printLerp());
return 0;
}
'''
self.do_run(src, 'lerp 166')
def test_embind_3(self):
self.emcc_args += ['--bind', '--post-js', 'post.js']
create_test_file('post.js', '''
function ready() {
try {
Module.compute(new Uint8Array([1,2,3]));
} catch(e) {
out(e);
}
}
''')
src = r'''
#include <emscripten.h>
#include <emscripten/bind.h>
using namespace emscripten;
int compute(int array[]) {
return 0;
}
EMSCRIPTEN_BINDINGS(my_module) {
function("compute", &compute, allow_raw_pointers());
}
int main(int argc, char **argv) {
EM_ASM(ready());
return 0;
}
'''
self.do_run(src, 'UnboundTypeError: Cannot call compute due to unbound types: Pi')
@no_wasm_backend('long doubles are f128s in wasm backend')
def test_embind_4(self):
self.emcc_args += ['--bind', '--post-js', 'post.js']
create_test_file('post.js', '''
function printFirstElement() {
out(Module.getBufferView()[0]);
}
''')
src = r'''
#include <emscripten.h>
#include <emscripten/bind.h>
#include <emscripten/val.h>
#include <stdio.h>
using namespace emscripten;
const size_t kBufferSize = 1024;
long double buffer[kBufferSize];
val getBufferView(void) {
val v = val(typed_memory_view(kBufferSize, buffer));
return v;
}
EMSCRIPTEN_BINDINGS(my_module) {
function("getBufferView", &getBufferView);
}
int main(int argc, char **argv) {
buffer[0] = 107;
EM_ASM(printFirstElement());
return 0;
}
'''
self.do_run(src, '107')
def test_embind_5(self):
self.emcc_args += ['--bind']
self.do_run_in_out_file_test('tests', 'core', 'test_embind_5')
def test_embind_custom_marshal(self):
self.emcc_args += ['--bind', '--pre-js', path_from_root('tests', 'embind', 'test_custom_marshal.js')]
self.do_run_in_out_file_test('tests', 'embind', 'test_custom_marshal', assert_identical=True)
def test_embind_float_constants(self):
self.emcc_args += ['--bind']
self.do_run_from_file(path_from_root('tests', 'embind', 'test_float_constants.cpp'),
path_from_root('tests', 'embind', 'test_float_constants.out'))
def test_embind_negative_constants(self):
self.emcc_args += ['--bind']
self.do_run_from_file(path_from_root('tests', 'embind', 'test_negative_constants.cpp'),
path_from_root('tests', 'embind', 'test_negative_constants.out'))
def test_embind_unsigned(self):
self.emcc_args += ['--bind']
self.do_run_from_file(path_from_root('tests', 'embind', 'test_unsigned.cpp'), path_from_root('tests', 'embind', 'test_unsigned.out'))
@no_asan('FIXME #11158')
def test_embind_val(self):
self.emcc_args += ['--bind']
self.do_run_from_file(path_from_root('tests', 'embind', 'test_val.cpp'), path_from_root('tests', 'embind', 'test_val.out'))
def test_embind_no_rtti(self):
create_test_file('pre.js', '''
Module = {};
Module['postRun'] = function() {
out("dotest retured: " + Module.dotest());
};
''')
src = r'''
#include <emscripten/bind.h>
#include <emscripten/val.h>
#include <stdio.h>
int main(int argc, char** argv){
printf("418\n");
return 0;
}
int test() {
return 42;
}
EMSCRIPTEN_BINDINGS(my_module) {
emscripten::function("dotest", &test);
}
'''
self.emcc_args += ['--bind', '-fno-rtti', '-DEMSCRIPTEN_HAS_UNBOUND_TYPE_NAMES=0', '--pre-js', 'pre.js']
self.do_run(src, '418\ndotest returned: 42\n')
def test_embind_no_rtti_followed_by_rtti(self):
create_test_file('pre.js', '''
Module = {};
Module['postRun'] = function() {
out("dotest retured: " + Module.dotest());
};
''')
src = r'''
#include <emscripten/bind.h>
#include <emscripten/val.h>
#include <stdio.h>
int main(int argc, char** argv){
printf("418\n");
return 0;
}
int test() {
return 42;
}
EMSCRIPTEN_BINDINGS(my_module) {
emscripten::function("dotest", &test);
}
'''
self.emcc_args += ['--bind', '-fno-rtti', '-frtti', '--pre-js', 'pre.js']
self.do_run(src, '418\ndotest returned: 42\n')
@sync
def test_webidl(self):
if self.run_name == 'asm2':
self.emcc_args += ['--closure', '1', '-g1'] # extra testing
# avoid closure minified names competing with our test code in the global name space
self.set_setting('MODULARIZE', 1)
def do_test_in_mode(mode, allow_memory_growth):
print('testing mode', mode, ', memory growth =', allow_memory_growth)
# Force IDL checks mode
os.environ['IDL_CHECKS'] = mode
run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
# Export things on "TheModule". This matches the typical use pattern of the bound library
# being used as Box2D.* or Ammo.*, and we cannot rely on "Module" being always present (closure may remove it).
create_test_file('export.js', '''
// test purposes: remove printErr output, whose order is unpredictable when compared to print
err = err = function(){};
''')
self.emcc_args += ['-s', 'EXPORTED_FUNCTIONS=["_malloc"]', '--post-js', 'glue.js', '--post-js', 'export.js']
if allow_memory_growth:
self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH', '-Wno-almost-asm']
shutil.copyfile(path_from_root('tests', 'webidl', 'test.h'), 'test.h')
shutil.copyfile(path_from_root('tests', 'webidl', 'test.cpp'), 'test.cpp')
src = open('test.cpp').read()
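# post() appends a small driver to the generated JS: it instantiates the module as TheModule (calling Module() in the asm2/MODULARIZE run), sets isMemoryGrowthAllowed, and pulls in webidl/post.js.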
def post(filename):
with open(filename, 'a') as f:
f.write('\n\n')
if self.run_name == 'asm2':
f.write('var TheModule = Module();\n')
else:
f.write('var TheModule = Module;\n')
f.write('\n\n')
if allow_memory_growth:
f.write("var isMemoryGrowthAllowed = true;")
else:
f.write("var isMemoryGrowthAllowed = false;")
f.write(open(path_from_root('tests', 'webidl', 'post.js')).read())
f.write('\n\n')
output = open(path_from_root('tests', 'webidl', "output_%s.txt" % mode)).read()
self.do_run(src, output, post_build=post, output_nicerizer=(lambda out, err: out))
do_test_in_mode('ALL', False)
do_test_in_mode('FAST', False)
do_test_in_mode('DEFAULT', False)
do_test_in_mode('ALL', True)
### Tests for tools
@no_wasm2js('TODO: source maps in wasm2js')
def test_source_map(self):
if '-g' not in self.emcc_args:
self.emcc_args.append('-g')
src = '''
#include <stdio.h>
#include <assert.h>
__attribute__((noinline)) int foo() {
printf("hi"); // line 6
return 1; // line 7
}
int main() {
printf("%d", foo()); // line 11
return 0; // line 12
}
'''
create_test_file('src.cpp', src)
out_filename = 'a.out.js'
wasm_filename = 'a.out.wasm'
no_maps_filename = 'no-maps.out.js'
assert '-g4' not in self.emcc_args
building.emcc('src.cpp',
self.serialize_settings() + self.emcc_args + self.emcc_args,
out_filename)
# the file name may find its way into the generated code, so make sure we
# can do an apples-to-apples comparison by compiling with the same file name
shutil.move(out_filename, no_maps_filename)
with open(no_maps_filename) as f:
no_maps_file = f.read()
no_maps_file = re.sub(' *//[@#].*$', '', no_maps_file, flags=re.MULTILINE)
self.emcc_args.append('-g4')
building.emcc(os.path.abspath('src.cpp'),
self.serialize_settings() + self.emcc_args + self.emcc_args,
out_filename,
stderr=PIPE)
map_referent = out_filename if not self.get_setting('WASM') else wasm_filename
# after removing the @line and @sourceMappingURL comments, the build
# result should be identical to the non-source-mapped debug version.
# this is worth checking because the parser AST swaps strings for token
# objects when generating source maps, so we want to make sure the
# optimizer can deal with both types.
map_filename = map_referent + '.map'
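# Recursively convert unicode strings in the parsed source-map JSON to UTF-8 bytes (only needed on Python 2).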
def encode_utf8(data):
if isinstance(data, dict):
for key in data:
data[key] = encode_utf8(data[key])
return data
elif isinstance(data, list):
for i in range(len(data)):
data[i] = encode_utf8(data[i])
return data
elif isinstance(data, type(u'')):
return data.encode('utf8')
else:
return data
def source_map_file_loc(name):
if shared.Settings.WASM_BACKEND:
return name
# in fastcomp, we have the absolute path, which is not good
return os.path.abspath(name)
data = json.load(open(map_filename))
if str is bytes:
# Python 2 compatibility
data = encode_utf8(data)
if hasattr(data, 'file'):
# the file attribute is optional, but if it is present it needs to refer
# to the output file.
self.assertPathsIdentical(map_referent, data['file'])
assert len(data['sources']) == 1, data['sources']
self.assertPathsIdentical(source_map_file_loc('src.cpp'), data['sources'][0])
if hasattr(data, 'sourcesContent'):
# the sourcesContent attribute is optional, but if it is present it
# needs to contain valid source text.
self.assertTextDataIdentical(src, data['sourcesContent'][0])
mappings = json.loads(jsrun.run_js(
path_from_root('tools', 'source-maps', 'sourcemap2json.js'),
shared.NODE_JS, [map_filename]))
if str is bytes:
# Python 2 compatibility
mappings = encode_utf8(mappings)
seen_lines = set()
for m in mappings:
self.assertPathsIdentical(source_map_file_loc('src.cpp'), m['source'])
seen_lines.add(m['originalLine'])
# ensure that all the 'meaningful' lines in the original code get mapped
# when optimizing, the binaryen optimizer may remove some of them (by inlining, etc.)
if is_optimizing(self.emcc_args):
assert seen_lines.issuperset([11, 12]), seen_lines
else:
assert seen_lines.issuperset([6, 7, 11, 12]), seen_lines
@no_wasm2js('TODO: source maps in wasm2js')
@no_fastcomp('DWARF is only supported in upstream')
def test_dwarf(self):
self.emcc_args.append('-g')
create_test_file('src.cpp', '''
#include <emscripten.h>
EM_JS(int, out_to_js, (int x), {})
void foo() {
out_to_js(0); // line 5
out_to_js(1); // line 6
out_to_js(2); // line 7
// A silly possible recursion to avoid binaryen doing any inlining.
if (out_to_js(3)) foo();
}
int main() {
foo();
}
''')
js_filename = 'a.out.js'
wasm_filename = 'a.out.wasm'
building.emcc('src.cpp',
self.serialize_settings() + self.emcc_args,
js_filename)
LLVM_DWARFDUMP = os.path.join(LLVM_ROOT, 'llvm-dwarfdump')
out = run_process([LLVM_DWARFDUMP, wasm_filename, '-all'], stdout=PIPE).stdout
# parse the sections
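# Split the llvm-dwarfdump output into a dict of section name -> body, keyed off the '<section> contents:' header lines.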
sections = {}
curr_section_name = ''
curr_section_body = ''
def add_section():
if curr_section_name:
sections[curr_section_name] = curr_section_body
for line in out.splitlines():
if ' contents:' in line:
# a new section, a line like ".debug_str contents:"
add_section()
curr_section_name = line.split(' ')[0]
curr_section_body = ''
else:
# possibly a line in a section
if curr_section_name:
curr_section_body += line + '\n'
add_section()
# make sure the right sections exist
self.assertIn('.debug_abbrev', sections)
self.assertIn('.debug_info', sections)
self.assertIn('.debug_line', sections)
self.assertIn('.debug_str', sections)
self.assertIn('.debug_ranges', sections)
# verify some content in the sections
self.assertIn('"src.cpp"', sections['.debug_info'])
# the line section looks like this:
# Address Line Column File ISA Discriminator Flags
# ------------------ ------ ------ ------ --- ------------- -------------
# 0x000000000000000b 5 0 3 0 0 is_stmt
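# Map each (line, column) pair in the DWARF line table to the list of code addresses emitted for it.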
src_to_addr = {}
for line in sections['.debug_line'].splitlines():
if line.startswith('0x'):
while ' ' in line:
line = line.replace(' ', ' ')
addr, line, col = line.split(' ')[:3]
key = (int(line), int(col))
src_to_addr.setdefault(key, []).append(addr)
# each of the calls must remain in the binary, and be mapped
self.assertIn((5, 9), src_to_addr)
self.assertIn((6, 9), src_to_addr)
self.assertIn((7, 9), src_to_addr)
def get_dwarf_addr(line, col):
addrs = src_to_addr[(line, col)]
assert len(addrs) == 1, 'we assume the simple calls have one address'
return int(addrs[0], 0)
# the lines must appear in sequence (as calls to JS, the optimizer cannot
# reorder them)
self.assertLess(get_dwarf_addr(5, 9), get_dwarf_addr(6, 9))
self.assertLess(get_dwarf_addr(6, 9), get_dwarf_addr(7, 9))
# get the wat, printing with -g which has binary offsets
wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'),
wasm_filename, '-g', '--print'], stdout=PIPE).stdout
# we expect to see a pattern like this, as in both debug and opt builds
# there isn't much that can change with such calls to JS (they can't be
# reordered or anything else):
#
# ;; code offset: 0x?
# (drop
# ;; code offset: 0x?
# (call $out_to_js
# ;; code offset: 0x?
# (local.get ?) or (i32.const ?)
# )
# )
#
# In stacky stream of instructions form, it is
# local.get or i32.const
# call $out_to_js
# drop
# get_wat_addr gets the address of one of the 3 interesting calls, by its
# index (0,1,2).
def get_wat_addr(call_index):
# find the call_index-th call
call_loc = -1
for i in range(call_index + 1):
call_loc = wat.find('call $out_to_js', call_loc + 1)
assert call_loc > 0
# the call begins with the local.get/i32.const printed below it, which is
# the first instruction in the stream, so it has the lowest address
start_addr_loc = wat.find('0x', call_loc)
assert start_addr_loc > 0
start_addr_loc_end = wat.find('\n', start_addr_loc)
start_addr = int(wat[start_addr_loc:start_addr_loc_end], 0)
# the call ends with the drop, which is the last in the stream, at the
# highest address
end_addr_loc = wat.rfind('drop', 0, call_loc)
assert end_addr_loc > 0
end_addr_loc = wat.rfind('0x', 0, end_addr_loc)
assert end_addr_loc > 0
end_addr_loc_end = wat.find('\n', end_addr_loc)
assert end_addr_loc_end > 0
end_addr = int(wat[end_addr_loc:end_addr_loc_end], 0)
return (start_addr, end_addr)
# match up the DWARF and the wat
for i in range(3):
dwarf_addr = get_dwarf_addr(5 + i, 9)
start_wat_addr, end_wat_addr = get_wat_addr(i)
# the dwarf may match any of the 3 instructions that form the stream
# of instructions implementing the call in the source code, in theory
self.assertLessEqual(start_wat_addr, dwarf_addr)
self.assertLessEqual(dwarf_addr, end_wat_addr)
def test_modularize_closure_pre(self):
# test that the combination of modularize + closure + pre-js works. in that mode,
# closure should not minify the Module object in a way that the pre-js cannot use it.
self.emcc_args += [
'--pre-js', path_from_root('tests', 'core', 'modularize_closure_pre.js'),
'--closure', '1',
'-g1',
'-s',
'MODULARIZE=1',
]
def post(filename):
with open(filename, 'a') as f:
f.write('\n\n')
f.write('var TheModule = Module();\n')
self.do_run_in_out_file_test('tests', 'core', 'modularize_closure_pre', post_build=post)
@no_wasm('wasmifying destroys debug info and stack traceability')
@no_wasm2js('source maps support')
def test_emscripten_log(self):
self.banned_js_engines = [V8_ENGINE] # v8 doesn't support console.log
self.emcc_args += ['-s', 'DEMANGLE_SUPPORT=1']
if self.get_setting('ASM_JS'):
# XXX Does not work in SpiderMonkey since callstacks cannot be captured when running in asm.js, see https://bugzilla.mozilla.org/show_bug.cgi?id=947996
self.banned_js_engines += [SPIDERMONKEY_ENGINE]
if '-g' not in self.emcc_args:
self.emcc_args.append('-g')
self.emcc_args += ['-DRUN_FROM_JS_SHELL']
self.do_run(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read(), '''test print 123
12.345679 9.123457 1.353180
12345678 9123456 1353179
12.345679 9123456 1353179
12345678 9.123457 1353179
12345678 9123456 1.353180
12345678 9.123457 1.353180
12.345679 9123456 1.353180
12.345679 9.123457 1353179
Success!
''')
# test closure compiler as well
if self.run_name == 'asm2':
print('closure')
self.emcc_args += ['--closure', '1', '-g1'] # extra testing
self.do_run_in_out_file_test('tests', 'emscripten_log', 'emscripten_log_with_closure')
def test_float_literals(self):
self.do_run_in_out_file_test('tests', 'test_float_literals')
def test_exit_status(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
src = r'''
#include <stdio.h>
#include <stdlib.h>
static void cleanup() {
printf("cleanup\n");
}
int main() {
atexit(cleanup); // this atexit should still be called
printf("hello, world!\n");
// Unusual exit status to make sure it's working!
if (CAPITAL_EXIT) {
_Exit(118);
} else {
exit(118);
}
}
'''
create_test_file('pre.js', '''
Module.preInit = function() {
addOnExit(function () {
out('I see exit status: ' + EXITSTATUS);
});
}
''')
self.emcc_args += ['--pre-js', 'pre.js']
self.do_run(src.replace('CAPITAL_EXIT', '0'), 'hello, world!\ncleanup\nI see exit status: 118', assert_returncode=None)
self.do_run(src.replace('CAPITAL_EXIT', '1'), 'hello, world!\ncleanup\nI see exit status: 118', assert_returncode=None)
def test_noexitruntime(self):
src = r'''
#include <emscripten.h>
#include <stdio.h>
static int testPre = TEST_PRE;
struct Global {
Global() {
printf("in Global()\n");
if (testPre) { EM_ASM(noExitRuntime = true;); }
}
~Global() { printf("ERROR: in ~Global()\n"); }
} global;
int main() {
if (!testPre) { EM_ASM(noExitRuntime = true;); }
printf("in main()\n");
}
'''
self.do_run(src.replace('TEST_PRE', '0'), 'in Global()\nin main()')
self.do_run(src.replace('TEST_PRE', '1'), 'in Global()\nin main()')
def test_minmax(self):
self.do_run(open(path_from_root('tests', 'test_minmax.c')).read(), 'NAN != NAN\nSuccess!')
def test_locale(self):
self.do_run_from_file(path_from_root('tests', 'test_locale.c'), path_from_root('tests', 'test_locale.out'))
def test_vswprintf_utf8(self):
self.do_run_from_file(path_from_root('tests', 'vswprintf_utf8.c'), path_from_root('tests', 'vswprintf_utf8.out'))
@no_asan('asan is not compatible with asyncify stack operations; may also need to not instrument asan_c_load_4, TODO')
def test_async(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
self.banned_js_engines = [SPIDERMONKEY_ENGINE, V8_ENGINE] # needs setTimeout which only node has
if self.is_wasm_backend():
self.set_setting('ASYNCIFY', 1)
else:
self.skipTest('fastcomp Asyncify was removed')
src = r'''
#include <stdio.h>
#include <emscripten.h>
void f(void *p) {
*(int*)p = 99;
printf("!");
}
int main() {
int i = 0;
printf("Hello");
emscripten_async_call(f, &i, 1);
printf("World");
emscripten_sleep(100);
printf("%d\n", i);
}
'''
self.do_run(src, 'HelloWorld!99')
if self.is_wasm_backend():
print('check bad ccall use')
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("Hello");
emscripten_sleep(100);
printf("World\n");
}
'''
self.set_setting('ASSERTIONS', 1)
self.set_setting('INVOKE_RUN', 0)
create_test_file('pre.js', '''
Module['onRuntimeInitialized'] = function() {
try {
ccall('main', 'number', ['number', 'string'], [2, 'waka']);
var never = true;
} catch(e) {
out(e);
assert(!never);
}
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
self.do_run(src, 'The call to main is running asynchronously.')
print('check reasonable ccall use')
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("Hello");
emscripten_sleep(100);
printf("World\n");
}
'''
create_test_file('pre.js', '''
Module['onRuntimeInitialized'] = function() {
ccall('main', null, ['number', 'string'], [2, 'waka'], { async: true });
};
''')
self.do_run(src, 'HelloWorld')
print('check ccall promise')
self.set_setting('EXPORTED_FUNCTIONS', ['_stringf', '_floatf'])
src = r'''
#include <stdio.h>
#include <emscripten.h>
extern "C" {
const char* stringf(char* param) {
emscripten_sleep(20);
printf(param);
return "second";
}
double floatf() {
emscripten_sleep(20);
emscripten_sleep(20);
return 6.4;
}
}
'''
create_test_file('pre.js', r'''
Module['onRuntimeInitialized'] = function() {
ccall('stringf', 'string', ['string'], ['first\n'], { async: true })
.then(function(val) {
console.log(val);
ccall('floatf', 'number', null, null, { async: true }).then(console.log);
});
};
''')
self.do_run(src, 'first\nsecond\n6.4')
@no_wasm_backend('ASYNCIFY coroutines are not yet supported in the LLVM wasm backend')
def do_test_coroutine(self, additional_settings):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME', 1)
src = open(path_from_root('tests', 'test_coroutines.cpp')).read()
for (k, v) in additional_settings.items():
self.set_setting(k, v)
self.do_run(src, '*leaf-0-100-1-101-1-102-2-103-3-104-5-105-8-106-13-107-21-108-34-109-*')
@no_wasm_backend('ASYNCIFY coroutines are not yet supported in the LLVM wasm backend')
@no_fastcomp('ASYNCIFY has been removed from fastcomp')
def test_coroutine_asyncify(self):
self.do_test_coroutine({'ASYNCIFY': 1})
@no_asan('asyncify stack operations confuse asan')
@no_fastcomp('Fibers are not implemented for fastcomp')
def test_fibers_asyncify(self):
self.set_setting('ASYNCIFY', 1)
src = open(path_from_root('tests', 'test_fibers.cpp')).read()
self.do_run(src, '*leaf-0-100-1-101-1-102-2-103-3-104-5-105-8-106-13-107-21-108-34-109-*')
@no_wasm_backend('ASYNCIFY is not supported in the LLVM wasm backend')
@no_fastcomp('ASYNCIFY has been removed from fastcomp')
def test_asyncify_unused(self):
# test a program not using asyncify, but the pref is set
self.set_setting('ASYNCIFY', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_hello_world')
@parameterized({
'normal': ([], True),
'blacklist_a': (['-s', 'ASYNCIFY_BLACKLIST=["foo(int, double)"]'], False),
'blacklist_b': (['-s', 'ASYNCIFY_BLACKLIST=["bar()"]'], True),
'blacklist_c': (['-s', 'ASYNCIFY_BLACKLIST=["baz()"]'], False),
'whitelist_a': (['-s', 'ASYNCIFY_WHITELIST=["main","__original_main","foo(int, double)","baz()","c_baz","Structy::funcy()","bar()"]'], True),
'whitelist_b': (['-s', 'ASYNCIFY_WHITELIST=["main","__original_main","foo(int, double)","baz()","c_baz","Structy::funcy()"]'], True),
'whitelist_c': (['-s', 'ASYNCIFY_WHITELIST=["main","__original_main","foo(int, double)","baz()","c_baz"]'], False),
'whitelist_d': (['-s', 'ASYNCIFY_WHITELIST=["foo(int, double)","baz()","c_baz","Structy::funcy()"]'], False),
'whitelist_b_response': ([], True, '["main","__original_main","foo(int, double)","baz()","c_baz","Structy::funcy()"]'),
'whitelist_c_response': ([], False, '["main","__original_main","foo(int, double)","baz()","c_baz"]'),
})
@no_asan('asan is not compatible with asyncify stack operations; may also need to not instrument asan_c_load_4, TODO')
@no_fastcomp('new asyncify only')
def test_asyncify_lists(self, args, should_pass, response=None):
if response is not None:
create_test_file('response.file', response)
self.emcc_args += ['-s', '[email protected]']
self.set_setting('ASYNCIFY', 1)
self.emcc_args += args
try:
self.do_run_in_out_file_test('tests', 'core', 'test_asyncify_lists', assert_identical=True)
if not should_pass:
should_pass = True
raise Exception('should not have passed')
except Exception:
if should_pass:
raise
@no_asan('asyncify stack operations confuse asan')
@no_fastcomp('wasm-backend specific feature')
def test_emscripten_scan_registers(self):
self.set_setting('ASYNCIFY', 1)
self.do_run_in_out_file_test('tests', 'core', 'emscripten_scan_registers')
@no_fastcomp('wasm-backend specific feature')
def test_asyncify_assertions(self):
self.set_setting('ASYNCIFY', 1)
self.set_setting('ASYNCIFY_IMPORTS', ['suspend'])
self.set_setting('ASSERTIONS', 1)
self.do_run_in_out_file_test('tests', 'core', 'asyncify_assertions')
@no_asan('asyncify stack operations confuse asan')
@no_fastcomp('wasm-backend specific feature')
@no_wasm2js('TODO: lazy loading in wasm2js')
@parameterized({
'conditional': (True,),
'unconditional': (False,),
})
def test_emscripten_lazy_load_code(self, conditional):
self.set_setting('ASYNCIFY', 1)
self.set_setting('ASYNCIFY_LAZY_LOAD_CODE', 1)
self.set_setting('ASYNCIFY_IGNORE_INDIRECT', 1)
self.set_setting('MALLOC', 'emmalloc')
self.emcc_args += ['--profiling-funcs'] # so that we can find the functions for the changes below
if conditional:
self.emcc_args += ['-DCONDITIONAL']
self.do_run_in_out_file_test('tests', 'core', 'emscripten_lazy_load_code', args=['0'])
first_size = os.path.getsize('src.cpp.o.wasm')
second_size = os.path.getsize('src.cpp.o.wasm.lazy.wasm')
print('first wasm size', first_size)
print('second wasm size', second_size)
if not conditional and is_optimizing(self.emcc_args):
# If the call to lazy-load is unconditional, then the optimizer can dce
# out more than half
self.assertLess(first_size, 0.5 * second_size)
with open('src.cpp.o.wasm', 'rb') as f:
with open('src.cpp.o.wasm.lazy.wasm', 'rb') as g:
self.assertNotEqual(f.read(), g.read())
# attempts to "break" the wasm by adding an unreachable in $foo_end. returns whether we found it.
def break_wasm(name):
wat = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), name], stdout=PIPE).stdout
lines = wat.splitlines()
wat = None
for i in range(len(lines)):
if '(func $foo_end ' in lines[i]:
j = i + 1
while '(local ' in lines[j]:
j += 1
# we found the first line after the local defs
lines[j] = '(unreachable)' + lines[j]
wat = '\n'.join(lines)
break
if wat is None:
# $foo_end is not present in the wasm, nothing to break
shutil.copyfile(name, name + '.orig')
return False
with open('wat.wat', 'w') as f:
f.write(wat)
shutil.move(name, name + '.orig')
run_process([os.path.join(building.get_binaryen_bin(), 'wasm-as'), 'wat.wat', '-o', name, '-g'])
return True
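# A working build prints 'foo_end'; after break_wasm injects an unreachable into $foo_end, the output must no longer contain it.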
def verify_working(args=['0']):
self.assertContained('foo_end', run_js('src.cpp.o.js', args=args))
def verify_broken(args=['0']):
self.assertNotContained('foo_end', run_js('src.cpp.o.js', args=args, stderr=STDOUT, assert_returncode=None))
# the first-loaded wasm will not reach the second call, since we call it after lazy-loading.
# verify that by changing the first wasm to throw in that function
found_foo_end = break_wasm('src.cpp.o.wasm')
if not conditional and is_optimizing(self.emcc_args):
self.assertFalse(found_foo_end, 'should have optimized out $foo_end')
verify_working()
# but breaking the second wasm actually breaks us
break_wasm('src.cpp.o.wasm.lazy.wasm')
verify_broken()
# restore
shutil.copyfile('src.cpp.o.wasm.orig', 'src.cpp.o.wasm')
shutil.copyfile('src.cpp.o.wasm.lazy.wasm.orig', 'src.cpp.o.wasm.lazy.wasm')
verify_working()
if conditional:
# if we do not call the lazy load function, then we do not need the lazy wasm,
# and we do the second call in the first wasm
os.remove('src.cpp.o.wasm.lazy.wasm')
verify_broken()
verify_working(['42'])
break_wasm('src.cpp.o.wasm')
verify_broken()
# Test basic wasm2js functionality in all core compilation modes.
@no_fastcomp('wasm-backend specific feature')
@no_asan('no wasm2js support yet in asan')
def test_wasm2js(self):
if self.get_setting('WASM') == 0:
self.skipTest('redundant to test wasm2js in wasm2js* mode')
self.set_setting('WASM', 0)
self.do_run_in_out_file_test('tests', 'core', 'test_hello_world')
# a mem init file is emitted just like with JS
expect_memory_init_file = self.uses_memory_init_file()
see_memory_init_file = os.path.exists('src.c.o.js.mem')
assert expect_memory_init_file == see_memory_init_file, 'memory init file expectation wrong: %s' % expect_memory_init_file
if see_memory_init_file:
with open('src.c.o.js.mem', 'rb') as f:
self.assertTrue(f.read()[-1] != b'\0')
@no_fastcomp('wasm-backend specific feature')
@no_asan('no wasm2js support yet in asan')
def test_maybe_wasm2js(self):
if self.get_setting('WASM') == 0:
self.skipTest('redundant to test wasm2js in wasm2js* mode')
self.set_setting('MAYBE_WASM2JS', 1)
# see that running as wasm works
self.do_run_in_out_file_test('tests', 'core', 'test_hello_world')
# run wasm2js, bundle the code, and use the wasm2js path
cmd = [PYTHON, path_from_root('tools', 'maybe_wasm2js.py'), 'src.c.o.js', 'src.c.o.wasm']
if is_optimizing(self.emcc_args):
cmd += ['-O2']
run_process(cmd, stdout=open('do_wasm2js.js', 'w')).stdout
# remove the wasm to make sure we never use it again
os.remove('src.c.o.wasm')
# verify that it runs
self.assertContained('hello, world!', run_js('do_wasm2js.js'))
@no_fastcomp('wasm-backend specific feature')
@no_asan('no wasm2js support yet in asan')
def test_wasm2js_fallback(self):
if self.get_setting('WASM') == 0:
self.skipTest('redundant to test wasm2js in wasm2js* mode')
for args in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
cmd = [EMCC, path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2'] + args
run_process(cmd)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('a.out.wasm.js', 'a.out.wasm.js.unused')
self.assertContained('hello!', run_js('a.out.js'))
os.rename('a.out.wasm.js.unused', 'a.out.wasm.js')
# Then disable WebAssembly support in the VM and try again. It should still work via the Wasm2JS fallback.
open('b.out.js', 'w').write('WebAssembly = undefined;\n' + open('a.out.js', 'r').read())
os.remove('a.out.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.assertContained('hello!', run_js('b.out.js'))
def test_cxx_self_assign(self):
# See https://github.com/emscripten-core/emscripten/pull/2688 and http://llvm.org/bugs/show_bug.cgi?id=18735
self.do_run(r'''
#include <map>
#include <stdio.h>
int main() {
std::map<int, int> m;
m[0] = 1;
m = m;
// size should still be one after self assignment
if (m.size() == 1) {
printf("ok.\n");
}
}
''', 'ok.')
def test_memprof_requirements(self):
# This test checks for the global variables required to run the memory
# profiler. It would fail if these variables were made no longer global
# or if their identifiers were changed.
create_test_file('main.cpp', '''
extern "C" {
void check_memprof_requirements();
}
int main() {
check_memprof_requirements();
return 0;
}
''')
create_test_file('lib.js', '''
mergeInto(LibraryManager.library, {
check_memprof_requirements: function() {
if (typeof STATIC_BASE === 'number' &&
typeof STACK_BASE === 'number' &&
typeof STACK_MAX === 'number' &&
typeof STACKTOP === 'number' &&
typeof DYNAMIC_BASE === 'number' &&
typeof DYNAMICTOP_PTR === 'number') {
out('able to run memprof');
} else {
out('missing the required variables to run memprof');
}
}
});
''')
self.emcc_args += ['--js-library', 'lib.js']
self.do_run(open('main.cpp').read(), 'able to run memprof')
def test_fs_dict(self):
self.set_setting('FORCE_FILESYSTEM', 1)
self.emcc_args += ['-lidbfs.js']
self.emcc_args += ['-lnodefs.js']
create_test_file('pre.js', '''
Module = {};
Module['preRun'] = function() {
out(typeof FS.filesystems['MEMFS']);
out(typeof FS.filesystems['IDBFS']);
out(typeof FS.filesystems['NODEFS']);
// Globals
console.log(typeof MEMFS);
console.log(typeof IDBFS);
console.log(typeof NODEFS);
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
self.do_run('int main() { return 0; }', 'object\nobject\nobject\nobject\nobject\nobject')
def test_fs_dict_none(self):
# if IDBFS and NODEFS are not enabled, they are not present.
self.set_setting('FORCE_FILESYSTEM', 1)
self.set_setting('ASSERTIONS', 1)
create_test_file('pre.js', '''
Module = {};
Module['preRun'] = function() {
out(typeof FS.filesystems['MEMFS']);
out(typeof FS.filesystems['IDBFS']);
out(typeof FS.filesystems['NODEFS']);
// Globals
if (ASSERTIONS) {
console.log(typeof MEMFS);
console.log(IDBFS);
console.log(NODEFS);
FS.mkdir('/working1');
try {
FS.mount(IDBFS, {}, '/working1');
} catch (e) {
console.log('|' + e + '|');
}
}
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
expected = '''\
object
undefined
undefined
object
IDBFS is no longer included by default; build with -lidbfs.js
NODEFS is no longer included by default; build with -lnodefs.js
|IDBFS is no longer included by default; build with -lidbfs.js|'''
self.do_run('int main() { return 0; }', expected)
@sync
@no_wasm_backend("https://github.com/emscripten-core/emscripten/issues/9039")
def test_stack_overflow_check(self):
args = self.emcc_args + ['-s', 'TOTAL_STACK=1048576']
self.emcc_args = args + ['-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'ASSERTIONS=0']
self.do_run(open(path_from_root('tests', 'stack_overflow.cpp')).read(), 'Stack overflow! Attempted to allocate', assert_returncode=None)
self.emcc_args = args + ['-s', 'ASSERTIONS=1']
self.do_run(open(path_from_root('tests', 'stack_overflow.cpp')).read(), 'Stack overflow! Attempted to allocate', assert_returncode=None)
@no_wasm_backend('uses BINARYEN_TRAP_MODE (the wasm backend only supports non-trapping)')
def test_binaryen_trap_mode(self):
if not self.is_wasm():
self.skipTest('wasm test')
TRAP_OUTPUTS = ('trap', 'RuntimeError')
default = 'allow'
print('default is', default)
for mode in ['js', 'clamp', 'allow', '']:
if mode == 'js' and self.is_wasm_backend():
# wasm backend does not use asm2wasm imports, which js trap mode requires
continue
print('mode:', mode)
self.set_setting('BINARYEN_TRAP_MODE', mode or default)
if not mode:
mode = default
print(' idiv')
self.do_run(open(path_from_root('tests', 'wasm', 'trap-idiv.cpp')).read(), {
'js': '|0|',
'clamp': '|0|',
'allow': TRAP_OUTPUTS
}[mode], assert_returncode=None)
print(' f2i')
self.do_run(open(path_from_root('tests', 'wasm', 'trap-f2i.cpp')).read(), {
'js': '|1337|\n|4294967295|', # JS did an fmod 2^32 | normal
'clamp': '|-2147483648|\n|4294967295|',
'allow': TRAP_OUTPUTS
}[mode], assert_returncode=None)
@also_with_standalone_wasm
def test_sbrk(self):
self.do_run(open(path_from_root('tests', 'sbrk_brk.cpp')).read(), 'OK.')
def test_brk(self):
self.emcc_args += ['-DTEST_BRK=1']
self.do_run(open(path_from_root('tests', 'sbrk_brk.cpp')).read(), 'OK.')
# Tests that we can use the dlmalloc mallinfo() function to obtain information
# about malloc()ed blocks and compute how much memory is used/freed.
@no_asan('mallinfo is not part of ASan malloc')
def test_mallinfo(self):
self.do_run(open(path_from_root('tests', 'mallinfo.cpp')).read(), 'OK.')
@no_asan('cannot replace malloc/free with ASan')
def test_wrap_malloc(self):
self.do_run(open(path_from_root('tests', 'wrap_malloc.cpp')).read(), 'OK.')
def test_environment(self):
self.set_setting('ASSERTIONS', 1)
def test():
self.do_run_in_out_file_test('tests', 'core', 'test_hello_world', assert_returncode=None)
js = open('src.c.o.js').read()
assert ('require(' in js) == ('node' in self.get_setting('ENVIRONMENT')), 'we should have require() calls only if node js specified'
for engine in JS_ENGINES:
print(engine)
# set us to test in just this engine
self.banned_js_engines = [e for e in JS_ENGINES if e != engine]
# tell the compiler to build with just that engine
if engine == NODE_JS:
right = 'node'
wrong = 'shell'
else:
right = 'shell'
wrong = 'node'
# test with the right env
self.set_setting('ENVIRONMENT', right)
print('ENVIRONMENT =', self.get_setting('ENVIRONMENT'))
test()
# test with the wrong env
self.set_setting('ENVIRONMENT', wrong)
print('ENVIRONMENT =', self.get_setting('ENVIRONMENT'))
try:
test()
raise Exception('unexpected success')
except Exception as e:
self.assertContained('not compiled for this environment', str(e))
# test with a combined env
self.set_setting('ENVIRONMENT', right + ',' + wrong)
print('ENVIRONMENT =', self.get_setting('ENVIRONMENT'))
test()
def test_dfe(self):
if not self.supports_js_dfe():
self.skipTest('dfe-only')
self.set_setting('ELIMINATE_DUPLICATE_FUNCTIONS', 1)
self.do_run_in_out_file_test('tests', 'core', 'test_hello_world')
self.emcc_args += ['-g2'] # test for issue #6331
self.do_run_in_out_file_test('tests', 'core', 'test_hello_world')
def test_postrun_exception(self):
# verify that an exception thrown in postRun() will not trigger the
# compilation failed handler, and will be printed to stderr.
self.add_post_run('ThisFunctionDoesNotExist()')
src = open(path_from_root('tests', 'core', 'test_hello_world.c')).read()
self.build(src, self.get_dir(), 'src.c')
output = run_js('src.c.o.js', assert_returncode=None, stderr=STDOUT)
self.assertNotContained('failed to asynchronously prepare wasm', output)
self.assertContained('hello, world!', output)
self.assertContained('ThisFunctionDoesNotExist is not defined', output)
# Tests that building with -s DECLARE_ASM_MODULE_EXPORTS=0 works
def test_no_declare_asm_module_exports(self):
self.set_setting('DECLARE_ASM_MODULE_EXPORTS', 0)
self.set_setting('WASM_ASYNC_COMPILATION', 0)
self.maybe_closure()
self.do_run(open(path_from_root('tests', 'declare_asm_module_exports.cpp')).read(), 'jsFunction: 1')
js = open('src.cpp.o.js').read()
    occurrences = js.count('cFunction')
    if is_optimizing(self.emcc_args) and '-g' not in self.emcc_args:
      # In optimized builds, only the single cFunction reference inside the EM_ASM block should remain
      if self.is_wasm():
        self.assertEqual(occurrences, 1)
      else:
        # With js, the asm module itself also contains a reference to the cFunction name
        self.assertEqual(occurrences, 2)
    else:
      print(occurrences)
# Tests that building with -s DECLARE_ASM_MODULE_EXPORTS=0 works
def test_minimal_runtime_no_declare_asm_module_exports(self):
self.set_setting('DECLARE_ASM_MODULE_EXPORTS', 0)
self.set_setting('WASM_ASYNC_COMPILATION', 0)
self.maybe_closure()
self.set_setting('MINIMAL_RUNTIME', 1)
self.do_run(open(path_from_root('tests', 'declare_asm_module_exports.cpp')).read(), 'jsFunction: 1')
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
@parameterized({
'default': ([],),
'streaming': (['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION=1'],),
'streaming_inst': (['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION=1'],),
'no_export': (['-s', 'DECLARE_ASM_MODULE_EXPORTS=0'],)
})
def test_minimal_runtime_hello_world(self, args):
# TODO: Support for non-Node.js shells has not yet been added to MINIMAL_RUNTIME
self.banned_js_engines = [V8_ENGINE, SPIDERMONKEY_ENGINE]
self.emcc_args = ['-s', 'MINIMAL_RUNTIME=1'] + args
self.set_setting('MINIMAL_RUNTIME', 1)
self.maybe_closure()
self.do_run(open(path_from_root('tests', 'small_hello_world.c')).read(), 'hello')
# Test that printf() works in MINIMAL_RUNTIME=1
@parameterized({
'fs': (['-s', 'FORCE_FILESYSTEM=1'],),
'nofs': (['-s', 'NO_FILESYSTEM=1'],),
})
def test_minimal_runtime_hello_printf(self, args):
self.emcc_args = ['-s', 'MINIMAL_RUNTIME=1'] + args
self.maybe_closure()
self.do_run(open(path_from_root('tests', 'hello_world.c')).read(), 'hello, world!')
# Tests that -s MINIMAL_RUNTIME=1 works well with SAFE_HEAP
def test_minimal_runtime_safe_heap(self):
self.emcc_args = ['-s', 'MINIMAL_RUNTIME=1', '-s', 'SAFE_HEAP=1']
self.maybe_closure()
self.do_run(open(path_from_root('tests', 'small_hello_world.c')).read(), 'hello')
# Tests global initializer with -s MINIMAL_RUNTIME=1
def test_minimal_runtime_global_initializer(self):
self.set_setting('MINIMAL_RUNTIME', 1)
self.maybe_closure()
self.do_run(open(path_from_root('tests', 'test_global_initializer.cpp')).read(), 't1 > t0: 1')
@no_fastcomp('return address not supported on fastcomp')
@no_optimize('return address test cannot work with optimizations')
def test_return_address(self):
self.emcc_args += ['-s', 'USE_OFFSET_CONVERTER']
self.do_run(open(path_from_root('tests', 'core', 'test_return_address.cpp')).read(), 'passed')
@no_wasm2js('TODO: sanitizers in wasm2js')
@no_fastcomp('ubsan not supported on fastcomp')
@no_asan('-fsanitize-minimal-runtime cannot be used with ASan')
def test_ubsan_minimal_too_many_errors(self):
self.emcc_args += ['-fsanitize=undefined', '-fsanitize-minimal-runtime']
if self.get_setting('WASM') == 0:
if is_optimizing(self.emcc_args):
self.skipTest('test can only be run without optimizations on asm.js')
# Need to use `-g` to get proper line numbers in asm.js
self.emcc_args += ['-g']
self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_minimal_too_many_errors.c')).read(),
expected_output='ubsan: add-overflow\n' * 20 + 'ubsan: too many errors\n')
@no_wasm2js('TODO: sanitizers in wasm2js')
@no_fastcomp('ubsan not supported on fastcomp')
@no_asan('-fsanitize-minimal-runtime cannot be used with ASan')
def test_ubsan_minimal_errors_same_place(self):
self.emcc_args += ['-fsanitize=undefined', '-fsanitize-minimal-runtime']
if self.get_setting('WASM') == 0:
if is_optimizing(self.emcc_args):
self.skipTest('test can only be run without optimizations on asm.js')
# Need to use `-g` to get proper line numbers in asm.js
self.emcc_args += ['-g']
self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_minimal_errors_same_place.c')).read(),
expected_output='ubsan: add-overflow\n' * 5)
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_integer': (['-fsanitize=integer'],),
'fsanitize_overflow': (['-fsanitize=signed-integer-overflow'],),
})
@no_fastcomp('ubsan not supported on fastcomp')
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_overflow(self, args):
self.emcc_args += args
self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_full_overflow.c')).read(),
assert_all=True, expected_output=[
"src.cpp:3:5: runtime error: signed integer overflow: 2147483647 + 1 cannot be represented in type 'int'",
"src.cpp:7:7: runtime error: signed integer overflow: 2147483647 + 1 cannot be represented in type 'int'",
])
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_return': (['-fsanitize=return'],),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
@no_fastcomp('ubsan not supported on fastcomp')
def test_ubsan_full_no_return(self, args):
self.emcc_args += ['-Wno-return-type'] + args
self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_full_no_return.c')).read(),
expected_output='src.cpp:1:5: runtime error: execution reached the end of a value-returning function without returning a value', assert_returncode=None)
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_integer': (['-fsanitize=integer'],),
'fsanitize_shift': (['-fsanitize=shift'],),
})
@no_fastcomp('ubsan not supported on fastcomp')
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_left_shift(self, args):
self.emcc_args += args
self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_full_left_shift.c')).read(),
assert_all=True, expected_output=[
'src.cpp:3:5: runtime error: left shift of negative value -1',
"src.cpp:7:5: runtime error: left shift of 16 by 29 places cannot be represented in type 'int'"
])
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_null': (['-fsanitize=null'],),
})
@no_fastcomp('ubsan not supported on fastcomp')
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_null_ref(self, args):
self.emcc_args += args
self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_full_null_ref.cpp')).read(),
assert_all=True, expected_output=[
"src.cpp:3:12: runtime error: reference binding to null pointer of type 'int'",
"src.cpp:4:13: runtime error: reference binding to null pointer of type 'int'",
"src.cpp:5:14: runtime error: reference binding to null pointer of type 'int'",
])
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_vptr': (['-fsanitize=vptr'],),
})
@no_fastcomp('ubsan not supported on fastcomp')
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_static_cast(self, args):
self.emcc_args += args
self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_full_static_cast.cpp')).read(),
assert_all=True, expected_output=[
"src.cpp:18:10: runtime error: downcast of address",
"which does not point to an object of type 'R'",
])
@parameterized({
'g': ('-g', [
"src.cpp:3:12: runtime error: reference binding to null pointer of type 'int'",
'in main',
]),
'g4': ('-g4', [
"src.cpp:3:12: runtime error: reference binding to null pointer of type 'int'",
'in main ',
'src.cpp:3:8'
]),
})
@no_fastcomp('ubsan not supported on fastcomp')
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_stack_trace(self, g_flag, expected_output):
self.emcc_args += ['-fsanitize=null', g_flag, '-s', 'ALLOW_MEMORY_GROWTH=1']
if g_flag == '-g4':
if not self.get_setting('WASM'):
self.skipTest('wasm2js has no source map support')
elif '-Oz' in self.emcc_args:
self.skipTest('-Oz breaks stack traces')
def modify_env(filename):
with open(filename) as f:
contents = f.read()
contents = 'Module = {UBSAN_OPTIONS: "print_stacktrace=1"};' + contents
with open(filename, 'w') as f:
f.write(contents)
self.do_run(open(path_from_root('tests', 'core', 'test_ubsan_full_null_ref.cpp')).read(),
post_build=modify_env, assert_all=True, expected_output=expected_output)
def test_template_class_deduction(self):
self.emcc_args += ['-std=c++17']
self.do_run_in_out_file_test('tests', 'core', 'test_template_class_deduction')
@parameterized({
'c': ['test_asan_no_error.c'],
'cpp': ['test_asan_no_error.cpp'],
})
@no_fastcomp('asan not supported on fastcomp')
def test_asan_no_error(self, name):
self.emcc_args += ['-fsanitize=address', '-s', 'ALLOW_MEMORY_GROWTH=1']
self.do_run(open(path_from_root('tests', 'core', name)).read(),
basename=name, expected_output=[''], assert_returncode=None)
# note: these tests have things like -fno-builtin-memset in order to avoid
# clang optimizing things away. for example, a memset might be optimized into
# stores, and then the stores identified as dead, which leaves nothing for
# asan to test. here we want to test asan itself, so we work around that.
@parameterized({
'use_after_free_c': ('test_asan_use_after_free.c', [
'AddressSanitizer: heap-use-after-free on address',
]),
'use_after_free_cpp': ('test_asan_use_after_free.cpp', [
'AddressSanitizer: heap-use-after-free on address',
]),
'use_after_return': ('test_asan_use_after_return.c', [
'AddressSanitizer: stack-use-after-return on address',
], ['-Wno-return-stack-address']),
'static_buffer_overflow': ('test_asan_static_buffer_overflow.c', [
'AddressSanitizer: global-buffer-overflow on address',
], ['-fno-builtin-memset']),
'heap_buffer_overflow_c': ('test_asan_heap_buffer_overflow.c', [
'AddressSanitizer: heap-buffer-overflow on address',
], ['-fno-builtin-memset']),
'heap_buffer_overflow_cpp': ('test_asan_heap_buffer_overflow.cpp', [
'AddressSanitizer: heap-buffer-overflow on address',
], ['-fno-builtin-memset']),
'stack_buffer_overflow': ('test_asan_stack_buffer_overflow.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'stack_buffer_overflow_js': ('test_asan_stack_buffer_overflow_js.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'bitfield_unround_size': ('test_asan_bitfield_unround_size.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'bitfield_unround_offset': ('test_asan_bitfield_unround_offset.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'bitfield_round': ('test_asan_bitfield_round.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'memset_null': ('test_asan_memset_null.c', [
'AddressSanitizer: null-pointer-dereference on address 0x00000001'
], ['-fno-builtin-memset']),
'memset_freed': ('test_asan_memset_freed.c', [
'AddressSanitizer: heap-use-after-free on address'
], ['-fno-builtin-memset']),
'strcpy': ('test_asan_strcpy.c', [
'AddressSanitizer: heap-buffer-overflow on address'
], ['-fno-builtin-strcpy']),
'memcpy': ('test_asan_memcpy.c', [
'AddressSanitizer: heap-buffer-overflow on address'
], ['-fno-builtin-memcpy']),
'memchr': ('test_asan_memchr.c', [
'AddressSanitizer: global-buffer-overflow on address'
], ['-fno-builtin-memchr']),
'vector': ('test_asan_vector.cpp', [
'AddressSanitizer: container-overflow on address'
]),
})
@no_fastcomp('asan not supported on fastcomp')
def test_asan(self, name, expected_output, cflags=None):
if '-Oz' in self.emcc_args:
self.skipTest('-Oz breaks source maps')
if not self.get_setting('WASM'):
self.skipTest('wasm2js has no ASan support')
self.emcc_args += ['-fsanitize=address', '-s', 'ALLOW_MEMORY_GROWTH=1']
if cflags:
self.emcc_args += cflags
self.do_run(open(path_from_root('tests', 'core', name)).read(),
basename='src.c' if name.endswith('.c') else 'src.cpp',
expected_output=expected_output, assert_all=True,
check_for_error=False, assert_returncode=None)
@no_wasm2js('TODO: ASAN in wasm2js')
@no_fastcomp('asan not supported on fastcomp')
def test_asan_js_stack_op(self):
self.emcc_args += ['-fsanitize=address', '-s', 'ALLOW_MEMORY_GROWTH=1']
self.do_run(open(path_from_root('tests', 'core', 'test_asan_js_stack_op.c')).read(),
basename='src.c', expected_output='Hello, World!')
@no_fastcomp('WASM backend stack protection')
def test_safe_stack(self):
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.set_setting('TOTAL_STACK', 65536)
self.do_run(open(path_from_root('tests', 'core', 'test_safe_stack.c')).read(),
expected_output=['abort(stack overflow)', '__handle_stack_overflow'], assert_returncode=None)
@no_fastcomp('WASM backend stack protection')
def test_safe_stack_alloca(self):
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.set_setting('TOTAL_STACK', 65536)
self.do_run(open(path_from_root('tests', 'core', 'test_safe_stack_alloca.c')).read(),
expected_output=['abort(stack overflow)', '__handle_stack_overflow'], assert_returncode=None)
@needs_dlfcn
@no_fastcomp('WASM backend stack protection')
def test_safe_stack_dylink(self):
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.set_setting('TOTAL_STACK', 65536)
self.dylink_test(r'''
#include <stdio.h>
extern void sidey();
int main() {
sidey();
}
''', '''
#include <string.h>
int f(int *b) {
int a[64];
memset(b, 0, 2048 * sizeof(int));
return f(a);
}
void sidey() {
int a[2048];
f(a);
}
''', ['abort(stack overflow)', '__handle_stack_overflow'], assert_returncode=None)
def test_fpic_static(self):
self.emcc_args.append('-fPIC')
self.do_run_in_out_file_test('tests', 'core', 'test_hello_world')
@node_pthreads
def test_pthreads_create(self):
def test():
self.do_run_in_out_file_test('tests', 'core', 'pthread', 'create')
test()
# with a pool, we can synchronously depend on workers being available
self.set_setting('PTHREAD_POOL_SIZE', '2')
self.emcc_args += ['-DPOOL']
test()
@no_fastcomp('new wasm backend atomics')
def test_emscripten_atomics_stub(self):
self.do_run_in_out_file_test('tests', 'core', 'pthread', 'emscripten_atomics')
@no_asan('incompatibility with atomics')
@no_fastcomp('new wasm backend atomics')
@node_pthreads
def test_emscripten_atomics(self):
self.set_setting('USE_PTHREADS', '1')
self.do_run_in_out_file_test('tests', 'core', 'pthread', 'emscripten_atomics')
# Tests the emscripten_get_exported_function() API.
def test_emscripten_get_exported_function(self):
# Could also test with -s ALLOW_TABLE_GROWTH=1
self.set_setting('RESERVED_FUNCTION_POINTERS', 2)
self.emcc_args += ['-lexports.js']
self.do_run_in_out_file_test('tests', 'core', 'test_get_exported_function')
# Tests the emscripten_get_exported_function() API.
def test_minimal_runtime_emscripten_get_exported_function(self):
# Could also test with -s ALLOW_TABLE_GROWTH=1
self.set_setting('RESERVED_FUNCTION_POINTERS', 2)
self.emcc_args += ['-lexports.js', '-s', 'MINIMAL_RUNTIME=1']
self.do_run_in_out_file_test('tests', 'core', 'test_get_exported_function')
# Marked as impure since the WASI reactor modules (modules without main)
  # are not yet supported by the wasm engines we test against.
@also_with_impure_standalone_wasm
def test_undefined_main(self):
# Traditionally in emscripten we allow main to be undefined. This allows programs with a main
# and libraries without a main to be compiled identically.
# However we are trying to move away from that model to a more explicit opt-out model. See:
# https://github.com/emscripten-core/emscripten/issues/9640
if not self.get_setting('LLD_REPORT_UNDEFINED') and not self.get_setting('STRICT') and not self.get_setting('STANDALONE_WASM'):
self.do_run_in_out_file_test('tests', 'core', 'test_ctors_no_main')
# Disabling IGNORE_MISSING_MAIN should cause link to fail due to missing main
self.set_setting('IGNORE_MISSING_MAIN', 0)
err = self.expect_fail([EMCC, path_from_root('tests', 'core', 'test_ctors_no_main.cpp')] + self.get_emcc_args())
self.assertContained('error: entry symbol not defined (pass --no-entry to suppress): main', err)
    # If we pass --no-entry or set EXPORTED_FUNCTIONS to empty, we should never see any errors
self.emcc_args.append('--no-entry')
self.do_run_in_out_file_test('tests', 'core', 'test_ctors_no_main')
self.emcc_args.remove('--no-entry')
self.set_setting('EXPORTED_FUNCTIONS', [])
self.do_run_in_out_file_test('tests', 'core', 'test_ctors_no_main')
# Tests the operation of API found in #include <emscripten/math.h>
def test_emscripten_math(self):
self.do_run_in_out_file_test('tests', 'core', 'test_emscripten_math')
# Generate tests for everything
def make_run(name, emcc_args, settings=None, env=None):
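  # Dynamically create a TestCoreBase subclass that runs the core tests with the
  # given emcc args, settings and environment variables.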
if env is None:
env = {}
if settings is None:
settings = {}
TT = type(name, (TestCoreBase,), dict(run_name=name, env=env, __module__=__name__)) # noqa
def tearDown(self):
try:
super(TT, self).tearDown()
finally:
for k, v in self.env.items():
del os.environ[k]
TT.tearDown = tearDown
def setUp(self):
super(TT, self).setUp()
for k, v in self.env.items():
assert k not in os.environ, k + ' should not be in environment'
os.environ[k] = v
os.chdir(self.get_dir()) # Ensure the directory exists and go there
for k, v in settings.items():
self.set_setting(k, v)
self.emcc_args += emcc_args
# avoid various compiler warnings in our test output
self.emcc_args += [
'-Wno-dynamic-class-memaccess', '-Wno-format',
'-Wno-format-extra-args', '-Wno-format-security',
'-Wno-pointer-bool-conversion', '-Wno-unused-volatile-lvalue',
'-Wno-c++11-compat-deprecated-writable-strings',
'-Wno-invalid-pp-token', '-Wno-shift-negative-value'
]
TT.setUp = setUp
return TT
# Main asm.js test modes
if not shared.Settings.WASM_BACKEND:
asm0 = make_run('asm0', emcc_args=[], settings={'ASM_JS': 2, 'WASM': 0})
asm2 = make_run('asm2', emcc_args=['-O2'], settings={'WASM': 0})
asm3 = make_run('asm3', emcc_args=['-O3'], settings={'WASM': 0})
asm2g = make_run('asm2g', emcc_args=['-O2', '-g'], settings={'WASM': 0, 'ASSERTIONS': 1, 'SAFE_HEAP': 1})
# Main wasm test modes
wasm0 = make_run('wasm0', emcc_args=['-O0'])
wasm0g = make_run('wasm0g', emcc_args=['-O0', '-g'])
wasm1 = make_run('wasm1', emcc_args=['-O1'])
wasm2 = make_run('wasm2', emcc_args=['-O2'])
wasm2g = make_run('wasm2g', emcc_args=['-O2', '-g'])
wasm3 = make_run('wasm3', emcc_args=['-O3'])
wasms = make_run('wasms', emcc_args=['-Os'])
wasmz = make_run('wasmz', emcc_args=['-Oz'])
wasmlto0 = make_run('wasmlto0', emcc_args=['-flto', '-O0', '--llvm-lto', '1'])
wasmlto1 = make_run('wasmlto1', emcc_args=['-flto', '-O1', '--llvm-lto', '1'])
wasmlto2 = make_run('wasmlto2', emcc_args=['-flto', '-O2', '--llvm-lto', '1'])
wasmlto3 = make_run('wasmlto3', emcc_args=['-flto', '-O3', '--llvm-lto', '1'])
wasmltos = make_run('wasmltos', emcc_args=['-flto', '-Os', '--llvm-lto', '1'])
wasmltoz = make_run('wasmltoz', emcc_args=['-flto', '-Oz', '--llvm-lto', '1'])
if shared.Settings.WASM_BACKEND:
wasm2js0 = make_run('wasm2js0', emcc_args=['-O0'], settings={'WASM': 0})
wasm2js1 = make_run('wasm2js1', emcc_args=['-O1'], settings={'WASM': 0})
wasm2js2 = make_run('wasm2js2', emcc_args=['-O2'], settings={'WASM': 0})
wasm2js3 = make_run('wasm2js3', emcc_args=['-O3'], settings={'WASM': 0})
wasm2jss = make_run('wasm2jss', emcc_args=['-Os'], settings={'WASM': 0})
wasm2jsz = make_run('wasm2jsz', emcc_args=['-Oz'], settings={'WASM': 0})
# Secondary test modes - run directly when there is a specific need
# features
simd2 = make_run('simd2', emcc_args=['-O2', '-msimd128'])
bulkmem2 = make_run('bulkmem2', emcc_args=['-O2', '-mbulk-memory'])
# asm.js
asm2f = make_run('asm2f', emcc_args=['-Oz', '-Wno-almost-asm'], settings={'PRECISE_F32': 1, 'ALLOW_MEMORY_GROWTH': 1, 'WASM': 0})
asm2nn = make_run('asm2nn', emcc_args=['-O2'], settings={'WASM': 0}, env={'EMCC_NATIVE_OPTIMIZER': '0'})
# wasm
wasm2s = make_run('wasm2s', emcc_args=['-O2'], settings={'SAFE_HEAP': 1})
wasm2ss = make_run('wasm2ss', emcc_args=['-O2'], settings={'STACK_OVERFLOW_CHECK': 2})
# Add DEFAULT_TO_CXX=0
strict = make_run('strict', emcc_args=[], settings={'STRICT': 1})
if shared.Settings.WASM_BACKEND:
lsan = make_run('lsan', emcc_args=['-fsanitize=leak'], settings={'ALLOW_MEMORY_GROWTH': 1})
asan = make_run('asan', emcc_args=['-fsanitize=address'], settings={'ALLOW_MEMORY_GROWTH': 1, 'ASAN_SHADOW_SIZE': 128 * 1024 * 1024})
asani = make_run('asani', emcc_args=['-fsanitize=address', '--pre-js', os.path.join(os.path.dirname(__file__), 'asan-no-leak.js')],
settings={'ALLOW_MEMORY_GROWTH': 1})
# Experimental modes (not tested by CI)
lld = make_run('lld', emcc_args=[], settings={'LLD_REPORT_UNDEFINED': 1})
# TestCoreBase is just a shape for the specific subclasses, we don't test it itself
del TestCoreBase # noqa
| 35.728547 | 545 | 0.628471 | [
"MIT"
] | Lectem/emscripten | tests/test_core.py | 313,518 | Python |
"""
License:
This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""
from collections.abc import MutableMapping
import posixpath
import boto3
import botocore
from botocore.exceptions import ClientError
from s3fs import S3FileSystem
from hub.exceptions import S3Exception
from hub.log import logger
class S3Storage(MutableMapping):
def __init__(
self,
s3fs: S3FileSystem,
url: str = None,
public=False,
aws_access_key_id=None,
aws_secret_access_key=None,
aws_session_token=None,
parallel=25,
endpoint_url=None,
aws_region=None,
):
self.s3fs = s3fs
self.root = {}
self.url = url
self.public = public
self.parallel = parallel
self.aws_region = aws_region
self.endpoint_url = endpoint_url
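        # url is expected to look like s3://<bucket>/<key prefix>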
self.bucket = url.split("/")[2]
self.path = "/".join(url.split("/")[3:])
if self.bucket == "s3:":
            # FIXME: this is the Wasabi case; the url is probably something like wasabi://s3://...
self.bucket = url.split("/")[4]
self.path = "/".join(url.split("/")[5:])
self.bucketpath = posixpath.join(self.bucket, self.path)
self.protocol = "object"
client_config = botocore.config.Config(
max_pool_connections=parallel,
)
self.client = boto3.client(
"s3",
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
config=client_config,
endpoint_url=endpoint_url,
region_name=aws_region,
)
self.resource = boto3.resource(
"s3",
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
config=client_config,
endpoint_url=endpoint_url,
region_name=aws_region,
)
def __setitem__(self, path, content):
try:
path = posixpath.join(self.path, path)
content = bytearray(memoryview(content))
attrs = {
"Bucket": self.bucket,
"Body": content,
"Key": path,
"ContentType": ("application/octet-stream"),
}
self.client.put_object(**attrs)
except Exception as err:
logger.error(err)
raise S3Exception(err)
def __getitem__(self, path):
try:
path = posixpath.join(self.path, path)
resp = self.client.get_object(
Bucket=self.bucket,
Key=path,
)
x = resp["Body"].read()
return x
except ClientError as err:
if err.response["Error"]["Code"] == "NoSuchKey":
raise KeyError(err)
else:
raise
except Exception as err:
logger.error(err)
raise S3Exception(err)
def __delitem__(self, path):
try:
path = posixpath.join(self.bucketpath, path)
self.s3fs.rm(path, recursive=True)
except Exception as err:
logger.error(err)
raise S3Exception(err)
def __len__(self):
return len(self.s3fs.ls(self.bucketpath, detail=False, refresh=True))
def __iter__(self):
items = self.s3fs.ls(self.bucketpath, detail=False, refresh=True)
yield from [item[len(self.bucketpath) + 1 :] for item in items]
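# Example usage (illustrative sketch only; the bucket name, prefix and credential
# placeholders below are assumptions, not part of this module):
#
#     fs = S3FileSystem(key="<aws_access_key_id>", secret="<aws_secret_access_key>")
#     storage = S3Storage(fs, url="s3://my-bucket/my-dataset",
#                         aws_access_key_id="<aws_access_key_id>",
#                         aws_secret_access_key="<aws_secret_access_key>")
#     storage["chunk0"] = b"\x00" * 16   # put_object under my-dataset/chunk0
#     payload = storage["chunk0"]        # get_object returns the bytes back
#     del storage["chunk0"]              # removes the object via s3fs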
| 31.125 | 110 | 0.578581 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | DebadityaPal/Hub | hub/store/s3_storage.py | 3,735 | Python |
"""
Implementation of the `CID spec <https://github.com/multiformats/cid>`_.
This module differs from other modules of :mod:`~multiformats`, in that the functionality is completely
encapsulated by a single class :class:`CID`, which is imported from top level instead
of the module itself:
>>> from multiformats import CID
"""
from typing import Any, cast, FrozenSet, Tuple, Type, TypeVar, Union
from typing_extensions import Literal, Final
from typing_validation import validate
from bases import base58btc
from multiformats import varint, multicodec, multibase, multihash
from multiformats.multicodec import Multicodec
from multiformats.multibase import Multibase
from multiformats.multihash import Multihash, _validate_raw_digest_size
from multiformats.varint import BytesLike, byteslike
_CIDSubclass = TypeVar("_CIDSubclass", bound="CID")
CIDVersion = Literal[0, 1]
CIDVersionNumbers: Final[FrozenSet[int]] = frozenset({0, 1})
def _binary_cid_from_str(cid: str) -> Tuple[bytes, Multibase]:
if len(cid) == 46 and cid.startswith("Qm"):
# CIDv0 to be decoded as base58btc
return base58btc.decode(cid), multibase.get("base58btc")
mb, b = multibase.decode_raw(cid)
if b[0] == 0x12:
# CIDv0 may not be multibase encoded (0x12 is the first byte of sha2-256 multihashes)
# CIDv18 (first byte 18=0x12) will be skipped to prevent ambiguity
raise ValueError("CIDv0 may not be multibase encoded (found multibase encoded bytes starting with 0x12).")
return b, mb
def _CID_validate_multibase(base: Union[str, Multibase]) -> Multibase:
if isinstance(base, str):
base = multibase.get(base)
else:
multibase.validate_multibase(base)
return base
def _CID_validate_multicodec(codec: Union[str, int, Multicodec]) -> Multicodec:
if isinstance(codec, str):
codec = multicodec.get(codec)
elif isinstance(codec, int):
codec = multicodec.get(code=codec)
else:
multicodec.validate_multicodec(codec)
return codec
def _CID_validate_multihash(hashfun: Union[str, int, Multihash]) -> Multihash:
if isinstance(hashfun, str):
hashfun = multihash.get(hashfun)
elif isinstance(hashfun, int):
hashfun = multihash.get(code=hashfun)
else:
pass
return hashfun
def _CID_validate_raw_digest(raw_digest: Union[str, BytesLike], hashfun: Multihash) -> bytes:
if isinstance(raw_digest, str):
raw_digest = bytes.fromhex(raw_digest)
else:
validate(raw_digest, BytesLike)
if not isinstance(raw_digest, bytes):
raw_digest = bytes(raw_digest)
_, max_digest_size = hashfun.implementation
_validate_raw_digest_size(hashfun.name, raw_digest, max_digest_size)
return raw_digest
def _CID_validate_multihash_digest(digest: Union[str, BytesLike]) -> Tuple[Multihash, bytes]:
if isinstance(digest, str):
digest = bytes.fromhex(digest)
raw_digest: BytesLike
code, raw_digest = multihash.unwrap_raw(digest)
hashfun = _CID_validate_multihash(code)
raw_digest = _CID_validate_raw_digest(raw_digest, hashfun)
return hashfun, raw_digest
def _CID_validate_version(version: int, base: Multibase, codec: Multicodec, hashfun: Multihash) -> int:
if version in (2, 3):
raise ValueError("CID versions 2 and 3 are reserved for future use.")
if version not in (0, 1):
raise ValueError(f"CID version {version} is not allowed.")
if version == 0:
if base.name != 'base58btc':
raise ValueError(f"CIDv0 multibase must be 'base58btc', found {repr(base.name)} instead.")
if codec.name != "dag-pb":
raise ValueError(f"CIDv0 multicodec must be 'dag-pb', found {repr(codec.name)} instead.")
if hashfun.name != "sha2-256":
raise ValueError(f"CIDv0 multihash must be 'sha2-256', found {repr(hashfun.name)} instead.")
return version
class CID:
"""
Container class for `Content IDentifiers <https://github.com/multiformats/cid>`_.
CIDs can be explicitly instantiated by passing multibase, CID version, multicodec and multihash digest to the constructor:
>>> cid = CID("base58btc", 1, "raw",
... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> str(cid)
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
Alternatively, a pair of multihash codec and raw hash digest can be passed in lieu of the multihash digest:
>>> raw_digest = bytes.fromhex(
... "6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> cid = CID("base58btc", 1, "raw", ("sha2-256", raw_digest))
>>> str(cid)
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
The multihash digest and raw digest values can be passed either as :obj:`bytes`-like objects or as the corresponding hex strings:
>>> isinstance(raw_digest, bytes)
True
>>> raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
Note: the hex strings are not multibase encoded.
Calling :obj:`bytes` on an instance of this class returns its binary representation, as a :obj:`bytes` object:
>>> cid = CID("base58btc", 1, "raw",
... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
>>> bytes(cid).hex()
'015512206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
#^^ 0x01 = CIDv1
# ^^ 0x55 = 'raw' codec
:param base: default multibase to use when encoding this CID
:type base: :obj:`str` or :class:`~multiformats.multibase.Multibase`
:param version: the CID version
:type version: 0 or 1
:param codec: the content multicodec
:type codec: :obj:`str`, :obj:`int` or :class:`~multiformats.multicodec.Multicodec`
:param digest: the content multihash digest, or a pair of multihash codec and raw content digest
:type digest: see below
The ``digest`` parameter can be specified in the following ways:
- as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex`
- as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly
- as a pair ``(multihash_codec, raw_digest)`` of a multihash and raw hash digest, which are used to produce a multihash digest
      via the :meth:`~multiformats.multihash.Multihash.wrap` method.
If ``digest`` is specified by a pair, the ``multihash_codec`` value can be specified in the following ways:
- by multihash multicodec name, as a :obj:`str`
- by multihash multicodec code, as a :obj:`int`
- as a :class:`~multiformats.multihash.Multihash` object
If ``digest`` is specified by a pair, the ``raw_digest`` value can be specified in the following ways:
- as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex`
- as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly
:raises ValueError: if the CID version is unsupported
:raises ValueError: if version is 0 but base is not 'base58btc' or codec is not 'dag-pb'
:raises KeyError: if the multibase, multicodec or multihash are unknown
"""
_base: Multibase
_version: CIDVersion
_codec: Multicodec
_hashfun: Multihash
_digest: bytes
__slots__ = ("__weakref__", "_base", "_version", "_codec", "_hashfun", "_digest")
def __new__(cls: Type[_CIDSubclass],
base: Union[str, Multibase],
version: int,
codec: Union[str, int, Multicodec],
digest: Union[str, BytesLike, Tuple[Union[str, int, Multihash], Union[str, BytesLike]]],
) -> _CIDSubclass:
# pylint: disable = too-many-arguments
base = _CID_validate_multibase(base)
codec = _CID_validate_multicodec(codec)
raw_digest: Union[str, bytes]
hashfun: Union[str, int, Multihash]
if isinstance(digest, (str,)+byteslike):
hashfun, raw_digest = _CID_validate_multihash_digest(digest)
else:
validate(digest, Tuple[Union[str, int, Multihash], Union[str, BytesLike]])
hashfun, raw_digest = digest
hashfun = _CID_validate_multihash(hashfun)
raw_digest = _CID_validate_raw_digest(raw_digest, hashfun)
version = _CID_validate_version(version, base, codec, hashfun)
if isinstance(digest, bytes):
return CID._new_instance(cls, base, version, codec, hashfun, digest)
return CID._new_instance(cls, base, version, codec, hashfun, (hashfun, raw_digest))
@staticmethod
def _new_instance(CID_subclass: Type[_CIDSubclass],
base: Multibase,
version: int,
codec: Multicodec,
hashfun: Multihash,
digest: Union[bytes, Tuple[Multihash, bytes]],
) -> _CIDSubclass:
# pylint: disable = too-many-arguments
instance: _CIDSubclass = super().__new__(CID_subclass)
instance._base = base
assert version in (0, 1)
instance._version = cast(Literal[0, 1], version)
instance._codec = codec
instance._hashfun = hashfun
if isinstance(digest, bytes):
instance._digest = digest
elif isinstance(digest, byteslike):
instance._digest = bytes(digest)
else:
_hashfun, raw_digest = digest
if not isinstance(raw_digest, bytes):
raw_digest = bytes(raw_digest)
assert _hashfun == hashfun, "You passed different multihashes to a _new_instance call with digest as a pair."
instance._digest = hashfun.wrap(raw_digest)
return instance
@property
def version(self) -> CIDVersion:
"""
CID version.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.version
1
"""
return self._version
@property
def base(self) -> Multibase:
"""
Multibase used to encode the CID:
- if a CIDv1 was decoded from a multibase-encoded string, the encoding multibase is used
- if a CIDv1 was decoded from a bytestring, the 'base58btc' multibase is used
- for a CIDv0, 'base58btc' is always used
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.base
Multibase(name='base58btc', code='z',
status='default', description='base58 bitcoin')
"""
return self._base
@property
def codec(self) -> Multicodec:
"""
Codec that the multihash digest refers to.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.codec
Multicodec(name='raw', tag='ipld', code='0x55',
status='permanent', description='raw binary')
"""
return self._codec
@property
def hashfun(self) -> Multihash:
"""
Multihash used to produce the multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.hashfun
Multicodec(name='sha2-256', tag='multihash', code='0x12',
status='permanent', description='')
"""
return self._hashfun
@property
def digest(self) -> bytes:
"""
Multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.digest.hex()
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
"""
return self._digest
@property
def raw_digest(self) -> bytes:
"""
Raw hash digest, decoded from the multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
"""
return multihash.unwrap(self._digest)
@property
def human_readable(self) -> str:
"""
Human-readable representation of the CID.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.human_readable
'base58btc - cidv1 - raw - (sha2-256 : 256 : 6E6FF7950A36187A801613426E858DCE686CD7D7E3C0FC42EE0330072D245C95)'
"""
raw_digest = self.raw_digest
hashfun_str = f"({self.hashfun.name} : {len(raw_digest)*8} : {raw_digest.hex().upper()})"
return f"{self.base.name} - cidv{self.version} - {self.codec.name} - {hashfun_str}"
def encode(self, base: Union[None, str, Multibase] = None) -> str:
"""
Encodes the CID using a given multibase. If :obj:`None` is given,
the CID's own multibase is used by default.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.encode() # default: cid.base
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
>>> cid.encode("base32")
'bafkreidon73zkcrwdb5iafqtijxildoonbwnpv7dyd6ef3qdgads2jc4su'
:param base: the multibase to be used for encoding
:type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*
:raises KeyError: see :meth:`multiformats.multibase.Multibase.encode`
"""
if self.version == 0:
if base is not None:
raise ValueError("CIDv0 cannot be multibase-encoded, please set multibase=None.")
return base58btc.encode(bytes(self))
if base is None or base == self.base:
base = self.base # use CID's own multibase as default
else:
if isinstance(base, str):
base = multibase.get(base)
else:
multibase.validate_multibase(base)
return base.encode(bytes(self))
def set(self, *,
base: Union[None, str, Multibase] = None,
version: Union[None, int] = None,
codec: Union[None, str, int, Multicodec] = None
) -> "CID":
"""
Returns a new CID obtained by setting new values for one or more of:
``base``, ``version``, or ``codec``.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(base="base32")
CID('base32', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(codec="dag-cbor")
CID('base58btc', 1, 'dag-cbor',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0, codec="dag-pb")
CID('base58btc', 0, 'dag-pb',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
# Note: 'CID.set' returns new instances,
# the original 'cid' instance is unchanged
If setting ``version`` to 0, ``base`` must be 'base58btc' and ``codec`` must be 'dag-pb'.
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0, codec="dag-pb")
CID('base58btc', 0, 'dag-pb',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0)
ValueError: CIDv0 multicodec must be 'dag-pb', found 'raw' instead.
>>> cid.set(version=0, codec="dag-pb", base="base32")
ValueError: CIDv0 multibase must be 'base58btc', found 'base32' instead
:param base: the new CID multibase, or :obj:`None` if multibase unchanged
:type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*
:param version: the new CID version, or :obj:`None` if version unchanged
:type version: :obj:`None`, 0 or 1, *optional*
:param codec: the new content multicodec, or :obj:`None` if multicodec unchanged
:type codec: :obj:`None`, :obj:`str` or :class:`~multiformats.multicodec.Multicodec`, *optional*
:raises KeyError: if the multibase or multicodec are unknown
"""
hashfun = self.hashfun
digest = self.digest
if base is not None and base not in (self.base, self.base.name):
base = _CID_validate_multibase(base)
else:
base = self.base
if codec is not None and codec not in (self.codec, self.codec.name, self.codec.code):
codec = _CID_validate_multicodec(codec)
else:
codec = self.codec
if version is not None and version != self.version:
_CID_validate_version(version, base, codec, hashfun)
else:
version = self.version
return CID._new_instance(CID, base, version, codec, hashfun, digest)
def __bytes__(self) -> bytes:
if self.version == 0:
return self.digest
return varint.encode(self.version)+varint.encode(self.codec.code)+self.digest
def __str__(self) -> str:
return self.encode()
def __repr__(self) -> str:
mb = self.base.name
v = self.version
mc = self.codec.name
d = self.digest
return f"CID({repr(mb)}, {v}, {repr(mc)}, {repr(d.hex())})"
@property
def _as_tuple(self) -> Tuple[Type["CID"], int, Multicodec, bytes]:
return (CID, self.version, self.codec, self.digest)
def __hash__(self) -> int:
return hash(self._as_tuple)
def __eq__(self, other: Any) -> bool:
if self is other:
return True
if not isinstance(other, CID):
return NotImplemented
return self._as_tuple == other._as_tuple
@staticmethod
def decode(cid: Union[str, BytesLike]) -> "CID":
"""
Decodes a CID from a bytestring or a hex string (which will be converted to :obj:`bytes`
using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded.
Example usage for CIDv1 multibase-encoded string:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> CID.decode(s)
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
Example usage for CIDv1 bytestring (multibase always set to 'base58btc'):
>>> b = bytes.fromhex(
... "015512206e6ff7950a36187a801613426e85"
... "8dce686cd7d7e3c0fc42ee0330072d245c95")
>>> CID.decode(b)
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
Example usage for CIDv0 base58-encoded string:
>>> s = "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR"
>>> CID.decode(s)
CID('base58btc', 0, 'dag-pb',
'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a')
Example usage for CIDv0 bytestring (multibase always set to 'base58btc'):
>>> b = bytes.fromhex(
... "1220c3c4733ec8affd06cf9e9ff50ffc6b"
... "cd2ec85a6170004bb709669c31de94391a")
>>> CID.decode(b)
CID('base58btc', 0, 'dag-pb',
'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a')
:param cid: the CID bytes or multibase-encoded string
:type cid: :obj:`str` or :obj:`~multiformats.varint.BytesLike`
:raises ValueError: if the CID is malformed or the CID version is unsupported
:raises KeyError: if the multibase, multicodec or multihash are unknown
"""
if isinstance(cid, str):
cid, mb = _binary_cid_from_str(cid)
else:
mb = multibase.get("base58btc")
validate(cid, BytesLike)
cid = memoryview(cid)
# if len(cid) == 34 and cid.startswith(b"\x12\x20"):
if len(cid) == 34 and cid[0] == 0x12 and cid[1] == 0x20:
v = 0 # CID version
mc_code = 0x70 # multicodec.get("dag-pb")
digest = cid # multihash digest is what's left
else:
v, _, cid = varint.decode_raw(cid) # CID version
if v == 0:
raise ValueError("CIDv0 is malformed.")
if v in (2, 3):
raise ValueError("CID versions 2 and 3 are reserved for future use.")
if v != 1:
raise ValueError(f"CIDv{v} is currently not supported.")
mc_code, _, cid = multicodec.unwrap_raw(cid) # multicodec
digest = cid # multihash digest is what's left
mc = multicodec.get(code=mc_code)
mh_code, _ = multihash.unwrap_raw(digest)
mh = multihash.get(code=mh_code)
return CID._new_instance(CID, mb, v, mc, mh, digest)
@staticmethod
def peer_id(pk_bytes: Union[str, BytesLike]) -> "CID":
"""
Wraps the raw hash of a public key into a `PeerID <https://docs.libp2p.io/concepts/peer-id/>`_, as a CIDv1.
The ``pk_bytes`` argument should be the binary public key, encoded according to the
`PeerID spec <https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md>`_.
This can be passed as a bytestring or as a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`).
Note: the hex string is not multibase encoded.
Example usage with Ed25519 public key:
>>> pk_bytes = bytes.fromhex(
... "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93")
... # a 32-byte Ed25519 public key
>>> peer_id = CID.peer_id(pk_bytes)
>>> peer_id
CID('base32', 1, 'libp2p-key',
'00201498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93')
#^^ 0x00 = 'identity' multihash used (public key length <= 42)
        # ^^ 0x20 = 32-bytes of raw hash digest length
>>> str(peer_id)
'bafzaaiautc2um6td375c3soz4bu4v4dv2fx4gp65jq5qdp5nvzsdg5t5sm'
Snippet showing how to obtain the `Ed25519 <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ed25519/>`_
public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:
>>> from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
>>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
>>> private_key = Ed25519PrivateKey.generate()
>>> public_key = private_key.public_key()
>>> pk_bytes = public_key.public_bytes(
... encoding=Encoding.Raw,
... format=PublicFormat.Raw
... )
>>> pk_bytes.hex()
"1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93"
Example usage with DER-encoded RSA public key:
>>> pk_bytes = bytes.fromhex(
... "30820122300d06092a864886f70d01010105000382010f003082010a02820101"
... "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"
... "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"
... "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"
... "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"
... "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"
... "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"
... "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"
... "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"
... "370203010001")
... # a 294-byte RSA public key
>>> peer_id = CID.peer_id(pk_bytes)
>>> peer_id
CID('base32', 1, 'libp2p-key',
'1220c1a6513ffb14f202f75453c49666a5b9d7ed9a1a068891daf824d477573f829f')
#^^ 0x12 = 'sha2-256' multihash used (public key length > 42)
# ^^ 0x20 = 32-bytes of raw hash digest length
>>> str(peer_id)
'bafzbeigbuzit76yu6ibpovctyslgnjnz27wzugqgrci5v6be2r3vop4ct4'
Snippet showing how to obtain the `RSA <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/>`_
public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:
>>> from cryptography.hazmat.primitives.asymmetric import rsa
>>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
>>> private_key = rsa.generate_private_key(
... public_exponent=65537,
... key_size=2048,
... )
>>> public_key = private_key.public_key()
>>> pk_bytes = public_key.public_bytes(
... encoding=Encoding.DER,
... format=PublicFormat.SubjectPublicKeyInfo
... )
>>> pk_bytes.hex()
"30820122300d06092a864886f70d01010105000382010f003082010a02820101"
"009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"
"5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"
"b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"
"591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"
"26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"
"2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"
"87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"
"6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"
"370203010001"
:param pk_bytes: the public key bytes
:type pk_bytes: :obj:`str` or :obj:`~multiformats.varint.BytesLike`
:raises ValueError: if ``pk_bytes`` is passed as a string and is not the hex-string of some bytes
"""
if isinstance(pk_bytes, str):
pk_bytes = bytes.fromhex(pk_bytes)
else:
validate(pk_bytes, BytesLike)
if len(pk_bytes) <= 42:
mh = multihash.get("identity")
digest = multihash.digest(pk_bytes, mh)
else:
mh = multihash.get("sha2-256")
digest = multihash.digest(pk_bytes, mh)
mc = multicodec.get(code=0x72) # multicodec.get("libp2p-key")
mb = multibase.get("base32")
return CID._new_instance(CID, mb, 1, mc, mh, digest)
| 42.852359 | 137 | 0.627371 | [
"MIT"
] | hashberg-io/multiformats | multiformats/cid/__init__.py | 28,154 | Python |
import argparse
import torch
import os
import numpy as np
import random as rd
from models import GCN
from utils import get_folder_path
from base_solver import BaseSolver
MODEL = 'GCN'
parser = argparse.ArgumentParser()
# Dataset params
parser.add_argument("--dataset", type=str, default='Movielens', help="")
parser.add_argument("--dataset_name", type=str, default='1m', help="")
parser.add_argument("--if_use_features", type=bool, default=False, help="")
parser.add_argument("--num_core", type=int, default=10, help="")
parser.add_argument("--num_feat_core", type=int, default=10, help="")
parser.add_argument("--train_ratio", type=float, default=0.8, help="")
# Model params
parser.add_argument("--dropout", type=float, default=0.5, help="")
parser.add_argument("--emb_dim", type=int, default=64, help="")
parser.add_argument("--repr_dim", type=int, default=16, help="")
parser.add_argument("--hidden_size", type=int, default=64, help="")
# Train params
parser.add_argument("--num_negative_samples", type=int, default=5, help="")
parser.add_argument("--init_eval", type=bool, default=True, help="")
parser.add_argument("--device", type=str, default='cuda', help="")
parser.add_argument("--gpu_idx", type=str, default='0', help="")
parser.add_argument("--runs", type=int, default=100, help="")
parser.add_argument("--epochs", type=int, default=100, help="")
parser.add_argument("--opt", type=str, default='adam', help="")
parser.add_argument("--loss", type=str, default='mse', help="")
parser.add_argument("--batch_size", type=int, default=4, help="")
parser.add_argument("--lr", type=float, default=1e-4, help="")
parser.add_argument("--weight_decay", type=float, default=1e-3, help="")
parser.add_argument("--early_stopping", type=int, default=60, help="")
parser.add_argument("--save_epochs", type=list, default=[10, 40, 80], help="")
parser.add_argument("--save_every_epoch", type=int, default=40, help="")
args = parser.parse_args()
# Setup data and weights file path
data_folder, weights_folder, logger_folder = \
get_folder_path(model=MODEL, dataset=args.dataset + args.dataset_name)
# Setup device
if not torch.cuda.is_available() or args.device == 'cpu':
device = 'cpu'
else:
device = 'cuda:{}'.format(args.gpu_idx)
# Setup args
dataset_args = {
'root': data_folder, 'dataset': args.dataset, 'name': args.dataset_name,
'if_use_features': args.if_use_features,
'num_core': args.num_core, 'num_feat_core': args.num_feat_core,
'train_ratio': args.train_ratio
}
model_args = {
'if_use_features': args.if_use_features,
'emb_dim': args.emb_dim, 'hidden_size': args.hidden_size,
'repr_dim': args.repr_dim, 'dropout': args.dropout
}
train_args = {
'init_eval': args.init_eval, 'num_negative_samples': args.num_negative_samples,
'opt': args.opt, 'loss': args.loss,
'runs': args.runs, 'epochs': args.epochs, 'batch_size': args.batch_size,
'weight_decay': args.weight_decay, 'lr': args.lr, 'device': device,
'weights_folder': os.path.join(weights_folder, str(model_args)),
'logger_folder': os.path.join(logger_folder, str(model_args)),
'save_epochs': args.save_epochs, 'save_every_epoch': args.save_every_epoch
}
print('dataset params: {}'.format(dataset_args))
print('task params: {}'.format(model_args))
print('train params: {}'.format(train_args))
class GCNSolver(BaseSolver):
def __init__(self, GCN, dataset_args, model_args, train_args):
super(GCNSolver, self).__init__(GCN, dataset_args, model_args, train_args)
def prepare_model_input(self, data, if_use_features=False):
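        # Build a symmetric edge_index: stack the edge arrays of all relations, then
        # append a flipped copy so every edge appears in both directions.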
edge_index_np = np.hstack(data.edge_index_nps[0].values())
edge_index_np = np.hstack([edge_index_np, np.flip(edge_index_np, 0)])
edge_index = torch.from_numpy(edge_index_np).long().to(self.train_args['device'])
kwargs = {'edge_index': edge_index}
if if_use_features:
kwargs['x'] = data.x
return kwargs
def train_negative_sampling(self, u_nid, train_pos_unid_inid_map, test_pos_unid_inid_map, neg_unid_inid_map, data):
"""
Unliked popular movie negative sampling:
:param u_nid:
:param train_pos_unid_inid_map:
:param test_pos_unid_inid_map:
:param neg_unid_inid_map:
:param data:
:return:
"""
num_pos_samples = len(train_pos_unid_inid_map[u_nid])
negative_inids = test_pos_unid_inid_map[u_nid] + neg_unid_inid_map[u_nid]
nid_occs = np.array([data.item_nid_occs[0][nid] for nid in negative_inids])
nid_occs = nid_occs / np.sum(nid_occs)
negative_inids = rd.choices(population=negative_inids, weights=nid_occs, k=num_pos_samples * 5)
return negative_inids
def generate_candidates(self, train_pos_unid_inid_map, test_pos_unid_inid_map, neg_unid_inid_map, u_nid):
pos_i_nids = test_pos_unid_inid_map[u_nid]
neg_i_nids = np.array(neg_unid_inid_map[u_nid])
neg_i_nids_indices = np.array(rd.sample(range(neg_i_nids.shape[0]), 99), dtype=int)
return pos_i_nids, list(neg_i_nids[neg_i_nids_indices])
if __name__ == '__main__':
solver = GCNSolver(GCN, dataset_args, model_args, train_args)
solver.run()
| 40.341085 | 119 | 0.710031 | [
"MIT"
] | 356255531/pytorch_geometric | benchmark/recsys/gcn_solver.py | 5,204 | Python |
"""add x,y to markers
Revision ID: 20f14f4f1de7
Revises: 21b54c24a2c8
Create Date: 2018-10-01 23:27:21.307860
"""
# revision identifiers, used by Alembic.
revision = '20f14f4f1de7'
down_revision = '21b54c24a2c8'
branch_labels = None
depends_on = None
import sqlalchemy as sa
from alembic import op
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('markers', sa.Column('x', sa.Float(), nullable=True))
op.add_column('markers', sa.Column('y', sa.Float(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('markers', 'y')
op.drop_column('markers', 'x')
### end Alembic commands ###
| 23.34375 | 71 | 0.682731 | [
"BSD-3-Clause"
] | AdiRoH/anyway | alembic/versions/20f14f4f1de7_add_x_y_to_markers.py | 747 | Python |
def mergeSort(_list):
n = len(_list)
if n > 1:
mid = n // 2 # int
left = _list[:mid]
right = _list[mid:]
mergeSort(left)
mergeSort(right)
i = j = k = 0
        # compare elements of the left and right halves
while i < len(left) and j < len(right):
if left[i] < right[j]: # left right compared
_list[k] = left[i]
i += 1
else:
_list[k] = right[j]
j += 1
k += 1
        # copy any remaining elements from either half
while i < len(left):
_list[k] = left[i]
i += 1
k += 1
while j < len(right):
_list[k] = right[j]
j += 1
k += 1
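# Minimal usage sketch (illustrative example, not part of the original submission):
if __name__ == "__main__":
    data = [5, 2, 9, 1, 7]
    mergeSort(data)  # sorts the list in place
    print(data)  # -> [1, 2, 5, 7, 9]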
| 23.566667 | 57 | 0.35785 | [
"Apache-2.0"
] | sychen6192/Highlevel_Algorithm | sorting/0853426_HW1_merge.py | 735 | Python |
from collections.abc import Generator
x: Generator[str, str, None, None]
| 15 | 37 | 0.76 | [
"BSD-3-Clause"
] | kracekumar/pep585-upgrade | tests/example_files/classvar.py | 75 | Python |
#
# This file is part of SEQGIBBS
# (https://github.com/I-Bouros/seqgibbs.git) which is released
# under the MIT license. See accompanying LICENSE for copyright
# notice and full license details.
#
import unittest
import scipy.stats
import numpy as np
import numpy.testing as npt
import seqgibbs as gibbs
def fun(x):
"""
Function returning the parameters of the normal sampler.
mean = product of elements of x
    variance = exp(sum(x))/(1+exp(sum(x))).
"""
return np.prod(x), np.exp(np.sum(x))/(np.exp(np.sum(x))+1)
def another_fun(x):
"""
Function returning the parameters of the normal sampler.
mean = sum of elements of x
variance = exp(|x|)/(1+exp(|x|)).
"""
return np.sum(x), np.exp(np.sum(x))/(np.exp(np.sum(x))+1)
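# Illustrative values: fun([1.0, 2.0]) -> (2.0, exp(3)/(1 + exp(3)))
# and another_fun([1.0, 2.0]) -> (3.0, exp(3)/(1 + exp(3))).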
class TestSysGibbsAlgoClass(unittest.TestCase):
"""
Test the 'SysGibbsAlgo' class.
"""
def test__init__(self):
sampler = gibbs.SysGibbsAlgo(num_dim=2)
self.assertEqual(sampler.num_dim, 2)
self.assertEqual(len(sampler.one_d_samplers), 0)
self.assertEqual(len(sampler.chain_states), 1)
npt.assert_array_equal(sampler.initial_state, np.zeros(2))
npt.assert_array_equal(sampler.current_state, np.zeros(2))
with self.assertRaises(TypeError):
gibbs.SysGibbsAlgo('0', np.ones(2))
with self.assertRaises(ValueError):
gibbs.SysGibbsAlgo(0, np.ones(2))
with self.assertRaises(ValueError):
gibbs.SysGibbsAlgo(3, np.ones(2))
with self.assertRaises(ValueError):
gibbs.SysGibbsAlgo(2, [[1], [2]])
def test_change_initial_state(self):
sampler = gibbs.SysGibbsAlgo(num_dim=2)
sampler.change_initial_state(new_state=np.array([2, 0]))
npt.assert_array_equal(sampler.initial_state, np.array([2, 0]))
with self.assertRaises(ValueError):
sampler.change_initial_state(new_state=np.array([[1], [2]]))
with self.assertRaises(ValueError):
sampler.change_initial_state(new_state=np.array([1, 2, 0]))
def test_add_1_d_sampler(self):
sampler = gibbs.SysGibbsAlgo(num_dim=2, initial_state=np.array([2, 3]))
new_1_d_sampler = gibbs.OneDimSampler(scipy.stats.norm.rvs, fun)
sampler.add_1_d_sampler(new_1_d_sampler)
self.assertEqual(len(sampler.one_d_samplers), 1)
with self.assertRaises(TypeError):
sampler.add_1_d_sampler(0)
def test_run(self):
sampler = gibbs.SysGibbsAlgo(
num_dim=2, initial_state=np.array([2, 3]))
# Feed in the two partial conditional samplers
first_1_d_sampler = gibbs.OneDimSampler(scipy.stats.norm.rvs, fun)
second_1_d_sampler = gibbs.OneDimSampler(
scipy.stats.norm.rvs, another_fun)
sampler.add_1_d_sampler(first_1_d_sampler)
sampler.add_1_d_sampler(second_1_d_sampler)
# Run 3 complete scan cycles of the algorithm
sampler.run(num_cycles=3)
last_state = sampler.chain_states[-1]
self.assertEqual(len(sampler.chain_states), 4)
self.assertEqual(len(last_state), len(sampler.initial_state))
npt.assert_array_equal(last_state, sampler.current_state)
# Run 3 more complete scan cycles of the algorithm
sampler.run(num_cycles=3, mode='continue')
self.assertEqual(len(sampler.chain_states), 7)
# Rerun for 3 complete scan cycles of the algorithm
sampler.run(num_cycles=3, mode='restart')
self.assertEqual(len(sampler.chain_states), 4)
with self.assertRaises(ValueError):
sampler.run(num_cycles=3, mode='0')
with self.assertRaises(TypeError):
sampler.run(num_cycles=3.5)
with self.assertRaises(ValueError):
sampler.run(num_cycles=0, mode='restart')
class TestRandGibbsAlgoClass(unittest.TestCase):
"""
Test the 'RandGibbsAlgo' class.
"""
def test__init__(self):
sampler = gibbs.RandGibbsAlgo(num_dim=2)
self.assertEqual(sampler.num_dim, 2)
self.assertEqual(len(sampler.one_d_samplers), 0)
self.assertEqual(len(sampler.chain_states), 1)
npt.assert_array_equal(sampler.initial_state, np.zeros(2))
npt.assert_array_equal(sampler.current_state, np.zeros(2))
with self.assertRaises(ValueError):
gibbs.RandGibbsAlgo(3, dimen_prob=np.ones(2))
with self.assertRaises(ValueError):
gibbs.RandGibbsAlgo(2, dimen_prob=[[1], [2]])
def test_change_dimen_prob(self):
sampler = gibbs.RandGibbsAlgo(num_dim=3)
sampler.change_dimen_prob(new_probs=np.array([2, 0, 1]))
npt.assert_array_equal(
sampler.dimen_prob,
np.array([2, 0, 1])/np.sum(np.array([2, 0, 1])))
with self.assertRaises(ValueError):
sampler.change_dimen_prob(new_probs=np.array([[2], [0], [1]]))
with self.assertRaises(ValueError):
sampler.change_dimen_prob(new_probs=np.array([2, 1]))
def test_run(self):
sampler = gibbs.RandGibbsAlgo(
num_dim=2,
initial_state=np.array([2, 3]),
dimen_prob=np.array([2, 5]))
# Feed in the two partial conditional samplers
first_1_d_sampler = gibbs.OneDimSampler(scipy.stats.norm.rvs, fun)
second_1_d_sampler = gibbs.OneDimSampler(
scipy.stats.norm.rvs, another_fun)
sampler.add_1_d_sampler(first_1_d_sampler)
sampler.add_1_d_sampler(second_1_d_sampler)
# Run 3 complete scan cycles of the algorithm
sampler.run(num_cycles=3)
last_state = sampler.chain_states[-1]
self.assertEqual(len(sampler.chain_states), 4)
self.assertEqual(len(last_state), len(sampler.initial_state))
npt.assert_array_equal(last_state, sampler.current_state)
# Run 3 more complete scan cycles of the algorithm
sampler.run(num_cycles=3, mode='continue')
self.assertEqual(len(sampler.chain_states), 7)
# Rerun for 3 complete scan cycles of the algorithm
sampler.run(num_cycles=3, mode='restart')
self.assertEqual(len(sampler.chain_states), 4)
| 34.043956 | 79 | 0.658328 | [
"MIT"
] | I-Bouros/seqgibbs | seqgibbs/tests/test_samplers.py | 6,196 | Python |
"""Implementation of group based authorization API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import grp
import logging
_LOGGER = logging.getLogger(__name__)
def _group(template, resource, action, proid):
"""Render group template."""
return template.format(
resource=resource,
action=action,
proid=proid
)
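# Illustrative rendering with a hypothetical template:
#   _group('{proid}-{resource}-{action}', resource='app', action='create', proid='proid1')
#   returns 'proid1-app-create'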
class API:
"""Group based authorization REST api."""
def __init__(self, **kwargs):
groups = kwargs.get('groups', [])
for group in groups:
_LOGGER.info('Using authorization template: %s', group)
# TODO: add schema validation.
def authorize(user, action, resource, resource_id, payload):
"""Authorize user/action/resource"""
del payload
_LOGGER.info(
'Authorize: %s %s %s %s', user, action, resource, resource_id
)
proid = None
if resource_id:
proid = resource_id.partition('.')[0]
why = []
for group_template in groups:
group_name = _group(
group_template,
action=action,
resource=resource,
proid=proid
)
_LOGGER.info('Check authorization group: %s', group_name)
try:
group = grp.getgrnam(group_name)
username = user.partition('@')[0]
members = group.gr_mem
                    if username in members:
                        _LOGGER.info(
                            'Authorized: User %s is member of %s.',
                            username,
                            group_name
                        )
                        return True, why
else:
why.append(
'{} not member of {}'.format(
username,
group_name
)
)
except KeyError:
_LOGGER.info('Group does not exist: %s', group_name)
why.append('no such group: {}'.format(group_name))
return False, why
self.authorize = authorize
| 29.469136 | 77 | 0.485547 | [
"Apache-2.0"
] | bothejjms/treadmill | lib/python/treadmill/api/authz/group.py | 2,387 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright 2012-2018 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Python Client Automatically generated with:
# https://github.com/ebi-wp/webservice-clients-generator
#
# EMBOSS pepwindow (REST) web service Python client using xmltramp2.
#
# For further information see:
# https://www.ebi.ac.uk/Tools/webservices/
#
###############################################################################
from __future__ import print_function
import os
import sys
import time
import requests
import platform
from xmltramp2 import xmltramp
from optparse import OptionParser
try:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from urllib.request import __version__ as urllib_version
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError
from urllib2 import __version__ as urllib_version
# allow unicode(str) to be used in python 3
try:
unicode('')
except NameError:
unicode = str
# Base URL for service
baseUrl = u'https://www.ebi.ac.uk/Tools/services/rest/emboss_pepwindow'
version = u'2019-07-03 12:51'
# Set interval for checking status
pollFreq = 3
# Output level
outputLevel = 1
# Debug level
debugLevel = 0
# Number of option arguments.
numOpts = len(sys.argv)
# Process command-line options
parser = OptionParser(add_help_option=False)
# Tool specific options (Try to print all the commands automatically)
parser.add_option('--sequence', type=str, help=('The sequence to be analysed can be entered directly into this form. '
                                                'The sequence can be in GCG, FASTA, PIR, NBRF, PHYLIP or '
                                                'UniProtKB/Swiss-Prot format. Partially formatted sequences are not '
                                                'accepted.'))
parser.add_option('--windowsize', type=int, help=('Window size for averaging (smoothing) the hydropathy plot. Use an '
                                                  'integer between 1 and 200.'))
parser.add_option('--normalize', action='store_true', help=('Normalize data values (mean = 0.0, standard deviation = 1.0)'))
# General options
parser.add_option('-h', '--help', action='store_true', help='Show this help message and exit.')
parser.add_option('--email', help='E-mail address.')
parser.add_option('--title', help='Job title.')
parser.add_option('--outfile', help='File name for results.')
parser.add_option('--outformat', help='Output format for results.')
parser.add_option('--asyncjob', action='store_true', help='Asynchronous mode.')
parser.add_option('--jobid', help='Job identifier.')
parser.add_option('--polljob', action="store_true", help='Get job result.')
parser.add_option('--pollFreq', type='int', default=3, help='Poll frequency in seconds (default 3s).')
parser.add_option('--status', action="store_true", help='Get job status.')
parser.add_option('--resultTypes', action='store_true', help='Get result types.')
parser.add_option('--params', action='store_true', help='List input parameters.')
parser.add_option('--paramDetail', help='Get details for parameter.')
parser.add_option('--quiet', action='store_true', help='Decrease output level.')
parser.add_option('--verbose', action='store_true', help='Increase output level.')
parser.add_option('--version', action='store_true', help='Prints out the version of the Client and exit.')
parser.add_option('--debugLevel', type='int', default=debugLevel, help='Debugging level.')
parser.add_option('--baseUrl', default=baseUrl, help='Base URL for service.')
(options, args) = parser.parse_args()
# Increase output level
if options.verbose:
outputLevel += 1
# Decrease output level
if options.quiet:
outputLevel -= 1
# Debug level
if options.debugLevel:
debugLevel = options.debugLevel
if options.pollFreq:
pollFreq = options.pollFreq
if options.baseUrl:
baseUrl = options.baseUrl
# Debug print
def printDebugMessage(functionName, message, level):
if (level <= debugLevel):
print(u'[' + functionName + u'] ' + message, file=sys.stderr)
# User-agent for request (see RFC2616).
def getUserAgent():
printDebugMessage(u'getUserAgent', u'Begin', 11)
# Agent string for urllib2 library.
urllib_agent = u'Python-urllib/%s' % urllib_version
clientRevision = version
# Prepend client specific agent string.
try:
pythonversion = platform.python_version()
pythonsys = platform.system()
except ValueError:
pythonversion, pythonsys = "Unknown", "Unknown"
user_agent = u'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
clientRevision, os.path.basename(__file__),
pythonversion, pythonsys, urllib_agent)
printDebugMessage(u'getUserAgent', u'user_agent: ' + user_agent, 12)
printDebugMessage(u'getUserAgent', u'End', 11)
return user_agent
# Wrapper for a REST (HTTP GET) request
def restRequest(url):
printDebugMessage(u'restRequest', u'Begin', 11)
printDebugMessage(u'restRequest', u'url: ' + url, 11)
try:
# Set the User-agent.
user_agent = getUserAgent()
http_headers = {u'User-Agent': user_agent}
req = Request(url, None, http_headers)
# Make the request (HTTP GET).
reqH = urlopen(req)
resp = reqH.read()
contenttype = reqH.info()
if (len(resp) > 0 and contenttype != u"image/png;charset=UTF-8"
and contenttype != u"image/jpeg;charset=UTF-8"
and contenttype != u"application/gzip;charset=UTF-8"):
try:
result = unicode(resp, u'utf-8')
except UnicodeDecodeError:
result = resp
else:
result = resp
reqH.close()
# Errors are indicated by HTTP status codes.
except HTTPError as ex:
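        # on an HTTP error, retry with the requests library and hand back the raw payload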
result = requests.get(url).content
printDebugMessage(u'restRequest', u'End', 11)
return result
# Get input parameters list
def serviceGetParameters():
printDebugMessage(u'serviceGetParameters', u'Begin', 1)
requestUrl = baseUrl + u'/parameters'
printDebugMessage(u'serviceGetParameters', u'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage(u'serviceGetParameters', u'End', 1)
return doc[u'id':]
# Print list of parameters
def printGetParameters():
printDebugMessage(u'printGetParameters', u'Begin', 1)
idList = serviceGetParameters()
for id_ in idList:
print(id_)
printDebugMessage(u'printGetParameters', u'End', 1)
# Get input parameter information
def serviceGetParameterDetails(paramName):
printDebugMessage(u'serviceGetParameterDetails', u'Begin', 1)
printDebugMessage(u'serviceGetParameterDetails', u'paramName: ' + paramName, 2)
requestUrl = baseUrl + u'/parameterdetails/' + paramName
printDebugMessage(u'serviceGetParameterDetails', u'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage(u'serviceGetParameterDetails', u'End', 1)
return doc
# Print description of a parameter
def printGetParameterDetails(paramName):
printDebugMessage(u'printGetParameterDetails', u'Begin', 1)
doc = serviceGetParameterDetails(paramName)
print(unicode(doc.name) + u"\t" + unicode(doc.type))
print(doc.description)
if hasattr(doc, 'values'):
for value in doc.values:
print(value.value)
if unicode(value.defaultValue) == u'true':
print(u'default')
print(u"\t" + unicode(value.label))
if hasattr(value, u'properties'):
for wsProperty in value.properties:
print(u"\t" + unicode(wsProperty.key) + u"\t" + unicode(wsProperty.value))
printDebugMessage(u'printGetParameterDetails', u'End', 1)
# Submit job
def serviceRun(email, title, params):
printDebugMessage(u'serviceRun', u'Begin', 1)
# Insert e-mail and title into params
params[u'email'] = email
if title:
params[u'title'] = title
requestUrl = baseUrl + u'/run/'
printDebugMessage(u'serviceRun', u'requestUrl: ' + requestUrl, 2)
# Get the data for the other options
requestData = urlencode(params)
printDebugMessage(u'serviceRun', u'requestData: ' + requestData, 2)
# Errors are indicated by HTTP status codes.
try:
# Set the HTTP User-agent.
user_agent = getUserAgent()
http_headers = {u'User-Agent': user_agent}
req = Request(requestUrl, None, http_headers)
# Make the submission (HTTP POST).
reqH = urlopen(req, requestData.encode(encoding=u'utf_8', errors=u'strict'))
jobId = unicode(reqH.read(), u'utf-8')
reqH.close()
except HTTPError as ex:
print(xmltramp.parse(unicode(ex.read(), u'utf-8'))[0][0])
quit()
printDebugMessage(u'serviceRun', u'jobId: ' + jobId, 2)
printDebugMessage(u'serviceRun', u'End', 1)
return jobId
# Get job status
def serviceGetStatus(jobId):
printDebugMessage(u'serviceGetStatus', u'Begin', 1)
printDebugMessage(u'serviceGetStatus', u'jobId: ' + jobId, 2)
requestUrl = baseUrl + u'/status/' + jobId
printDebugMessage(u'serviceGetStatus', u'requestUrl: ' + requestUrl, 2)
status = restRequest(requestUrl)
printDebugMessage(u'serviceGetStatus', u'status: ' + status, 2)
printDebugMessage(u'serviceGetStatus', u'End', 1)
return status
# Print the status of a job
def printGetStatus(jobId):
printDebugMessage(u'printGetStatus', u'Begin', 1)
status = serviceGetStatus(jobId)
if outputLevel > 0:
print("Getting status for job %s" % jobId)
print(status)
if outputLevel > 0 and status == "FINISHED":
print("To get results: python %s --polljob --jobid %s"
"" % (os.path.basename(__file__), jobId))
printDebugMessage(u'printGetStatus', u'End', 1)
# Get available result types for job
def serviceGetResultTypes(jobId):
printDebugMessage(u'serviceGetResultTypes', u'Begin', 1)
printDebugMessage(u'serviceGetResultTypes', u'jobId: ' + jobId, 2)
requestUrl = baseUrl + u'/resulttypes/' + jobId
printDebugMessage(u'serviceGetResultTypes', u'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage(u'serviceGetResultTypes', u'End', 1)
return doc[u'type':]
# Print list of available result types for a job.
def printGetResultTypes(jobId):
printDebugMessage(u'printGetResultTypes', u'Begin', 1)
if outputLevel > 0:
print("Getting result types for job %s" % jobId)
resultTypeList = serviceGetResultTypes(jobId)
if outputLevel > 0:
print("Available result types:")
for resultType in resultTypeList:
print(resultType[u'identifier'])
if hasattr(resultType, u'label'):
print(u"\t", resultType[u'label'])
if hasattr(resultType, u'description'):
print(u"\t", resultType[u'description'])
if hasattr(resultType, u'mediaType'):
print(u"\t", resultType[u'mediaType'])
if hasattr(resultType, u'fileSuffix'):
print(u"\t", resultType[u'fileSuffix'])
if outputLevel > 0:
print("To get results:\n python %s --polljob --jobid %s\n"
" python %s --polljob --outformat <type> --jobid %s"
"" % (os.path.basename(__file__), jobId,
os.path.basename(__file__), jobId))
printDebugMessage(u'printGetResultTypes', u'End', 1)
# Get result
def serviceGetResult(jobId, type_):
printDebugMessage(u'serviceGetResult', u'Begin', 1)
printDebugMessage(u'serviceGetResult', u'jobId: ' + jobId, 2)
printDebugMessage(u'serviceGetResult', u'type_: ' + type_, 2)
requestUrl = baseUrl + u'/result/' + jobId + u'/' + type_
result = restRequest(requestUrl)
printDebugMessage(u'serviceGetResult', u'End', 1)
return result
# Client-side poll
def clientPoll(jobId):
printDebugMessage(u'clientPoll', u'Begin', 1)
result = u'PENDING'
while result == u'RUNNING' or result == u'PENDING':
result = serviceGetStatus(jobId)
if outputLevel > 0:
print(result)
if result == u'RUNNING' or result == u'PENDING':
time.sleep(pollFreq)
printDebugMessage(u'clientPoll', u'End', 1)
# Get result for a jobid
# Allows more than one output file written when 'outformat' is defined.
def getResult(jobId):
printDebugMessage(u'getResult', u'Begin', 1)
printDebugMessage(u'getResult', u'jobId: ' + jobId, 1)
if outputLevel > 1:
print("Getting results for job %s" % jobId)
# Check status and wait if necessary
clientPoll(jobId)
# Get available result types
resultTypes = serviceGetResultTypes(jobId)
for resultType in resultTypes:
# Derive the filename for the result
if options.outfile:
filename = (options.outfile + u'.' + unicode(resultType[u'identifier']) +
u'.' + unicode(resultType[u'fileSuffix']))
else:
filename = (jobId + u'.' + unicode(resultType[u'identifier']) +
u'.' + unicode(resultType[u'fileSuffix']))
# Write a result file
outformat_parm = str(options.outformat).split(',')
for outformat_type in outformat_parm:
outformat_type = outformat_type.replace(' ', '')
if outformat_type == 'None':
outformat_type = None
if not outformat_type or outformat_type == unicode(resultType[u'identifier']):
if outputLevel > 1:
print("Getting %s" % unicode(resultType[u'identifier']))
# Get the result
result = serviceGetResult(jobId, unicode(resultType[u'identifier']))
if (unicode(resultType[u'mediaType']) == u"image/png"
or unicode(resultType[u'mediaType']) == u"image/jpeg"
or unicode(resultType[u'mediaType']) == u"application/gzip"):
fmode = 'wb'
else:
fmode = 'w'
try:
fh = open(filename, fmode)
fh.write(result)
fh.close()
except TypeError:
fh.close()
fh = open(filename, "wb")
fh.write(result)
fh.close()
if outputLevel > 0:
print("Creating result file: " + filename)
printDebugMessage(u'getResult', u'End', 1)
# Read a file
def readFile(filename):
printDebugMessage(u'readFile', u'Begin', 1)
fh = open(filename, 'r')
data = fh.read()
fh.close()
printDebugMessage(u'readFile', u'End', 1)
return data
def print_usage():
print("""\
EMBL-EBI EMBOSS pepwindow Python Client:
Sequence statistics and plots with pepwindow.
[Required (for job submission)]
--email E-mail address.
--sequence The sequence to be analysed can be entered directly into
this form. The sequence can be in GCG, FASTA, PIR, NBRF,
PHYLIP or UniProtKB/Swiss-Prot format. Partially formatted
sequences are not accepted.
[Optional]
--windowsize Window size for averaging (smoothing) the hydropathy plot.
Use an integer between 1 and 200.
--normalize Normalize data values (mean = 0.0, standard deviation = 1.0).
[General]
-h, --help Show this help message and exit.
--asyncjob Forces to make an asynchronous query.
--title Title for job.
--status Get job status.
--resultTypes Get available result types for job.
--polljob Poll for the status of a job.
--pollFreq Poll frequency in seconds (default 3s).
--jobid JobId that was returned when an asynchronous job was submitted.
--outfile File name for results (default is JobId; for STDOUT).
--outformat Result format(s) to retrieve. It accepts comma-separated values.
--params List input parameters.
--paramDetail Display details for input parameter.
--verbose Increase output.
--version Prints out the version of the Client and exit.
--quiet Decrease output.
--baseUrl Base URL. Defaults to:
https://www.ebi.ac.uk/Tools/services/rest/emboss_pepwindow
Synchronous job:
The results/errors are returned as soon as the job is finished.
Usage: python emboss_pepwindow.py --email <[email protected]> [options...] <SeqFile|SeqID(s)>
Returns: results as an attachment
Asynchronous job:
Use this if you want to retrieve the results at a later time. The results
are stored for up to 24 hours.
Usage: python emboss_pepwindow.py --asyncjob --email <[email protected]> [options...] <SeqFile|SeqID(s)>
Returns: jobid
Check status of Asynchronous job:
Usage: python emboss_pepwindow.py --status --jobid <jobId>
Retrieve job data:
Use the jobid to query for the status of the job. If the job is finished,
it also returns the results/errors.
Usage: python emboss_pepwindow.py --polljob --jobid <jobId> [--outfile string]
Returns: string indicating the status of the job and if applicable, results
as an attachment.
Further information:
https://www.ebi.ac.uk/Tools/webservices and
https://github.com/ebi-wp/webservice-clients
Support/Feedback:
https://www.ebi.ac.uk/support/""")
# No options... print help.
if numOpts < 2:
print_usage()
elif options.help:
print_usage()
# List parameters
elif options.params:
printGetParameters()
# Get parameter details
elif options.paramDetail:
printGetParameterDetails(options.paramDetail)
# Print Client version
elif options.version:
print("Revision: %s" % version)
sys.exit()
# Submit job
elif options.email and not options.jobid:
params = {}
if len(args) == 1 and "true" not in args and "false" not in args:
if os.path.exists(args[0]): # Read file into content
params[u'sequence'] = readFile(args[0])
else: # Argument is a sequence id
params[u'sequence'] = args[0]
elif len(args) == 2 and "true" not in args and "false" not in args:
if os.path.exists(args[0]) and os.path.exists(args[1]): # Read file into content
params[u'asequence'] = readFile(args[0])
params[u'bsequence'] = readFile(args[1])
else: # Argument is a sequence id
params[u'asequence'] = args[0]
            params[u'bsequence'] = args[1]
elif hasattr(options, "sequence") or (hasattr(options, "asequence") and hasattr(options, "bsequence")): # Specified via option
if hasattr(options, "sequence"):
if os.path.exists(options.sequence): # Read file into content
params[u'sequence'] = readFile(options.sequence)
else: # Argument is a sequence id
params[u'sequence'] = options.sequence
elif hasattr(options, "asequence") and hasattr(options, "bsequence"):
if os.path.exists(options.asequence) and os.path.exists(options.bsequence): # Read file into content
params[u'asequence'] = readFile(options.asequence)
params[u'bsequence'] = readFile(options.bsequence)
else: # Argument is a sequence id
params[u'asequence'] = options.asequence
params[u'bsequence'] = options.bsequence
# Pass default values and fix bools (without default value)
if options.windowsize:
params['windowsize'] = options.windowsize
if not options.normalize:
params['normalize'] = 'false'
if options.normalize:
        params['normalize'] = 'true'
# Submit the job
jobId = serviceRun(options.email, options.title, params)
if options.asyncjob: # Async mode
print(jobId)
if outputLevel > 0:
print("To check status: python %s --status --jobid %s"
"" % (os.path.basename(__file__), jobId))
else:
# Sync mode
if outputLevel > 0:
print("JobId: " + jobId, file=sys.stderr)
else:
print(jobId)
time.sleep(pollFreq)
getResult(jobId)
# Get job status
elif options.jobid and options.status:
printGetStatus(options.jobid)
elif options.jobid and (options.resultTypes or options.polljob):
status = serviceGetStatus(options.jobid)
if status == 'PENDING' or status == 'RUNNING':
print("Error: Job status is %s. "
"To get result types the job must be finished." % status)
quit()
# List result types for job
if options.resultTypes:
printGetResultTypes(options.jobid)
# Get results for job
elif options.polljob:
getResult(options.jobid)
else:
# Checks for 'email' parameter
if not options.email:
print('\nParameter "--email" is missing in your command. It is required!\n')
print(u'Error: unrecognised argument combination', file=sys.stderr)
print_usage()
| 38.036522 | 131 | 0.64821 | [
"Apache-2.0"
] | SamFent/webservice-clients | python/emboss_pepwindow.py | 21,872 | Python |
# ccm node
from __future__ import absolute_import, with_statement
import os
import re
import shutil
import signal
import stat
import subprocess
import time
import yaml
from six import iteritems, print_
from ccmlib import common, extension
from ccmlib.node import Node, NodeError, ToolError
class DseNode(Node):
"""
Provides interactions to a DSE node.
"""
def __init__(self, name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save=True, binary_interface=None, byteman_port='0', environment_variables=None):
super(DseNode, self).__init__(name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save, binary_interface, byteman_port, environment_variables=environment_variables)
self.get_cassandra_version()
self._dse_config_options = {}
if self.cluster.hasOpscenter():
self._copy_agent()
def get_install_cassandra_root(self):
return os.path.join(self.get_install_dir(), 'resources', 'cassandra')
def get_node_cassandra_root(self):
return os.path.join(self.get_path(), 'resources', 'cassandra')
def get_conf_dir(self):
"""
Returns the path to the directory where Cassandra config are located
"""
return os.path.join(self.get_path(), 'resources', 'cassandra', 'conf')
def get_tool(self, toolname):
return common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', toolname)
def get_tool_args(self, toolname):
return [common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', 'dse'), toolname]
def get_env(self):
(node_ip, _) = self.network_interfaces['binary']
return common.make_dse_env(self.get_install_dir(), self.get_path(), node_ip)
def get_cassandra_version(self):
return common.get_dse_cassandra_version(self.get_install_dir())
def set_workloads(self, workloads):
self.workloads = workloads
self._update_config()
if 'solr' in self.workloads:
self.__generate_server_xml()
if 'graph' in self.workloads:
(node_ip, _) = self.network_interfaces['binary']
conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml')
with open(conf_file, 'r') as f:
data = yaml.load(f)
graph_options = data['graph']
graph_options['gremlin_server']['host'] = node_ip
self.set_dse_configuration_options({'graph': graph_options})
self.__update_gremlin_config_yaml()
if 'dsefs' in self.workloads:
dsefs_options = {'dsefs_options': {'enabled': 'true',
'work_dir': os.path.join(self.get_path(), 'dsefs'),
'data_directories': [{'dir': os.path.join(self.get_path(), 'dsefs', 'data')}]}}
self.set_dse_configuration_options(dsefs_options)
if 'spark' in self.workloads:
self._update_spark_env()
def set_dse_configuration_options(self, values=None):
if values is not None:
for k, v in iteritems(values):
self._dse_config_options[k] = v
self.import_dse_config_files()
def watch_log_for_alive(self, nodes, from_mark=None, timeout=720, filename='system.log'):
"""
Watch the log of this node until it detects that the provided other
nodes are marked UP. This method works similarly to watch_log_for_death.
We want to provide a higher default timeout when this is called on DSE.
"""
super(DseNode, self).watch_log_for_alive(nodes, from_mark=from_mark, timeout=timeout, filename=filename)
def get_launch_bin(self):
cdir = self.get_install_dir()
launch_bin = common.join_bin(cdir, 'bin', 'dse')
# Copy back the dse scripts since profiling may have modified it the previous time
shutil.copy(launch_bin, self.get_bin_dir())
return common.join_bin(self.get_path(), 'bin', 'dse')
def add_custom_launch_arguments(self, args):
args.append('cassandra')
for workload in self.workloads:
if 'hadoop' in workload:
args.append('-t')
if 'solr' in workload:
args.append('-s')
if 'spark' in workload:
args.append('-k')
if 'cfs' in workload:
args.append('-c')
if 'graph' in workload:
args.append('-g')
def start(self,
join_ring=True,
no_wait=False,
verbose=False,
update_pid=True,
wait_other_notice=True,
replace_token=None,
replace_address=None,
jvm_args=None,
wait_for_binary_proto=False,
profile_options=None,
use_jna=False,
quiet_start=False,
allow_root=False,
set_migration_task=True):
process = super(DseNode, self).start(join_ring, no_wait, verbose, update_pid, wait_other_notice, replace_token,
replace_address, jvm_args, wait_for_binary_proto, profile_options, use_jna,
quiet_start, allow_root, set_migration_task)
        if self.cluster.hasOpscenter():
            self._start_agent()
        return process
def _start_agent(self):
agent_dir = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_dir):
self._write_agent_address_yaml(agent_dir)
self._write_agent_log4j_properties(agent_dir)
args = [os.path.join(agent_dir, 'bin', common.platform_binary('datastax-agent'))]
subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def stop(self, wait=True, wait_other_notice=False, gently=True):
if self.cluster.hasOpscenter():
self._stop_agent()
return super(DseNode, self).stop(wait, wait_other_notice, gently)
def _stop_agent(self):
agent_dir = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_dir):
pidfile = os.path.join(agent_dir, 'datastax-agent.pid')
if os.path.exists(pidfile):
with open(pidfile, 'r') as f:
pid = int(f.readline().strip())
f.close()
if pid is not None:
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass
os.remove(pidfile)
def nodetool(self, cmd, username=None, password=None, capture_output=True, wait=True):
if password is not None:
cmd = '-pw {} '.format(password) + cmd
if username is not None:
cmd = '-u {} '.format(username) + cmd
return super(DseNode, self).nodetool(cmd)
def dsetool(self, cmd):
env = self.get_env()
extension.append_to_client_env(self, env)
dsetool = common.join_bin(self.get_install_dir(), 'bin', 'dsetool')
args = [dsetool, '-h', 'localhost', '-j', str(self.jmx_port)]
args += cmd.split()
p = subprocess.Popen(args, env=env)
p.wait()
def dse(self, dse_options=None):
if dse_options is None:
dse_options = []
env = self.get_env()
extension.append_to_client_env(self, env)
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse]
args += dse_options
p = subprocess.Popen(args, env=env)
p.wait()
def hadoop(self, hadoop_options=None):
if hadoop_options is None:
hadoop_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'hadoop']
args += hadoop_options
p = subprocess.Popen(args, env=env)
p.wait()
def hive(self, hive_options=None):
if hive_options is None:
hive_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'hive']
args += hive_options
p = subprocess.Popen(args, env=env)
p.wait()
def pig(self, pig_options=None):
if pig_options is None:
pig_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'pig']
args += pig_options
p = subprocess.Popen(args, env=env)
p.wait()
def sqoop(self, sqoop_options=None):
if sqoop_options is None:
sqoop_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'sqoop']
args += sqoop_options
p = subprocess.Popen(args, env=env)
p.wait()
def spark(self, spark_options=None):
if spark_options is None:
spark_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'spark']
args += spark_options
p = subprocess.Popen(args, env=env)
p.wait()
def import_dse_config_files(self):
self._update_config()
if not os.path.isdir(os.path.join(self.get_path(), 'resources', 'dse', 'conf')):
os.makedirs(os.path.join(self.get_path(), 'resources', 'dse', 'conf'))
common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'dse', 'conf'), os.path.join(self.get_path(), 'resources', 'dse', 'conf'))
self.__update_yaml()
def copy_config_files(self):
for product in ['dse', 'cassandra', 'hadoop', 'hadoop2-client', 'sqoop', 'hive', 'tomcat', 'spark', 'shark', 'mahout', 'pig', 'solr', 'graph']:
src_conf = os.path.join(self.get_install_dir(), 'resources', product, 'conf')
dst_conf = os.path.join(self.get_path(), 'resources', product, 'conf')
if not os.path.isdir(src_conf):
continue
if os.path.isdir(dst_conf):
common.rmdirs(dst_conf)
shutil.copytree(src_conf, dst_conf)
if product == 'solr':
src_web = os.path.join(self.get_install_dir(), 'resources', product, 'web')
dst_web = os.path.join(self.get_path(), 'resources', product, 'web')
if os.path.isdir(dst_web):
common.rmdirs(dst_web)
shutil.copytree(src_web, dst_web)
if product == 'tomcat':
src_lib = os.path.join(self.get_install_dir(), 'resources', product, 'lib')
dst_lib = os.path.join(self.get_path(), 'resources', product, 'lib')
if os.path.isdir(dst_lib):
common.rmdirs(dst_lib)
if os.path.exists(src_lib):
shutil.copytree(src_lib, dst_lib)
src_webapps = os.path.join(self.get_install_dir(), 'resources', product, 'webapps')
dst_webapps = os.path.join(self.get_path(), 'resources', product, 'webapps')
if os.path.isdir(dst_webapps):
common.rmdirs(dst_webapps)
shutil.copytree(src_webapps, dst_webapps)
src_lib = os.path.join(self.get_install_dir(), 'resources', product, 'gremlin-console', 'conf')
dst_lib = os.path.join(self.get_path(), 'resources', product, 'gremlin-console', 'conf')
if os.path.isdir(dst_lib):
common.rmdirs(dst_lib)
if os.path.exists(src_lib):
shutil.copytree(src_lib, dst_lib)
def import_bin_files(self):
cassandra_bin_dir = os.path.join(self.get_path(), 'resources', 'cassandra', 'bin')
shutil.rmtree(cassandra_bin_dir, ignore_errors=True)
os.makedirs(cassandra_bin_dir)
common.copy_directory(os.path.join(self.get_install_dir(), 'bin'), self.get_bin_dir())
common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'bin'), cassandra_bin_dir)
self.export_dse_home_in_dse_env_sh()
def export_dse_home_in_dse_env_sh(self):
'''
Due to the way CCM lays out files, separating the repository
from the node(s) confs, the `dse-env.sh` script of each node
needs to have its DSE_HOME var set and exported. Since DSE
4.5.x, the stock `dse-env.sh` file includes a commented-out
place to do exactly this, intended for installers.
Basically: read in the file, write it back out and add the two
lines.
'sstableloader' is an example of a node script that depends on
this, when used in a CCM-built cluster.
'''
with open(self.get_bin_dir() + "/dse-env.sh", "r") as dse_env_sh:
buf = dse_env_sh.readlines()
with open(self.get_bin_dir() + "/dse-env.sh", "w") as out_file:
for line in buf:
out_file.write(line)
if line == "# This is here so the installer can force set DSE_HOME\n":
out_file.write("DSE_HOME=" + self.get_install_dir() + "\nexport DSE_HOME\n")
def _update_log4j(self):
super(DseNode, self)._update_log4j()
conf_file = os.path.join(self.get_conf_dir(), common.LOG4J_CONF)
append_pattern = 'log4j.appender.V.File='
log_file = os.path.join(self.get_path(), 'logs', 'solrvalidation.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
append_pattern = 'log4j.appender.A.File='
log_file = os.path.join(self.get_path(), 'logs', 'audit.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
append_pattern = 'log4j.appender.B.File='
log_file = os.path.join(self.get_path(), 'logs', 'audit', 'dropped-events.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
def __update_yaml(self):
conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml')
with open(conf_file, 'r') as f:
data = yaml.load(f)
data['system_key_directory'] = os.path.join(self.get_path(), 'keys')
# Get a map of combined cluster and node configuration with the node
# configuration taking precedence.
full_options = common.merge_configuration(
self.cluster._dse_config_options,
self._dse_config_options, delete_empty=False)
# Merge options with original yaml data.
data = common.merge_configuration(data, full_options)
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
def __generate_server_xml(self):
server_xml = os.path.join(self.get_path(), 'resources', 'tomcat', 'conf', 'server.xml')
if os.path.isfile(server_xml):
os.remove(server_xml)
with open(server_xml, 'w+') as f:
f.write('<Server port="8005" shutdown="SHUTDOWN">\n')
f.write(' <Service name="Solr">\n')
f.write(' <Connector port="8983" address="%s" protocol="HTTP/1.1" connectionTimeout="20000" maxThreads = "200" URIEncoding="UTF-8"/>\n' % self.network_interfaces['thrift'][0])
f.write(' <Engine name="Solr" defaultHost="localhost">\n')
f.write(' <Host name="localhost" appBase="../solr/web"\n')
f.write(' unpackWARs="true" autoDeploy="true"\n')
f.write(' xmlValidation="false" xmlNamespaceAware="false">\n')
f.write(' </Host>\n')
f.write(' </Engine>\n')
f.write(' </Service>\n')
f.write('</Server>\n')
f.close()
def __update_gremlin_config_yaml(self):
(node_ip, _) = self.network_interfaces['binary']
conf_file = os.path.join(self.get_path(), 'resources', 'graph', 'gremlin-console', 'conf', 'remote.yaml')
with open(conf_file, 'r') as f:
data = yaml.load(f)
data['hosts'] = [node_ip]
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
def _get_directories(self):
dirs = []
for i in ['data', 'commitlogs', 'saved_caches', 'logs', 'bin', 'keys', 'resources', os.path.join('data', 'hints')]:
dirs.append(os.path.join(self.get_path(), i))
return dirs
def _copy_agent(self):
agent_source = os.path.join(self.get_install_dir(), 'datastax-agent')
agent_target = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_source) and not os.path.exists(agent_target):
shutil.copytree(agent_source, agent_target)
def _write_agent_address_yaml(self, agent_dir):
address_yaml = os.path.join(agent_dir, 'conf', 'address.yaml')
if not os.path.exists(address_yaml):
with open(address_yaml, 'w+') as f:
(ip, port) = self.network_interfaces['thrift']
jmx = self.jmx_port
f.write('stomp_interface: 127.0.0.1\n')
f.write('local_interface: %s\n' % ip)
f.write('agent_rpc_interface: %s\n' % ip)
f.write('agent_rpc_broadcast_address: %s\n' % ip)
f.write('cassandra_conf: %s\n' % os.path.join(self.get_path(), 'resources', 'cassandra', 'conf', 'cassandra.yaml'))
f.write('cassandra_install: %s\n' % self.get_path())
f.write('cassandra_logs: %s\n' % os.path.join(self.get_path(), 'logs'))
f.write('thrift_port: %s\n' % port)
f.write('jmx_port: %s\n' % jmx)
f.close()
def _write_agent_log4j_properties(self, agent_dir):
log4j_properties = os.path.join(agent_dir, 'conf', 'log4j.properties')
with open(log4j_properties, 'w+') as f:
f.write('log4j.rootLogger=INFO,R\n')
f.write('log4j.logger.org.apache.http=OFF\n')
f.write('log4j.logger.org.eclipse.jetty.util.log=WARN,R\n')
f.write('log4j.appender.R=org.apache.log4j.RollingFileAppender\n')
f.write('log4j.appender.R.maxFileSize=20MB\n')
f.write('log4j.appender.R.maxBackupIndex=5\n')
f.write('log4j.appender.R.layout=org.apache.log4j.PatternLayout\n')
f.write('log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %m%n\n')
f.write('log4j.appender.R.File=./log/agent.log\n')
f.close()
def _update_spark_env(self):
try:
            node_num = re.search(r'node(\d+)', self.name).group(1)
except AttributeError:
node_num = 0
conf_file = os.path.join(self.get_path(), 'resources', 'spark', 'conf', 'spark-env.sh')
env = self.get_env()
content = []
with open(conf_file, 'r') as f:
for line in f.readlines():
for spark_var in env.keys():
if line.startswith('export %s=' % spark_var) or line.startswith('export %s=' % spark_var, 2):
line = 'export %s=%s\n' % (spark_var, env[spark_var])
break
content.append(line)
with open(conf_file, 'w') as f:
f.writelines(content)
# starting with DSE 5.0 (Spark 1.6) we need to set a unique
# spark.shuffle.service.port for each node
if self.cluster.version() > '5.0':
print_('Writing shuffle')
defaults_file = os.path.join(self.get_path(), 'resources', 'spark', 'conf', 'spark-defaults.conf')
with open(defaults_file, 'a') as f:
port_num = 7737 + int(node_num)
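                # e.g. a node named 'node3' gets spark.shuffle.service.port 7740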
f.write("\nspark.shuffle.service.port %s\n" % port_num)
| 44.422658 | 232 | 0.595488 | [
"Apache-2.0"
] | thobbs/ccm | ccmlib/dse_node.py | 20,390 | Python |
#!/usr/bin/env python
import os
import cv2
import numpy as np
from enum import Enum
import math
class Calc (Enum):
OPENCV = 1
GSL_MULTI_ROOT = 2
GSL_MULTI_FIT = 3
image_file_name = "Man2_10deg.png"
use_calc = Calc.GSL_MULTI_FIT
#use_calc = Calc.GSL_MULTI_ROOT
#use_calc = Calc.OPENCV
def get_project_xy(A, R, X, Y, Z):
P = np.array([X, Y, Z, 1])
pp = A.dot(R.dot(P))
return [pp[0]/pp[2], pp[1]/pp[2]]
def get_project_uv(A, R, X, Y, Z):
fx, fy, cx, cy = A[0][0], A[1][1], A[0][2], A[1][2]
r11, r12, r13, t1 = R[0][0], R[0][1], R[0][2], R[0][3]
r21, r22, r23, t2 = R[1][0], R[1][1], R[1][2], R[1][3]
r31, r32, r33, t3 = R[2][0], R[2][1], R[2][2], R[2][3]
s = r31 * X + r32 * Y + r33 * Z + t3
# print("%f * %f + %f * %f + %f * %f + %f = %f\n" % (r31, X, r32, Y, r33, Z, t3, s))
u = ((fx*r11 + cx*r31)*X + (fx*r12 + cx*r32)*Y + (fx*r13 + cx*r33)*Z + fx*t1 + cx*t3)/s
v = ((fy*r21 + cy*r31)*X + (fy*r22 + cy*r32)*Y +(fy*r23 + cy*r33)*Z + fy*t2 + cy*t3)/s
# print("%f/%f" % ((fx*r11 + cx*r31)*X + (fx*r12 + cx*r32)*Y + (fx*r13 + cx*r33)*Z + fx*t1 + cx*t3, s))
# print("%f/%f" % ((fy*r21 + cy*r31)*X + (fy*r22 + cy*r32)*Y +(fy*r23 + cy*r33)*Z + fy*t2 + cy*t3, s))
return u, v
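# Consistency sketch (illustrative numbers): both projection helpers compute the same pixel, e.g.
#   A = np.array([[800., 0., 320.], [0., 800., 240.], [0., 0., 1.]])
#   R = np.hstack([np.eye(3), np.array([[0.], [0.], [1000.]])])
#   get_project_xy(A, R, 10., 20., 30.) ~ [327.77, 255.53] ~ list(get_project_uv(A, R, 10., 20., 30.))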
def get_rot_tran_matrix2(M):
a = []
for i in range(0, 12):
a.append(float(M[i]))
R = np.array([[a[0], a[1], a[2], a[9]], [a[3], a[4], a[5], a[10]], [a[6], a[7], a[8], a[11]]])
return R
def print_rotation_angle(RT):
R = RT[:, 0:3]
# print('R:', R)
V = R.dot(np.array([0, 0, 1]))
print('\033[92mV:', V)
print('phi = %f degree' % math.degrees(math.atan(V[0] / V[2])))
print('theta = %f degree' % (math.sqrt(V[0]**2 + V[2]**2)))
print('\033[0m')
def verification_rot_tran_matrix(A, R, u, v, X, Y, Z):
P = np.array([X, Y, Z, 1], dtype="double")
pp = A.dot(R.dot(P))
diff = np.fabs(u - pp[0]/pp[2]) + np.fabs(v - pp[1]/pp[2])
print(u, v, '<->', pp[0]/pp[2], pp[1]/pp[2])
return diff
def verification_rot_tran_matrix_2(A, R, u, v, X, Y, Z):
ud, vd = get_project_uv(A, R, X, Y, Z)
print(u, v, '<->', ud, vd)
def get_rot_tran_matrix(img_pnts, mod_pnts, cam_matrix): # s = 1
(u1, v1) = img_pnts[0] # nose tip
(u2, v2) = img_pnts[1] # left eye
(u3, v3) = img_pnts[2] # right eye
(u4, v4) = img_pnts[3] # left mouth
(u5, v5) = img_pnts[4] # right mouth
    (X1, Y1, Z1) = mod_pnts[0]
    (X2, Y2, Z2) = mod_pnts[1]
    (X3, Y3, Z3) = mod_pnts[2]
    (X4, Y4, Z4) = mod_pnts[3]
    (X5, Y5, Z5) = mod_pnts[4]
fx = cam_matrix[0][0]
fy = cam_matrix[1][1]
cx = cam_matrix[0][2]
cy = cam_matrix[1][2]
r31, r32, r33, t3 = 0, 0, 0, 1
D = np.array([[X1, Y1, Z1, 1], [X2, Y2, Z2, 1], [X3, Y3, Z3, 1], [X4, Y4, Z4, 1]])
D1 = np.array([[(v1 - cy) / fy, Y1, Z1, 1], [(v2 - cy) / fy, Y2, Z2, 1], [(v3 - cy) / fy, Y3, Z3, 1],
[(v4 - cy) / fy, Y4, Z4, 1]])
D2 = np.array([[X1, (v1 - cy) / fy, Z1, 1], [X2, (v2 - cy) / fy, Z2, 1], [X3, (v3 - cy) / fy, Z3, 1],
[X4, (v4 - cy) / fy, Z4, 1]])
D3 = np.array([[X1, Y1, (v1 - cy) / fy, 1], [X2, Y2, (v2 - cy) / fy, 1], [X3, Y3, (v3 - cy) / fy, 1],
[X4, Y4, (v4 - cy) / fy, 1]])
D4 = np.array([[X1, Y1, Z1, (v1 - cy) / fy], [X2, Y2, Z2, (v2 - cy) / fy], [X3, Y3, Z3, (v3 - cy) / fy],
[X4, Y4, Z4, (v4 - cy) / fy]])
r21 = np.linalg.det(D1) / np.linalg.det(D)
r22 = np.linalg.det(D2) / np.linalg.det(D)
r23 = np.linalg.det(D3) / np.linalg.det(D)
t2 = np.linalg.det(D4) / np.linalg.det(D)
D1 = np.array([[(u1 - cx) / fx, Y1, Z1, 1], [(u2 - cx) / fx, Y2, Z2, 1], [(u3 - cx) / fx, Y3, Z3, 1],
[(u4 - cx) / fx, Y4, Z4, 1]])
D2 = np.array([[X1, (u1 - cx) / fx, Z1, 1], [X2, (u2 - cx) / fx, Z2, 1], [X3, (u3 - cx) / fx, Z3, 1],
[X4, (u4 - cx) / fx, Z4, 1]])
D3 = np.array([[X1, Y1, (u1 - cx) / fx, 1], [X2, Y2, (u2 - cx) / fx, 1], [X3, Y3, (u3 - cx) / fx, 1],
[X4, Y4, (u4 - cx) / fx, 1]])
    D4 = np.array([[X1, Y1, Z1, (u1 - cx) / fx], [X2, Y2, Z2, (u2 - cx) / fx], [X3, Y3, Z3, (u3 - cx) / fx],
[X4, Y4, Z4, (u4 - cx) / fx]])
r11 = np.linalg.det(D1) / np.linalg.det(D)
r12 = np.linalg.det(D2) / np.linalg.det(D)
r13 = np.linalg.det(D3) / np.linalg.det(D)
t1 = np.linalg.det(D4) / np.linalg.det(D)
R = np.array([[r11, r12, r13, t1], [r21, r22, r23, t2], [r31, r32, r33, t3]])
return R
if __name__ == '__main__':
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
(-50.0, -40.0, 20.0), # Left eye left corner
(50.0, -40.0, 20.0), # Right eye right corner
(-27.5, 30.0, 10.0), # Left Mouth corner
(27.5, 30.0, 10.0) # Right mouth corner
])
index = 4
points_file = "points.txt"
image_file = []
key_points = []
matrix = []
if not os.path.exists(points_file):
print('do not have file %s' % points_file)
exit(0)
points_f = open(points_file, 'r')
for line in points_f:
a = line.split('|')
b = a[0].split(',')
image_file.append(b[0])
key_points.append(b[1:11])
matrix.append(a[1].split(','))
points_f.close()
image_points = np.array([
(int(key_points[index][0]), int(key_points[index][5])), # Nose tip
(int(key_points[index][1]), int(key_points[index][6])), # Left eye left corner
(int(key_points[index][2]), int(key_points[index][7])), # Right eye right corner
(int(key_points[index][3]), int(key_points[index][8])), # Left Mouth corner
(int(key_points[index][4]), int(key_points[index][9])) # Right mouth corner
], dtype="double")
# Read Image
im = cv2.imread(image_file[index])
size = im.shape
# Camera internals
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype="double")
R = get_rot_tran_matrix2(matrix[index]) # read gsl result
print("\033[94m----check----")
for i in range(0, 5):
verification_rot_tran_matrix_2(camera_matrix, R, image_points[i][0], image_points[i][1],
model_points[i][0], model_points[i][1], model_points[i][2])
print("----end-----\033[0m")
print_rotation_angle(R)
print("rotation_matrix:\n {0}".format(R))
# draw axes
axis_length = 100.0
if False:
Z_pnt = get_project_uv(camera_matrix, R, 0, 0, axis_length)
Y_pnt = get_project_uv(camera_matrix, R, 0, axis_length, 0)
X_pnt = get_project_uv(camera_matrix, R, axis_length, 0, 0)
Org_pnt = get_project_uv(camera_matrix, R, 0, 0, 0)
else:
Z_pnt = get_project_xy(camera_matrix, R, 0, 0, axis_length)
Y_pnt = get_project_xy(camera_matrix, R, 0, axis_length, 0)
X_pnt = get_project_xy(camera_matrix, R, axis_length, 0, 0)
Org_pnt = get_project_xy(camera_matrix, R, 0, 0, 0)
#print('Rt:\033[93m', R, '\033[0m')
# print('X:\033[93m', R[:, 0:3].dot(np.array([axis_length, 0, 0])), '\033[0m')
# print('Y:\033[93m', R[:, 0:3].dot(np.array([0, axis_length, 0])), '\033[0m')
# print('Z:\033[93m', R[:, 0:3].dot(np.array([0, 0, axis_length])), '\033[0m')
for p in image_points:
cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)
p1 = (int(Org_pnt[0]), int(Org_pnt[1]))
p2 = (int(Z_pnt[0]), int(Z_pnt[1]))
cv2.line(im, p1, p2, (255, 0, 0), 2) #blue:Z
p1 = (int(Org_pnt[0]), int(Org_pnt[1]))
p2 = (int(Y_pnt[0]), int(Y_pnt[1]))
cv2.line(im, p1, p2, (0, 255, 0), 2) #green:Y
p1 = (int(Org_pnt[0]), int(Org_pnt[1]))
p2 = (int(X_pnt[0]), int(X_pnt[1]))
cv2.line(im, p1, p2, (0, 255, 255), 2) #yellow: X
# Display image
cv2.imshow("Output", im)
cv2.waitKey(0)
| 38.468599 | 108 | 0.513374 | [
"Apache-2.0"
] | WeihanSun/python_sample | opencv/src/face_motion1.py | 7,963 | Python |
"""
Get attributes about images
Inspired by https://github.com/CSAILVision/places365/blob/master/run_placesCNN_unified.py
"""
from pathlib import Path
import argparse
from typing import List, Iterator, Tuple, Optional, Union, Dict
import hashlib
import json
from multiprocessing import Pool
import urllib.request
import sys
import csv
from tqdm.auto import tqdm
import torch
from torchvision import transforms as trn
from torch import nn
from torch.utils.data._utils.collate import default_collate
from torch.nn import functional as F
import numpy as np
import cv2
from PIL import Image
import argtyped
from torch.utils.data import Dataset, DataLoader
import scripts.wideresnet as wideresnet
csv.field_size_limit(sys.maxsize)
TSV_FIELDNAMES = [
"listing_id",
"photo_id",
"category",
"attributes",
"is_indoor",
]
class Arguments(argtyped.Arguments, underscore=True):
outfile: Path = Path("places365/detect.tsv")
images: Path = Path("images")
batch_size: int = 100
visualize: bool = False
num_cat: int = 5
num_attr: int = 10
num_splits: int = 1
start: int = 0
num_workers: int = -1
# hacky way to deal with the Pytorch 1.0 update
def recursion_change_bn(module: nn.Module) -> nn.Module:
if isinstance(module, nn.BatchNorm2d):
module.track_running_stats = 1 # type: ignore
else:
for i, (name, module1) in enumerate(module._modules.items()): # type: ignore
module1 = recursion_change_bn(module1)
return module
def download_url(url, cache_dir):
stem = hashlib.sha1(str(url).encode())
filename = cache_dir / stem.hexdigest()
if not filename.is_file():
urllib.request.urlretrieve(url, filename)
return filename
def load_labels(
cache_dir: Union[Path, str]
) -> Tuple[Tuple[str, ...], np.ndarray, List[str], np.ndarray]:
"""
prepare all the labels
"""
# indoor and outdoor relevant
filename_io = download_url(
"https://raw.githubusercontent.com/csailvision/places365/master/IO_places365.txt",
cache_dir,
)
with open(filename_io) as f:
lines = f.readlines()
labels_IO = []
for line in lines:
items = line.rstrip().split()
labels_IO.append(int(items[-1]) - 1) # 0 is indoor, 1 is outdoor
labels_IO = np.array(labels_IO)
# scene category relevant
filename_category = download_url(
"https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt",
cache_dir,
)
_classes = list()
with open(filename_category) as class_file:
for line in class_file:
_classes.append(line.strip().split(" ")[0][3:])
classes = tuple(_classes)
# scene attribute relevant
filename_attribute = download_url(
"https://raw.githubusercontent.com/csailvision/places365/master/labels_sunattribute.txt",
cache_dir,
)
with open(filename_attribute) as f:
lines = f.readlines()
labels_attribute = [item.rstrip() for item in lines]
filename_W = download_url(
"http://places2.csail.mit.edu/models_places365/W_sceneattribute_wideresnet18.npy",
cache_dir,
)
W_attribute = np.load(filename_W)
return classes, labels_IO, labels_attribute, W_attribute
def get_tf():
# load the image transformer
tf = trn.Compose(
[
trn.Resize((224, 224)),
trn.ToTensor(),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
return tf
class NormalizeInverse(trn.Normalize):
"""
Undoes the normalization and returns the reconstructed images in the input domain.
"""
def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
mean = torch.tensor(mean)
std = torch.tensor(std)
std_inv = 1 / (std + 1e-7) # type: ignore
mean_inv = -mean * std_inv
super().__init__(mean=mean_inv, std=std_inv)
def __call__(self, array: np.ndarray):
tensor = torch.tensor(array)
tensor = super().__call__(tensor.clone())
array = np.transpose(np.uint8(255 * tensor.numpy()), (1, 2, 0))
return array
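# Illustrative round trip (assumes `t` is a normalized (3, H, W) tensor produced by get_tf()):
#   denorm = NormalizeInverse()
#   img_hwc_uint8 = denorm(t.numpy())  # HxWx3 uint8 array, ready for visualisation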
class Hooker:
def __init__(self, model: nn.Module, features_names=("layer4", "avgpool")):
self.features: List[np.ndarray] = []
# this is the last conv layer of the resnet
for name in features_names:
model._modules.get(name).register_forward_hook(self) # type: ignore
def __call__(self, module: nn.Module, input, output):
self.features.append(output.data.cpu().numpy())
def reset(self):
self.features = []
# load the model
def load_model(cache_dir: Union[Path, str]) -> nn.Module:
# this model has a last conv feature map as 14x14
model_file = download_url(
"http://places2.csail.mit.edu/models_places365/wideresnet18_places365.pth.tar",
cache_dir,
)
model = wideresnet.resnet18(num_classes=365)
checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage)
state_dict = {
str.replace(k, "module.", ""): v for k, v in checkpoint["state_dict"].items()
}
model.load_state_dict(state_dict)
# hacky way to deal with the upgraded batchnorm2D and avgpool layers...
for i, (name, module) in enumerate(model._modules.items()): # type: ignore
module = recursion_change_bn(model) # type: ignore
model.avgpool = torch.nn.AvgPool2d(kernel_size=14, stride=1, padding=0) # type: ignore
model.eval()
return model
def search_locations(image_folder: Path) -> List[Path]:
return [f for f in image_folder.iterdir() if f.is_dir()]
def load_photo_paths(locations: List[Path]) -> Iterator[Path]:
for location in tqdm(locations):
for photo in location.glob("*.jpg"):
yield photo
def load_photos(images: Path, cache_dir: Union[Path, str]) -> List[Union[str, Path]]:
photo_cache = Path(cache_dir) / "photos.txt"
if photo_cache.is_file():
with open(photo_cache, "r") as fid:
photos: List[Union[str, Path]] = [l.strip() for l in fid.readlines()]
else:
print("Preloading every images")
photos = list(images.rglob("*.jpg"))
with open(photo_cache, "w") as fid:
fid.writelines(f"{l}\n" for l in photos)
return photos
class ImageDataset(Dataset):
def __init__(self, photos: List[Union[Path, str]]):
self.photos = photos
self.tf = get_tf() # image transformer
def __len__(self):
return len(self.photos)
def __getitem__(
self, index: int
) -> Optional[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
path = Path(self.photos[index])
try:
image = Image.open(path)
image = image.convert("RGB")
        except Exception:  # skip unreadable or corrupt photos; collate_fn drops the None
return None
tensor = self.tf(image)
listing_id, photo_id = map(int, path.stem.split("-"))
return torch.tensor(listing_id), torch.tensor(photo_id), tensor
def collate_fn(batch: Tuple):
batch = tuple([b for b in batch if b is not None])
if not batch:
return None
return default_collate(batch)
def class_activation_map(
feature_conv: np.ndarray, weight_softmax: np.ndarray, class_idx: List[int]
):
    # generate the class activation maps, upsampled to 256x256
size_upsample = (256, 256)
nc, h, w = feature_conv.shape
output_cam = []
for _ in class_idx:
cam = weight_softmax[class_idx].dot(feature_conv.reshape((nc, h * w)))
cam = cam.reshape(h, w)
cam = cam - np.min(cam)
cam_img = cam / np.max(cam)
cam_img = np.uint8(255 * cam_img)
output_cam.append(cv2.resize(cam_img, size_upsample)) # type: ignore
return output_cam
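# Usage sketch (shapes assumed from the wideresnet18 backbone used below): feature_conv is the
# (C, 14, 14) layer4 map of one image, weight_softmax the (num_classes, C) fc weights, and
# class_activation_map(feature_conv, weight_softmax, [top_class])[0] is a 256x256 uint8 heatmap.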
def get_key(listing_id, photo_id) -> str:
return f"{listing_id}_{photo_id}"
def is_indoor(idx, labels_io):
# vote for the indoor or outdoor
io_image = np.mean(labels_io[idx[:10]])
ans = bool(io_image < 0.5)
return io_image, ans
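# The mean over the top-10 predicted categories' indoor/outdoor flags acts as a vote:
# since labels_io stores 0 for indoor and 1 for outdoor, a mean below 0.5 is reported as indoor.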
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0) # type: ignore
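# Example (illustrative): softmax(np.array([1.0, 2.0, 3.0])) is roughly [0.090, 0.245, 0.665];
# subtracting max(x) first avoids overflow in np.exp without changing the result.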
@torch.no_grad()
def run_model(
batch: List[torch.Tensor],
model,
hook,
classes: Tuple[str, ...],
labels_IO: np.ndarray,
labels_attribute: List[str],
W_attribute: np.ndarray,
num_cat: int,
num_attr: int,
weight_softmax: Optional[np.ndarray] = None,
) -> List[Dict]:
listing_ids, photo_ids, input_img = batch
# forward pass
logit = model.forward(input_img.cuda())
h_x = F.softmax(logit, 1)
detections = []
for i, p in enumerate(h_x): # type: ignore
listing_id = int(listing_ids[i])
photo_id = int(photo_ids[i])
key = get_key(listing_id, photo_id)
probs, idx = p.sort(0, True) # type: ignore
probs = probs.detach().cpu().numpy()
idx = idx.detach().cpu().numpy()
# scene category
category = [(probs[j], classes[idx[j]]) for j in range(0, num_cat)]
# output the scene attributes
ft = [np.squeeze(f[i]) for f in hook.features]
responses_attribute = softmax(W_attribute.dot(ft[1]))
idx_a = np.argsort(responses_attribute)
attributes = [
(responses_attribute[idx_a[j]], labels_attribute[idx_a[j]])
for j in range(-1, -num_attr, -1)
]
detections.append(
{
"listing_id": listing_id,
"photo_id": photo_id,
"category": category,
"attributes": attributes,
"is_indoor": is_indoor(idx, labels_IO),
}
)
# generate class activation mapping
if weight_softmax is not None:
ca_map = class_activation_map(ft[0], weight_softmax, [idx[0]])[0]
# render the CAM and output
img = NormalizeInverse()(input_img[i])
height, width, _ = img.shape # type: ignore
heatmap = cv2.applyColorMap( # type: ignore
cv2.resize(ca_map, (width, height)), cv2.COLORMAP_JET # type: ignore
)
result = heatmap * 0.4 + img * 0.5 # type: ignore
cv2.imwrite(f"examples/{key}-heatmap.jpg", result) # type: ignore
cv2.imwrite(f"examples/{key}-image.jpg", img[:, :, ::-1]) # type: ignore
hook.reset()
return detections
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
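# Example (illustrative): json.dumps({"probs": np.array([0.1, 0.9])}, cls=NumpyEncoder)
# serializes the array as a plain list instead of raising a TypeError.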
def save_json(data, filename: Union[str, Path]):
with open(filename, "w") as fid:
json.dump(data, fid, indent=2, cls=NumpyEncoder)
def detection(args: Arguments, proc_id: int, cache_dir: Union[Path, str]):
# load the labels
classes, labels_IO, labels_attribute, W_attribute = load_labels(cache_dir)
model = load_model(cache_dir)
hook = Hooker(model)
# load the transformer
# get the softmax weight
params = list(model.parameters())
if args.visualize:
weight_softmax = params[-2].data.numpy()
weight_softmax[weight_softmax < 0] = 0
else:
weight_softmax = None
photos = load_photos(args.images, cache_dir)
print("The dataset contains a total of", len(photos))
photos = photos[proc_id :: args.num_splits]
print("The split", proc_id, "over", args.num_splits, "contains", len(photos), "photos")
dataset = ImageDataset(photos)
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
num_workers=args.num_workers,
collate_fn=collate_fn, # type: ignore
)
model = model.cuda()
filename = args.outfile.parent / f"{args.outfile.stem}.{proc_id}.tsv"
print(f"Start split {proc_id} on {len(dataset)} photos")
with open(filename, "wt") as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter="\t", fieldnames=TSV_FIELDNAMES)
for batch in tqdm(dataloader):
if batch is None:
continue
detections = run_model(
batch,
model,
hook,
classes,
labels_IO,
labels_attribute,
W_attribute,
num_cat=args.num_cat,
num_attr=args.num_attr,
weight_softmax=weight_softmax,
)
for d in detections:
writer.writerow(d)
if __name__ == "__main__":
args = Arguments()
print(args.to_string())
cache_dir = Path.home() / ".cache" / args.outfile.parent.name
cache_dir.mkdir(exist_ok=True, parents=True)
start = max(local_rank, 0) + args.start
detection(args, start, cache_dir)
| 30.006897 | 98 | 0.626599 | [
"MIT"
] | airbert-vln/bnb-dataset | scripts/detect_room.py | 13,053 | Python |
'''
Task 1
Using a loop, print five lines of zeros, numbering each line.
'''
# for i in range(1, 6):
# print(f'{i} --', '0'*i)
#######
# print('num 1')
# i = 0
# while i < 5:
# i += 1
# print('line', i, 'is 0')
#
# print('')
#######
'''
Task 2
The user enters 10 digits in a loop. Count how many of the entered digits are 5.
'''
# amount = 0
# for i in range(10):
# n = input()
# if '5' in n:
# amount += 1
# print(amount)
#########
# print('num 2')
# sum = 0
# for i in range(10):
# answer = int(input('Напишите цифру: '))
# if answer == 5:
# sum += 1
#
# print('Количество 5 =', sum)
# print('')
#########
'''
Task 3
Find the sum of the series of numbers from 1 to 100. Print the result to the screen.
'''
# sum = 0
#
# for i in range(1,101):
# sum+=i
# print(sum)
#############
# print('num 3')
# sum = 0
# for i in range(1,101):
# sum+=i
# print(sum)
# print('')
################
'''
Task 4
Find the product of the series of numbers from 1 to 10. Print the result to the screen.
'''
# n = 1
# for i in range(2, 11):
# n *= i
# print(n)
################
# print('num 4')
# comp = 1
# for i in range(1,11):
# comp *= i
# print(comp)
# print('')
################
'''
Task 5
Print each digit of a number on its own line.
'''
# integer_number = 2129
#
# #print(integer_number%10,integer_number//10)
#
# while integer_number>0:
# print(integer_number%10)
# integer_number = integer_number//10
################
# # print('num 5')
# integer_number = 2129
# while integer_number>0:
# print(integer_number%10)
# integer_number = integer_number//10
# print('')
#
################
'''
Task 6
Find the sum of the digits of a number.
'''
# inter_number = 12345
# n = 0
# for i in str(inter_number):
# n += int(i)
# print(n)
################
# inter_number = 12345
# n = 0
# for i in str(inter_number):
# n += int(i)
# print(n)
################
'''
Task 7
Find the product of the digits of a number.
'''
# n = 1
# for i in str(inter_number):
# n *= int(i)
# print(n)
################
# inter_number = 12345
# x = 1
# for i in str(inter_number):
# x *= int(i)
# print(x)
################
'''
Task 8
Answer the question: is there a digit 5 among the digits of the number?
'''
# integer_number = 213413
# while integer_number>0:
# if integer_number%10 == 5:
# print('Yes')
# break
# integer_number = integer_number//10
# else: print('No')
################
# print('num 8')
# integer_number = 2134135
# while integer_number>0:
# if integer_number%10 == 5:
# print('Yes')
# break
# integer_number = integer_number//10
# else: print('No')
# print('')
################
'''
Task 9
Find the largest digit in a number.
'''
# inter_number = 59675
# maximum = 0
# # print(max(str(inter_number)))
# while inter_number != 0:
# if inter_number % 10 > maximum:
# maximum = inter_number % 10
# inter_number //= 10
# print(maximum)
################
# print('num 9')
# integer_number = 59675
# tmp = 0
# while integer_number>0:
# if integer_number%10 >= tmp:
# tmp = integer_number%10
# integer_number = integer_number//10
# print(tmp)
# print('')
################
'''
Task 10
Count how many times the digit 5 occurs in a number.
'''
# inter_number = 595675
# count = 0
# while inter_number != 0:
# if inter_number % 10 == 5:
# count += 1
# inter_number //= 10
# print(count)
################
# print('num 10')
# integer_number = 595675
# tmp = 0
# while integer_number>0:
# if integer_number%10 == 5:
# tmp += 1
# integer_number = integer_number//10
# print(tmp)
# print('')
################ | 16.902778 | 92 | 0.547247 | [
"MIT"
] | mgershevich/testl2 | example-l2.py | 4,165 | Python |
from django.contrib import admin
from .models import *
admin.site.register(Pokemon)
admin.site.register(PokemonEvolution)
admin.site.register(PokemonStat)
admin.site.register(Stat)
admin.site.register(EvolutionChain)
| 24.222222 | 37 | 0.830275 | [
"MIT"
] | JuanJTorres11/pokemon | search_pokemon/admin.py | 218 | Python |
from panda3d.core import CardMaker, Shader, Vec3, Vec2, NodePath, ColorBlendAttrib
from Section2SpaceflightDocking.Common import Common
import random
class Explosion():
cardMaker = None
@staticmethod
def getCard():
if Explosion.cardMaker is None:
Explosion.cardMaker = CardMaker("explosion maker")
Explosion.cardMaker.setFrame(-1, 1, -1, 1)
explosionCard = NodePath(Explosion.cardMaker.generate())
return explosionCard
def __init__(self, size, shaderName, shaderInputs, inputTextureName, randomVal1, randomVal2):
self.explosionCard = Explosion.getCard()
self.explosionCard.setScale(size)
self.explosionCard.setBin("unsorted", 1)
self.explosionCard.setDepthWrite(False)
self.explosionCard.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne))
self.explosionCard.setBillboardPointEye()
shader = Shader.load(Shader.SL_GLSL,
"../Section2SpaceflightDocking/Shaders/{0}Vertex.glsl".format(shaderName),
"../Section2SpaceflightDocking/Shaders/{0}Fragment.glsl".format(shaderName))
self.explosionCard.setShader(shader)
for inputName, inputValue in shaderInputs.items():
self.explosionCard.setShaderInput(inputName, inputValue)
self.explosionCard.setShaderInput("sourceTex1", Common.framework.showBase.loader.loadTexture("../Section2SpaceflightDocking/Shaders/{0}1.png".format(inputTextureName)))
self.explosionCard.setShaderInput("sourceTex2", Common.framework.showBase.loader.loadTexture("../Section2SpaceflightDocking/Shaders/{0}2.png".format(inputTextureName)))
self.explosionCard.setShaderInput("randomisation1", randomVal1)
self.explosionCard.setShaderInput("randomisation2", randomVal2)
self.calcFullDuration(shaderInputs)
self.startTime = -1000
self.explosionCard.setShaderInput("startTime", self.startTime)
self.velocity = Vec3(0, 0, 0)
def calcFullDuration(self, shaderInputs):
self.duration = 0
if "duration" in shaderInputs:
self.duration += shaderInputs["duration"]
if "starDuration" in shaderInputs:
self.duration += shaderInputs["starDuration"]
def activate(self, velocity, pos):
self.startTime = globalClock.getRealTime()
self.explosionCard.setShaderInput("startTime", self.startTime)
self.velocity = velocity
self.explosionCard.reparentTo(Common.framework.showBase.render)
self.explosionCard.setPos(pos)
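    # Minimal usage sketch (argument values are assumptions, not taken from the game code):
    #   explosion = Explosion(7.0, "explosion", {"duration": 1.25}, "explosion",
    #                         random.uniform(0, 1), random.uniform(0, 1))
    #   explosion.activate(Vec3(0, 0, 0), Vec3(0, 50, 0))
    #   # then call update(dt) every frame and cleanup() once isAlive() returns False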
def update(self, dt):
self.explosionCard.setPos(self.explosionCard.getPos() + self.velocity*dt)
def isAlive(self):
return (globalClock.getRealTime() - self.startTime) < (self.duration)
def cleanup(self):
if self.explosionCard is not None:
self.explosionCard.removeNode()
self.explosionCard = None | 41.246575 | 176 | 0.698107 | [
"BSD-3-Clause"
] | P3D-Space-Tech-Demo/Section-2-Spaceflight-and-Docking | Explosion.py | 3,011 | Python |
'''Write a program that reads a sentence from the keyboard and shows how many times the letter "A" appears,
at which position it appears for the first time, and at which position it appears for the last time.'''
frase=str(input('Digite uma frase: ')).upper().strip()
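# Worked example (assumed input): typing 'Aprender a programar' reports that 'A' appears
# 4 times, first at position 1 and last at position 19.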
print('a letra A aparece {} vezes'.format(frase.count('A')))
print('ela aparece a primeira vez na posição: {}'.format(frase.find('A')+1))
print('ela aparece pela última vez na posição: {}'.format(frase.rfind('A')+1)) | 76 | 96 | 0.714912 | [
"MIT"
] | cassiakaren/Manipulando-Textos | ex26.py | 470 | Python |
from django.shortcuts import render, redirect
from django.core.urlresolvers import reverse
from .forms import EventCreateForm, NoteCreateForm, PropertyCreateForm, FileUploadForm,AlertCreateForm
from models import Event, Property, Note, File, Alert
from wsgiref.util import FileWrapper
from django.http import HttpResponse, HttpResponseBadRequest
from django.contrib.auth.decorators import login_required
import datetime, mimetypes
# Create your views here.
@login_required(login_url='/accounts/login/')
def index(request):
return render(request, "main/base.html")
@login_required(login_url='/accounts/login/')
def dashboard(request):
return render(request, "main/base.html")
def properties(request):
context = {
"properties" : Property.objects.filter(user = request.user)
}
return render (request, "main/properties.html", context)
def sidebar(request):
today = datetime.datetime.now()
yesterday = today - datetime.timedelta(days=1)
print today
alerts = Alert.objects.filter(event__property__user=request.user, when__gt=yesterday).order_by("when")[:5]
notes = Note.objects.filter(event__property__user=request.user).order_by("-created_at")[:5]
for alert in alerts:
print alert.when
context = {
"alerts" : alerts,
"notes" : notes,
"today" : today
}
return render(request, "main/sidebar.html", context)
def add_property(request):
if request.method == "POST":
prop_form = PropertyCreateForm(request.POST)
if prop_form.is_valid():
prop = prop_form.save(commit=False)
prop.user = request.user
prop.save()
context = {
"properties" : Property.objects.filter(user = request.user)
}
print "valid form"
return HttpResponse(prop.id)
else:
context={
'form':prop_form
}
print "invalid form"
return HttpResponseBadRequest(render (request,'main/add_property.html',context))
else:
context={
'form':PropertyCreateForm()
}
print "GET"
return render(request,'main/add_property.html',context)
def event(request,event_id, prop_id):
property = Property.objects.get(pk=prop_id)
event = Event.objects.get(pk=event_id)
notes = event.note_set.all()
alerts = event.alert_set.all()
context={
'prop_id':prop_id,
'event':event,
"notes":notes,
'alerts':alerts,
'property':property
}
return render(request, 'main/event.html',context)
def events(request, prop_id):
property = Property.objects.get(pk=prop_id)
events = property.event_set.all()
context={
'property' : property,
'events':events
}
return render(request, 'main/events.html',context)
def get_file(request,file_id):
file = File.objects.get(pk=file_id)
mimetype = mimetypes.guess_type(file.docfile.name)
response = HttpResponse(content_type=mimetype[0])
response['Content-Disposition'] = 'inline; filename=' + file.docfile.name.split('/')[-1]
response['Accept-Ranges'] = 'bytes'
response['Content-Length'] = file.docfile.size
response.write(file.docfile.read())
return response
def add_event(request, prop_id):
if request.POST:
event_form = EventCreateForm(request.POST)
if event_form.is_valid():
event = event_form.save(commit=False)
event.property = Property.objects.get(pk=prop_id)
event.save()
return HttpResponse(event.id)
else:
context={
'form':event_form,
'prop_id':prop_id
}
return HttpResponseBadRequest(render (request,'main/add_event.html',context))
else:
context={
'form':EventCreateForm(),
'prop_id':prop_id
}
return render(request,'main/add_event.html',context)
def note(request,event_id,prop_id,note_id):
note = Note.objects.get(pk=note_id)
documents = note.file_set.all()
docNames = []
for document in documents:
docNames.append((document.id,document.docfile.name.split('/')[-1]))
print docNames
form = FileUploadForm()
property = Property.objects.get(pk=prop_id)
event = Event.objects.get(pk=event_id)
context={'form':form, 'documents': documents,'event_id':event_id,
'prop_id':prop_id,"note_id":note.id, 'note':note, 'event':event, 'property':property, "docNames":docNames}
return render(request, 'main/note.html', context)
def notes(request,event_id,prop_id):
event = Event.objects.get(pk=event_id)
notes = event.note_set.all()
context={
'event_id':event_id,
'prop_id':prop_id,
}
return render(request, 'main/note.html', context)
def add_note(request,prop_id, event_id):
if request.POST:
note_form = NoteCreateForm(request.POST)
if note_form.is_valid():
note = note_form.save(commit=False)
note.event = Event.objects.get(pk=event_id)
note.save()
return HttpResponse(note.id)
else:
context={
'form':note_form,
'prop_id':prop_id,
'event_id':event_id
}
return HttpResponseBadRequest(render (request,'main/add_note.html',context))
else:
context={
'form':NoteCreateForm(),
'prop_id':prop_id,
'event_id':event_id
}
return render(request,'main/add_note.html',context)
def update_note(request,prop_id, event_id):
print ('update')
if request.POST:
name = request.POST['name']
        comment = request.POST['comment']
note = Event.objects.get(pk=event_id)
note.name=name
note.comment=comment
note.save()
return HttpResponse(note.id)
def add_file(request,prop_id, event_id, note_id):
if request.method == 'POST':
form = FileUploadForm(request.POST, request.FILES)
note = Note.objects.get(pk=note_id)
if form.is_valid():
newdoc = File(docfile=request.FILES['docfile'] )
newdoc.note = note
newdoc.save()
return HttpResponse("added file")
else:
form = FileUploadForm()
documents = File.objects.all()
context={'form':form, 'documents': documents,'event_id':event_id,
'prop_id':prop_id,"note_id":note_id}
return HttpResponseBadRequest(render (request,'main/note.html',context))
def alert(request,event_id,prop_id,alert_id):
alert = Alert.objects.get(pk=alert_id)
form = AlertCreateForm()
property = Property.objects.get(pk=prop_id)
event = Event.objects.get(pk=event_id)
context={'form':form, 'event_id':event_id,
'prop_id':prop_id,"alert_id":alert.id, 'alert':alert, 'property':property, 'event':event}
return render(request, 'main/alert.html', context)
def add_alert(request,prop_id, event_id):
if request.POST:
alert_form = AlertCreateForm(request.POST)
if alert_form.is_valid():
alert = alert_form.save(commit=False)
alert.event = Event.objects.get(pk=event_id)
alert.save()
return HttpResponse(alert.id)
else:
context={
'form':alert_form,
'prop_id':prop_id,
'event_id':event_id
}
return HttpResponseBadRequest(render (request,'main/add_alert.html',context))
else:
context={
'form':AlertCreateForm(),
'prop_id':prop_id,
'event_id':event_id
}
return render(request,'main/add_alert.html',context)
| 29.012987 | 107 | 0.727096 | [
"MIT"
] | davidhorst/filecabinet | apps/main/views.py | 6,702 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_monitor_http
short_description: Manages F5 BIG-IP LTM http monitors
description: Manages F5 BIG-IP LTM http monitors.
version_added: 2.5
options:
name:
description:
- Monitor name.
required: True
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(http)
parent on the C(Common) partition.
default: "/Common/http"
send:
description:
- The send string for the monitor call. When creating a new monitor, if
this value is not provided, the default C(GET /\r\n) will be used.
receive:
description:
- The receive string for the monitor call.
receive_disable:
description:
- This setting works like C(receive), except that the system marks the node
or pool member disabled when its response matches the C(receive_disable)
string but not C(receive). To use this setting, you must specify both
C(receive_disable) and C(receive).
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'. Note that if specifying an IP address, a value between 1 and 65535
must be specified.
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run. If this parameter is not provided when creating
a new monitor, then the default value will be 5. This value B(must)
be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. If this parameter is not
provided when creating a new monitor, then the default value will be 16.
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. If this parameter is not provided when creating
a new monitor, then the default value will be 0.
target_username:
description:
- Specifies the user name, if the monitored target requires authentication.
target_password:
description:
- Specifies the password, if the monitored target requires authentication.
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
version_added: 2.5
notes:
- Requires BIG-IP software version >= 12
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create HTTP Monitor
bigip_monitor_http:
state: present
ip: 10.10.10.10
server: lb.mydomain.com
user: admin
password: secret
name: my_http_monitor
delegate_to: localhost
- name: Remove HTTP Monitor
bigip_monitor_http:
state: absent
server: lb.mydomain.com
user: admin
password: secret
name: my_http_monitor
delegate_to: localhost
- name: Include a username and password in the HTTP monitor
bigip_monitor_http:
state: absent
server: lb.mydomain.com
user: admin
password: secret
name: my_http_monitor
target_username: monitor_user
target_password: monitor_pass
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: string
sample: http
ip:
description: The new IP of IP/port definition.
returned: changed
type: string
sample: 10.12.13.14
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
time_until_up:
description: The new time in which to mark a system as up after first successful response.
returned: changed
type: int
sample: 2
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import is_valid_ip
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import is_valid_ip
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'timeUntilUp': 'time_until_up',
'defaultsFrom': 'parent',
'recv': 'receive',
'recvDisable': 'receive_disable'
}
api_attributes = [
'timeUntilUp', 'defaultsFrom', 'interval', 'timeout', 'recv', 'send',
'destination', 'username', 'password', 'recvDisable'
]
returnables = [
'parent', 'send', 'receive', 'ip', 'port', 'interval', 'timeout',
'time_until_up', 'receive_disable'
]
updatables = [
'destination', 'send', 'receive', 'interval', 'timeout', 'time_until_up',
'target_username', 'target_password', 'receive_disable'
]
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@destination.setter
def destination(self, value):
ip, port = value.split(':')
self._values['ip'] = ip
self._values['port'] = port
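    # Illustrative round trip: ip='10.12.13.14' with port=80 yields the destination string
    # '10.12.13.14:80', and assigning that string back through the setter restores ip and port.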
@property
def interval(self):
if self._values['interval'] is None:
return None
# Per BZ617284, the BIG-IP UI does not raise a warning about this.
# So I do
        if not 1 <= int(self._values['interval']) <= 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self):
if self._values['ip'] is None:
return None
if self._values['ip'] in ['*', '0.0.0.0']:
return '*'
elif is_valid_ip(self._values['ip']):
return self._values['ip']
else:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
@property
def time_until_up(self):
if self._values['time_until_up'] is None:
return None
return int(self._values['time_until_up'])
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
@property
def type(self):
return 'http'
@property
def username(self):
return self._values['target_username']
@property
def password(self):
return self._values['target_password']
class Changes(Parameters):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
result = self.__default(param)
return result
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
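    # For example, updating only interval=20 against an existing timeout of 16 is rejected,
    # because the monitor's interval must stay strictly below its timeout.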
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = Parameters(params=self.module.params)
self.changes = Changes()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = Changes(params=changed)
return True
return False
def _announce_deprecations(self):
warnings = []
if self.want:
warnings += self.want._values.get('__warnings', [])
if self.have:
warnings += self.have._values.get('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations()
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.want.timeout is None:
self.want.update({'timeout': 16})
if self.want.interval is None:
self.want.update({'interval': 5})
if self.want.time_until_up is None:
self.want.update({'time_until_up': 0})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
if self.want.send is None:
self.want.update({'send': 'GET /\r\n'})
if self.module.check_mode:
return True
self.create_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the monitor.")
return True
def read_current_from_device(self):
resource = self.client.api.tm.ltm.monitor.https.http.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return Parameters(params=result)
def exists(self):
result = self.client.api.tm.ltm.monitor.https.http.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.ltm.monitor.https.http.load(
name=self.want.name,
partition=self.want.partition
)
result.modify(**params)
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.ltm.monitor.https.http.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def remove_from_device(self):
result = self.client.api.tm.ltm.monitor.https.http.load(
name=self.want.name,
partition=self.want.partition
)
if result:
result.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/http'),
send=dict(),
receive=dict(),
receive_disable=dict(required=False),
ip=dict(),
port=dict(type='int'),
interval=dict(type='int'),
timeout=dict(type='int'),
time_until_up=dict(type='int'),
target_username=dict(),
target_password=dict(no_log=True),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| 31.276271 | 92 | 0.615618 | [
"Apache-2.0"
] | meverett1167/Ansible_Demos | f5-ansible/library/modules/bigip_monitor_http.py | 18,453 | Python |
def write(path, content):
with open(path, "a+") as dst_file:
dst_file.write(content + '\n')
def read2mem(path):
with open(path) as f:
content = ''
while 1:
try:
lines = f.readlines(100)
except UnicodeDecodeError:
                # stop on undecodable content; closing the handle and continuing
                # would make the next readlines() call fail on a closed file
                break
if not lines:
break
for line in lines:
content += line
return content | 24.578947 | 40 | 0.466809 | [
"Apache-2.0"
] | dachmx/tfnotes | GDLnotes/src/util/file_helper.py | 467 | Python |
import argparse
import os
from solver import Solver
from data_loader import *
from torch.backends import cudnn
from torch.utils import data
from torchvision import transforms as T
def main(config):
cudnn.benchmark = True
if not os.path.exists(config.model_path):
os.makedirs(config.model_path )
if not os.path.exists(config.result_path):
os.makedirs(config.result_path)
config.result_path = os.path.join(config.result_path)
if not os.path.exists(config.result_path):
os.makedirs(config.result_path)
if config.train_dataset == 'african':
config.img_size = (640, 640)
elif config.train_dataset == 'asian':
config.img_size = (640, 480)
elif config.train_dataset == 'mobile':
config.img_size = (384, 384)
print(config)
train_loader=data.DataLoader(Train_dataset(root=config.train_path, dataset_type=config.train_dataset,img_size = config.img_size ,transform=traindata_augmentation,mode='train'),
batch_size=config.batch_size,
shuffle=True,
num_workers=config.num_workers)
valid_loader=data.DataLoader(Train_valid_dataset(root=config.valid_path, dataset_type=config.train_dataset,img_size = config.img_size ,transform=testdata_augmentation,mode='valid'),
batch_size=config.batch_size,
shuffle=True,
num_workers=config.num_workers)
if config.test_mode == 1:
test1_loader=data.DataLoader(Test1_dataset(root=config.test_path, dataset_type=config.train_dataset,img_size = config.img_size ,transform=testdata_augmentation,mode='test'),
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_workers)
elif config.test_mode == 2:
test2_loader=data.DataLoader(Test2_dataset(root=config.test_path, dataset_type=config.train_dataset,img_size = config.img_size ,transform=testdata_augmentation,mode='test'),
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_workers)
if config.test_mode == 1:
solver = Solver(config, train_loader, valid_loader, test1_loader)
elif config.test_mode == 2:
solver = Solver(config, train_loader, valid_loader, test2_loader)
if config.mode == 'train':
solver.train()
elif config.mode == 'test' and config.test_mode == 1:
solver.test_1()
elif config.mode == 'test' and config.test_mode == 2:
solver.test_2()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--num_epochs', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--num_workers', type=int, default=2)
parser.add_argument('--lr', type=float, default=0.0002)
parser.add_argument('--beta1', type=float, default=0.9) # momentum1 in Adam
parser.add_argument('--beta2', type=float, default=0.999) # momentum2 in Adam
parser.add_argument('--mode', type=str, default='train')
parser.add_argument('--model_path', type=str, default='./models/african_best.pth')
parser.add_argument('--img_size', type=tuple, default=(640, 640))
parser.add_argument('--train_path', type=str, default='./dataset/train/')
parser.add_argument('--valid_path', type=str, default='./dataset/valid/')
parser.add_argument('--test_path', type=str, default='./dataset/test/')
    parser.add_argument('--test_mode', type=int, default=1, help='1 or 2')  # if test_mode == 1, evaluation metrics are computed during testing; if == 2, they are not
parser.add_argument('--result_path', type=str, default='./result/')
parser.add_argument('--train_dataset', type=str, default='african', help='choose train datasets, african, asian of mobile')
config = parser.parse_args()
main(config)
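# Example invocation (paths and values are illustrative):
#   python main.py --mode train --train_dataset african --batch_size 4 --train_path ./dataset/train/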
| 48.678571 | 185 | 0.652482 | [
"Apache-2.0"
] | sweezin/PI-DECODER | main.py | 4,135 | Python |
from alphafold2.util import *
from alphafold2.modules import * | 31 | 32 | 0.822581 | [
"BSD-3-Clause"
] | brennanaba/alphafold2-pytorch | alphafold2/__init__.py | 62 | Python |
import os
import shutil
from thlib.side.Qt import QtWidgets as QtGui
from thlib.side.Qt import QtGui as Qt4Gui
from thlib.side.Qt import QtCore
from thlib.environment import env_inst, env_tactic, cfg_controls, env_read_config, env_write_config, dl
import thlib.global_functions as gf
import thlib.tactic_classes as tc
from thlib.ui.misc.ui_watch_folders import Ui_ProjectWatchFolder
class Ui_projectWatchFoldersWidget(QtGui.QDialog, Ui_ProjectWatchFolder):
def __init__(self, project, parent=None):
super(self.__class__, self).__init__(parent=parent)
self.project = project
self.watch_folders_dict = self.get_watch_folders_dict()
self.watched_items = set()
env_inst.watch_folders[self.project.get_code()] = self
self.setupUi(self)
self.create_ui()
def create_ui(self):
self.watchFoldersTreeWidget.setStyleSheet('QTreeView::item {padding: 2px;}')
self.setSizeGripEnabled(True)
self.setWindowTitle('Watched Assets for Project: {0}'.format(self.project.info.get('title')))
self.create_fs_watcher()
self.create_watch_folders_tree_context_menu()
self.controls_actions()
self.readSettings()
self.watchEnabledCheckBox.setEnabled(False)
def create_fs_watcher(self):
self.fs_watcher = gf.FSObserver()
self.fs_watcher.set_created_signal(self.handle_watch_created_event)
def create_watch_folders_tree_context_menu(self):
self.watchFoldersTreeWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.watchFoldersTreeWidget.customContextMenuRequested.connect(self.open_menu)
def watch_items_menu(self):
# TODO Make this work
enable_watch = QtGui.QAction('Enable Watch', self.watchFoldersTreeWidget)
enable_watch.setIcon(gf.get_icon('eye'))
# enable_watch.triggered.connect(self.open_file_from_tree)
disable_watch = QtGui.QAction('Disable Watch', self.watchFoldersTreeWidget)
disable_watch.setIcon(gf.get_icon('eye-slash'))
# disable_watch.triggered.connect(self.open_file_from_tree)
edit_watch = QtGui.QAction('Edit Watch', self.watchFoldersTreeWidget)
edit_watch.setIcon(gf.get_icon('edit'))
# edit_watch.triggered.connect(self.open_file_from_tree)
delete_watch = QtGui.QAction('Delete Watch', self.watchFoldersTreeWidget)
delete_watch.setIcon(gf.get_icon('remove'))
# edit_watch.triggered.connect(self.open_file_from_tree)
menu = QtGui.QMenu()
menu.addAction(enable_watch)
menu.addAction(disable_watch)
menu.addAction(edit_watch)
menu.addAction(delete_watch)
return menu
def open_menu(self):
item = self.watchFoldersTreeWidget.currentItem()
if item:
if item.data(0, QtCore.Qt.UserRole):
menu = self.watch_items_menu()
if menu:
menu.exec_(Qt4Gui.QCursor.pos())
def add_item_to_fs_watch(self, skey, path=None, recursive=True):
watch_dict = self.get_watch_dict_by_skey(skey)
if not path:
path = watch_dict['path']
paths = []
for repo in watch_dict['rep']:
abs_path = env_tactic.get_base_dir(repo)['value'][0] + '/' + path
paths.append(gf.form_path(abs_path))
self.fs_watcher.append_watch(watch_name=skey, paths=paths, repos=watch_dict['rep'], pipeline=watch_dict['asset_pipeline'], recursive=recursive)
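    # e.g. (hypothetical values) a repository base dir of "D:/assets" combined with a relative
    # watch path of "props/chair/watch" is observed as "D:/assets/props/chair/watch".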
def remove_item_from_fs_watch(self, skey):
self.fs_watcher.remove_watch(watch_name=skey)
def handle_watch_created_event(self, event, watch):
dl.log(u'File dropped to watch folder {}'.format(event.src_path), group_id='watch_folder')
self.show()
self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
QtGui.QDialog.activateWindow(self)
self.show()
self.hide()
search_key = watch.watch_name
pipeline = watch.pipeline
commit_path = gf.extract_dirname(event.src_path)
if watch.path == commit_path:
context = 'publish'
else:
context = gf.form_path(commit_path, 'linux').split('/')[-1]
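        # e.g. a file dropped into "<watch root>/modeling" is checked in under the "modeling"
        # context, while a file dropped directly into the watch root goes to "publish"
        # (folder names here are hypothetical).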
description = 'From watch folder'
skey_dict = tc.split_search_key(search_key)
checkin_widget = env_inst.get_check_tree(
project_code=skey_dict['project_code'],
tab_code='checkin_out',
wdg_code=skey_dict['pipeline_code'],
)
checkin_widget.do_creating_ui()
match_template = gf.MatchTemplate(['$FILENAME.$EXT'])
files_objects_dict = match_template.get_files_objects([event.src_path])
stypes = self.project.get_stypes()
current_stype = stypes.get(skey_dict['pipeline_code'])
pipelines = current_stype.get_pipeline()
checkin_mode = None
if pipelines:
# here we do pipelines routine
current_pipeline = pipelines.get(pipeline)
if not current_pipeline:
                # looks like we don't have a pipeline matching the Search Type name, so take the first one
                # Also, this is not really correct: we should know the exact pipeline and its processes, so a proper pipeline_code needs to be written when creating the watch folder
current_pipeline = list(pipelines.values())[0]
current_process = current_pipeline.get_pipeline_process(context)
if current_process:
checkin_mode = current_process.get('checkin_mode')
else:
context = 'publish'
checkin_widget.checkin_file_objects(
search_key=search_key,
context=context,
description=description,
files_objects=files_objects_dict.get('file'),
checkin_type=checkin_mode,
keep_file_name=False
)
else:
# here we go with publish, without pipeline
checkin_widget.checkin_file_objects(
search_key=search_key,
context='publish',
description=description,
files_objects=files_objects_dict.get('file'),
checkin_type=checkin_mode,
keep_file_name=False
)
def controls_actions(self):
pass
def fill_watch_folders_tree_widget(self):
self.watchFoldersTreeWidget.clear()
if self.watch_folders_dict:
for i, asset_skey in enumerate(self.watch_folders_dict.get('assets_skeys')):
root_item = QtGui.QTreeWidgetItem()
root_item.setData(0, QtCore.Qt.UserRole, asset_skey)
root_item.setText(1, self.watch_folders_dict['assets_stypes'][i])
root_item.setText(2, self.watch_folders_dict['assets_names'][i])
repos_names = []
for repo in self.watch_folders_dict['repos'][i]:
repos_names.append(env_tactic.get_base_dir(repo)['value'][1])
root_item.setText(3, ', '.join(repos_names))
# setting actual watch status
if self.watch_folders_dict['statuses'][i]:
if self.check_for_item_in_watch(asset_skey):
root_item.setText(0, 'Watching')
self.start_watch_by_skey(asset_skey)
else:
root_item.setText(0, 'Waiting')
else:
root_item.setText(0, 'Stopped')
self.stop_watch_by_skey(asset_skey)
self.watchFoldersTreeWidget.addTopLevelItem(root_item)
self.watchFoldersTreeWidget.resizeColumnToContents(0)
self.watchFoldersTreeWidget.resizeColumnToContents(1)
self.watchFoldersTreeWidget.resizeColumnToContents(2)
self.watchFoldersTreeWidget.resizeColumnToContents(3)
if self.watched_items:
self.start_watching()
else:
self.stop_watching()
def start_watching(self):
if not self.fs_watcher.is_started():
self.fs_watcher.start()
def stop_watching(self):
if self.fs_watcher.is_started():
self.fs_watcher.stop()
def stop_watch_by_skey(self, skey):
for item in self.watched_items:
if item.get_search_key() == skey:
self.remove_item_from_fs_watch(skey)
item.watchFolderToolButton.setChecked(False)
def start_watch_by_skey(self, skey):
for item in self.watched_items:
if item.get_search_key() == skey:
self.add_item_to_fs_watch(skey, item.get_watch_folder_path(), True)
item.watchFolderToolButton.setChecked(True)
def check_for_item_in_watch(self, skey):
for item in self.watched_items:
if item.get_search_key() == skey:
if item.is_have_watch_folder():
return True
def add_item_to_watch(self, sobject_item):
# checking if watch folder exists
watch_dict = self.get_watch_dict_by_skey(sobject_item.get_search_key())
all_folders_exists = True
base_dirs = env_tactic.get_all_base_dirs()
for key, val in base_dirs:
if val['value'][4] and val['value'][3] in watch_dict['rep']:
abs_path = u'{0}/{1}'.format(val['value'][0], watch_dict['path'])
if not os.path.exists(gf.form_path(abs_path)):
all_folders_exists = False
dl.warning('Folders structure for: {0} is not created. '
'Watch will be ignored.'.format(abs_path),
group_id='watch_folders_ui')
if all_folders_exists:
self.watched_items.add(sobject_item)
self.fill_watch_folders_tree_widget()
def remove_item_from_watch(self, sobject_item):
self.watched_items.discard(sobject_item)
def add_asset_to_watch(self, sobject_item):
        # double-check here to guard against a stale watch-folder state
if not self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key()):
self.create_repo_editor_ui(sobject_item)
else:
sobject_item.check_watch_folder()
def edit_aseet_watch(self, sobject_item):
watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())
if watch_dict:
self.create_repo_editor_ui(sobject_item, mode='edit')
else:
sobject_item.check_watch_folder(True)
def delete_aseet_from_watch(self, sobject_item):
watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())
if watch_dict:
self.delete_watch_from_watch_folders_dict(sobject_item)
else:
sobject_item.check_watch_folder(True)
@gf.catch_error
def create_watch_folders(self, repos_list, sobject_item):
# creating base folders with paths
for repo in repos_list:
abs_path = env_tactic.get_base_dir(repo)['value'][0] + '/' + sobject_item.get_watch_folder_path()
# creating folder for publish
if not os.path.exists(gf.form_path(abs_path)):
os.makedirs(gf.form_path(abs_path))
# creating folders by processes
if sobject_item.get_process_list(include_hierarchy=True):
for process in sobject_item.get_process_list(include_hierarchy=True):
process_abs_path = abs_path + '/' + process
if not os.path.exists(gf.form_path(process_abs_path)):
os.makedirs(gf.form_path(process_abs_path))
@gf.catch_error
def delete_watch_folders_and_files(self, repos_list, sobject_item):
def onerror(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
# else:
# raise
for repo in repos_list:
abs_path = env_tactic.get_base_dir(repo)['value'][0] + '/' + sobject_item.get_watch_folder_path()
if os.path.exists(gf.form_path(abs_path)):
shutil.rmtree(gf.form_path(abs_path), ignore_errors=True, onerror=onerror)
def add_watch_to_watch_folders_dict(self, repos_list, sobject_item):
self.watch_folders_dict['assets_names'].append(sobject_item.get_title())
self.watch_folders_dict['assets_codes'].append(sobject_item.sobject.info.get('code'))
self.watch_folders_dict['assets_stypes'].append(sobject_item.stype.get_pretty_name())
self.watch_folders_dict['assets_skeys'].append(sobject_item.sobject.get_search_key())
self.watch_folders_dict['assets_pipelines'].append(sobject_item.sobject.get_pipeline_code())
self.watch_folders_dict['paths'].append(sobject_item.get_watch_folder_path())
self.watch_folders_dict['repos'].append(repos_list)
self.watch_folders_dict['statuses'].append(True)
self.create_watch_folders(repos_list, sobject_item)
sobject_item.check_watch_folder()
self.writeSettings()
def save_watch_to_watch_folders_dict(self, repos_list, sobject_item):
watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())
if watch_dict:
idx = watch_dict['idx']
self.watch_folders_dict['assets_names'][idx] = sobject_item.get_title()
self.watch_folders_dict['assets_codes'][idx] = sobject_item.sobject.info.get('code')
self.watch_folders_dict['assets_stypes'][idx] = sobject_item.stype.get_pretty_name()
self.watch_folders_dict['assets_skeys'][idx] = sobject_item.sobject.get_search_key()
self.watch_folders_dict['assets_pipelines'][idx] = sobject_item.sobject.get_pipeline_code()
self.watch_folders_dict['paths'][idx] = sobject_item.get_watch_folder_path()
self.watch_folders_dict['repos'][idx] = repos_list
self.create_watch_folders(repos_list, sobject_item)
sobject_item.check_watch_folder()
self.writeSettings()
def edit_watch_to_watch_folders_dict(self, sobject_item, asset_name=None, asset_code=None,asset_stype=None,
asset_skey=None, asset_pipeline=None, path=None, repo=None, status=False):
watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())
if watch_dict:
idx = watch_dict['idx']
if asset_name:
self.watch_folders_dict['assets_names'][idx] = sobject_item.get_title()
if asset_code:
self.watch_folders_dict['assets_codes'][idx] = sobject_item.sobject.info.get('code')
if asset_stype:
self.watch_folders_dict['assets_stypes'][idx] = sobject_item.stype.get_pretty_name()
if asset_skey:
self.watch_folders_dict['assets_skeys'][idx] = sobject_item.sobject.get_search_key()
if asset_pipeline:
self.watch_folders_dict['assets_pipelines'][idx] = sobject_item.sobject.get_pipeline_code()
if path:
self.watch_folders_dict['paths'][idx] = path
if repo:
self.watch_folders_dict['repos'][idx] = repo
self.watch_folders_dict['statuses'][idx] = status
sobject_item.check_watch_folder()
self.fill_watch_folders_tree_widget()
self.writeSettings()
def delete_watch_from_watch_folders_dict(self, sobject_item):
buttons = (('Remove', QtGui.QMessageBox.YesRole), ('Keep', QtGui.QMessageBox.ActionRole), ('Cancel', QtGui.QMessageBox.NoRole))
reply = gf.show_message_predefined(
'Remove Watch Folder dirs from repos?',
'Watch Folder Directories and Files can also be removed from Your Repositories'
'<br>Remove or Keep this Dirs and Files?</br>',
buttons=buttons,
message_type='question'
)
delete_files = False
delete_watch_folder = False
if reply == QtGui.QMessageBox.YesRole:
delete_files = True
delete_watch_folder = True
elif reply == QtGui.QMessageBox.ActionRole:
delete_files = False
delete_watch_folder = True
if delete_watch_folder:
self.stop_watch_by_skey(sobject_item.sobject.get_search_key())
idx = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())['idx']
self.watch_folders_dict['assets_names'].pop(idx)
self.watch_folders_dict['assets_codes'].pop(idx)
self.watch_folders_dict['assets_stypes'].pop(idx)
self.watch_folders_dict['assets_skeys'].pop(idx)
self.watch_folders_dict['assets_pipelines'].pop(idx)
self.watch_folders_dict['paths'].pop(idx)
repos = self.watch_folders_dict['repos'].pop(idx)
self.watch_folders_dict['statuses'].pop(idx)
sobject_item.check_watch_folder(True)
self.writeSettings()
if delete_files:
self.delete_watch_folders_and_files(repos, sobject_item)
def create_repo_editor_ui(self, sobject_item, mode='create'):
add_watch_ui = Ui_repositoryEditorWidget(sobject_item=sobject_item, mode=mode, parent=env_inst.ui_main)
add_watch_ui.saved_signal.connect(self.add_watch_to_watch_folders_dict)
add_watch_ui.edited_signal.connect(self.save_watch_to_watch_folders_dict)
add_watch_ui.exec_()
def set_watch_folders_from_dict(self, watch_folders_dict=None):
if watch_folders_dict:
print('FILLING WATCH FOLDER')
def get_watch_dict_by_skey(self, skey):
if self.watch_folders_dict:
for i, asset_skey in enumerate(self.watch_folders_dict.get('assets_skeys')):
if skey == asset_skey:
return {
'asset_code': self.watch_folders_dict['assets_codes'][i],
'asset_name': self.watch_folders_dict['assets_names'][i],
'asset_stype': self.watch_folders_dict['assets_stypes'][i],
'asset_skey': self.watch_folders_dict['assets_skeys'][i],
'asset_pipeline': self.watch_folders_dict['assets_pipelines'][i],
'path': self.watch_folders_dict['paths'][i],
'rep': self.watch_folders_dict['repos'][i],
'status': self.watch_folders_dict['statuses'][i],
'idx': i,
}
@staticmethod
def get_watch_folders_dict():
return {
'assets_codes': [],
'assets_names': [],
'assets_stypes': [],
'assets_skeys': [],
'assets_pipelines': [],
'paths': [],
'repos': [],
'statuses': [],
}
def set_settings_from_dict(self, settings_dict=None):
ref_settings_dict = {
'watch_folders_dict': self.watch_folders_dict,
}
settings = gf.check_config(ref_settings_dict, settings_dict)
self.watch_folders_dict = settings['watch_folders_dict']
def get_settings_dict(self):
settings_dict = {
'watch_folders_dict': self.watch_folders_dict,
}
return settings_dict
def readSettings(self):
self.set_settings_from_dict(env_read_config(
filename='ui_watch_folder',
unique_id='ui_main/{0}/{1}'.format(self.project.get_type(), self.project.get_code()),
long_abs_path=True))
def writeSettings(self):
env_write_config(
self.get_settings_dict(),
filename='ui_watch_folder',
unique_id='ui_main/{0}/{1}'.format(self.project.get_type(), self.project.get_code()),
long_abs_path=True)
def showEvent(self, event):
event.accept()
self.fill_watch_folders_tree_widget()
def closeEvent(self, event):
self.writeSettings()
event.accept()
class Ui_repositoryEditorWidget(QtGui.QDialog):
saved_signal = QtCore.Signal(object, object)
edited_signal = QtCore.Signal(object, object)
def __init__(self, sobject_item, mode='create', parent=None):
super(self.__class__, self).__init__(parent=parent)
self.sobject_item = sobject_item
self.mode = mode
self.saved = False
self.exclude_repo_list = self.get_exclude_repo_list()
self.create_ui()
def create_ui(self):
if self.mode == 'create':
self.setWindowTitle('Choose Repositories to Watch')
else:
self.setWindowTitle('Editing Watch Folders')
self.resize(600, 420)
self.setSizeGripEnabled(True)
self.creat_layout()
self.create_repo_path_line_edit()
self.create_repo_combo_box()
self.create_repos_tree_widget()
self.create_buttons()
if self.mode == 'edit':
self.fill_repo_combo_box(self.exclude_repo_list)
self.fill_repo_tree_widget(self.exclude_repo_list)
else:
self.fill_repo_combo_box()
self.fill_repo_tree_widget()
self.check_save_ability()
self.controls_actions()
def controls_actions(self):
self.add_new_button.clicked.connect(self.add_new_repo)
self.remove_button.clicked.connect(self.delete_selected_repo)
self.save_button.clicked.connect(self.save_and_close)
self.close_button.clicked.connect(self.close)
def creat_layout(self):
self.main_layout = QtGui.QGridLayout()
self.main_layout.setContentsMargins(9, 9, 9, 9)
self.main_layout.setColumnStretch(0, 1)
self.setLayout(self.main_layout)
def create_repos_tree_widget(self):
self.repos_tree_widget = QtGui.QTreeWidget()
self.repos_tree_widget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.repos_tree_widget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.repos_tree_widget.setRootIsDecorated(False)
self.repos_tree_widget.setHeaderHidden(True)
self.repos_tree_widget.setObjectName('repos_tree_widget')
self.repos_tree_widget.setStyleSheet(gf.get_qtreeview_style())
self.main_layout.addWidget(self.repos_tree_widget, 2, 0, 2, 1)
def create_repo_path_line_edit(self):
self.repo_path_line_edit_layout = QtGui.QHBoxLayout()
self.repo_path_line_edit_layout.addWidget(QtGui.QLabel('Relative Watch Path:'))
self.repo_path_line_edit = QtGui.QLineEdit()
self.repo_path_line_edit_layout.addWidget(self.repo_path_line_edit)
if self.mode == 'create':
paths = tc.get_dirs_with_naming(self.sobject_item.get_search_key(), process_list=['watch_folder'])
self.repo_path_line_edit.setText(paths['versionless'][0])
elif self.mode == 'edit':
self.repo_path_line_edit.setText(self.sobject_item.get_watch_folder_path())
self.main_layout.addLayout(self.repo_path_line_edit_layout, 0, 0, 1, 2)
def create_repo_combo_box(self):
self.repo_combo_box = QtGui.QComboBox()
self.main_layout.addWidget(self.repo_combo_box, 1, 0, 1, 1)
def check_save_ability(self):
if self.repos_tree_widget.topLevelItemCount() < 1:
self.save_button.setEnabled(False)
else:
self.save_button.setEnabled(True)
def get_exclude_repo_list(self):
watch_folder_ui = env_inst.watch_folders.get(self.sobject_item.project.get_code())
watch_dict = watch_folder_ui.get_watch_dict_by_skey(self.sobject_item.get_search_key())
if watch_dict:
return watch_dict['rep']
else:
return []
def fill_repo_combo_box(self, exlude_list=None):
self.repo_combo_box.clear()
if not exlude_list:
exlude_list = []
base_dirs = env_tactic.get_all_base_dirs()
# Default repo states
for key, val in base_dirs:
if val['value'][4] and val['value'][3] not in exlude_list:
self.repo_combo_box.addItem(val['value'][1])
self.repo_combo_box.setItemData(self.repo_combo_box.count() - 1, val)
self.repo_combo_box.addItem('All Repos')
current_repo = gf.get_value_from_config(cfg_controls.get_checkin(), 'repositoryComboBox')
if current_repo:
self.repo_combo_box.setCurrentIndex(current_repo)
def fill_repo_tree_widget(self, exlude_list=None):
self.repos_tree_widget.clear()
if not exlude_list:
exlude_list = []
base_dirs = env_tactic.get_all_base_dirs()
# Default repo states
for key, val in base_dirs:
if val['value'][4] and val['value'][3] in exlude_list:
root_item = QtGui.QTreeWidgetItem()
root_item.setText(0, val['value'][1])
root_item.setData(0, QtCore.Qt.UserRole, val)
self.repos_tree_widget.addTopLevelItem(root_item)
def create_buttons(self):
self.add_new_button = QtGui.QPushButton('Add')
self.add_new_button.setMinimumWidth(90)
self.remove_button = QtGui.QPushButton('Remove')
self.remove_button.setMinimumWidth(90)
self.save_button = QtGui.QPushButton('Save and Close')
self.save_button.setMinimumWidth(90)
self.close_button = QtGui.QPushButton('Cancel')
self.close_button.setMinimumWidth(90)
self.main_layout.addWidget(self.add_new_button, 1, 1, 1, 1)
self.main_layout.addWidget(self.remove_button, 2, 1, 1, 1)
self.main_layout.addWidget(self.save_button, 4, 0, 1, 1)
self.main_layout.addWidget(self.close_button, 4, 1, 1, 1)
spacer = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.main_layout.addItem(spacer, 3, 1, 1, 1)
def add_new_repo(self):
current_repo_index = self.repo_combo_box.currentIndex()
current_repo = self.repo_combo_box.itemData(current_repo_index)
if current_repo:
self.repo_combo_box.removeItem(current_repo_index)
root_item = QtGui.QTreeWidgetItem()
root_item.setText(0, current_repo['value'][1])
root_item.setData(0, QtCore.Qt.UserRole, current_repo)
self.exclude_repo_list.append(current_repo['value'][3])
self.repos_tree_widget.addTopLevelItem(root_item)
else:
for i in range(self.repo_combo_box.count()-1):
current_repo = self.repo_combo_box.itemData(i)
root_item = QtGui.QTreeWidgetItem()
root_item.setText(0, current_repo['value'][1])
root_item.setData(0, QtCore.Qt.UserRole, current_repo)
self.exclude_repo_list.append(current_repo['value'][3])
self.repos_tree_widget.addTopLevelItem(root_item)
self.fill_repo_combo_box(self.exclude_repo_list)
self.check_save_ability()
def delete_selected_repo(self):
current_repo_item = self.repos_tree_widget.currentItem()
if current_repo_item:
current_repo = current_repo_item.data(0, QtCore.Qt.UserRole)
self.exclude_repo_list.remove(current_repo['value'][3])
self.repos_tree_widget.takeTopLevelItem(self.repos_tree_widget.currentIndex().row())
self.fill_repo_combo_box(self.exclude_repo_list)
self.check_save_ability()
def set_saved(self):
self.saved = True
def save_and_close(self):
self.set_saved()
params = (self.get_repos_list(), self.sobject_item)
self.sobject_item.set_watch_folder_path(str(self.repo_path_line_edit.text()))
if self.mode == 'create':
self.saved_signal.emit(*params)
if self.mode == 'edit':
self.edited_signal.emit(*params)
self.close()
def get_repos_list(self):
repos_list = []
for i in range(self.repos_tree_widget.topLevelItemCount()):
top_item = self.repos_tree_widget.topLevelItem(i)
repo_dict = top_item.data(0, QtCore.Qt.UserRole)
repos_list.append(repo_dict['value'][3])
return repos_list
| 38.301987 | 171 | 0.645861 | [
"EPL-1.0"
] | listyque/TACTIC-Handler | thlib/ui_classes/ui_watch_folder_classes.py | 28,918 | Python |
#!/usr/bin/env python
# coding: utf-8
'''
Description:
Given a binary tree, return all root-to-leaf paths.
For example, given the following binary tree:
1
/ \
2 3
\
5
All root-to-leaf paths are: ["1->2->5", "1->3"]
Tags: Tree, Depth-first Search
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @return {string[]}
def binaryTreePaths(self, root):
result, path = [], []
        self.binaryTreePathsRecu(root, path, result)
return result
    def binaryTreePathsRecu(self, node, path, result):
if node is None:
return
if node.left is node.right is None:
ans = ''
for n in path:
ans += str(n.val) + '->'
result.append(ans + str(node.val))
if node.left:
path.append(node)
self.binaryTreePathsRecu(node.left, path, result)
path.pop()
if node.right:
path.append(node)
self.binaryTreePathsRecu(node.right, path, result)
path.pop()
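# --- Hypothetical usage sketch (not part of the original solution) ---
# Builds the example tree from the problem statement above and prints the
# root-to-leaf paths; TreeNode is defined here because the definition above
# is only given as a comment.
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.right = TreeNode(5)
    print(Solution().binaryTreePaths(root))  # expected: ['1->2->5', '1->3']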
| 23.660377 | 62 | 0.528708 | [
"MIT"
] | Jan-zou/LeetCode | python/Tree/257_binary_tree_paths.py | 1,254 | Python |
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
"""Add finished/verified dates to cycle tasks
Revision ID: 13e52f6a9deb
Revises: 18bdb0671010
Create Date: 2016-01-04 13:52:43.017848
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '13e52f6a9deb'
down_revision = '18bdb0671010'
def upgrade():
op.add_column('cycle_task_group_object_tasks', sa.Column('finished_date', sa.DateTime(), nullable=True))
op.add_column('cycle_task_group_object_tasks', sa.Column('verified_date', sa.DateTime(), nullable=True))
op.execute("""
UPDATE cycle_task_group_object_tasks
SET finished_date = updated_at
WHERE status = "Finished"
""")
op.execute("""
UPDATE cycle_task_group_object_tasks
SET verified_date = updated_at, finished_date = updated_at
WHERE status = "Verified"
""")
def downgrade():
op.drop_column('cycle_task_group_object_tasks', 'verified_date')
op.drop_column('cycle_task_group_object_tasks', 'finished_date')
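# --- Hypothetical usage note (not part of the original migration) ---
# With a standard Alembic setup this revision would be applied with
#   alembic upgrade 13e52f6a9deb
# and rolled back with
#   alembic downgrade 18bdb0671010
# (ggrc-core may wrap these calls in its own migration tooling).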
| 31.974359 | 106 | 0.754611 | [
"Apache-2.0"
] | Smotko/ggrc-core | src/ggrc_workflows/migrations/versions/20160104135243_13e52f6a9deb_add_finished_verified_dates_to_cycle_.py | 1,247 | Python |
print('|------------------------------------------------|');
print('| COMUTADOR DE GARRAFAS POR DINHEIRO |');
print('| |');
print('|--------GARRAFAS 1 LT - R$0.10 CENTAVOS---------|');
print('|--------GARRAFAS 2 LT - R$0.25 CENTAVOS---------|');
print('|------------------------------------------------|');
print('');
va1Lt = float(0.10)
va2Lt = float(0.25)
id = input('INSIRA A QUANTIDADE DE GARRAFAS DE 1LT PARA TROCA: ');
g_1Lt = float(id)
print('valor total 1 litro: R$ %1.2f'%(g_1Lt * va1Lt));
print()
id2 = input('INSIRA A QUANTIDADE DE GARRAFAS DE 2LT PARA TROCA: ');
g_2Lt = float(id2)
print('valor total 2 litros: R$ %1.2f'%(g_2Lt * va2Lt));
total = g_1Lt * va1Lt + g_2Lt * va2Lt
print();
print('VALOR TOTAL A RECEBER: R$%1.2f' %(total));
import os
os.system("pause")
| 31.296296 | 67 | 0.494675 | [
"MIT"
] | carlosjrbk/Logica-de-Programa--o---IFPE | lista_1/exercicio 4.py | 845 | Python |
# Time: O(n^2)
# Space: O(1)
class Solution(object):
def triangleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
nums.sort()
for i in reversed(xrange(2, len(nums))):
left, right = 0, i-1
while left < right:
if nums[left]+nums[right] > nums[i]:
result += right-left
right -= 1
else:
left += 1
return result
# Time: O(n^2)
# Space: O(1)
class Solution2(object):
def triangleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
nums.sort()
for i in xrange(len(nums)-2):
if nums[i] == 0:
continue
k = i+2
for j in xrange(i+1, len(nums)-1):
while k < len(nums) and nums[i] + nums[j] > nums[k]:
k += 1
result += k-j-1
return result
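# --- Hypothetical usage sketch (not part of the original file) ---
# Counts the valid triangles in a small example; run under Python 2, which the
# xrange calls above assume.
if __name__ == '__main__':
    print(Solution().triangleNumber([2, 2, 3, 4]))   # expected: 3
    print(Solution2().triangleNumber([2, 2, 3, 4]))  # expected: 3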
| 24.023256 | 68 | 0.414327 | [
"MIT"
] | 20kzhan/LeetCode-Solutions | Python/valid-triangle-number.py | 1,033 | Python |
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
from Evolution.Systems.CurvedScalarWave.Characteristics import (
char_speed_vpsi, char_speed_vzero, char_speed_vplus, char_speed_vminus)
def error(face_mesh_velocity, normal_covector, normal_vector, psi, phi,
inertial_coords, gamma1, gamma2, lapse, shift, dt_psi, dt_pi, dt_phi,
d_psi, d_phi):
return None
def dt_psi_constraint_preserving_spherical_radiation(
face_mesh_velocity, normal_covector, normal_vector, psi, phi,
inertial_coords, gamma1, gamma2, lapse, shift, dt_psi, dt_pi, dt_phi,
d_psi, d_phi):
char_speed_psi = char_speed_vpsi(gamma1, lapse, shift, normal_covector)
if face_mesh_velocity is not None:
char_speed_psi -= np.dot(normal_covector, face_mesh_velocity)
return np.dot(normal_vector, d_psi - phi) * min(0., char_speed_psi)
def dt_phi_constraint_preserving_spherical_radiation(
face_mesh_velocity, normal_covector, normal_vector, psi, phi,
inertial_coords, gamma1, gamma2, lapse, shift, dt_psi, dt_pi, dt_phi,
d_psi, d_phi):
char_speed_zero = char_speed_vzero(gamma1, lapse, shift, normal_covector)
if face_mesh_velocity is not None:
char_speed_zero -= np.dot(normal_covector, face_mesh_velocity)
return 0.5 * np.einsum("ij,j", d_phi.T - d_phi, normal_vector) * min(
0, char_speed_zero)
def dt_pi_constraint_preserving_spherical_radiation(
face_mesh_velocity, normal_covector, normal_vector, psi, phi,
inertial_coords, gamma1, gamma2, lapse, shift, dt_psi, dt_pi, dt_phi,
d_psi, d_phi):
dt_psi_correction = dt_psi_constraint_preserving_spherical_radiation(
face_mesh_velocity, normal_covector, normal_vector, psi, phi,
inertial_coords, gamma1, gamma2, lapse, shift, dt_psi, dt_pi, dt_phi,
d_psi, d_phi)
inv_radius = 1. / np.linalg.norm(inertial_coords)
bc_dt_pi = (2. * inv_radius**2 * psi + 4. * inv_radius * dt_psi +
4. * inv_radius * np.dot(normal_vector, phi) +
2. * np.dot(normal_vector, dt_phi) + np.dot(shift, dt_phi) +
np.einsum("i,j,ij", normal_vector, normal_vector, d_phi))
bc_dt_pi /= lapse
return bc_dt_pi - dt_pi + gamma2 * dt_psi_correction
| 42.981132 | 79 | 0.723881 | [
"MIT"
] | AlexCarpenter46/spectre | tests/Unit/Evolution/Systems/CurvedScalarWave/BoundaryConditions/ConstraintPreservingSphericalRadiation.py | 2,278 | Python |
v1=float(input("Digite o valor de um dos lados "))
v2=float(input("Digite o valor de outrpo lados "))
v3=float(input("Digite o valor do ultimo lado lados "))
if v1+v2 > v3 and v1+v3 > v2 and v2+v3 > v1:
    if v1 == v2 == v3:
        print(f'Com esses valores é possivel formar um triangulo equilátero')
    elif v1 == v2 or v1 == v3 or v2 == v3:
        print(f'Com essas medidas pode ser formado um triangulo isóceles')
    else:
        print(f'Com essas medidas pode ser formado um triangulo escaleno')
else:
print(f'Com essas medidas não pode ser formado um triangulo')
| 48.769231 | 81 | 0.660883 | [
"MIT"
] | ThiagoPereira232/tecnico-informatica | 1 ano/logica-de-programacao/triangulo-tipo.py | 638 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 AVSystem <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.request
import argparse
import collections
import logging
import sys
import os
from xml.etree import ElementTree
from itertools import groupby
from operator import attrgetter
class Lwm2mObjectEntry:
"""
LwM2M Object Registry entry.
Available attributes are the same as tag names in the DDF XML structure.
"""
def __init__(self, tree):
self._tree = tree
def __getattr__(self, name):
node = self._tree.find(name)
if node is not None and node.text is not None:
return node.text.strip()
return self._tree.get(name)
def __lt__(self, other):
return (self.ObjectID, self.Ver) < (other.ObjectID, other.Ver)
def _read_url(url: str) -> bytes:
# we need to change the User-Agent - default one causes the server
# to respond with 403 Forbidden
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req) as f:
return f.read()
class Lwm2mObjectRegistry:
def __init__(self, repo_url='https://raw.githubusercontent.com/OpenMobileAlliance/lwm2m-registry/test'):
self.repo_url = repo_url
ddf_url = repo_url + '/DDF.xml'
root = ElementTree.fromstring(_read_url(ddf_url))
entries = (Lwm2mObjectEntry(obj) for obj in root.findall('Item'))
grouped = ((int(key), list(group)) for key, group in groupby(entries, attrgetter('ObjectID')))
self.objects = collections.OrderedDict(grouped)
def _print_object_list():
for oid, objs in Lwm2mObjectRegistry().objects.items():
for obj in objs:
print('%d\t%s\t%s' % (oid, obj.Ver, obj.Name))
def get_object_definition(urn_or_oid, version):
urn = urn_or_oid.strip()
if urn.startswith('urn:oma:lwm2m:'):
oid = int(urn.split(':')[-1])
else:
oid = int(urn)
try:
registry = Lwm2mObjectRegistry()
objects = registry.objects[oid]
available_versions_message = 'Available versions for object with ID %d: %s' % (
oid, ', '.join(str(obj.Ver) for obj in objects))
if version is None:
if (len(objects) > 1):
logging.info('%s; defaulting to maximum available version: %s' % (
available_versions_message, max(objects).Ver))
object_ddf_url = max(objects).DDF
else:
object_ddf_url = next(obj for obj in objects if obj.Ver == version).DDF
if not object_ddf_url:
raise ValueError("Object with ID = %d doesn't have attached XML definition" % oid)
if not object_ddf_url.startswith('http'):
object_ddf_url = registry.repo_url + '/' + object_ddf_url
return _read_url(object_ddf_url).decode('utf-8-sig')
except KeyError:
raise ValueError('Object with ID = %d not found' % oid)
except StopIteration:
raise ValueError(available_versions_message)
def _print_object_definition(urn_or_oid, version):
print(get_object_definition(urn_or_oid, version))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser(description="Accesses LwM2M Object registry")
parser.add_argument("-l", "--list", action='store_true', help="List all registered LwM2M Objects")
parser.add_argument("-g", "--get-xml", type=str, metavar='urn_or_oid', help="Get Object definition XML by URN or ID")
parser.add_argument("-v", "--object-version", metavar='ver', type=str, help=
"Explicitly choose version of an object if there exists more than one with the same ObjectID. Applicable only "
"with --get-xml argument. Without --object-version specified, most up to date version is chosen.")
args = parser.parse_args()
if args.list and args.get_xml is not None:
print('conflicting options: --list, --get-xml', file=sys.stderr)
sys.exit(1)
if args.object_version is not None and args.get_xml is None:
print('--object-version option is applicable only with --get-xml', file=sys.stderr)
sys.exit(1)
if args.list:
_print_object_list()
elif args.get_xml is not None:
_print_object_definition(args.get_xml, args.object_version)
else:
parser.print_usage()
sys.exit(1)
| 36.279412 | 121 | 0.673085 | [
"Apache-2.0"
] | bularcasergiu/Anjay | tools/lwm2m_object_registry.py | 4,934 | Python |
#!/usr/bin/python3
import os, argparse, difflib
lookup = {
"flare-form-field": "viur-form-bone",
"flare-form-submit": "viur-form-submit",
"flare-form": "viur-form",
"boneField": "ViurFormBone",
"sendForm": "ViurFormSubmit",
"viurForm": "ViurForm",
"boneSelector": "BoneSelector",
"moduleWidgetSelector": "ModuleWidgetSelector",
"displayDelegateSelector": "DisplayDelegateSelector",
"from flare.forms.formtags import": "from flare.viur import",
"from flare.forms": "from flare.viur",
}
if __name__ == "__main__":
# Get arguments
ap = argparse.ArgumentParser(
description="Flare source code porting tool"
)
ap.add_argument(
"project_root",
type=str,
help="Flare project root"
)
ap.add_argument(
"-d", "--dryrun",
action="store_true",
help="Dry-run for testing, don't modify files"
)
ap.add_argument(
"-x", "--daredevil",
action="store_true",
help="Don't make backups of files, just replace and deal with it"
)
args = ap.parse_args()
# Iterate all files in current folder
for root, dirs, files in os.walk(args.project_root):
# Ignore ViUR library folders
if any(ignore in root for ignore in ["flare"]):
continue
for filename in files:
# Ignore anything without a .py-extension
ext = os.path.splitext(filename)[1].lower()[1:]
if ext not in ["py"]:
continue
filename = os.path.join(root, filename)
with open(filename, "r") as f:
original_content = content = f.read()
count = 0
for k, v in lookup.items():
if k in content:
content = content.replace(k, v)
count += 1
if count:
if not args.dryrun:
if not args.daredevil:
os.rename(filename, filename + ".bak")
with open(filename, "w") as f:
f.write(content)
print("Modified %r" % filename)
else:
print(
"\n".join(
difflib.unified_diff(
original_content.splitlines(),
content.splitlines(),
filename,
filename
)
)
)
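# --- Hypothetical usage note (not part of the original tool) ---
# Example invocations, assuming the script is saved as tools/flare-update.py:
#   python3 tools/flare-update.py /path/to/project --dryrun    # print diffs only
#   python3 tools/flare-update.py /path/to/project             # rewrite files, keep .bak backups
#   python3 tools/flare-update.py /path/to/project --daredevil # rewrite files without backups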
| 28.94382 | 73 | 0.494565 | [
"MIT"
] | incub8/flare | tools/flare-update.py | 2,576 | Python |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset')
router.register('profile', views.UserProfileViewSet)
router.register('feed', views.UserProfileFeedViewSet)
urlpatterns = [
path('hello-view/', views.HelloApiView.as_view()),
path('login/', views.UserLoginApiView.as_view()),
path('', include(router.urls))
]
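# Hypothetical note (not part of the original file): besides the explicit
# /hello-view/ and /login/ paths, DefaultRouter generates list/detail routes
# such as /hello-viewset/, /profile/, /profile/{id}/, /feed/ and /feed/{id}/,
# plus a browsable API root at the included prefix.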
| 25.578947 | 79 | 0.781893 | [
"MIT"
] | yoniv/profiles-rest-api | profiles_api/urls.py | 486 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.3
# Email : [email protected]
###################################################################
"""MDockWidget"""
from dayu_widgets.qt import QDockWidget
class MDockWidget(QDockWidget):
"""
Just apply the qss. No more extend.
"""
def __init__(self, title='', parent=None, flags=0):
super(MDockWidget, self).__init__(title, parent=parent, flags=flags)
| 27.842105 | 76 | 0.489603 | [
"MIT"
] | 317431629/dayu_widgets | dayu_widgets/dock_widget.py | 529 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.ftu.app import Ftu
from gaiatest.apps.homescreen.app import Homescreen
class TestFtu(GaiaTestCase):
def setUp(self):
GaiaTestCase.setUp(self)
self.ftu = Ftu(self.marionette)
self.ftu.launch()
def test_ftu_with_tour(self):
"""
https://moztrap.mozilla.org/manage/case/6119/
"""
# Go through the FTU setup as quickly as possible to get to the Tour section
self.ftu.run_ftu_setup_with_default_values()
# Take the tour
self.ftu.tap_take_tour()
# Walk through the tour
self.assertEqual(self.ftu.step1_header_text, "Swipe up and down to browse your apps and bookmarks. Tap and hold an icon to delete, move, or edit it.")
self.ftu.tap_tour_next()
self.assertEqual(self.ftu.step2_header_text, "Tap to expand and collapse app groups. Drag an app into a new space to create a group.")
self.ftu.tap_tour_next()
self.assertEqual(self.ftu.step3_header_text, "Swipe down to access recent notifications, usage information and settings.")
self.ftu.tap_tour_next()
self.assertEqual(self.ftu.step4_header_text, "Drag from the left edge of your screen to return to recently used apps.")
self.ftu.tap_tour_next()
self.assertEqual(self.ftu.step5_header_text, "Tap on the search box anytime to start a search or go to a website.")
# Try going back a step
self.ftu.tap_back()
self.assertEqual(self.ftu.step4_header_text, "Drag from the left edge of your screen to return to recently used apps.")
self.ftu.tap_tour_next()
self.assertEqual(self.ftu.step5_header_text, "Tap on the search box anytime to start a search or go to a website.")
self.ftu.tap_tour_next()
self.ftu.wait_for_finish_tutorial_section()
self.ftu.tap_lets_go_button()
# Switch back to top level now that FTU app is gone
self.wait_for_condition(lambda m: self.apps.displayed_app.name == Homescreen.name)
| 45.14 | 158 | 0.698715 | [
"Apache-2.0"
] | ADLR-es/gaia | tests/python/gaia-ui-tests/gaiatest/tests/functional/ftu/test_ftu_with_tour.py | 2,257 | Python |
import os
import pytest
from cassis import *
FIXTURE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_files")
# Small xmi
@pytest.fixture
def small_xmi_path():
return os.path.join(FIXTURE_DIR, "xmi", "small_cas.xmi")
@pytest.fixture
def small_xmi(small_xmi_path):
with open(small_xmi_path, "r") as f:
return f.read()
# CAS with inheritance
@pytest.fixture
def cas_with_inheritance_xmi_path():
return os.path.join(FIXTURE_DIR, "xmi", "cas_with_inheritance.xmi")
@pytest.fixture
def cas_with_inheritance_xmi(cas_with_inheritance_xmi_path):
with open(cas_with_inheritance_xmi_path, "r") as f:
return f.read()
# Small type system
@pytest.fixture
def small_typesystem_path():
return os.path.join(FIXTURE_DIR, "typesystems", "small_typesystem.xml")
@pytest.fixture
def small_typesystem_xml(small_typesystem_path):
with open(small_typesystem_path, "r") as f:
return f.read()
# Small type system with document annotation
@pytest.fixture
def small_typesystem_with_predefined_types_path():
return os.path.join(FIXTURE_DIR, "typesystems", "small_typesystem_with_predefined_types.xml")
@pytest.fixture
def small_typesystem_with_predefined_types_xml(small_typesystem_with_predefined_types_path):
with open(small_typesystem_with_predefined_types_path, "r") as f:
return f.read()
# Type system with types without namespace
# https://github.com/dkpro/dkpro-cassis/issues/43
@pytest.fixture
def typesystem_has_types_with_no_namespace_path():
return os.path.join(FIXTURE_DIR, "typesystems", "typesystem_has_types_with_no_namespace.xml")
@pytest.fixture
def typesystem_has_types_with_no_namespace_xml(typesystem_has_types_with_no_namespace_path):
with open(typesystem_has_types_with_no_namespace_path, "r") as f:
return f.read()
# Type system with inheritance
@pytest.fixture
def typesystem_with_inheritance_path():
return os.path.join(FIXTURE_DIR, "typesystems", "typesystem_with_inheritance.xml")
@pytest.fixture
def typesystem_with_inheritance_xml(typesystem_with_inheritance_path):
with open(typesystem_with_inheritance_path, "r") as f:
return f.read()
@pytest.fixture
def dkpro_typesystem_path():
return os.path.join(FIXTURE_DIR, "typesystems", "important-dkpro-types.xml")
@pytest.fixture
def dkpro_typesystem_xml(dkpro_typesystem_path):
with open(dkpro_typesystem_path, "r") as f:
return f.read()
# Annotations
@pytest.fixture
def tokens(small_typesystem_xml):
typesystem = load_typesystem(small_typesystem_xml)
TokenType = typesystem.get_type("cassis.Token")
return [
TokenType(xmiID=3, sofa=1, begin=0, end=3, id="0", pos="NNP"),
TokenType(xmiID=4, sofa=1, begin=4, end=10, id="1", pos="VBD"),
TokenType(xmiID=5, sofa=1, begin=11, end=14, id="2", pos="IN"),
TokenType(xmiID=6, sofa=1, begin=15, end=18, id="3", pos="DT"),
TokenType(xmiID=7, sofa=1, begin=19, end=24, id="4", pos="NN"),
TokenType(xmiID=8, sofa=1, begin=25, end=26, id="5", pos="."),
TokenType(xmiID=9, sofa=1, begin=27, end=30, id="6", pos="DT"),
TokenType(xmiID=10, sofa=1, begin=31, end=36, id="7", pos="NN"),
TokenType(xmiID=11, sofa=1, begin=37, end=40, id="8", pos="VBD"),
TokenType(xmiID=12, sofa=1, begin=41, end=45, id="9", pos="JJ"),
TokenType(xmiID=13, sofa=1, begin=46, end=47, id="10", pos="."),
]
@pytest.fixture
def sentences(small_typesystem_xml):
typesystem = load_typesystem(small_typesystem_xml)
SentenceType = typesystem.get_type("cassis.Sentence")
return [
SentenceType(xmiID=14, sofa=1, begin=0, end=26, id="0"),
SentenceType(xmiID=15, sofa=1, begin=27, end=47, id="1"),
]
| 27.510949 | 97 | 0.71584 | [
"Apache-2.0"
] | cgaege/dkpro-cassis | tests/fixtures.py | 3,769 | Python |
import os
import pytest
import toml
from voluptuous import MultipleInvalid
from worker.conf_schemata import validate_general, validate_storage
def test_toml_loader():
config_dict = toml.load(os.getcwd() + '/src/tests/worker/config_handling/conf.toml')
validate_general(config_dict['general'])
validate_storage(config_dict['archive1'])
assert config_dict['archive1']['copytool']['copytool'] == 'tar'
assert config_dict['archive1']['copytool']['gzip'] is True
assert config_dict['archive1']['copytool']['retrycount'] == 0
assert config_dict['archive1']['type'] == 'archive'
def test_corrupted_toml_loader():
config_dict = toml.load(os.getcwd() + '/src/tests/worker/config_handling/corrupted_conf.toml')
with pytest.raises(MultipleInvalid):
validate_general(config_dict['general'])
with pytest.raises(MultipleInvalid):
validate_storage(config_dict['archive1'])
| 35.384615 | 98 | 0.738043 | [
"MIT"
] | mehsoy/jaws | src/tests/worker/config_handling/test_toml_config_parser.py | 920 | Python |
import gym
import numpy as np
from gym import spaces
from stable_baselines.common.running_mean_std import RunningMeanStd
class ScaleRewardEnv(gym.RewardWrapper):
def __init__(self, env: gym.Env, scale):
gym.RewardWrapper.__init__(self, env)
self.scale = scale
def reward(self, reward: float) -> float:
return reward * self.scale
class RepeatGoalEnv(gym.Wrapper):
def __init__(
self,
env: gym.Env,
gamma,
max_d,
max_t,
lambda_dt,
anoise_type=None,
anoise_prob=0.,
anoise_std=0.,
):
gym.Wrapper.__init__(self, env)
self.epsilon_std = 1e-3
self.gamma = gamma
self.max_d = max_d
self.max_t = max_t
self.lambda_dt = lambda_dt
self.anoise_type = anoise_type
self.anoise_prob = anoise_prob
self.anoise_std = anoise_std
self.body_key = None
part_keys = set(self.env.sim.model._body_name2id.keys())
target_keys = ['torso', 'cart', 'body1']
for target_key in target_keys:
if target_key in part_keys:
self.body_key = target_key
break
if self.anoise_type in ['ext_fpc']:
low = np.concatenate([self.observation_space.low, [-np.inf] * 3])
high = np.concatenate([self.observation_space.high, [np.inf] * 3])
self.observation_space = spaces.Box(
low=low, high=high,
shape=(self.observation_space.shape[0] + 3,), dtype=self.observation_space.dtype,
)
self.obs_dim = self.observation_space.shape[0] + 3
self.cur_force = np.zeros(3)
else:
self.obs_dim = self.observation_space.shape[0]
action_dim = self.env.action_space.shape[0]
self.ori_action_dim = action_dim
low = self.env.action_space.low
high = self.env.action_space.high
if self.max_d is not None or self.max_t is not None:
action_dim += 1
low = np.r_[low, -1.]
high = np.r_[high, 1.]
self.action_space = spaces.Box(
low=low, high=high, shape=(action_dim,), dtype=env.action_space.dtype
)
self.cur_obs = None
self.obs_rms = RunningMeanStd(shape=self.observation_space.shape)
self.reset_update_obs_estimate = False
self.num_steps = 0
self.eval_mode = False
def _update_obs_estimate(self, obs):
if not self.eval_mode:
self.obs_rms.update(obs[:, :self.obs_dim])
def step(self, aug_action):
cur_idx = self.ori_action_dim
action = aug_action[:self.ori_action_dim]
if self.anoise_type == 'action':
if np.random.rand() < self.anoise_prob:
action = action + np.random.randn(*action.shape) * self.anoise_std
action = np.clip(action, self.action_space.low[:len(action)], self.action_space.high[:len(action)])
elif self.anoise_type is not None and 'ext' in self.anoise_type:
if np.random.rand() < self.anoise_prob:
if self.env.spec.id == 'Reacher-v2':
force = np.zeros(3)
torque = np.random.randn(3) * self.anoise_std
cur_info = torque
else:
force = np.random.randn(3) * self.anoise_std
torque = np.zeros(3)
cur_info = force
if self.anoise_type == 'ext_fpc':
self.cur_force = np.clip(cur_info, -1, 1)
self.env.sim.data.xfrc_applied[self.env.sim.model._body_name2id[self.body_key], :] = np.r_[
force, torque]
else:
self.env.sim.data.xfrc_applied[self.env.sim.model._body_name2id[self.body_key], :] = [0] * 6
if self.max_d is not None or self.max_t is not None:
u = aug_action[cur_idx]
cur_idx += 1
norm_u = (u + 1) / 2
u = norm_u
else:
u = None
lambda_dt = self.lambda_dt
total_reward = 0.0
done = None
cur_gamma = 1.0
first_obs = self.cur_obs
for i in range(100000000):
obs, reward, done, info = self.env.step(action)
if self.anoise_type in ['ext_fpc']:
obs = np.concatenate([obs, self.cur_force])
if not done:
self._update_obs_estimate(obs[np.newaxis, ...])
self.reset_update_obs_estimate = True
total_reward += reward * cur_gamma
cur_gamma *= self.gamma
if done:
break
if self.max_d is None and self.max_t is None:
break
if self.max_t is not None:
t_delta = (i + 1) * self.env.dt
if self.max_d is not None:
norm_obs = (obs - self.obs_rms.mean) / (np.sqrt(self.obs_rms.var) + self.epsilon_std)
norm_first_obs = (first_obs - self.obs_rms.mean) / (np.sqrt(self.obs_rms.var) + self.epsilon_std)
d_delta = np.linalg.norm(norm_obs - norm_first_obs, ord=1) / len(obs)
if self.max_d is not None and self.max_t is not None:
if lambda_dt is None:
if d_delta >= u * self.max_d:
break
if t_delta >= self.max_t:
break
else:
ori_t_delta = t_delta
t_delta = t_delta / self.max_t
d_delta = d_delta / self.max_d
delta = lambda_dt * d_delta + (1 - lambda_dt) * t_delta
if delta >= u:
break
if ori_t_delta >= self.max_t:
break
elif self.max_t is not None:
if t_delta >= u * self.max_t:
break
elif self.max_d is not None:
if d_delta >= u * self.max_d:
break
self.cur_obs = obs
info['w'] = i + 1
info['t_diff'] = (i + 1) * self.env.dt
if u is not None:
if self.max_d is not None and self.max_t is not None:
pass
elif self.max_t is not None:
info['t'] = u * self.max_t
elif self.max_d is not None:
info['d'] = u * self.max_d
info['u'] = u
if lambda_dt is not None:
info['lambda_dt'] = lambda_dt
self.num_steps += 1
return self.cur_obs, total_reward, done, info
def reset(self, **kwargs):
obs = self.env.reset(**kwargs)
if self.anoise_type in ['ext_fpc']:
self.cur_force = np.zeros(3)
obs = np.concatenate([obs, self.cur_force])
if self.reset_update_obs_estimate:
self._update_obs_estimate(obs[np.newaxis, ...])
self.reset_update_obs_estimate = False
self.cur_obs = obs
return self.cur_obs
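# --- Hypothetical usage sketch (not part of the original module) ---
# Wraps a MuJoCo task so the extra action dimension controls how long each
# low-level action is repeated; assumes gym with mujoco-py and the Hopper-v2
# task are available, and stable_baselines is installed for RunningMeanStd.
if __name__ == '__main__':
    env = RepeatGoalEnv(gym.make('Hopper-v2'), gamma=0.99,
                        max_d=None, max_t=0.2, lambda_dt=None)
    obs = env.reset()
    aug_action = env.action_space.sample()  # last entry sets the repeat horizon
    obs, reward, done, info = env.step(aug_action)
    print(info['w'], info['t_diff'])  # inner steps taken and elapsed sim time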
| 35.133663 | 115 | 0.533888 | [
"MIT"
] | artberryx/SAR | sb/stable_baselines_ex/common/wrappers_ex.py | 7,097 | Python |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your API Key SID and Secret from twilio.com/console
api_key_sid = "SKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
api_key_secret = "your_api_key_secret"
client = Client(api_key_sid, api_key_secret)
publishedtrack = client.video.rooms('DailyStandup').participants.get(
'Alice').published_tracks.get('Camera')
print(publishedtrack.fetch().date_created)
| 35.153846 | 72 | 0.809628 | [
"MIT"
] | PatNeedham/api-snippets | video/rooms/participants/published-track/retrieve-track-published-by-participant/retrieve-track-published-by-participant.py | 457 | Python |
import argparse
import importlib
import mmcv
import numpy as np
import os
import os.path as osp
import time
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from openselfsup.datasets import build_dataloader, build_dataset
from openselfsup.models import build_model
from openselfsup.models.utils import MultiPooling
from openselfsup.utils import dist_forward_collect, nondist_forward_collect
from openselfsup.utils import get_root_logger
class ExtractProcess(object):
def __init__(self,
pool_type='specified',
backbone='resnet50',
layer_indices=(0, 1, 2, 3, 4)):
self.multi_pooling = MultiPooling(
pool_type, in_indices=layer_indices, backbone=backbone)
def _forward_func(self, model, **x):
backbone_feats = model(mode='extract', **x)
pooling_feats = self.multi_pooling(backbone_feats)
flat_feats = [xx.view(xx.size(0), -1) for xx in pooling_feats]
feat_dict = {'feat{}'.format(i + 1): feat.cpu() \
for i, feat in enumerate(flat_feats)}
return feat_dict
def extract(self, model, data_loader, distributed=False):
model.eval()
func = lambda **x: self._forward_func(model, **x)
if distributed:
rank, world_size = get_dist_info()
results = dist_forward_collect(func, data_loader, rank,
len(data_loader.dataset))
else:
results = nondist_forward_collect(func, data_loader,
len(data_loader.dataset))
return results
def parse_args():
parser = argparse.ArgumentParser(
description='OpenSelfSup extract features of a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', default=None, help='checkpoint file')
parser.add_argument(
'--pretrained', default='random',
help='pretrained model file, exclusive to --checkpoint')
parser.add_argument(
'--dataset-config',
default='benchmarks/extract_info/voc07.py',
help='extract dataset config file path')
parser.add_argument(
'--layer-ind',
type=str,
help='layer indices, separated by comma, e.g., "0,1,2,3,4"')
parser.add_argument(
'--work_dir',
type=str,
default=None,
help='the dir to save logs and models')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--port', type=int, default=29500,
help='port only works when launcher=="slurm"')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# update configs according to CLI args
if args.work_dir is not None:
cfg.work_dir = args.work_dir
layer_ind = [int(idx) for idx in args.layer_ind.split(',')]
cfg.model.backbone.out_indices = layer_ind
# checkpoint and pretrained are exclusive
assert args.pretrained == "random" or args.checkpoint is None, \
"Checkpoint and pretrained are exclusive."
# check memcached package exists
if importlib.util.find_spec('mc') is None:
for field in ['train', 'val', 'test']:
if hasattr(cfg.data, field):
getattr(cfg.data, field).data_source.memcached = False
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
if args.launcher == 'slurm':
cfg.dist_params['port'] = args.port
init_dist(args.launcher, **cfg.dist_params)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# logger
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, 'extract_{}.log'.format(timestamp))
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# build the dataloader
dataset_cfg = mmcv.Config.fromfile(args.dataset_config)
dataset = build_dataset(dataset_cfg.data.extract)
data_loader = build_dataloader(
dataset,
imgs_per_gpu=dataset_cfg.data.imgs_per_gpu,
workers_per_gpu=dataset_cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# specify pretrained model
if args.pretrained != 'random':
assert isinstance(args.pretrained, str)
cfg.model.pretrained = args.pretrained
# build the model and load checkpoint
model = build_model(cfg.model)
if args.checkpoint is not None:
logger.info("Use checkpoint: {} to extract features".format(
args.checkpoint))
load_checkpoint(model, args.checkpoint, map_location='cpu')
elif args.pretrained != "random":
logger.info('Use pretrained model: {} to extract features'.format(
args.pretrained))
else:
logger.info('No checkpoint or pretrained is give, use random init.')
if not distributed:
model = MMDataParallel(model, device_ids=[0])
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
# build extraction processor
extractor = ExtractProcess(
pool_type='specified', backbone='resnet50', layer_indices=layer_ind)
# run
outputs = extractor.extract(model, data_loader, distributed=distributed)
rank, _ = get_dist_info()
mmcv.mkdir_or_exist("{}/features/".format(args.work_dir))
if rank == 0:
for key, val in outputs.items():
split_num = len(dataset_cfg.split_name)
split_at = dataset_cfg.split_at
for ss in range(split_num):
output_file = "{}/features/{}_{}.npy".format(
args.work_dir, dataset_cfg.split_name[ss], key)
if ss == 0:
np.save(output_file, val[:split_at[0]])
elif ss == split_num - 1:
np.save(output_file, val[split_at[-1]:])
else:
np.save(output_file, val[split_at[ss - 1]:split_at[ss]])
if __name__ == '__main__':
main()
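# --- Hypothetical usage note (not part of the original script) ---
# Example single-GPU invocation, assuming OpenSelfSup is installed and the
# config/checkpoint paths below exist:
#   python tools/extract.py configs/my_config.py --layer-ind "0,1,2,3,4" \
#       --pretrained work_dirs/pretrain/latest.pth --work_dir work_dirs/extract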
| 36.917582 | 77 | 0.637 | [
"Apache-2.0"
] | speedcell4/OpenSelfSup | tools/extract.py | 6,719 | Python |
"""Temkin Approximation isotherm model."""
import numpy
import scipy
from ..utilities.exceptions import CalculationError
from .base_model import IsothermBaseModel
class TemkinApprox(IsothermBaseModel):
r"""
Asymptotic approximation to the Temkin isotherm.
.. math::
n(p) = n_m \frac{K p}{1 + K p} + n_m \theta (\frac{K p}{1 + K p})^2 (\frac{K p}{1 + K p} -1)
Notes
-----
The Temkin adsorption isotherm [#]_, like the Langmuir model, considers
a surface with n_m identical adsorption sites, but takes into account adsorbate-
adsorbate interactions by assuming that the enthalpy of adsorption is a linear
function of the coverage. The Temkin isotherm is derived [#]_ using a
mean-field argument and used an asymptotic approximation
to obtain an explicit equation for the loading.
Here, :math:`n_m` and K have the same physical meaning as in the Langmuir model.
The additional parameter :math:`\theta` describes the strength of the adsorbate-adsorbate
interactions (:math:`\theta < 0` for attractions).
References
----------
.. [#] V. P. M.I. Tempkin, Kinetics of ammonia synthesis on promoted iron
catalyst, Acta Phys. Chim. USSR 12 (1940) 327–356.
.. [#] Phys. Chem. Chem. Phys., 2014,16, 5499-5513
"""
# Model parameters
name = 'TemkinApprox'
calculates = 'loading'
param_names = ["n_m", "K", "tht"]
param_bounds = {
"n_m": [0, numpy.inf],
"K": [0, numpy.inf],
"tht": [0, numpy.inf],
}
def __init__(self):
"""Instantiation function."""
self.params = {"n_m": numpy.nan, "K": numpy.nan, "tht": numpy.nan}
def loading(self, pressure):
"""
Calculate loading at specified pressure.
Parameters
----------
pressure : float
The pressure at which to calculate the loading.
Returns
-------
float
Loading at specified pressure.
"""
lang_load = self.params["K"] * pressure / (1.0 + self.params["K"] * pressure)
return self.params["n_m"] * (lang_load + self.params["tht"] * lang_load ** 2 *
(lang_load - 1))
def pressure(self, loading):
"""
Calculate pressure at specified loading.
For the TemkinApprox model, the pressure will
be computed numerically as no analytical inversion is possible.
Parameters
----------
loading : float
The loading at which to calculate the pressure.
Returns
-------
float
Pressure at specified loading.
"""
def fun(x):
return self.loading(x) - loading
opt_res = scipy.optimize.root(fun, 0, method='hybr')
if not opt_res.success:
raise CalculationError("""
Root finding for value {0} failed.
""".format(loading))
return opt_res.x
def spreading_pressure(self, pressure):
r"""
Calculate spreading pressure at specified gas pressure.
Function that calculates spreading pressure by solving the
following integral at each point i.
.. math::
\pi = \int_{0}^{p_i} \frac{n_i(p_i)}{p_i} dp_i
The integral for the TemkinApprox model is solved analytically.
.. math::
\pi = n_m \Big( \ln{(1 + K p)} + \frac{\theta (2 K p + 1)}{2(1 + K p)^2}\Big)
Parameters
----------
pressure : float
The pressure at which to calculate the spreading pressure.
Returns
-------
float
Spreading pressure at specified pressure.
"""
one_plus_kp = 1.0 + self.params["K"] * pressure
return self.params["n_m"] * (numpy.log(one_plus_kp) +
self.params["tht"] * (2.0 * self.params["K"] * pressure + 1.0) /
(2.0 * one_plus_kp ** 2))
def initial_guess(self, pressure, loading):
"""
Return initial guess for fitting.
Parameters
----------
pressure : ndarray
Pressure data.
loading : ndarray
Loading data.
Returns
-------
dict
Dictionary of initial guesses for the parameters.
"""
saturation_loading, langmuir_k = super().initial_guess(pressure, loading)
guess = {"n_m": saturation_loading, "K": langmuir_k, "tht": 0.0}
for param in guess:
if guess[param] < self.param_bounds[param][0]:
guess[param] = self.param_bounds[param][0]
if guess[param] > self.param_bounds[param][1]:
guess[param] = self.param_bounds[param][1]
return guess
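# --- Hypothetical usage sketch (not part of the original module) ---
# Within pyGAPS the parameters normally come from fitting isotherm data; they
# are set by hand here only to illustrate the API of this class:
#   model = TemkinApprox()
#   model.params = {"n_m": 5.0, "K": 2.0, "tht": -0.1}
#   model.loading(1.0)             # loading at p = 1.0
#   model.spreading_pressure(1.0)  # reduced spreading pressure at p = 1.0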
| 30.0875 | 101 | 0.566265 | [
"MIT"
] | ReginaPeralta/ReginaPeralta | src/pygaps/modelling/temkinapprox.py | 4,816 | Python |
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2015-2019, Johannes Köster"
__email__ = "[email protected]"
__license__ = "MIT"
import html
import os
import shutil
import textwrap
import time
import tarfile
from collections import defaultdict, Counter
from itertools import chain, filterfalse, groupby
from functools import partial
from pathlib import Path
import uuid
import math
from snakemake.io import PeriodicityDetector, wait_for_files, is_flagged
from snakemake.jobs import Reason, JobFactory, GroupJobFactory, Job
from snakemake.exceptions import MissingInputException
from snakemake.exceptions import MissingRuleException, AmbiguousRuleException
from snakemake.exceptions import CyclicGraphException, MissingOutputException
from snakemake.exceptions import IncompleteFilesException, ImproperOutputException
from snakemake.exceptions import PeriodicWildcardError
from snakemake.exceptions import RemoteFileException, WorkflowError, ChildIOException
from snakemake.exceptions import InputFunctionException
from snakemake.logging import logger
from snakemake.common import DYNAMIC_FILL, group_into_chunks
from snakemake.deployment import conda, singularity
from snakemake.output_index import OutputIndex
from snakemake import workflow
class Batch:
"""Definition of a batch for calculating only a partial DAG."""
def __init__(self, rulename: str, idx: int, batches: int):
assert idx <= batches
assert idx > 0
self.rulename = rulename
self.idx = idx
self.batches = batches
def get_batch(self, items: list):
"""Return the defined batch of the given items.
Items are usually input files."""
# make sure that we always consider items in the same order
if len(items) < self.batches:
raise WorkflowError(
"Batching rule {} has less input files than batches. "
"Please choose a smaller number of batches.".format(self.rulename)
)
items = sorted(items)
batch_len = math.floor(len(items) / self.batches)
# self.batch is one-based, hence we have to subtract 1
idx = self.idx - 1
i = idx * batch_len
if self.is_final:
# extend the last batch to cover rest of list
return items[i:]
else:
return items[i : i + batch_len]
@property
def is_final(self):
return self.idx == self.batches
def __str__(self):
return "{}/{} (rule {})".format(self.idx, self.batches, self.rulename)
class DAG:
"""Directed acyclic graph of jobs."""
def __init__(
self,
workflow,
rules=None,
dryrun=False,
targetfiles=None,
targetrules=None,
forceall=False,
forcerules=None,
forcefiles=None,
priorityfiles=None,
priorityrules=None,
untilfiles=None,
untilrules=None,
omitfiles=None,
omitrules=None,
ignore_ambiguity=False,
force_incomplete=False,
ignore_incomplete=False,
notemp=False,
keep_remote_local=False,
batch=None,
):
self.dryrun = dryrun
self.dependencies = defaultdict(partial(defaultdict, set))
self.depending = defaultdict(partial(defaultdict, set))
self._needrun = set()
self._priority = dict()
self._reason = defaultdict(Reason)
self._finished = set()
self._dynamic = set()
self._len = 0
self.workflow = workflow
self.rules = set(rules)
self.ignore_ambiguity = ignore_ambiguity
self.targetfiles = targetfiles
self.targetrules = targetrules
self.priorityfiles = priorityfiles
self.priorityrules = priorityrules
self.targetjobs = set()
self.prioritytargetjobs = set()
self._ready_jobs = set()
self.notemp = notemp
self.keep_remote_local = keep_remote_local
self._jobid = dict()
self.job_cache = dict()
self.conda_envs = dict()
self.container_imgs = dict()
self._progress = 0
self._group = dict()
self.job_factory = JobFactory()
self.group_job_factory = GroupJobFactory()
self.forcerules = set()
self.forcefiles = set()
self.untilrules = set()
self.untilfiles = set()
self.omitrules = set()
self.omitfiles = set()
self.updated_subworkflow_files = set()
if forceall:
self.forcerules.update(self.rules)
elif forcerules:
self.forcerules.update(forcerules)
if forcefiles:
self.forcefiles.update(forcefiles)
if untilrules:
self.untilrules.update(set(rule.name for rule in untilrules))
if untilfiles:
self.untilfiles.update(untilfiles)
if omitrules:
self.omitrules.update(set(rule.name for rule in omitrules))
if omitfiles:
self.omitfiles.update(omitfiles)
self.has_dynamic_rules = any(rule.dynamic_output for rule in self.rules)
self.omitforce = set()
self.batch = batch
if batch is not None and not batch.is_final:
# Since not all input files of a batching rule are considered, we cannot run
# beyond that rule.
# For the final batch, we do not need to omit anything.
self.omitrules.add(batch.rulename)
self.force_incomplete = force_incomplete
self.ignore_incomplete = ignore_incomplete
self.periodic_wildcard_detector = PeriodicityDetector()
self.update_output_index()
def init(self, progress=False):
""" Initialise the DAG. """
for job in map(self.rule2job, self.targetrules):
job = self.update([job], progress=progress)
self.targetjobs.add(job)
for file in self.targetfiles:
job = self.update(self.file2jobs(file), file=file, progress=progress)
self.targetjobs.add(job)
self.cleanup()
self.update_needrun()
self.set_until_jobs()
self.delete_omitfrom_jobs()
self.update_jobids()
self.check_directory_outputs()
# check if remaining jobs are valid
for i, job in enumerate(self.jobs):
job.is_valid()
def check_directory_outputs(self):
"""Check that no output file is contained in a directory output of the same or another rule."""
outputs = sorted(
{
(path(f), job)
for job in self.jobs
for f in job.output
for path in (os.path.abspath, os.path.realpath)
}
)
for i in range(len(outputs) - 1):
(a, job_a), (b, job_b) = outputs[i : i + 2]
try:
common = os.path.commonpath([a, b])
except ValueError:
# commonpath raises error if windows drives are different.
continue
if a != b and common == os.path.commonpath([a]) and job_a != job_b:
raise ChildIOException(parent=outputs[i], child=outputs[i + 1])
@property
def checkpoint_jobs(self):
for job in self.needrun_jobs:
if job.is_checkpoint:
yield job
def update_checkpoint_outputs(self):
workflow.checkpoints.future_output = set(
f for job in self.checkpoint_jobs for f in job.output
)
def update_jobids(self):
for job in self.jobs:
if job not in self._jobid:
self._jobid[job] = len(self._jobid)
def cleanup_workdir(self):
for io_dir in set(
os.path.dirname(io_file)
for job in self.jobs
for io_file in chain(job.output, job.input)
if not os.path.exists(io_file)
):
if os.path.exists(io_dir) and not len(os.listdir(io_dir)):
os.removedirs(io_dir)
def cleanup(self):
self.job_cache.clear()
final_jobs = set(self.jobs)
todelete = [job for job in self.dependencies if job not in final_jobs]
for job in todelete:
del self.dependencies[job]
try:
del self.depending[job]
except KeyError:
pass
def create_conda_envs(
self, dryrun=False, forceall=False, init_only=False, quiet=False
):
# First deduplicate based on job.conda_env_file
jobs = self.jobs if forceall else self.needrun_jobs
env_set = {
(job.conda_env_file, job.container_img_url)
for job in jobs
if job.conda_env_file
}
# Then based on md5sum values
self.conda_envs = dict()
for (env_file, simg_url) in env_set:
simg = None
if simg_url and self.workflow.use_singularity:
assert (
simg_url in self.container_imgs
), "bug: must first pull singularity images"
simg = self.container_imgs[simg_url]
env = conda.Env(
env_file,
self,
container_img=simg,
cleanup=self.workflow.conda_cleanup_pkgs,
)
self.conda_envs[(env_file, simg_url)] = env
if not init_only:
for env in self.conda_envs.values():
if not dryrun or not quiet:
env.create(dryrun)
def pull_container_imgs(self, dryrun=False, forceall=False, quiet=False):
# First deduplicate based on job.conda_env_file
jobs = self.jobs if forceall else self.needrun_jobs
img_set = {job.container_img_url for job in jobs if job.container_img_url}
for img_url in img_set:
img = singularity.Image(img_url, self)
if not dryrun or not quiet:
img.pull(dryrun)
self.container_imgs[img_url] = img
def update_output_index(self):
"""Update the OutputIndex."""
self.output_index = OutputIndex(self.rules)
def check_incomplete(self):
"""Check if any output files are incomplete. This is done by looking up
markers in the persistence module."""
if not self.ignore_incomplete:
incomplete = self.incomplete_files
if incomplete:
if self.force_incomplete:
logger.debug("Forcing incomplete files:")
logger.debug("\t" + "\n\t".join(incomplete))
self.forcefiles.update(incomplete)
else:
raise IncompleteFilesException(incomplete)
def incomplete_external_jobid(self, job):
"""Return the external jobid of the job if it is marked as incomplete.
Returns None, if job is not incomplete, or if no external jobid has been
registered or if force_incomplete is True.
"""
if self.force_incomplete:
return None
jobids = self.workflow.persistence.external_jobids(job)
if len(jobids) == 1:
return jobids[0]
elif len(jobids) > 1:
raise WorkflowError(
"Multiple different external jobids registered "
"for output files of incomplete job {} ({}). This job "
"cannot be resumed. Execute Snakemake with --rerun-incomplete "
"to fix this issue.".format(job.jobid, jobids)
)
def check_dynamic(self):
"""Check dynamic output and update downstream rules if necessary."""
if self.has_dynamic_rules:
for job in filter(
lambda job: (job.dynamic_output and not self.needrun(job)), self.jobs
):
self.update_dynamic(job)
self.postprocess()
def is_edit_notebook_job(self, job):
return self.workflow.edit_notebook and job.targetfile in self.targetfiles
@property
def dynamic_output_jobs(self):
"""Iterate over all jobs with dynamic output files."""
return (job for job in self.jobs if job.dynamic_output)
@property
def jobs(self):
""" All jobs in the DAG. """
for job in self.bfs(self.dependencies, *self.targetjobs):
yield job
@property
def needrun_jobs(self):
""" Jobs that need to be executed. """
for job in filter(
self.needrun,
self.bfs(self.dependencies, *self.targetjobs, stop=self.noneedrun_finished),
):
yield job
@property
def local_needrun_jobs(self):
"""Iterate over all jobs that need to be run and are marked as local."""
return filter(lambda job: job.is_local, self.needrun_jobs)
@property
def finished_jobs(self):
""" Iterate over all jobs that have been finished."""
for job in filter(self.finished, self.bfs(self.dependencies, *self.targetjobs)):
yield job
@property
def ready_jobs(self):
"""Jobs that are ready to execute."""
return self._ready_jobs
def needrun(self, job):
"""Return whether a given job needs to be executed."""
return job in self._needrun
def priority(self, job):
"""Return priority of given job."""
return self._priority[job]
def noneedrun_finished(self, job):
"""
Return whether a given job is finished or was not
required to run at all.
"""
return not self.needrun(job) or self.finished(job)
def reason(self, job):
""" Return the reason of the job execution. """
return self._reason[job]
def finished(self, job):
""" Return whether a job is finished. """
return job in self._finished
def dynamic(self, job):
"""
Return whether a job is dynamic (i.e. it is only a placeholder
for those that are created after the job with dynamic output has
finished.
"""
if job.is_group():
for j in job:
if j in self._dynamic:
return True
else:
return job in self._dynamic
def requested_files(self, job):
"""Return the files a job requests."""
return set(*self.depending[job].values())
@property
def incomplete_files(self):
"""Return list of incomplete files."""
return list(
chain(
*(
job.output
for job in filter(
self.workflow.persistence.incomplete,
filterfalse(self.needrun, self.jobs),
)
)
)
)
@property
def newversion_files(self):
"""Return list of files where the current version is newer than the
recorded version.
"""
return list(
chain(
*(
job.output
for job in filter(self.workflow.persistence.newversion, self.jobs)
)
)
)
def missing_temp(self, job):
"""
Return whether a temp file that is input of the given job is missing.
"""
for job_, files in self.depending[job].items():
if self.needrun(job_) and any(not f.exists for f in files):
return True
return False
def check_and_touch_output(
self,
job,
wait=3,
ignore_missing_output=False,
no_touch=False,
force_stay_on_remote=False,
):
""" Raise exception if output files of job are missing. """
expanded_output = [job.shadowed_path(path) for path in job.expanded_output]
if job.benchmark:
expanded_output.append(job.benchmark)
if not ignore_missing_output:
try:
wait_for_files(
expanded_output,
latency_wait=wait,
force_stay_on_remote=force_stay_on_remote,
ignore_pipe=True,
)
except IOError as e:
raise MissingOutputException(
str(e) + "\nThis might be due to "
"filesystem latency. If that is the case, consider to increase the "
"wait time with --latency-wait.",
rule=job.rule,
)
# Ensure that outputs are of the correct type (those flagged with directory()
# are directories and not files and vice versa).
for f in expanded_output:
if (f.is_directory and not os.path.isdir(f)) or (
os.path.isdir(f) and not f.is_directory
):
raise ImproperOutputException(job.rule, [f])
# It is possible, due to archive expansion or cluster clock skew, that
# the files appear older than the input. But we know they must be new,
# so touch them to update timestamps. This also serves to touch outputs
# when using the --touch flag.
# Note that if the input files somehow have a future date then this will
# not currently be spotted and the job will always be re-run.
if not no_touch:
for f in expanded_output:
# This won't create normal files if missing, but will create
# the flag file for directories.
if f.exists_local:
f.touch()
def unshadow_output(self, job, only_log=False):
""" Move files from shadow directory to real output paths. """
if not job.shadow_dir or not job.expanded_output:
return
files = job.log if only_log else chain(job.expanded_output, job.log)
for real_output in files:
shadow_output = job.shadowed_path(real_output).file
# Remake absolute symlinks as relative
if os.path.islink(shadow_output):
dest = os.readlink(shadow_output)
if os.path.isabs(dest):
rel_dest = os.path.relpath(dest, job.shadow_dir)
os.remove(shadow_output)
os.symlink(rel_dest, shadow_output)
if os.path.realpath(shadow_output) == os.path.realpath(real_output):
continue
logger.debug(
"Moving shadow output {} to destination {}".format(
shadow_output, real_output
)
)
shutil.move(shadow_output, real_output)
shutil.rmtree(job.shadow_dir)
def check_periodic_wildcards(self, job):
"""Raise an exception if a wildcard of the given job appears to be periodic,
indicating a cyclic dependency."""
for wildcard, value in job.wildcards_dict.items():
periodic_substring = self.periodic_wildcard_detector.is_periodic(value)
if periodic_substring is not None:
raise PeriodicWildcardError(
"The value {} in wildcard {} is periodically repeated ({}). "
"This would lead to an infinite recursion. "
"To avoid this, e.g. restrict the wildcards in this rule to certain values.".format(
periodic_substring, wildcard, value
),
rule=job.rule,
)
def handle_protected(self, job):
""" Write-protect output files that are marked with protected(). """
for f in job.expanded_output:
if f in job.protected_output:
logger.info("Write-protecting output file {}.".format(f))
f.protect()
def handle_touch(self, job):
""" Touches those output files that are marked for touching. """
for f in job.expanded_output:
if f in job.touch_output:
f = job.shadowed_path(f)
logger.info("Touching output file {}.".format(f))
f.touch_or_create()
assert os.path.exists(f)
def temp_input(self, job):
for job_, files in self.dependencies[job].items():
for f in filter(job_.temp_output.__contains__, files):
yield f
def temp_size(self, job):
"""Return the total size of temporary input files of the job.
If none, return 0.
"""
return sum(f.size for f in self.temp_input(job))
def handle_temp(self, job):
""" Remove temp files if they are no longer needed. Update temp_mtimes. """
if self.notemp:
return
is_temp = lambda f: is_flagged(f, "temp")
# handle temp input
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
# temp input
for job_, files in self.dependencies[job].items():
tempfiles = set(f for f in job_.expanded_output if is_temp(f))
yield from filterfalse(partial(needed, job_), tempfiles & files)
# temp output
if not job.dynamic_output and (
job not in self.targetjobs or job.rule.name == self.workflow.first_rule
):
tempfiles = (
f
for f in job.expanded_output
if is_temp(f) and f not in self.targetfiles
)
yield from filterfalse(partial(needed, job), tempfiles)
for f in unneeded_files():
logger.info("Removing temporary output file {}.".format(f))
f.remove(remove_non_empty_dir=True)
def handle_log(self, job, upload_remote=True):
for f in job.log:
if not f.exists_local:
# If log file was not created during job, create an empty one.
f.touch_or_create()
if upload_remote and f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
def handle_remote(self, job, upload=True):
""" Remove local files if they are no longer needed and upload. """
if upload:
# handle output files
files = list(job.expanded_output)
if job.benchmark:
files.append(job.benchmark)
for f in files:
if f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
remote_mtime = f.mtime
# immediately force local mtime to match remote,
# since conversions from S3 headers are not 100% reliable
# without this, newness comparisons may fail down the line
f.touch(times=(remote_mtime, remote_mtime))
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
if not self.keep_remote_local:
# handle input files
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
putative = (
lambda f: f.is_remote
and not f.protected
and not f.should_keep_local
)
generated_input = set()
for job_, files in self.dependencies[job].items():
generated_input |= files
for f in filter(putative, files):
if not needed(job_, f):
yield f
for f, f_ in zip(job.output, job.rule.output):
if putative(f) and not needed(job, f) and not f in self.targetfiles:
if f in job.dynamic_output:
for f_ in job.expand_dynamic(f_):
yield f_
else:
yield f
for f in filter(putative, job.input):
# TODO what about remote inputs that are used by multiple jobs?
if f not in generated_input:
yield f
for f in unneeded_files():
if f.exists_local:
logger.info("Removing local output file: {}".format(f))
f.remove()
def jobid(self, job):
"""Return job id of given job."""
if job.is_group():
return job.jobid
else:
return self._jobid[job]
def update(
self, jobs, file=None, visited=None, skip_until_dynamic=False, progress=False
):
""" Update the DAG by adding given jobs and their dependencies. """
if visited is None:
visited = set()
producer = None
exceptions = list()
jobs = sorted(jobs, reverse=not self.ignore_ambiguity)
cycles = list()
for job in jobs:
logger.dag_debug(dict(status="candidate", job=job))
if file in job.input:
cycles.append(job)
continue
if job in visited:
cycles.append(job)
continue
try:
self.check_periodic_wildcards(job)
self.update_(
job,
visited=set(visited),
skip_until_dynamic=skip_until_dynamic,
progress=progress,
)
# TODO this might fail if a rule discarded here is needed
# elsewhere
if producer:
if job < producer or self.ignore_ambiguity:
break
elif producer is not None:
raise AmbiguousRuleException(file, job, producer)
producer = job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
exceptions.append(ex)
except RecursionError as e:
raise WorkflowError(
e,
"If building the DAG exceeds the recursion limit, "
"this is likely due to a cyclic dependency."
"E.g. you might have a sequence of rules that "
"can generate their own input. Try to make "
"the output files more specific. "
"A common pattern is to have different prefixes "
"in the output files of different rules."
+ "\nProblematic file pattern: {}".format(file)
if file
else "",
)
if producer is None:
if cycles:
job = cycles[0]
raise CyclicGraphException(job.rule, file, rule=job.rule)
if len(exceptions) > 1:
raise WorkflowError(*exceptions)
elif len(exceptions) == 1:
raise exceptions[0]
else:
logger.dag_debug(dict(status="selected", job=producer))
logger.dag_debug(
dict(
file=file,
msg="Producer found, hence exceptions are ignored.",
exception=WorkflowError(*exceptions),
)
)
n = len(self.dependencies)
if progress and n % 1000 == 0 and n and self._progress != n:
logger.info("Processed {} potential jobs.".format(n))
self._progress = n
return producer
def update_(self, job, visited=None, skip_until_dynamic=False, progress=False):
""" Update the DAG by adding the given job and its dependencies. """
if job in self.dependencies:
return
if visited is None:
visited = set()
visited.add(job)
dependencies = self.dependencies[job]
potential_dependencies = self.collect_potential_dependencies(job)
skip_until_dynamic = skip_until_dynamic and not job.dynamic_output
missing_input = set()
producer = dict()
exceptions = dict()
for file, jobs in potential_dependencies.items():
# If possible, obtain inventory information starting from
# given file and store it in the IOCache.
# This should provide faster access to existence and mtime information
# than querying file by file. If the file type does not support inventory
# information, this call is a no-op.
file.inventory()
if not jobs:
# no producing job found
if not file.exists:
# file not found, hence missing input
missing_input.add(file)
# file found, no problem
continue
try:
selected_job = self.update(
jobs,
file=file,
visited=visited,
skip_until_dynamic=skip_until_dynamic or file in job.dynamic_input,
progress=progress,
)
producer[file] = selected_job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
if not file.exists:
self.delete_job(job, recursive=False) # delete job from tree
raise ex
else:
logger.dag_debug(
dict(
file=file,
msg="No producers found, but file is present on disk.",
exception=ex,
)
)
for file, job_ in producer.items():
dependencies[job_].add(file)
self.depending[job_][job].add(file)
if self.is_batch_rule(job.rule) and self.batch.is_final:
# For the final batch, ensure that all input files from
# previous batches are present on disk.
if any(
f for f in job.input if f not in potential_dependencies and not f.exists
):
raise WorkflowError(
"Unable to execute batch {} because not all previous batches "
"have been completed before or files have been deleted.".format(
self.batch
)
)
if missing_input:
self.delete_job(job, recursive=False) # delete job from tree
raise MissingInputException(job.rule, missing_input)
if skip_until_dynamic:
self._dynamic.add(job)
def update_needrun(self):
""" Update the information whether a job needs to be executed. """
output_mintime = dict()
def update_output_mintime(job):
try:
return output_mintime[job]
except KeyError:
for job_ in chain([job], self.depending[job]):
try:
t = output_mintime[job_]
except KeyError:
t = job_.output_mintime
if t is not None:
output_mintime[job] = t
return
output_mintime[job] = None
def update_needrun(job):
reason = self.reason(job)
noinitreason = not reason
updated_subworkflow_input = self.updated_subworkflow_files.intersection(
job.input
)
if (
job not in self.omitforce
and job.rule in self.forcerules
or not self.forcefiles.isdisjoint(job.output)
):
reason.forced = True
elif updated_subworkflow_input:
reason.updated_input.update(updated_subworkflow_input)
elif job in self.targetjobs:
# TODO find a way to handle added/removed input files here?
if not job.output and not job.benchmark:
if job.input:
if job.rule.norun:
reason.updated_input_run.update(
[f for f in job.input if not f.exists]
)
else:
reason.nooutput = True
else:
reason.noio = True
else:
if job.rule in self.targetrules:
missing_output = job.missing_output()
else:
missing_output = job.missing_output(
requested=set(chain(*self.depending[job].values()))
| self.targetfiles
)
reason.missing_output.update(missing_output)
if not reason:
output_mintime_ = output_mintime.get(job)
if output_mintime_:
updated_input = [
f for f in job.input if f.exists and f.is_newer(output_mintime_)
]
reason.updated_input.update(updated_input)
if noinitreason and reason:
reason.derived = False
reason = self.reason
_needrun = self._needrun
dependencies = self.dependencies
depending = self.depending
_needrun.clear()
candidates = list(self.jobs)
# Update the output mintime of all jobs.
# We traverse them in BFS (level order) starting from target jobs.
# Then, we check output mintime of job itself and all direct descendants,
# which have already been visited in the level before.
# This way, we achieve a linear runtime.
for job in candidates:
update_output_mintime(job)
# update prior reason for all candidate jobs
for job in candidates:
update_needrun(job)
queue = list(filter(reason, candidates))
visited = set(queue)
while queue:
job = queue.pop(0)
_needrun.add(job)
for job_, files in dependencies[job].items():
missing_output = job_.missing_output(requested=files)
reason(job_).missing_output.update(missing_output)
if missing_output and not job_ in visited:
visited.add(job_)
queue.append(job_)
for job_, files in depending[job].items():
if job_ in candidates:
reason(job_).updated_input_run.update(files)
if not job_ in visited:
visited.add(job_)
queue.append(job_)
# update len including finished jobs (because they have already increased the job counter)
self._len = len(self._finished | self._needrun)
def in_until(self, job):
"""Return whether given job has been specified via --until."""
return job.rule.name in self.untilrules or not self.untilfiles.isdisjoint(
job.output
)
def in_omitfrom(self, job):
"""Return whether given job has been specified via --omit-from."""
return job.rule.name in self.omitrules or not self.omitfiles.isdisjoint(
job.output
)
def until_jobs(self):
"""Returns a generator of jobs specified by untiljobs."""
return (job for job in self.jobs if self.in_until(job))
def omitfrom_jobs(self):
"""Returns a generator of jobs specified by omitfromjobs."""
return (job for job in self.jobs if self.in_omitfrom(job))
def downstream_of_omitfrom(self):
"""Returns the downstream of --omit-from rules or files and themselves."""
return self.bfs(self.depending, *self.omitfrom_jobs())
def delete_omitfrom_jobs(self):
"""Removes jobs downstream of jobs specified by --omit-from."""
if not self.omitrules and not self.omitfiles:
return
downstream_jobs = list(
self.downstream_of_omitfrom()
) # need to cast as list before deleting jobs
for job in downstream_jobs:
self.delete_job(job, recursive=False, add_dependencies=True)
def set_until_jobs(self):
"""Removes jobs downstream of jobs specified by --omit-from."""
if not self.untilrules and not self.untilfiles:
return
self.targetjobs = set(self.until_jobs())
def update_priority(self):
""" Update job priorities. """
prioritized = (
lambda job: job.rule in self.priorityrules
or not self.priorityfiles.isdisjoint(job.output)
)
for job in self.needrun_jobs:
self._priority[job] = job.rule.priority
for job in self.bfs(
self.dependencies,
*filter(prioritized, self.needrun_jobs),
stop=self.noneedrun_finished,
):
self._priority[job] = Job.HIGHEST_PRIORITY
def update_groups(self):
groups = dict()
for job in self.needrun_jobs:
if job.group is None:
continue
stop = lambda j: j.group != job.group
# BFS into depending needrun jobs if in same group
# Note: never go up here (into depending), because it may contain
# jobs that have been sorted out due to e.g. ruleorder.
group = self.group_job_factory.new(
job.group,
(
job
for job in self.bfs(self.dependencies, job, stop=stop)
if self.needrun(job)
),
)
# merge with previously determined groups if present
for j in group:
if j in groups:
other = groups[j]
other.merge(group)
group = other
# update assignment
for j in group:
if j not in groups:
groups[j] = group
self._group = groups
self._update_group_components()
def _update_group_components(self):
# span connected components if requested
for groupid, conn_components in groupby(
set(self._group.values()), key=lambda group: group.groupid
):
n_components = self.workflow.group_components.get(groupid, 1)
if n_components > 1:
for chunk in group_into_chunks(n_components, conn_components):
if len(chunk) > 1:
primary = chunk[0]
for secondary in chunk[1:]:
primary.merge(secondary)
for j in primary:
self._group[j] = primary
def update_ready(self, jobs=None):
"""Update information whether a job is ready to execute.
Given jobs must be needrun jobs!
"""
if jobs is None:
jobs = self.needrun_jobs
candidate_groups = set()
for job in jobs:
if not self.finished(job) and self._ready(job):
if job.group is None:
self._ready_jobs.add(job)
else:
group = self._group[job]
group.finalize()
candidate_groups.add(group)
self._ready_jobs.update(
group
for group in candidate_groups
if all(self._ready(job) for job in group)
)
def get_jobs_or_groups(self):
visited_groups = set()
for job in self.jobs:
if job.group is None:
yield job
else:
group = self._group[job]
if group in visited_groups:
continue
visited_groups.add(group)
yield group
def close_remote_objects(self):
"""Close all remote objects."""
for job in self.jobs:
if not self.needrun(job):
job.close_remote()
def postprocess(self):
"""Postprocess the DAG. This has to be invoked after any change to the
DAG topology."""
self.update_jobids()
self.update_needrun()
self.update_priority()
self.handle_pipes()
self.update_groups()
self.update_ready()
self.close_remote_objects()
self.update_checkpoint_outputs()
def handle_pipes(self):
"""Use pipes to determine job groups. Check if every pipe has exactly
one consumer"""
for job in self.needrun_jobs:
candidate_groups = set()
if job.group is not None:
candidate_groups.add(job.group)
all_depending = set()
has_pipe = False
for f in job.output:
if is_flagged(f, "pipe"):
if job.is_run:
raise WorkflowError(
"Rule defines pipe output but "
"uses a 'run' directive. This is "
"not possible for technical "
"reasons. Consider using 'shell' or "
"'script'.",
rule=job.rule,
)
has_pipe = True
depending = [
j for j, files in self.depending[job].items() if f in files
]
if len(depending) > 1:
raise WorkflowError(
"Output file {} is marked as pipe "
"but more than one job depends on "
"it. Make sure that any pipe "
"output is only consumed by one "
"job".format(f),
rule=job.rule,
)
elif len(depending) == 0:
raise WorkflowError(
"Output file {} is marked as pipe "
"but it has no consumer. This is "
"invalid because it can lead to "
"a dead lock.".format(f),
rule=job.rule,
)
depending = depending[0]
if depending.is_run:
raise WorkflowError(
"Rule consumes pipe input but "
"uses a 'run' directive. This is "
"not possible for technical "
"reasons. Consider using 'shell' or "
"'script'.",
rule=job.rule,
)
all_depending.add(depending)
if depending.group is not None:
candidate_groups.add(depending.group)
if not has_pipe:
continue
if len(candidate_groups) > 1:
raise WorkflowError(
"An output file is marked as "
"pipe, but consuming jobs "
"are part of conflicting "
"groups.",
rule=job.rule,
)
elif candidate_groups:
# extend the candidate group to all involved jobs
group = candidate_groups.pop()
else:
# generate a random unique group name
group = str(uuid.uuid4())
job.group = group
for j in all_depending:
j.group = group
def _ready(self, job):
"""Return whether the given job is ready to execute."""
group = self._group.get(job, None)
if group is None:
is_external_needrun_dep = self.needrun
else:
def is_external_needrun_dep(j):
g = self._group.get(j, None)
return self.needrun(j) and (g is None or g != group)
return self._finished.issuperset(
filter(is_external_needrun_dep, self.dependencies[job])
)
def update_checkpoint_dependencies(self, jobs=None):
"""Update dependencies of checkpoints."""
updated = False
self.update_checkpoint_outputs()
if jobs is None:
jobs = [job for job in self.jobs if not self.needrun(job)]
for job in jobs:
if job.is_checkpoint:
depending = list(self.depending[job])
# re-evaluate depending jobs, replace and update DAG
for j in depending:
logger.info("Updating job {} ({}).".format(self.jobid(j), j))
newjob = j.updated()
self.replace_job(j, newjob, recursive=False)
updated = True
if updated:
# This has to be done for each checkpoint,
# otherwise, jobs may be missing in the end.
self.postprocess()
return updated
def finish(self, job, update_dynamic=True):
"""Finish a given job (e.g. remove from ready jobs, mark depending jobs
as ready)."""
try:
self._ready_jobs.remove(job)
except KeyError:
pass
if job.is_group():
jobs = job
else:
jobs = [job]
self._finished.update(jobs)
updated_dag = False
if update_dynamic:
updated_dag = self.update_checkpoint_dependencies(jobs)
# mark depending jobs as ready
# skip jobs that are marked as until jobs
self.update_ready(
j
for job in jobs
for j in self.depending[job]
if not self.in_until(job) and self.needrun(j)
)
for job in jobs:
if update_dynamic and job.dynamic_output:
logger.info("Dynamically updating jobs")
newjob = self.update_dynamic(job)
if newjob:
# simulate that this job ran and was finished before
self.omitforce.add(newjob)
self._needrun.add(newjob)
self._finished.add(newjob)
updated_dag = True
self.postprocess()
self.handle_protected(newjob)
self.handle_touch(newjob)
if updated_dag:
# We might have new jobs, so we need to ensure that all conda envs
# and singularity images are set up.
if self.workflow.use_singularity:
self.pull_container_imgs()
if self.workflow.use_conda:
self.create_conda_envs()
def new_job(self, rule, targetfile=None, format_wildcards=None):
"""Create new job for given rule and (optional) targetfile.
This will reuse existing jobs with the same wildcards."""
key = (rule, targetfile)
if key in self.job_cache:
assert targetfile is not None
return self.job_cache[key]
wildcards_dict = rule.get_wildcards(targetfile)
job = self.job_factory.new(
rule,
self,
wildcards_dict=wildcards_dict,
format_wildcards=format_wildcards,
targetfile=targetfile,
)
self.cache_job(job)
return job
def cache_job(self, job):
for f in job.products:
self.job_cache[(job.rule, f)] = job
def update_dynamic(self, job):
"""Update the DAG by evaluating the output of the given job that
contains dynamic output files."""
dynamic_wildcards = job.dynamic_wildcards
if not dynamic_wildcards:
# this happens e.g. in dryrun if output is not yet present
return
depending = list(
filter(lambda job_: not self.finished(job_), self.bfs(self.depending, job))
)
newrule, non_dynamic_wildcards = job.rule.dynamic_branch(
dynamic_wildcards, input=False
)
self.specialize_rule(job.rule, newrule)
# no targetfile needed for job
newjob = self.new_job(newrule, format_wildcards=non_dynamic_wildcards)
self.replace_job(job, newjob)
for job_ in depending:
needs_update = any(
f.get_wildcard_names() & dynamic_wildcards.keys()
for f in job_.rule.dynamic_input
)
if needs_update:
newrule_ = job_.rule.dynamic_branch(dynamic_wildcards)
if newrule_ is not None:
self.specialize_rule(job_.rule, newrule_)
if not self.dynamic(job_):
logger.debug("Updating job {}.".format(job_))
newjob_ = self.new_job(
newrule_, targetfile=job_.output[0] if job_.output else None
)
unexpected_output = self.reason(
job_
).missing_output.intersection(newjob.existing_output)
if unexpected_output:
logger.warning(
"Warning: the following output files of rule {} were not "
"present when the DAG was created:\n{}".format(
newjob_.rule, unexpected_output
)
)
self.replace_job(job_, newjob_)
return newjob
def delete_job(self, job, recursive=True, add_dependencies=False):
"""Delete given job from DAG."""
if job in self.targetjobs:
self.targetjobs.remove(job)
if add_dependencies:
for _job in self.dependencies[job]:
self.targetjobs.add(_job)
for job_ in self.depending[job]:
del self.dependencies[job_][job]
del self.depending[job]
for job_ in self.dependencies[job]:
depending = self.depending[job_]
del depending[job]
if not depending and recursive:
self.delete_job(job_)
del self.dependencies[job]
if job in self._needrun:
self._len -= 1
self._needrun.remove(job)
del self._reason[job]
if job in self._finished:
self._finished.remove(job)
if job in self._dynamic:
self._dynamic.remove(job)
if job in self._ready_jobs:
self._ready_jobs.remove(job)
# remove from cache
for f in job.output:
try:
del self.job_cache[(job.rule, f)]
except KeyError:
pass
def replace_job(self, job, newjob, recursive=True):
"""Replace given job with new job."""
add_to_targetjobs = job in self.targetjobs
depending = list(self.depending[job].items())
if self.finished(job):
self._finished.add(newjob)
self.delete_job(job, recursive=recursive)
if add_to_targetjobs:
self.targetjobs.add(newjob)
self.cache_job(newjob)
self.update([newjob])
logger.debug("Replace {} with dynamic branch {}".format(job, newjob))
for job_, files in depending:
# if not job_.dynamic_input:
logger.debug("updating depending job {}".format(job_))
self.dependencies[job_][newjob].update(files)
self.depending[newjob][job_].update(files)
def specialize_rule(self, rule, newrule):
"""Specialize the given rule by inserting newrule into the DAG."""
assert newrule is not None
self.rules.add(newrule)
self.update_output_index()
def is_batch_rule(self, rule):
"""Return True if the underlying rule is to be used for batching the DAG."""
return self.batch is not None and rule.name == self.batch.rulename
def collect_potential_dependencies(self, job):
"""Collect all potential dependencies of a job. These might contain
ambiguities. The keys of the returned dict represent the files to be considered."""
dependencies = defaultdict(list)
# use a set to circumvent multiple jobs for the same file
# if user specified it twice
file2jobs = self.file2jobs
input_files = list(job.unique_input)
if self.is_batch_rule(job.rule):
# only consider the defined partition of the input files
input_batch = self.batch.get_batch(input_files)
if len(input_batch) != len(input_files):
logger.info(
"Considering only batch {} for DAG computation.\n"
"All jobs beyond the batching rule are omitted until the final batch.\n"
"Don't forget to run the other batches too.".format(self.batch)
)
input_files = input_batch
for file in input_files:
# omit the file if it comes from a subworkflow
if file in job.subworkflow_input:
continue
try:
if file in job.dependencies:
jobs = [self.new_job(job.dependencies[file], targetfile=file)]
else:
jobs = file2jobs(file)
dependencies[file].extend(jobs)
except MissingRuleException as ex:
# no dependency found
dependencies[file] = []
return dependencies
def bfs(self, direction, *jobs, stop=lambda job: False):
"""Perform a breadth-first traversal of the DAG."""
queue = list(jobs)
visited = set(queue)
while queue:
job = queue.pop(0)
if stop(job):
# stop criterion reached for this node
continue
yield job
for job_, _ in direction[job].items():
if not job_ in visited:
queue.append(job_)
visited.add(job_)
def level_bfs(self, direction, *jobs, stop=lambda job: False):
"""Perform a breadth-first traversal of the DAG, but also yield the
level together with each job."""
queue = [(job, 0) for job in jobs]
visited = set(jobs)
while queue:
job, level = queue.pop(0)
if stop(job):
# stop criterion reached for this node
continue
yield level, job
level += 1
for job_, _ in direction[job].items():
if not job_ in visited:
queue.append((job_, level))
visited.add(job_)
def dfs(self, direction, *jobs, stop=lambda job: False, post=True):
"""Perform depth-first traversal of the DAG."""
visited = set()
def _dfs(job):
"""Inner function for DFS traversal."""
if stop(job):
return
if not post:
yield job
for job_ in direction[job]:
if not job_ in visited:
visited.add(job_)
for j in _dfs(job_):
yield j
if post:
yield job
for job in jobs:
for job_ in self._dfs(direction, job, visited, stop=stop, post=post):
yield job_
def new_wildcards(self, job):
"""Return wildcards that are newly introduced in this job,
compared to its ancestors."""
new_wildcards = set(job.wildcards.items())
for job_ in self.dependencies[job]:
if not new_wildcards:
return set()
for wildcard in job_.wildcards.items():
new_wildcards.discard(wildcard)
return new_wildcards
def rule2job(self, targetrule):
"""Generate a new job from a given rule."""
if targetrule.has_wildcards():
raise WorkflowError(
"Target rules may not contain wildcards. Please specify concrete files or a rule without wildcards."
)
return self.new_job(targetrule)
def file2jobs(self, targetfile):
rules = self.output_index.match(targetfile)
jobs = []
exceptions = list()
for rule in rules:
if rule.is_producer(targetfile):
try:
jobs.append(self.new_job(rule, targetfile=targetfile))
except InputFunctionException as e:
exceptions.append(e)
if not jobs:
if exceptions:
raise exceptions[0]
raise MissingRuleException(targetfile)
return jobs
def rule_dot2(self):
dag = defaultdict(list)
visited = set()
preselect = set()
def preselect_parents(job):
for parent in self.depending[job]:
if parent in preselect:
continue
preselect.add(parent)
preselect_parents(parent)
def build_ruledag(job, key=lambda job: job.rule.name):
if job in visited:
return
visited.add(job)
deps = sorted(self.dependencies[job], key=key)
deps = [
(
group[0]
if preselect.isdisjoint(group)
else preselect.intersection(group).pop()
)
for group in (list(g) for _, g in groupby(deps, key))
]
dag[job].extend(deps)
preselect_parents(job)
for dep in deps:
build_ruledag(dep)
for job in self.targetjobs:
build_ruledag(job)
return self._dot(dag.keys(), print_wildcards=False, print_types=False, dag=dag)
def rule_dot(self):
graph = defaultdict(set)
for job in self.jobs:
graph[job.rule].update(dep.rule for dep in self.dependencies[job])
return self._dot(graph)
def dot(self):
def node2style(job):
if not self.needrun(job):
return "rounded,dashed"
if self.dynamic(job) or job.dynamic_input:
return "rounded,dotted"
return "rounded"
def format_wildcard(wildcard):
name, value = wildcard
if DYNAMIC_FILL in value:
value = "..."
return "{}: {}".format(name, value)
node2rule = lambda job: job.rule
node2label = lambda job: "\\n".join(
chain(
[job.rule.name], sorted(map(format_wildcard, self.new_wildcards(job)))
)
)
dag = {job: self.dependencies[job] for job in self.jobs}
return self._dot(
dag, node2rule=node2rule, node2style=node2style, node2label=node2label
)
def _dot(
self,
graph,
node2rule=lambda node: node,
node2style=lambda node: "rounded",
node2label=lambda node: node,
):
# color rules
huefactor = 2 / (3 * len(self.rules))
rulecolor = {
rule: "{:.2f} 0.6 0.85".format(i * huefactor)
for i, rule in enumerate(self.rules)
}
# markup
node_markup = '\t{}[label = "{}", color = "{}", style="{}"];'.format
edge_markup = "\t{} -> {}".format
# node ids
ids = {node: i for i, node in enumerate(graph)}
# calculate nodes
nodes = [
node_markup(
ids[node],
node2label(node),
rulecolor[node2rule(node)],
node2style(node),
)
for node in graph
]
# calculate edges
edges = [
edge_markup(ids[dep], ids[node])
for node, deps in graph.items()
for dep in deps
]
return textwrap.dedent(
"""\
digraph snakemake_dag {{
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, \
fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
{items}
}}\
"""
).format(items="\n".join(nodes + edges))
def filegraph_dot(
self,
node2rule=lambda node: node,
node2style=lambda node: "rounded",
node2label=lambda node: node,
):
# NOTE: This is code from the rule_dot method.
# This method could be split like there as well, however,
# it cannot easily reuse the _dot method due to the different node type
graph = defaultdict(set)
for job in self.jobs:
graph[job.rule].update(dep.rule for dep in self.dependencies[job])
# node ids
ids = {node: i for i, node in enumerate(graph)}
# Compute colors for rules
def hsv_to_htmlhexrgb(h, s, v):
"""Convert hsv colors to hex-encoded rgb colors usable by html."""
import colorsys
hex_r, hex_g, hex_b = (round(255 * x) for x in colorsys.hsv_to_rgb(h, s, v))
return "#{hex_r:0>2X}{hex_g:0>2X}{hex_b:0>2X}".format(
hex_r=hex_r, hex_g=hex_g, hex_b=hex_b
)
huefactor = 2 / (3 * len(self.rules))
rulecolor = {
rule: hsv_to_htmlhexrgb(i * huefactor, 0.6, 0.85)
for i, rule in enumerate(self.rules)
}
def resolve_input_functions(input_files):
"""Iterate over all input files and replace input functions
with a fixed string.
"""
files = []
for f in input_files:
if callable(f):
files.append("<input function>")
# NOTE: This is a workaround. It would be more informative
# to show the code of the input function here (if it is
# short enough). This cannot be easily done with the inspect
# module, since the line numbers in the Snakefile do not
# behave as expected. One (complicated) solution for this
# would be to find the Snakefile and directly extract the
# code of the function.
else:
files.append(repr(f).strip("'"))
return files
def html_node(node_id, node, color):
"""Assemble a html style node for graphviz"""
input_files = resolve_input_functions(node._input)
output_files = [repr(f).strip("'") for f in node._output]
input_header = (
'<b><font point-size="14">↪ input</font></b>'
if input_files
else ""
)
output_header = (
'<b><font point-size="14">output →</font></b>'
if output_files
else ""
)
html_node = [
'{node_id} [ shape=none, margin=0, label=<<table border="2" color="{color}" cellspacing="3" cellborder="0">'.format(
node_id=node_id, color=color
),
"<tr><td>",
'<b><font point-size="18">{node.name}</font></b>'.format(node=node),
"</td></tr>",
"<hr/>",
'<tr><td align="left"> {input_header} </td></tr>'.format(
input_header=input_header
),
]
for filename in sorted(input_files):
# Escape html relevant chars like '<' and '>' in filenames
# These can be added by input functions etc. and cannot be
# displayed in graphviz HTML nodes.
in_file = html.escape(filename)
html_node.extend(
[
"<tr>",
'<td align="left"><font face="monospace">{in_file}</font></td>'.format(
in_file=in_file
),
"</tr>",
]
)
html_node.append("<hr/>")
html_node.append(
'<tr><td align="right"> {output_header} </td> </tr>'.format(
output_header=output_header
)
)
for filename in sorted(output_files):
out_file = html.escape(filename)
html_node.extend(
[
"<tr>",
'<td align="left"><font face="monospace">{out_file}</font></td>'
"</tr>".format(out_file=out_file),
]
)
html_node.append("</table>>]")
return "\n".join(html_node)
nodes = [
html_node(ids[node], node, rulecolor[node2rule(node)]) for node in graph
]
# calculate edges
edge_markup = "\t{} -> {}".format
edges = [
edge_markup(ids[dep], ids[node], ids[dep], ids[node])
for node, deps in graph.items()
for dep in deps
]
return textwrap.dedent(
"""\
digraph snakemake_dag {{
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, \
fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
{items}
}}\
"""
).format(items="\n".join(nodes + edges))
def summary(self, detailed=False):
if detailed:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tinput-file(s)\tshellcmd\tstatus\tplan"
else:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tstatus\tplan"
for job in self.jobs:
output = job.rule.output if self.dynamic(job) else job.expanded_output
for f in output:
rule = self.workflow.persistence.rule(f)
rule = "-" if rule is None else rule
version = self.workflow.persistence.version(f)
version = "-" if version is None else str(version)
date = time.ctime(f.mtime) if f.exists else "-"
pending = "update pending" if self.reason(job) else "no update"
log = self.workflow.persistence.log(f)
log = "-" if log is None else ",".join(log)
input = self.workflow.persistence.input(f)
input = "-" if input is None else ",".join(input)
shellcmd = self.workflow.persistence.shellcmd(f)
shellcmd = "-" if shellcmd is None else shellcmd
# remove new line characters, leading and trailing whitespace
shellcmd = shellcmd.strip().replace("\n", "; ")
status = "ok"
if not f.exists:
status = "missing"
elif self.reason(job).updated_input:
status = "updated input files"
elif self.workflow.persistence.version_changed(job, file=f):
status = "version changed to {}".format(job.rule.version)
elif self.workflow.persistence.code_changed(job, file=f):
status = "rule implementation changed"
elif self.workflow.persistence.input_changed(job, file=f):
status = "set of input files changed"
elif self.workflow.persistence.params_changed(job, file=f):
status = "params changed"
if detailed:
yield "\t".join(
(f, date, rule, version, log, input, shellcmd, status, pending)
)
else:
yield "\t".join((f, date, rule, version, log, status, pending))
def archive(self, path):
"""Archives workflow such that it can be re-run on a different system.
Archiving includes git versioned files (i.e. Snakefiles, config files, ...),
ancestral input files and conda environments.
"""
if path.endswith(".tar"):
mode = "x"
elif path.endswith("tar.bz2"):
mode = "x:bz2"
elif path.endswith("tar.xz"):
mode = "x:xz"
elif path.endswith("tar.gz"):
mode = "x:gz"
else:
raise WorkflowError(
"Unsupported archive format "
"(supported: .tar, .tar.gz, .tar.bz2, .tar.xz)"
)
if os.path.exists(path):
raise WorkflowError("Archive already exists:\n" + path)
self.create_conda_envs(forceall=True)
try:
workdir = Path(os.path.abspath(os.getcwd()))
with tarfile.open(path, mode=mode, dereference=True) as archive:
archived = set()
def add(path):
if workdir not in Path(os.path.abspath(path)).parents:
logger.warning(
"Path {} cannot be archived: "
"not within working directory.".format(path)
)
else:
f = os.path.relpath(path)
if f not in archived:
archive.add(f)
archived.add(f)
logger.info("archived " + f)
logger.info(
"Archiving snakefiles, scripts and files under "
"version control..."
)
for f in self.workflow.get_sources():
add(f)
logger.info("Archiving external input files...")
for job in self.jobs:
# input files
for f in job.input:
if not any(
f in files for files in self.dependencies[job].values()
):
# this is an input file that is not created by any job
add(f)
logger.info("Archiving conda environments...")
envs = set()
for job in self.jobs:
if job.conda_env_file:
env_archive = job.archive_conda_env()
envs.add(env_archive)
for env in envs:
add(env)
except (Exception, BaseException) as e:
os.remove(path)
raise e
def clean(self, only_temp=False, dryrun=False):
"""Removes files generated by the workflow."""
for job in self.jobs:
for f in job.output:
if not only_temp or is_flagged(f, "temp"):
# The reason for the second check is that dangling
# symlinks fail f.exists.
if f.exists or os.path.islink(f):
if f.protected:
logger.error("Skipping write-protected file {}.".format(f))
else:
msg = "Deleting {}" if not dryrun else "Would delete {}"
logger.info(msg.format(f))
if not dryrun:
# Remove non-empty dirs if flagged as temp()
f.remove(remove_non_empty_dir=only_temp)
def list_untracked(self):
"""List files in the workdir that are not in the dag."""
used_files = set()
files_in_cwd = set()
for job in self.jobs:
used_files.update(
os.path.relpath(file)
for file in chain(job.local_input, job.local_output, job.log)
)
for root, dirs, files in os.walk(os.getcwd()):
# Ignore hidden files and don't traverse into hidden dirs
files_in_cwd.update(
[
os.path.relpath(os.path.join(root, f))
for f in files
if not f[0] == "."
]
)
dirs[:] = [d for d in dirs if not d[0] == "."]
for f in sorted(list(files_in_cwd - used_files)):
logger.info(f)
def d3dag(self, max_jobs=10000):
def node(job):
jobid = self.jobid(job)
return {
"id": jobid,
"value": {
"jobid": jobid,
"label": job.rule.name,
"rule": job.rule.name,
},
}
def edge(a, b):
return {"u": self.jobid(a), "v": self.jobid(b)}
jobs = list(self.jobs)
if len(jobs) > max_jobs:
logger.info(
"Job-DAG is too large for visualization (>{} jobs).".format(max_jobs)
)
else:
logger.d3dag(
nodes=[node(job) for job in jobs],
edges=[
edge(dep, job)
for job in jobs
for dep in self.dependencies[job]
if self.needrun(dep)
],
)
def stats(self):
rules = Counter()
rules.update(job.rule for job in self.needrun_jobs)
rules.update(job.rule for job in self.finished_jobs)
yield "Job counts:"
yield "\tcount\tjobs"
for rule, count in sorted(rules.most_common(), key=lambda item: item[0].name):
yield "\t{}\t{}".format(count, rule)
yield "\t{}".format(len(self))
def __str__(self):
return self.dot()
def __len__(self):
return self._len
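# --- illustrative sketch (not part of snakemake) ------------------------------
# The DAG above keeps its edges in two mirrored mappings:
#   self.dependencies[job][parent] -> set of files the parent produces for job
#   self.depending[parent][job]    -> the same files, seen from the parent
# bfs()/dfs() only need ``direction[job]`` to be a mapping from a job to its
# neighbours, so a plain dict of dicts reproduces the traversal order. All
# names below are made up for the demonstration.
def _bfs_sketch():
    depending = {
        "a": {"b": {"a.txt"}},  # job "b" consumes a.txt produced by "a"
        "b": {"c": {"b.txt"}},  # job "c" consumes b.txt produced by "b"
        "c": {},
    }
    queue, visited, order = ["a"], {"a"}, []
    while queue:
        job = queue.pop(0)
        order.append(job)
        for job_ in depending[job]:
            if job_ not in visited:
                visited.add(job_)
                queue.append(job_)
    return order  # -> ["a", "b", "c"], i.e. jobs downstream of "a" in BFS order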
| 37.205561 | 132 | 0.529814 | ["MIT"] | baileythegreen/snakemake | snakemake/dag.py | 74,934 | Python |
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
pool_forward = __import__('1-pool_forward').pool_forward
if __name__ == "__main__":
np.random.seed(0)
lib = np.load('../data/MNIST.npz')
X_train = lib['X_train']
m, h, w = X_train.shape
X_train_a = X_train.reshape((-1, h, w, 1))
X_train_b = 1 - X_train_a
X_train_c = np.concatenate((X_train_a, X_train_b), axis=3)
print(X_train_c.shape)
plt.imshow(X_train_c[0, :, :, 0])
plt.show()
plt.imshow(X_train_c[0, :, :, 1])
plt.show()
A = pool_forward(X_train_c, (2, 2), stride=(2, 2))
print(A.shape)
plt.imshow(A[0, :, :, 0])
plt.show()
plt.imshow(A[0, :, :, 1])
plt.show()
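# --- illustrative sketch (assumption, not the actual '1-pool_forward' module) -
# pool_forward is imported above; a minimal max/average pooling forward pass
# consistent with how it is called here (input of shape (m, h, w, c), a kernel
# shape and a stride) could look like the function below. numpy is already
# imported above as np.
def _pool_forward_sketch(A_prev, kernel_shape, stride=(1, 1), mode='max'):
    m, h, w, c = A_prev.shape
    kh, kw = kernel_shape
    sh, sw = stride
    out_h = (h - kh) // sh + 1
    out_w = (w - kw) // sw + 1
    out = np.zeros((m, out_h, out_w, c))
    op = np.max if mode == 'max' else np.mean
    for i in range(out_h):
        for j in range(out_w):
            window = A_prev[:, i * sh:i * sh + kh, j * sw:j * sw + kw, :]
            out[:, i, j, :] = op(window, axis=(1, 2))
    return out  # for the call above: (m, 28, 28, 2) -> (m, 14, 14, 2)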
| 26.518519 | 62 | 0.606145 | ["MIT"] | kyeeh/holbertonschool-machine_learning | supervised_learning/0x07-cnn/1-main.py | 716 | Python |
from starlette.datastructures import URL
from dashboard.pagination import PageControl, get_page_controls, get_page_number
def test_single_page_does_not_include_any_pagination_controls():
"""
When there is only a single page, no pagination controls should render.
"""
url = URL("/")
controls = get_page_controls(url, current_page=1, total_pages=1)
assert controls == []
def test_first_page_in_pagination_controls():
"""
First page in pagination controls, should render as:
Previous [1] 2 3 4 5 Next
"""
url = URL("/")
controls = get_page_controls(url, current_page=1, total_pages=5)
assert controls == [
PageControl(text="Previous", is_disabled=True),
PageControl(text="1", is_active=True, url=URL("/")),
PageControl(text="2", url=URL("/?page=2")),
PageControl(text="3", url=URL("/?page=3")),
PageControl(text="4", url=URL("/?page=4")),
PageControl(text="5", url=URL("/?page=5")),
PageControl(text="Next", url=URL("/?page=2")),
]
def test_second_page_in_pagination_controls():
"""
Second page in pagination controls, should render as:
Previous 1 [2] 3 4 5 Next
"""
url = URL("/")
controls = get_page_controls(url, current_page=2, total_pages=5)
assert controls == [
PageControl(text="Previous", url=URL("/")), # No query parameter needed.
PageControl(text="1", url=URL("/")),
PageControl(text="2", is_active=True, url=URL("/?page=2")),
PageControl(text="3", url=URL("/?page=3")),
PageControl(text="4", url=URL("/?page=4")),
PageControl(text="5", url=URL("/?page=5")),
PageControl(text="Next", url=URL("/?page=3")),
]
def test_middle_page_in_pagination_controls():
"""
Middle page in pagination controls, should render as:
Previous 1 2 [3] 4 5 Next
"""
url = URL("/?page=3")
controls = get_page_controls(url, current_page=3, total_pages=5)
assert controls == [
PageControl(text="Previous", url=URL("/?page=2")),
PageControl(text="1", url=URL("/")),
PageControl(text="2", url=URL("/?page=2")),
PageControl(text="3", is_active=True, url=URL("/?page=3")),
PageControl(text="4", url=URL("/?page=4")),
PageControl(text="5", url=URL("/?page=5")),
PageControl(text="Next", url=URL("/?page=4")),
]
def test_last_page_in_pagination_controls():
"""
Last page in pagination controls, should render as:
Previous 1 2 3 4 [5] Next
"""
url = URL("/?page=5")
controls = get_page_controls(url, current_page=5, total_pages=5)
assert controls == [
PageControl(text="Previous", url=URL("/?page=4")),
PageControl(text="1", url=URL("/")),
PageControl(text="2", url=URL("/?page=2")),
PageControl(text="3", url=URL("/?page=3")),
PageControl(text="4", url=URL("/?page=4")),
PageControl(text="5", url=URL("/?page=5"), is_active=True),
PageControl(text="Next", is_disabled=True),
]
def test_first_page_in_long_pagination_controls():
"""
First page in long pagination controls, should render as:
Previous [1] 2 3 4 5 ... 49 50 Next
"""
url = URL("/")
controls = get_page_controls(url, current_page=1, total_pages=50)
assert controls == [
PageControl(text="Previous", is_disabled=True),
PageControl(text="1", is_active=True, url=URL("/")),
PageControl(text="2", url=URL("/?page=2")),
PageControl(text="3", url=URL("/?page=3")),
PageControl(text="4", url=URL("/?page=4")),
PageControl(text="5", url=URL("/?page=5")),
PageControl(text="…", is_disabled=True),
PageControl(text="49", url=URL("/?page=49")),
PageControl(text="50", url=URL("/?page=50")),
PageControl(text="Next", url=URL("/?page=2")),
]
def test_last_page_in_long_pagination_controls():
"""
Last page in long pagination controls, should render as:
Previous 1 2 ... 46 47 48 49 [50] Next
"""
url = URL("/?page=50")
controls = get_page_controls(url, current_page=50, total_pages=50)
assert controls == [
PageControl(text="Previous", url=URL("/?page=49")),
PageControl(text="1", url=URL("/")),
PageControl(text="2", url=URL("/?page=2")),
PageControl(text="…", is_disabled=True),
PageControl(text="46", url=URL("/?page=46")),
PageControl(text="47", url=URL("/?page=47")),
PageControl(text="48", url=URL("/?page=48")),
PageControl(text="49", url=URL("/?page=49")),
PageControl(text="50", is_active=True, url=URL("/?page=50")),
PageControl(text="Next", is_disabled=True),
]
def test_ellipsis_fill_in():
"""
If an ellipsis marker can be replaced with a single page marker, then
we should do so.
"""
url = URL("/?page=6")
controls = get_page_controls(url, current_page=6, total_pages=11)
assert controls == [
PageControl(text="Previous", url=URL("/?page=5")),
PageControl(text="1", url=URL("/")),
PageControl(text="2", url=URL("/?page=2")),
PageControl(text="3", url=URL("/?page=3")), # Ellipsis fill-in case.
PageControl(text="4", url=URL("/?page=4")),
PageControl(text="5", url=URL("/?page=5")),
PageControl(text="6", url=URL("/?page=6"), is_active=True),
PageControl(text="7", url=URL("/?page=7")),
PageControl(text="8", url=URL("/?page=8")),
PageControl(text="9", url=URL("/?page=9")), # Ellipsis fill-in case.
PageControl(text="10", url=URL("/?page=10")),
PageControl(text="11", url=URL("/?page=11")),
PageControl(text="Next", url=URL("/?page=7")),
]
def test_default_page_number():
url = URL("/")
page = get_page_number(url=url)
assert page == 1
def test_explicit_page_number():
url = URL("/?page=2")
page = get_page_number(url=url)
assert page == 2
def test_invalid_page_number():
url = URL("/?page=invalid")
page = get_page_number(url=url)
assert page == 1
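# --- illustrative sketch (assumption, not the dashboard package's code) -------
# A minimal get_page_number consistent with the three tests above: read the
# "page" query parameter from the URL and fall back to 1 when it is missing
# or not a valid integer.
from urllib.parse import parse_qs


def _get_page_number_sketch(url: URL) -> int:
    params = parse_qs(url.query)
    try:
        return int(params["page"][0])
    except (KeyError, ValueError):
        return 1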
| 36.005917 | 81 | 0.600329 | ["BSD-3-Clause"] | encode/dashboard | tests/test_pagination.py | 6,089 | Python |
# Generated by Django 2.2 on 2019-04-18 10:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0006_auto_20190417_2232'),
]
operations = [
migrations.AlterField(
model_name='question',
name='order',
field=models.IntegerField(),
),
]
| 19.578947 | 45 | 0.58871 | ["MIT"] | chinhle23/monitorme | core/migrations/0007_auto_20190418_0646.py | 372 | Python |
# Generated by Django 2.2.1 on 2020-03-26 05:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webapi', '0015_auto_20200326_0955'),
]
operations = [
migrations.AlterField(
model_name='property',
name='property_name',
field=models.CharField(default='', max_length=255),
),
migrations.AlterField(
model_name='unit',
name='symbol',
field=models.CharField(default='', max_length=255),
),
]
| 23.791667 | 63 | 0.579685 | ["MIT"] | kumagallium/labmine-api | src/webapi/migrations/0016_auto_20200326_1417.py | 571 | Python |
# MIT License
#
# Copyright (c) 2018 Haoxintong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""""""
import os
import time
import mxnet as mx
import numpy as np
from gluonfr.loss import ArcLoss
from mxnet.gluon.data.vision import MNIST
from mxnet import nd, gluon, metric as mtc, autograd as ag
from examples.mnist.net.lenet import LeNetPlus
from examples.mnist.utils import transform_train, transform_val, plot_result
os.environ['MXNET_GLUON_REPO'] = 'https://apache-mxnet.s3.cn-north-1.amazonaws.com.cn/'
os.environ['MXNET_ENABLE_GPU_P2P'] = '0'
def validate(net, val_data, ctx, loss, plot=False):
metric = mtc.Accuracy()
val_loss = 0
ebs = []
lbs = []
for i, batch in enumerate(val_data):
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
ots = [net(X) for X in data]
embedds = [ot[0] for ot in ots]
outputs = [ot[1] for ot in ots]
losses = [loss(yhat, y) for yhat, y in zip(outputs, labels)]
metric.update(labels, outputs)
val_loss += sum([l.mean().asscalar() for l in losses]) / len(losses)
if plot:
for es, ls in zip(embedds, labels):
assert len(es) == len(ls)
for idx in range(len(es)):
ebs.append(es[idx].asnumpy())
lbs.append(ls[idx].asscalar())
if plot:
ebs = np.vstack(ebs)
lbs = np.hstack(lbs)
_, val_acc = metric.get()
return val_acc, val_loss / len(val_data), ebs, lbs
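# --- illustrative sketch (assumption, not gluonfr's ArcLoss implementation) ---
# ArcLoss(s=margin_s, m=margin_m) used in train() applies an additive angular
# margin: for the true class the cosine logit cos(theta) is replaced by
# cos(theta + m), then all logits are rescaled by s before softmax
# cross-entropy. The margin step alone, in plain numpy (np is imported above):
def _arc_margin_sketch(cos_theta, labels, s=5.0, m=0.2):
    # cos_theta: (N, C) cosines between L2-normalised embeddings and class
    # weights; labels: (N,) integer class ids.
    labels = np.asarray(labels)
    idx = np.arange(len(labels))
    theta = np.arccos(np.clip(cos_theta, -1.0, 1.0))
    out = np.array(cos_theta, dtype=float, copy=True)
    out[idx, labels] = np.cos(theta[idx, labels] + m)
    return s * out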
def train():
epochs = 100
lr = 0.1
lr_steps = [40, 70, np.inf]
momentum = 0.9
wd = 5e-4
plot_period = 5
ctx = [mx.gpu(i) for i in range(2)]
batch_size = 256
margin_s = 5
margin_m = 0.2
train_set = MNIST(train=True, transform=transform_train)
train_data = gluon.data.DataLoader(train_set, batch_size, True, num_workers=4, last_batch='discard')
val_set = MNIST(train=False, transform=transform_val)
val_data = gluon.data.DataLoader(val_set, batch_size, shuffle=False, num_workers=4)
net = LeNetPlus(embedding_size=64, feature_norm=True, weight_norm=True)
net.initialize(init=mx.init.MSRAPrelu(), ctx=ctx)
# net.load_parameters("./pretrained_mnist.params", ctx=ctx)
net.hybridize()
loss = ArcLoss(s=margin_s, m=margin_m, classes=10)
train_params = net.collect_params()
trainer = gluon.Trainer(train_params, 'sgd', {'learning_rate': lr, 'momentum': momentum, 'wd': wd})
lr_counter = 0
metric = mtc.Accuracy()
num_batch = len(train_data)
for epoch in range(epochs+1):
if epoch == lr_steps[lr_counter]:
trainer.set_learning_rate(trainer.learning_rate * 0.1)
lr_counter += 1
# if (epoch % plot_period) == 0:
# plot = True
# else:
plot = False
train_loss = 0
metric.reset()
tic = time.time()
ebs = []
lbs = []
for batch in train_data:
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
with ag.record():
ots = [net(X) for X in data]
embedds = [ot[0] for ot in ots]
outputs = [ot[1] for ot in ots]
losses = [loss(yhat, y) for yhat, y in zip(outputs, labels)]
for l in losses:
ag.backward(l)
if plot:
for es, ls in zip(embedds, labels):
assert len(es) == len(ls)
for idx in range(len(es)):
ebs.append(es[idx].asnumpy())
lbs.append(ls[idx].asscalar())
trainer.step(batch_size)
metric.update(labels, outputs)
train_loss += sum([l.mean().asscalar() for l in losses]) / len(losses)
_, train_acc = metric.get()
train_loss /= num_batch
val_acc, val_loss, val_ebs, val_lbs = validate(net, val_data, ctx, loss, plot)
if plot:
ebs = np.vstack(ebs)
lbs = np.hstack(lbs)
plot_result(ebs, lbs, os.path.join("../../resources", "arcloss-train-epoch{}.png".format(epoch)))
plot_result(val_ebs, val_lbs, os.path.join("../../resources", "arcloss-val-epoch{}.png".format(epoch)))
toc = time.time()
print('[epoch % 3d] train accuracy: %.6f, train loss: %.6f | '
'val accuracy: %.6f, val loss: %.6f, time: %.6f'
% (epoch, train_acc, train_loss, val_acc, val_loss, toc - tic))
# if epoch == 10:
# net.save_parameters("./pretrained_mnist.params")
# net.save_parameters("./models/attention%d-cifar10-epoch-%d.params" % (args.num_layers, epoch))
if __name__ == '__main__':
    train()
| 36.339394 | 115 | 0.624083 | ["MIT"] | OmoooJ/gluon-facex | examples/mnist/train_mnist_arcloss.py | 5,996 | Python |
"""
A mechanism for plotting field values along a line through a dataset
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2017, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
from collections import defaultdict
from yt.funcs import \
iterable, \
mylog
from yt.units.unit_object import \
Unit
from yt.units.yt_array import \
YTArray
from yt.visualization.base_plot_types import \
PlotMPL
from yt.visualization.plot_container import \
PlotContainer, \
PlotDictionary, \
log_transform, \
linear_transform, \
invalidate_plot
class LineBuffer(object):
r"""
LineBuffer(ds, start_point, end_point, npoints, label = None)
This takes a data source and implements a protocol for generating a
'pixelized', fixed-resolution line buffer. In other words, LineBuffer
takes a starting point, ending point, and number of sampling points and
can subsequently generate YTArrays of field values along the sample points.
Parameters
----------
ds : :class:`yt.data_objects.static_output.Dataset`
This is the dataset object holding the data that can be sampled by the
LineBuffer
start_point : n-element list, tuple, ndarray, or YTArray
Contains the coordinates of the first point for constructing the LineBuffer.
Must contain n elements where n is the dimensionality of the dataset.
end_point : n-element list, tuple, ndarray, or YTArray
        Contains the coordinates of the last point for constructing the LineBuffer.
Must contain n elements where n is the dimensionality of the dataset.
npoints : int
How many points to sample between start_point and end_point
Examples
--------
>>> lb = yt.LineBuffer(ds, (.25, 0, 0), (.25, 1, 0), 100)
>>> lb[('all', 'u')].max()
0.11562424257143075 dimensionless
"""
def __init__(self, ds, start_point, end_point, npoints, label=None):
self.ds = ds
self.start_point = _validate_point(start_point, ds, start=True)
self.end_point = _validate_point(end_point, ds)
self.npoints = npoints
self.label = label
self.data = {}
def keys(self):
return self.data.keys()
def __setitem__(self, item, val):
self.data[item] = val
def __getitem__(self, item):
if item in self.data: return self.data[item]
mylog.info("Making a line buffer with %d points of %s" % \
(self.npoints, item))
self.points, self.data[item] = self.ds.coordinates.pixelize_line(item,
self.start_point,
self.end_point,
self.npoints)
return self.data[item]
def __delitem__(self, item):
del self.data[item]
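# --- illustrative sketch (assumption, not yt's actual sampling code) ----------
# Conceptually, the positions a LineBuffer samples are npoints evenly spaced
# coordinates between start_point and end_point; the real values come from
# ds.coordinates.pixelize_line, which also handles the field lookup and any
# coordinate-system specifics.
def _line_sample_points_sketch(start_point, end_point, npoints):
    start = np.asarray(start_point, dtype=float)
    end = np.asarray(end_point, dtype=float)
    t = np.linspace(0.0, 1.0, npoints)[:, None]
    return (1.0 - t) * start + t * end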
class LinePlotDictionary(PlotDictionary):
def __init__(self, data_source):
super(LinePlotDictionary, self).__init__(data_source)
self.known_dimensions = {}
def _sanitize_dimensions(self, item):
field = self.data_source._determine_fields(item)[0]
finfo = self.data_source.ds.field_info[field]
dimensions = Unit(
finfo.units, registry=self.data_source.ds.unit_registry).dimensions
if dimensions not in self.known_dimensions:
self.known_dimensions[dimensions] = item
ret_item = item
else:
ret_item = self.known_dimensions[dimensions]
return ret_item
def __getitem__(self, item):
ret_item = self._sanitize_dimensions(item)
return super(LinePlotDictionary, self).__getitem__(ret_item)
def __setitem__(self, item, value):
ret_item = self._sanitize_dimensions(item)
super(LinePlotDictionary, self).__setitem__(ret_item, value)
def __contains__(self, item):
ret_item = self._sanitize_dimensions(item)
return super(LinePlotDictionary, self).__contains__(ret_item)
class LinePlot(PlotContainer):
r"""
A class for constructing line plots
Parameters
----------
ds : :class:`yt.data_objects.static_output.Dataset`
This is the dataset object corresponding to the
simulation output to be plotted.
fields : string / tuple, or list of strings / tuples
The name(s) of the field(s) to be plotted.
start_point : n-element list, tuple, ndarray, or YTArray
Contains the coordinates of the first point for constructing the line.
Must contain n elements where n is the dimensionality of the dataset.
end_point : n-element list, tuple, ndarray, or YTArray
        Contains the coordinates of the last point for constructing the line.
Must contain n elements where n is the dimensionality of the dataset.
npoints : int
How many points to sample between start_point and end_point for
constructing the line plot
figure_size : int or two-element iterable of ints
Size in inches of the image.
Default: 5 (5x5)
fontsize : int
Font size for all text in the plot.
Default: 14
field_labels : dictionary
Keys should be the field names. Values should be latex-formattable
strings used in the LinePlot legend
Default: None
Example
-------
>>> import yt
>>>
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>>
>>> plot = yt.LinePlot(ds, 'density', [0, 0, 0], [1, 1, 1], 512)
>>> plot.add_legend('density')
>>> plot.set_x_unit('cm')
>>> plot.set_unit('density', 'kg/cm**3')
>>> plot.save()
"""
_plot_type = 'line_plot'
def __init__(self, ds, fields, start_point, end_point, npoints,
figure_size=5., fontsize=14., field_labels=None):
"""
Sets up figure and axes
"""
line = LineBuffer(ds, start_point, end_point, npoints, label=None)
self.lines = [line]
self._initialize_instance(self, ds, fields, figure_size,
fontsize, field_labels)
self._setup_plots()
@classmethod
def _initialize_instance(cls, obj, ds, fields, figure_size=5., fontsize=14.,
field_labels=None):
obj._x_unit = None
obj._y_units = {}
obj._titles = {}
data_source = ds.all_data()
obj.fields = data_source._determine_fields(fields)
obj.plots = LinePlotDictionary(data_source)
obj.include_legend = defaultdict(bool)
super(LinePlot, obj).__init__(data_source, figure_size, fontsize)
for f in obj.fields:
finfo = obj.data_source.ds._get_field_info(*f)
if finfo.take_log:
obj._field_transform[f] = log_transform
else:
obj._field_transform[f] = linear_transform
if field_labels is None:
obj.field_labels = {}
else:
obj.field_labels = field_labels
for f in obj.fields:
if f not in obj.field_labels:
obj.field_labels[f] = f[1]
@classmethod
def from_lines(cls, ds, fields, lines, figure_size=5., font_size=14., field_labels=None):
"""
A class method for constructing a line plot from multiple sampling lines
Parameters
----------
ds : :class:`yt.data_objects.static_output.Dataset`
This is the dataset object corresponding to the
simulation output to be plotted.
fields : field name or list of field names
The name(s) of the field(s) to be plotted.
lines : list of :class:`yt.visualization.line_plot.LineBuffer` instances
The lines from which to sample data
figure_size : int or two-element iterable of ints
Size in inches of the image.
Default: 5 (5x5)
fontsize : int
Font size for all text in the plot.
Default: 14
field_labels : dictionary
Keys should be the field names. Values should be latex-formattable
strings used in the LinePlot legend
Default: None
Example
--------
>>> ds = yt.load('SecondOrderTris/RZ_p_no_parts_do_nothing_bcs_cone_out.e', step=-1)
>>> fields = [field for field in ds.field_list if field[0] == 'all']
>>> lines = []
>>> lines.append(yt.LineBuffer(ds, [0.25, 0, 0], [0.25, 1, 0], 100, label='x = 0.25'))
>>> lines.append(yt.LineBuffer(ds, [0.5, 0, 0], [0.5, 1, 0], 100, label='x = 0.5'))
>>> plot = yt.LinePlot.from_lines(ds, fields, lines)
>>> plot.save()
"""
obj = cls.__new__(cls)
obj.lines = lines
cls._initialize_instance(obj, ds, fields, figure_size, font_size, field_labels)
obj._setup_plots()
return obj
def _get_plot_instance(self, field):
fontscale = self._font_properties._size / 14.
top_buff_size = 0.35*fontscale
x_axis_size = 1.35*fontscale
y_axis_size = 0.7*fontscale
right_buff_size = 0.2*fontscale
if iterable(self.figure_size):
figure_size = self.figure_size
else:
figure_size = (self.figure_size, self.figure_size)
xbins = np.array([x_axis_size, figure_size[0],
right_buff_size])
ybins = np.array([y_axis_size, figure_size[1], top_buff_size])
size = [xbins.sum(), ybins.sum()]
x_frac_widths = xbins/size[0]
y_frac_widths = ybins/size[1]
axrect = (
x_frac_widths[0],
y_frac_widths[0],
x_frac_widths[1],
y_frac_widths[1],
)
try:
plot = self.plots[field]
except KeyError:
plot = PlotMPL(self.figure_size, axrect, None, None)
self.plots[field] = plot
return plot
def _setup_plots(self):
if self._plot_valid:
return
for plot in self.plots.values():
plot.axes.cla()
for line in self.lines:
dimensions_counter = defaultdict(int)
for field in self.fields:
finfo = self.ds.field_info[field]
dimensions = Unit(finfo.units,
registry=self.ds.unit_registry).dimensions
dimensions_counter[dimensions] += 1
for field in self.fields:
# get plot instance
plot = self._get_plot_instance(field)
# calculate x and y
x, y = self.ds.coordinates.pixelize_line(
field, line.start_point, line.end_point, line.npoints)
# scale x and y to proper units
if self._x_unit is None:
unit_x = x.units
else:
unit_x = self._x_unit
if field in self._y_units:
unit_y = self._y_units[field]
else:
unit_y = y.units
x = x.to(unit_x)
y = y.to(unit_y)
# determine legend label
str_seq = []
str_seq.append(line.label)
str_seq.append(self.field_labels[field])
delim = "; "
legend_label = delim.join(filter(None, str_seq))
# apply plot to matplotlib axes
plot.axes.plot(x, y, label=legend_label)
# apply log transforms if requested
if self._field_transform[field] != linear_transform:
if (y < 0).any():
plot.axes.set_yscale('symlog')
else:
plot.axes.set_yscale('log')
# set font properties
plot._set_font_properties(self._font_properties, None)
# set x and y axis labels
axes_unit_labels = self._get_axes_unit_labels(unit_x, unit_y)
if self._xlabel is not None:
x_label = self._xlabel
else:
x_label = r'$\rm{Path\ Length' + axes_unit_labels[0]+'}$'
if self._ylabel is not None:
y_label = self._ylabel
else:
finfo = self.ds.field_info[field]
dimensions = Unit(finfo.units,
registry=self.ds.unit_registry).dimensions
if dimensions_counter[dimensions] > 1:
y_label = (r'$\rm{Multiple\ Fields}$' + r'$\rm{' +
axes_unit_labels[1]+'}$')
else:
y_label = (finfo.get_latex_display_name() + r'$\rm{' +
axes_unit_labels[1]+'}$')
plot.axes.set_xlabel(x_label)
plot.axes.set_ylabel(y_label)
# apply title
if field in self._titles:
plot.axes.set_title(self._titles[field])
# apply legend
dim_field = self.plots._sanitize_dimensions(field)
if self.include_legend[dim_field]:
plot.axes.legend()
self._plot_valid = True
@invalidate_plot
def annotate_legend(self, field):
"""
Adds a legend to the `LinePlot` instance. The `_sanitize_dimensions`
call ensures that a legend label will be added for every field of
a multi-field plot
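Example
-------
Illustrative sketch; assumes ``ds`` is an already-loaded dataset.
>>> plot = yt.LinePlot(ds, 'density', [0, 0, 0], [1, 1, 1], 512)
>>> plot.annotate_legend('density')
>>> plot.save()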
"""
dim_field = self.plots._sanitize_dimensions(field)
self.include_legend[dim_field] = True
@invalidate_plot
def set_x_unit(self, unit_name):
"""Set the unit to use along the x-axis
Parameters
----------
unit_name: str
The name of the unit to use for the x-axis unit
"""
self._x_unit = unit_name
@invalidate_plot
def set_unit(self, field, unit_name):
"""Set the unit used to plot the field
Parameters
----------
field: str or field tuple
The name of the field to set the units for
unit_name: str
The name of the unit to use for this field
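Example
-------
Illustrative sketch; assumes ``plot`` is an existing LinePlot instance.
>>> plot.set_unit('density', 'kg/cm**3')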
"""
self._y_units[self.data_source._determine_fields(field)[0]] = unit_name
@invalidate_plot
def annotate_title(self, field, title):
"""Set the unit used to plot the field
Parameters
----------
field: str or field tuple
The name of the field to set the title for
title: str
The title to use for the plot
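Example
-------
Illustrative sketch; assumes ``plot`` is an existing LinePlot instance.
>>> plot.annotate_title('density', 'Density along the sampling line')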
"""
self._titles[self.data_source._determine_fields(field)[0]] = title
def _validate_point(point, ds, start=False):
if not iterable(point):
raise RuntimeError(
"Input point must be array-like"
)
if not isinstance(point, YTArray):
point = ds.arr(point, 'code_length')
if len(point.shape) != 1:
raise RuntimeError(
"Input point must be a 1D array"
)
if point.shape[0] < ds.dimensionality:
raise RuntimeError(
"Input point must have an element for each dimension"
)
# need to pad to 3D elements to avoid issues later
if point.shape[0] < 3:
if start:
val = 0
else:
val = 1
point = np.append(point.d, [val]*(3-ds.dimensionality))*point.uq
return point
| 35.015556 | 94 | 0.574602 | ["BSD-3-Clause-Clear"] | smressle/yt | yt/visualization/line_plot.py | 15,757 | Python |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SECRET_KEY'] = '323b22caac41acbf'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'  # connection-string key read by flask_sqlalchemy
db = SQLAlchemy(app)
from inventorymanager import routes
db.create_all()
db.session.commit()
| 22.833333 | 46 | 0.773723 | ["MIT"] | marcus-deans/InventoryTracker | inventorymanager/__init__.py | 274 | Python |
import pandas as pd
import numpy as np
import scipy.io
import dpsimpy
class Reader:
def __init__(self, mpc_file_path, mpc_name = 'mpc'):
# read input file (returns multidimensional dict)
self.mpc_raw = scipy.io.loadmat(mpc_file_path)
self.mpc_name = mpc_name
def process_mpc(self):
version_idx = 0
base_pow_idx = 1
bus_data_idx = 2
gen_data_idx = 3
branch_data_idx = 4
# gencost_data_idx= 5
# Process raw mpc data and create corresponding dataframes
# Version
self.mpc_version = self.mpc_raw[self.mpc_name][0][0][version_idx]
# System frequency (not included in mpc but needed for setting dpsimpy component parameters i.e inductances, capacitances ..)
self.mpc_freq = 50
self.mpc_omega = 2*np.pi*50
# Base power (MVA)
self.mpc_base_power_MVA = self.mpc_raw[self.mpc_name][0][0][base_pow_idx][0][0]
#### Busses
mpc_bus_raw = self.mpc_raw[self.mpc_name][0][0][bus_data_idx]
bus_data_header = ["bus_i", "type", "Pd", "Qd", "Gs", "Bs", "area",
"Vm", "Va", "baseKV", "zone", "Vmax", "Vmin"]
self.mpc_bus_data = pd.DataFrame(mpc_bus_raw, columns = bus_data_header)
# scipy.io.loadmat loads all matrix entries as double. Convert specific columns back to int
self.mpc_bus_data['bus_i'] = self.mpc_bus_data['bus_i'].astype(int)
self.mpc_bus_data['type'] = self.mpc_bus_data['type'].astype(int)
self.mpc_bus_data['area'] = self.mpc_bus_data['area'].astype(int)
self.mpc_bus_data['zone'] = self.mpc_bus_data['zone'].astype(int)
#### Generators
mpc_gen_raw = self.mpc_raw[self.mpc_name][0][0][gen_data_idx]
gen_data_header = ["bus", "Pg", "Qg", "Qmax", "Qmin", "Vg", "mBase", "status",
"Pmax", "Pmin", "Pc1", "Pc2", "Qc1min", "Qc1max", "Qc2min",
"Qc2max", "ramp_agc", "ramp_10", "ramp_30", "ramp_q", "apf"]
self.mpc_gen_data = pd.DataFrame(mpc_gen_raw, columns = gen_data_header)
self.mpc_gen_data['bus'] = self.mpc_gen_data['bus'].astype(int)
self.mpc_gen_data['status'] = self.mpc_gen_data['status'].astype(int)
#### Branches
# extract only first 13 columns since following columns include results
mpc_branch_raw = self.mpc_raw[self.mpc_name][0][0][branch_data_idx][:, :13]
branch_data_header = ["fbus", "tbus", "r", "x", "b", "rateA", "rateB",
"rateC", "ratio", "angle", "status", "angmin", "angmax"]
self.mpc_branch_data = pd.DataFrame(mpc_branch_raw, columns = branch_data_header)
self.mpc_branch_data['fbus'] = self.mpc_branch_data['fbus'].astype(int)
self.mpc_branch_data['tbus'] = self.mpc_branch_data['tbus'].astype(int)
self.mpc_branch_data['status'] = self.mpc_branch_data['status'].astype(int)
#### TODO Generator costs
def create_dpsim_objects(self):
self.process_mpc()
# return values: nodes and components
dpsimpy_busses_dict = {}
dpsimpy_comp_dict = {}
# default multiplier for matpower data
mw_w = 1e6
kv_v = 1e3
# Nodes
bus = 0
load = 0
generator = 0
inj = 0
for index, _ in self.mpc_bus_data.iterrows():  # row values unused here; keeps the integer bus counter from being shadowed
# create dpsimpy busses
bus = bus + 1
bus_index = str(self.mpc_bus_data.at[index,'bus_i'])
bus_name = bus_index
dpsimpy_busses_dict[bus_name] = dpsimpy.sp.SimNode(bus_name, dpsimpy.PhaseType.Single)
# for each bus type create corresponding dpsimpy component
# 1 = PQ, 2 = PV, 3 = ref, 4 = isolated
bus_type = self.mpc_bus_data.at[index,'type']
# Loads
if bus_type == 1:
load = load + 1
load_name = "load%s" %load
load_p = self.mpc_bus_data.at[index,'Pd'] * mw_w
load_q = self.mpc_bus_data.at[index,'Qd'] * mw_w
load_baseV = self.mpc_bus_data.at[index,'baseKV'] * kv_v
dpsimpy_comp_dict[load_name] = [dpsimpy.sp.ph1.Load(load_name, dpsimpy.LogLevel.info)]
dpsimpy_comp_dict[load_name][0].set_parameters(load_p, load_q, load_baseV)
dpsimpy_comp_dict[load_name][0].modify_power_flow_bus_type(dpsimpy.PowerflowBusType.PQ)
# add connections
dpsimpy_comp_dict[load_name].append([dpsimpy_busses_dict[bus_index]]) # [to bus]
# Generators
elif bus_type == 2:
generator = generator + 1
gen_name = "gen%s" %generator
# relevant data from self.mpc_gen_data. Identification with bus number available in mpc_bus_data and mpc_gen_data
gen = self.mpc_gen_data.loc[self.mpc_gen_data['bus'] == self.mpc_bus_data.at[index,'bus_i']]
gen_baseS = gen['mBase']*mw_w # gen base MVA default is mpc.baseMVA
gen_baseV = self.mpc_bus_data.at[index,'baseKV']*kv_v # gen base kV
gen_v = gen['Vg']*gen_baseV # gen set point voltage (gen['Vg'] in p.u.)
gen_p = gen['Pg']*mw_w # gen ini. active power (gen['Pg'] in MVA)
# gen_q = gen['Qg']*mw_w # gen ini. reactive power (gen['Qg'] in MVAr)
gen_nom_s = abs(complex(gen['Pmax'], gen['Qmax'])) # gen nominal power (set default to mpc.baseMVA ? )
dpsimpy_comp_dict[gen_name] = [dpsimpy.sp.ph1.SynchronGenerator(gen_name, dpsimpy.LogLevel.info)]
dpsimpy_comp_dict[gen_name][0].set_parameters(gen_nom_s, gen_baseV, gen_p, gen_v, dpsimpy.PowerflowBusType.PV)
dpsimpy_comp_dict[gen_name][0].set_base_voltage(gen_baseV)
# add connections
dpsimpy_comp_dict[gen_name].append([dpsimpy_busses_dict[bus_index]]) # [to bus]
# Network injection (slack bus)
elif bus_type == 3:
inj = inj + 1
extnet_name = "extnet%s" %inj
# relevant data from self.mpc_gen_data. Identification with bus number available in mpc_bus_data and mpc_gen_data
extnet = self.mpc_gen_data.loc[self.mpc_gen_data['bus'] == self.mpc_bus_data.at[index,'bus_i']]
# extnet_baseS= extnet['mBase']*mw_w # default is mpc.baseMVA
extnet_baseV = self.mpc_bus_data.at[index,'baseKV']*kv_v
extnet_v = extnet['Vg']*extnet_baseV
dpsimpy_comp_dict[extnet_name] = [dpsimpy.sp.ph1.NetworkInjection(extnet_name, dpsimpy.LogLevel.info)]
dpsimpy_comp_dict[extnet_name][0].set_parameters(extnet_v)
dpsimpy_comp_dict[extnet_name][0].set_base_voltage(extnet_baseV)
dpsimpy_comp_dict[extnet_name][0].modify_power_flow_bus_type(dpsimpy.PowerflowBusType.VD)
# add connections
dpsimpy_comp_dict[extnet_name].append([dpsimpy_busses_dict[bus_index]]) # [to bus]
#isolated
elif bus_type == 4:
print("isolated bus type")
else:
print("bus type error")
### branches ####
line = 0
trafo = 0
for index, branch in self.mpc_branch_data.iterrows():
branch_ratio = self.mpc_branch_data.at[index,'ratio']
# Lines
if branch_ratio == 0:
line = line + 1
line_name = "line%s_%s-%s" %(line, self.mpc_branch_data.at[index,'fbus'] , self.mpc_branch_data.at[index,'tbus'])
line_fbus = self.mpc_branch_data.at[index,'fbus']
line_tbus = self.mpc_branch_data.at[index,'tbus']
tmp_fbus = self.mpc_bus_data.loc[self.mpc_bus_data['bus_i'] == line_fbus]
tmp_tbus = self.mpc_bus_data.loc[self.mpc_bus_data['bus_i'] == line_tbus]
line_fbus_baseV = self.mpc_bus_data.at[tmp_fbus.first_valid_index(),'baseKV']*kv_v
line_tbus_baseV = self.mpc_bus_data.at[tmp_tbus.first_valid_index(),'baseKV']*kv_v
line_baseZ = line_tbus_baseV*line_tbus_baseV / (self.mpc_base_power_MVA*mw_w)
line_r = self.mpc_branch_data.at[index,'r'] * line_baseZ
line_x = self.mpc_branch_data.at[index,'x'] * line_baseZ
line_b = self.mpc_branch_data.at[index,'b'] / line_baseZ
line_l = line_x / self.mpc_omega
line_c = line_b / self.mpc_omega
line_g = 0 # line conductance is not included in mpc
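# Worked example (illustrative): for a 230 kV bus on a 100 MVA system base,
# line_baseZ = (230e3)**2 / 100e6 = 529 ohm, so r = 0.01 p.u. maps to 5.29 ohm
# and x = 0.1 p.u. maps to 52.9 ohm, i.e. an inductance of 52.9/(2*pi*50) ~ 0.168 H at 50 Hz.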
dpsimpy_comp_dict[line_name] = [dpsimpy.sp.ph1.PiLine(line_name, dpsimpy.LogLevel.info)]
dpsimpy_comp_dict[line_name][0].set_parameters(line_r, line_l, line_c, line_g)
dpsimpy_comp_dict[line_name][0].set_base_voltage(line_tbus_baseV)
# add connections
dpsimpy_comp_dict[line_name].append([dpsimpy_busses_dict[str(line_fbus)], dpsimpy_busses_dict[str(line_tbus)]])
# Transformers
else:
trafo = trafo + 1
transf_name = "transformer%s_%s-%s" %(trafo, self.mpc_branch_data.at[index,'fbus'] , self.mpc_branch_data.at[index,'tbus'])
transf_s = self.mpc_branch_data.at[index,'rateA']*mw_w # Matpower: Used to specify branch flow limits. By default these are limits on apparent power with units in MV
transf_fbus = self.mpc_branch_data.at[index,'fbus']
transf_tbus = self.mpc_branch_data.at[index,'tbus']
tmp_fbus = self.mpc_bus_data.loc[self.mpc_bus_data['bus_i'] == transf_fbus]
tmp_tbus = self.mpc_bus_data.loc[self.mpc_bus_data['bus_i'] == transf_tbus]
transf_fbus_baseV = self.mpc_bus_data.at[tmp_fbus.first_valid_index(),'baseKV']*kv_v
transf_tbus_baseV = self.mpc_bus_data.at[tmp_tbus.first_valid_index(),'baseKV']*kv_v
transf_primary_v = self.mpc_bus_data.at[tmp_fbus.first_valid_index(),'Vm']*transf_fbus_baseV
transf_secondary_v = self.mpc_bus_data.at[tmp_tbus.first_valid_index(),'Vm']*transf_tbus_baseV
transf_offNom_ratio = self.mpc_branch_data.at[index,'ratio']
transf_primary_v = transf_primary_v/ transf_offNom_ratio
transf_ratio = transf_primary_v / transf_secondary_v
transf_baseZ = transf_tbus_baseV*transf_tbus_baseV / (self.mpc_base_power_MVA*mw_w)
transf_r = self.mpc_branch_data.at[index,'r']* transf_baseZ
transf_x = self.mpc_branch_data.at[index,'x']* transf_baseZ
transf_l = transf_x / self.mpc_omega
dpsimpy_comp_dict[transf_name] = [dpsimpy.sp.ph1.Transformer(transf_name, dpsimpy.LogLevel.info)]
dpsimpy_comp_dict[transf_name][0].set_parameters(transf_primary_v, transf_secondary_v, np.abs(transf_ratio), np.angle(transf_ratio), transf_r, transf_l)
dpsimpy_comp_dict[transf_name][0].set_base_voltage(transf_tbus_baseV)
print(transf_primary_v, transf_secondary_v, np.abs(transf_ratio), np.angle(transf_ratio), transf_r, transf_l)
print(transf_tbus_baseV)
# add connections
dpsimpy_comp_dict[transf_name].append([dpsimpy_busses_dict[str(transf_fbus)], dpsimpy_busses_dict[str(transf_tbus)]])
return dpsimpy_busses_dict, dpsimpy_comp_dict
def load_mpc(self):
dpsimpy_busses_dict, dpsimpy_comp_dict = self.create_dpsim_objects()
system_comp = []
system_nodes = []
for key, value in dpsimpy_comp_dict.items():
dpsimpy_component = value[0]
connection_nodes = value[1]
dpsimpy_component.connect(connection_nodes)
system_comp.append(dpsimpy_component)
for n in connection_nodes:
if n in system_nodes:
continue
else:
system_nodes.append(n)
system = dpsimpy.SystemTopology(self.mpc_freq, system_nodes, system_comp)
return system
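# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module).
# Assumptions: a MATPOWER case exported to 'case9.mat' with the struct named
# 'mpc' is available, and dpsimpy exposes the usual static-phasor (SP)
# power-flow Simulation API as in DPsim's power-flow examples. The simulation
# name 'case9_pf' is arbitrary.
if __name__ == '__main__':
    reader = Reader('case9.mat', mpc_name='mpc')
    system = reader.load_mpc()
    sim = dpsimpy.Simulation('case9_pf', dpsimpy.LogLevel.info)
    sim.set_system(system)
    sim.set_domain(dpsimpy.Domain.SP)
    sim.set_solver(dpsimpy.Solver.NRP)
    sim.set_time_step(1.0)
    sim.set_final_time(1.0)
    sim.run()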
| 45.745318 | 182 | 0.616751 | ["MPL-2.0", "MPL-2.0-no-copyleft-exception"] | DPsim-Simulator/DPsim | python/src/dpsim/matpower.py | 12,214 | Python |
from __future__ import print_function
import contextlib
import imp
import os
import shutil
import subprocess
import sys
import tempfile
from unittest import skip
from ctypes import *
import numpy as np
try:
import setuptools
except ImportError:
setuptools = None
import llvmlite.binding as ll
from numba import unittest_support as unittest
from numba.pycc import main
from numba.pycc.decorators import clear_export_registry
from numba.pycc.platform import find_shared_ending, find_pyext_ending
from numba.pycc.platform import _external_compiler_ok
# if suitable compilers are not present then skip.
_skip_reason = 'AOT compatible compilers missing'
_skip_missing_compilers = unittest.skipIf(not _external_compiler_ok,
_skip_reason)
from .matmul_usecase import has_blas
from .support import TestCase, tag, import_dynamic, temp_directory
base_path = os.path.dirname(os.path.abspath(__file__))
def unset_macosx_deployment_target():
"""Unset MACOSX_DEPLOYMENT_TARGET because we are not building portable
libraries
"""
if 'MACOSX_DEPLOYMENT_TARGET' in os.environ:
del os.environ['MACOSX_DEPLOYMENT_TARGET']
class BasePYCCTest(TestCase):
def setUp(self):
unset_macosx_deployment_target()
self.tmpdir = temp_directory('test_pycc')
# Make sure temporary files and directories created by
# distutils don't clutter the top-level /tmp
tempfile.tempdir = self.tmpdir
def tearDown(self):
tempfile.tempdir = None
# Since we're executing the module-under-test several times
# from the same process, we must clear the exports registry
# between invocations.
clear_export_registry()
@contextlib.contextmanager
def check_c_ext(self, extdir, name):
sys.path.append(extdir)
try:
lib = import_dynamic(name)
yield lib
finally:
sys.path.remove(extdir)
sys.modules.pop(name, None)
@_skip_missing_compilers
class TestLegacyAPI(BasePYCCTest):
def test_pycc_ctypes_lib(self):
"""
Test creating a C shared library object using pycc.
"""
source = os.path.join(base_path, 'compile_with_pycc.py')
cdll_modulename = 'test_dll_legacy' + find_shared_ending()
cdll_path = os.path.join(self.tmpdir, cdll_modulename)
if os.path.exists(cdll_path):
os.unlink(cdll_path)
main(args=['--debug', '-o', cdll_path, source])
lib = CDLL(cdll_path)
lib.mult.argtypes = [POINTER(c_double), c_void_p,
c_double, c_double]
lib.mult.restype = c_int
lib.multf.argtypes = [POINTER(c_float), c_void_p,
c_float, c_float]
lib.multf.restype = c_int
res = c_double()
lib.mult(byref(res), None, 123, 321)
self.assertEqual(res.value, 123 * 321)
res = c_float()
lib.multf(byref(res), None, 987, 321)
self.assertEqual(res.value, 987 * 321)
def test_pycc_pymodule(self):
"""
Test creating a CPython extension module using pycc.
"""
self.skipTest("lack of environment can make the extension crash")
source = os.path.join(base_path, 'compile_with_pycc.py')
modulename = 'test_pyext_legacy'
out_modulename = os.path.join(self.tmpdir,
modulename + find_pyext_ending())
if os.path.exists(out_modulename):
os.unlink(out_modulename)
main(args=['--debug', '--python', '-o', out_modulename, source])
with self.check_c_ext(self.tmpdir, modulename) as lib:
res = lib.multi(123, 321)
self.assertPreciseEqual(res, 123 * 321)
res = lib.multf(987, 321)
self.assertPreciseEqual(res, 987.0 * 321.0)
def test_pycc_bitcode(self):
"""
Test creating a LLVM bitcode file using pycc.
"""
modulename = os.path.join(base_path, 'compile_with_pycc')
bitcode_modulename = os.path.join(self.tmpdir, 'test_bitcode_legacy.bc')
if os.path.exists(bitcode_modulename):
os.unlink(bitcode_modulename)
main(args=['--debug', '--llvm', '-o', bitcode_modulename,
modulename + '.py'])
# Sanity check bitcode file contents
with open(bitcode_modulename, "rb") as f:
bc = f.read()
bitcode_wrapper_magic = b'\xde\xc0\x17\x0b'
bitcode_magic = b'BC\xc0\xde'
self.assertTrue(bc.startswith((bitcode_magic, bitcode_wrapper_magic)), bc)
@_skip_missing_compilers
class TestCC(BasePYCCTest):
def setUp(self):
super(TestCC, self).setUp()
from . import compile_with_pycc
self._test_module = compile_with_pycc
imp.reload(self._test_module)
@contextlib.contextmanager
def check_cc_compiled(self, cc):
#cc.verbose = True
cc.output_dir = self.tmpdir
cc.compile()
with self.check_c_ext(self.tmpdir, cc.name) as lib:
yield lib
def check_cc_compiled_in_subprocess(self, lib, code):
prolog = """if 1:
import sys
sys.path.insert(0, %(path)r)
import %(name)s as lib
""" % {'name': lib.__name__,
'path': os.path.dirname(lib.__file__)}
code = prolog.strip(' ') + code
subprocess.check_call([sys.executable, '-c', code])
def test_cc_properties(self):
cc = self._test_module.cc
self.assertEqual(cc.name, 'pycc_test_simple')
# Inferred output directory
d = self._test_module.cc.output_dir
self.assertTrue(os.path.isdir(d), d)
# Inferred output filename
f = self._test_module.cc.output_file
self.assertFalse(os.path.exists(f), f)
self.assertTrue(os.path.basename(f).startswith('pycc_test_simple.'), f)
if sys.platform.startswith('linux'):
self.assertTrue(f.endswith('.so'), f)
if sys.version_info >= (3,):
self.assertIn('.cpython', f)
def test_compile(self):
with self.check_cc_compiled(self._test_module.cc) as lib:
res = lib.multi(123, 321)
self.assertPreciseEqual(res, 123 * 321)
res = lib.multf(987, 321)
self.assertPreciseEqual(res, 987.0 * 321.0)
res = lib.square(5)
self.assertPreciseEqual(res, 25)
self.assertIs(lib.get_none(), None)
with self.assertRaises(ZeroDivisionError):
lib.div(1, 0)
def check_compile_for_cpu(self, cpu_name):
cc = self._test_module.cc
cc.target_cpu = cpu_name
with self.check_cc_compiled(cc) as lib:
res = lib.multi(123, 321)
self.assertPreciseEqual(res, 123 * 321)
self.assertEqual(lib.multi.__module__, 'pycc_test_simple')
def test_compile_for_cpu(self):
# Compiling for the host CPU should always succeed
self.check_compile_for_cpu(ll.get_host_cpu_name())
def test_compile_for_cpu_host(self):
# Compiling for the host CPU should always succeed
self.check_compile_for_cpu("host")
@tag('important')
def test_compile_helperlib(self):
with self.check_cc_compiled(self._test_module.cc_helperlib) as lib:
res = lib.power(2, 7)
self.assertPreciseEqual(res, 128)
for val in (-1, -1 + 0j, np.complex128(-1)):
res = lib.sqrt(val)
self.assertPreciseEqual(res, 1j)
for val in (4, 4.0, np.float64(4)):
res = lib.np_sqrt(val)
self.assertPreciseEqual(res, 2.0)
res = lib.spacing(1.0)
self.assertPreciseEqual(res, 2**-52)
# Implicit seeding at startup should guarantee a non-pathological
# start state.
self.assertNotEqual(lib.random(-1), lib.random(-1))
res = lib.random(42)
expected = np.random.RandomState(42).random_sample()
self.assertPreciseEqual(res, expected)
res = lib.size(np.float64([0] * 3))
self.assertPreciseEqual(res, 3)
code = """if 1:
from numpy.testing import assert_equal, assert_allclose
res = lib.power(2, 7)
assert res == 128
res = lib.random(42)
assert_allclose(res, %(expected)s)
res = lib.spacing(1.0)
assert_allclose(res, 2**-52)
""" % {'expected': expected}
self.check_cc_compiled_in_subprocess(lib, code)
@tag('important')
def test_compile_nrt(self):
with self.check_cc_compiled(self._test_module.cc_nrt) as lib:
# Sanity check
self.assertPreciseEqual(lib.zero_scalar(1), 0.0)
res = lib.zeros(3)
self.assertEqual(list(res), [0, 0, 0])
if has_blas:
res = lib.vector_dot(4)
self.assertPreciseEqual(res, 30.0)
code = """if 1:
res = lib.zero_scalar(1)
assert res == 0.0
res = lib.zeros(3)
assert list(res) == [0, 0, 0]
if %(has_blas)s:
res = lib.vector_dot(4)
assert res == 30.0
""" % dict(has_blas=has_blas)
self.check_cc_compiled_in_subprocess(lib, code)
@_skip_missing_compilers
class TestDistutilsSupport(TestCase):
def setUp(self):
unset_macosx_deployment_target()
# Copy the test project into a temp directory to avoid
# keeping any build leftovers in the source tree
self.tmpdir = temp_directory('test_pycc_distutils')
source_dir = os.path.join(base_path, 'pycc_distutils_usecase')
self.usecase_dir = os.path.join(self.tmpdir, 'work')
shutil.copytree(source_dir, self.usecase_dir)
def check_setup_py(self, setup_py_file):
# Compute PYTHONPATH to ensure the child processes see this Numba
import numba
numba_path = os.path.abspath(os.path.dirname(
os.path.dirname(numba.__file__)))
env = dict(os.environ)
if env.get('PYTHONPATH', ''):
env['PYTHONPATH'] = numba_path + os.pathsep + env['PYTHONPATH']
else:
env['PYTHONPATH'] = numba_path
def run_python(args):
p = subprocess.Popen([sys.executable] + args,
cwd=self.usecase_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
out, _ = p.communicate()
rc = p.wait()
if rc != 0:
self.fail("python failed with the following output:\n%s"
% out.decode('utf-8', 'ignore'))
run_python([setup_py_file, "build_ext", "--inplace"])
code = """if 1:
import pycc_compiled_module as lib
assert lib.get_const() == 42
res = lib.ones(3)
assert list(res) == [1.0, 1.0, 1.0]
"""
run_python(["-c", code])
def test_setup_py_distutils(self):
if sys.version_info < (3,) and sys.platform == "win32":
# See e.g. https://stackoverflow.com/questions/28931875/problems-finding-vcvarsall-bat-when-using-distutils
self.skipTest("must use setuptools to build extensions for Python 2")
self.check_setup_py("setup_distutils.py")
@unittest.skipIf(setuptools is None, "test needs setuptools")
def test_setup_py_setuptools(self):
self.check_setup_py("setup_setuptools.py")
if __name__ == "__main__":
unittest.main()
| 35.297619 | 119 | 0.600759 | ["BSD-2-Clause", "MIT"] | eric-erki/numba | numba/tests/test_pycc.py | 11,860 | Python |
from config.config import db
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
username = db.Column(db.String, primary_key=True)
password = db.Column(db.String)
email = db.Column(db.String)
def __init__(self, username, email, password):
self.username = username
self.password = password
self.email = email
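# Illustrative usage (assumes `db.session` is configured via config.config):
#     user = User('alice', '[email protected]', 'secret')
#     db.session.add(user)
#     db.session.commit()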
| 25.117647 | 55 | 0.697892 | ["MIT"] | EswarAleti/MVC-Architecture | model/user.py | 427 | Python |
from rest_framework import serializers
from django.contrib.auth.models import User
from .models import *
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['username','first_name','last_name','password','email','is_superuser']
class RolesSerializer(serializers.ModelSerializer):
class Meta:
model = Roles
fields = ['name']
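# Illustrative usage (assumes an existing User instance `user`):
#     data = UserSerializer(user).data
#     # -> {'username': ..., 'first_name': ..., 'email': ..., ...}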
| 25.9375 | 89 | 0.684337 | ["MIT"] | jesuscol96/WebClassApp | WebClassApp/mainpage/serializers.py | 415 | Python |
from typing import List
from flake8_functions_names.custom_types import FuncdefInfo
from flake8_functions_names.utils.imports import is_module_installed
from flake8_functions_names.words import VERBS, PURE_VERBS, BLACKLISTED_WORDS_IN_FUNCTIONS_NAMES
def validate_returns_bool_if_names_said_so(funcdef: FuncdefInfo) -> List[str]:
if funcdef.is_name_looks_like_question and funcdef.return_type != 'bool':
return [
f'FNE001 Name of the function says, that it should '
f'return bool, but it returns {funcdef.return_type}',
]
return []
def validate_has_property_and_no_verbs(funcdef: FuncdefInfo) -> List[str]: # noqa: FNE007
if funcdef.has_property_decorator and any(w in VERBS for w in funcdef.name_words):
verbs = [w for w in funcdef.name_words if w in VERBS]
return [
f'FNE002 The method has a @property decorator, '
f"but has a verb in it's name ({', '.join(verbs)})",
]
return []
def validate_save_to(funcdef: FuncdefInfo) -> List[str]:
if 'save' in funcdef.name_words and 'to' not in funcdef.name_words:
return [
'FNE003 Name of the function uses "save", but not uses "to"',
]
return []
def validate_load_from(funcdef: FuncdefInfo) -> List[str]:
if 'load' in funcdef.name_words and 'from' not in funcdef.name_words:
return [
'FNE004 Name of the function uses "load", but not uses "from"',
]
return []
def validate_returns_bool_and_name_shows_it(funcdef: FuncdefInfo) -> List[str]: # noqa: FNE007
if (
funcdef.return_type == 'bool'
and not funcdef.is_name_looks_like_question
and not funcdef.is_buildin_dundner_method_that_returns_bool
):
return [
"FNE005 Return type of the function is bool, but the name doesn't show it",
]
return []
def validate_names_says_its_pure_and_its_pure( # noqa: CFQ003, FNE007
funcdef: FuncdefInfo,
) -> List[str]:
if (
is_module_installed('deal')
and not funcdef.has_deal_pure_decorator
and any(w in PURE_VERBS for w in funcdef.name_words)
):
return [
'FNE006 Name of function says, that it works with data, '
'so it should be pure, but it has no @deal.pure()',
]
return []
def validate_no_blacklisted_words_in_name(funcdef: FuncdefInfo) -> List[str]:
blacklisted_words = [w for w in funcdef.name_words if w in BLACKLISTED_WORDS_IN_FUNCTIONS_NAMES]
if blacklisted_words:
return [
f'FNE007 "{blacklisted_words[0]}" is not recommended in functions names',
]
return []
def validate_name_not_endswith_first_argument_name(funcdef: FuncdefInfo) -> List[str]:
if funcdef.arguments_names and funcdef.name.endswith(f'_{funcdef.arguments_names[0]}'):
return [
"FNE008 Name of functions ends with it's first argument name",
]
return []
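# Illustrative examples (not part of the plugin): a definition like
#     def is_empty(items) -> int: ...
# triggers FNE001 (the name reads like a yes/no question but the annotated
# return type is not bool), while
#     def save_report(report): ...
# triggers FNE003 (uses "save" without "to").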
| 34.952941 | 100 | 0.671491 | ["MIT"] | Melevir/flake8-functions-names | flake8_functions_names/validators.py | 2,971 | Python |
#!/usr/bin/python
import cgi
import cgitb
import json
import parse_enumeration
cgitb.enable()
form = cgi.FieldStorage()
# Get data from fields
callback = form.getvalue('callback')
email = form.getvalue('email')
if (email is None):
email = "<ul><li>hello, world!</li></ul>"
print "Content-type: application/json"
print
response = parse_enumeration.parse_enumerations(email)
d = json.JSONEncoder().encode((response))
if (callback):
print callback+'(' + d + ');'
else:
print d
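# Illustrative invocation (assumes the script is served under /cgi-bin/):
#   curl 'http://localhost/cgi-bin/process-request.py?callback=cb&email=...'
# When 'callback' is given, the JSON response is wrapped as JSONP: cb({...});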
| 18.296296 | 54 | 0.700405 | ["CC0-1.0"] | 18F/parse-shopping-list | src/process-request.py | 494 | Python |
# Punto 10: change data
lista = []
datos = int(input("number of values: "))
for i in range(0, datos):
    alt = float(input("enter a height: "))
    lista.append(alt)
print("the maximum height is", max(lista))
##################################
lista = []
numero = int(input("enter 1 to add a height or 2 to find the largest value: "))
n = 0
while True:
    if numero == 1:
        n = float(input("height: "))
        numero = int(input("enter 1 to add a height or 2 to find the largest value: "))
        lista.append(n)
    elif numero == 2:
        print("the greatest height:", max(lista))
        break
| 24.307692 | 105 | 0.607595 | ["MIT"] | Ricardoppp/Talleres_De_Algoritmos_Ricardo | Taller de Estrucuras de Control Repeticion/Punto10.py | 632 | Python |