filename stringlengths 13–19 | text stringlengths 134–1.04M |
---|---|
the-stack_0_5542 | import socket
from flask import Flask
app = Flask(__name__)
@app.route("/")
def home():
return "ahoj"
if __name__ == "__main__":
# resolving machine IP address for correct web publishing
hostname = socket.gethostname()
ip_here = socket.gethostbyname(hostname)
app.run(debug=True, host=ip_here)
|
the-stack_0_5545 | from typing import Any
import typing
from conda_forge_tick.xonsh_utils import indir
from .core import MiniMigrator
from conda_forge_tick.utils import as_iterable
if typing.TYPE_CHECKING:
from ..migrators_types import AttrsTypedDict
class PipMigrator(MiniMigrator):
bad_install = (
"python setup.py install",
"python -m pip install --no-deps --ignore-installed .",
)
def filter(self, attrs: "AttrsTypedDict", not_bad_str_start: str = "") -> bool:
scripts = as_iterable(
attrs.get("meta_yaml", {}).get("build", {}).get("script", []),
)
return not bool(set(self.bad_install) & set(scripts))
def migrate(self, recipe_dir: str, attrs: "AttrsTypedDict", **kwargs: Any) -> None:
with indir(recipe_dir):
with open("meta.yaml") as fp:
lines = fp.readlines()
new_lines = []
for line in lines:
for b in self.bad_install:
tst_str = "script: %s" % b
if tst_str in line:
line = line.replace(
tst_str,
"script: {{ PYTHON }} -m pip install . --no-deps -vv",
)
break
new_lines.append(line)
with open("meta.yaml", "w") as fp:
for line in new_lines:
fp.write(line)
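# A small illustration (not part of the original module): the same "script:" line
# rewrite that migrate() applies to meta.yaml, run here on a single in-memory recipe
# line; the example line and the __main__ guard are only for demonstration.
if __name__ == "__main__":
    demo_line = "  script: python -m pip install --no-deps --ignore-installed .\n"
    for bad in PipMigrator.bad_install:
        tst_str = "script: %s" % bad
        if tst_str in demo_line:
            demo_line = demo_line.replace(
                tst_str, "script: {{ PYTHON }} -m pip install . --no-deps -vv")
            break
    print(demo_line)  # -> "  script: {{ PYTHON }} -m pip install . --no-deps -vv"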
|
the-stack_0_5546 | import warnings
from torchvision.datasets import *
from .base import *
from .coco import COCOSegmentation
from .ade20k import ADE20KSegmentation
from .pascal_voc import VOCSegmentation
from .pascal_aug import VOCAugSegmentation
from .pcontext import ContextSegmentation
from .cityscapes import CitySegmentation
from .imagenet import ImageNetDataset
from .minc import MINCDataset
from .steel import SteelSegmentation
from ..utils import EncodingDeprecationWarning
datasets = {
'coco': COCOSegmentation,
'ade20k': ADE20KSegmentation,
'pascal_voc': VOCSegmentation,
'pascal_aug': VOCAugSegmentation,
'pcontext': ContextSegmentation,
'citys': CitySegmentation,
'imagenet': ImageNetDataset,
'minc': MINCDataset,
'cifar10': CIFAR10,
'steel': SteelSegmentation,
}
acronyms = {
'coco': 'coco',
'pascal_voc': 'voc',
'pascal_aug': 'voc',
'pcontext': 'pcontext',
'ade20k': 'ade',
'citys': 'citys',
'minc': 'minc',
'cifar10': 'cifar10',
}
def get_dataset(name, **kwargs):
return datasets[name.lower()](**kwargs)
def _make_deprecate(meth, old_name):
new_name = meth.__name__
def deprecated_init(*args, **kwargs):
warnings.warn("encoding.dataset.{} is now deprecated in favor of encoding.dataset.{}."
.format(old_name, new_name), EncodingDeprecationWarning)
return meth(*args, **kwargs)
deprecated_init.__doc__ = r"""
{old_name}(...)
.. warning::
This method is now deprecated in favor of :func:`encoding.dataset.{new_name}`.
See :func:`~encoding.dataset.{new_name}` for details.""".format(
old_name=old_name, new_name=new_name)
deprecated_init.__name__ = old_name
return deprecated_init
get_segmentation_dataset = _make_deprecate(get_dataset, 'get_segmentation_dataset')
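# A brief usage sketch (not part of the original module): calling the deprecated alias
# emits an EncodingDeprecationWarning before dispatching to get_dataset(); the dataset
# name and keyword arguments below are purely illustrative.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        cifar = get_segmentation_dataset('cifar10', root='./data', download=True)
    print([w.category.__name__ for w in caught])  # includes 'EncodingDeprecationWarning'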
|
the-stack_0_5547 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class Article(object):
def __init__(self):
self._action_name = None
self._desc = None
self._image_url = None
self._title = None
self._url = None
@property
def action_name(self):
return self._action_name
@action_name.setter
def action_name(self, value):
self._action_name = value
@property
def desc(self):
return self._desc
@desc.setter
def desc(self, value):
self._desc = value
@property
def image_url(self):
return self._image_url
@image_url.setter
def image_url(self, value):
self._image_url = value
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
@property
def url(self):
return self._url
@url.setter
def url(self, value):
self._url = value
def to_alipay_dict(self):
params = dict()
if self.action_name:
if hasattr(self.action_name, 'to_alipay_dict'):
params['action_name'] = self.action_name.to_alipay_dict()
else:
params['action_name'] = self.action_name
if self.desc:
if hasattr(self.desc, 'to_alipay_dict'):
params['desc'] = self.desc.to_alipay_dict()
else:
params['desc'] = self.desc
if self.image_url:
if hasattr(self.image_url, 'to_alipay_dict'):
params['image_url'] = self.image_url.to_alipay_dict()
else:
params['image_url'] = self.image_url
if self.title:
if hasattr(self.title, 'to_alipay_dict'):
params['title'] = self.title.to_alipay_dict()
else:
params['title'] = self.title
if self.url:
if hasattr(self.url, 'to_alipay_dict'):
params['url'] = self.url.to_alipay_dict()
else:
params['url'] = self.url
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = Article()
if 'action_name' in d:
o.action_name = d['action_name']
if 'desc' in d:
o.desc = d['desc']
if 'image_url' in d:
o.image_url = d['image_url']
if 'title' in d:
o.title = d['title']
if 'url' in d:
o.url = d['url']
return o
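# A short round-trip sketch (not part of the original module): populate an Article,
# serialise it with to_alipay_dict() and rebuild it with from_alipay_dict(); the field
# values below are made up for illustration.
if __name__ == "__main__":
    article = Article()
    article.title = "Hello"
    article.url = "https://example.com"
    payload = article.to_alipay_dict()  # {'title': 'Hello', 'url': 'https://example.com'}
    clone = Article.from_alipay_dict(payload)
    print(clone.title, clone.url)  # -> Hello https://example.com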
|
the-stack_0_5548 | # Copyright 2015 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_utils import uuidutils
from neutron.common import constants
from neutron import context
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions
from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import (
qos_driver)
from neutron.services.qos import qos_consts
from neutron.tests import base
class QosSRIOVAgentDriverTestCase(base.BaseTestCase):
ASSIGNED_MAC = '00:00:00:00:00:66'
PCI_SLOT = '0000:06:00.1'
def setUp(self):
super(QosSRIOVAgentDriverTestCase, self).setUp()
self.context = context.get_admin_context()
self.qos_driver = qos_driver.QosSRIOVAgentDriver()
self.qos_driver.initialize()
self.qos_driver.eswitch_mgr = mock.Mock()
self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock()
self.qos_driver.eswitch_mgr.set_device_min_tx_rate = mock.Mock()
self.qos_driver.eswitch_mgr.clear_max_rate = mock.Mock()
self.qos_driver.eswitch_mgr.clear_min_tx_rate = mock.Mock()
self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate
self.min_tx_rate_mock = \
self.qos_driver.eswitch_mgr.set_device_min_tx_rate
self.clear_max_rate_mock = self.qos_driver.eswitch_mgr.clear_max_rate
self.clear_min_tx_rate_mock = \
self.qos_driver.eswitch_mgr.clear_min_tx_rate
self.rule = self._create_bw_limit_rule_obj()
self.rule_min_tx_rate = self._create_minimum_bandwidth_rule_obj()
self.qos_policy = self._create_qos_policy_obj([self.rule])
self.qos_policy_min_tx_rate = self._create_qos_policy_obj(
[self.rule_min_tx_rate])
self.port = self._create_fake_port(self.qos_policy.id)
self.port_min = self._create_fake_port(self.qos_policy_min_tx_rate.id)
def _create_bw_limit_rule_obj(self):
rule_obj = rule.QosBandwidthLimitRule()
rule_obj.id = uuidutils.generate_uuid()
rule_obj.max_kbps = 2
rule_obj.max_burst_kbps = 200
rule_obj.obj_reset_changes()
return rule_obj
def _create_minimum_bandwidth_rule_obj(self):
rule_obj = rule.QosMinimumBandwidthRule()
rule_obj.id = uuidutils.generate_uuid()
rule_obj.min_kbps = 200
rule_obj.direction = constants.EGRESS_DIRECTION
rule_obj.obj_reset_changes()
return rule_obj
def _create_qos_policy_obj(self, rules):
policy_dict = {'id': uuidutils.generate_uuid(),
'project_id': uuidutils.generate_uuid(),
'name': 'test',
'description': 'test',
'shared': False,
'rules': rules}
policy_obj = policy.QosPolicy(self.context, **policy_dict)
policy_obj.obj_reset_changes()
for policy_rule in policy_obj.rules:
policy_rule.qos_policy_id = policy_obj.id
policy_rule.obj_reset_changes()
return policy_obj
def _create_fake_port(self, qos_policy_id):
return {'port_id': uuidutils.generate_uuid(),
'profile': {'pci_slot': self.PCI_SLOT},
'device': self.ASSIGNED_MAC,
qos_consts.QOS_POLICY_ID: qos_policy_id,
'device_owner': uuidutils.generate_uuid()}
def test_create_rule(self):
self.qos_driver.create(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
def test_update_rule(self):
self.qos_driver.update(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
def test_delete_rules_on_assigned_vf(self):
self.qos_driver.delete(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, 0)
def test_delete_rules_on_released_vf(self):
del self.port['device_owner']
self.qos_driver.delete(self.port, self.qos_policy)
self.clear_max_rate_mock.assert_called_once_with(self.PCI_SLOT)
def test__set_vf_max_rate_captures_sriov_failure(self):
self.max_rate_mock.side_effect = exceptions.SriovNicError()
self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
def test__set_vf_max_rate_unknown_device(self):
with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists',
return_value=False):
self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
self.assertFalse(self.max_rate_mock.called)
def test_create_minimum_bandwidth(self):
self.qos_driver.create(self.port_min, self.qos_policy_min_tx_rate)
self.min_tx_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule_min_tx_rate.min_kbps)
def test_update_minimum_bandwidth(self):
self.qos_driver.update(self.port_min, self.qos_policy_min_tx_rate)
self.min_tx_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule_min_tx_rate.min_kbps)
def test_delete_minimum_bandwidth_on_assigned_vf(self):
self.qos_driver.delete(self.port_min, self.qos_policy_min_tx_rate)
self.min_tx_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, 0)
def test_delete_minimum_bandwidth_on_released_vf(self):
del self.port_min['device_owner']
self.qos_driver.delete(self.port_min, self.qos_policy_min_tx_rate)
self.clear_min_tx_rate_mock.assert_called_once_with(self.PCI_SLOT)
|
the-stack_0_5549 | from os import urandom
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
class AESCipher:
""" Wrapper for cryptography aes cipher.
:attr char: padding_value(char): padding character used for encryption.
"""
padding_value = "\0"
def __init__(self, key, iv_length):
"""
Cipher constructor.
:param bytes key: AES key
:param int iv_length: length of the initialization vector in bytes
"""
self._key = key
self._iv_length = iv_length
self._cipher = Cipher(
algorithms.AES(key),
modes.CBC(urandom(iv_length)),
backend=default_backend())
def encrypt(self, content):
"""
Encrypt string using (key, iv) pair.
Uses padding_value if content has wrong padding.
:param str content: unencrypted string.
:returns: Encrypted string.
"""
padding = len(content) % 16
if padding != 0:
content += "".join(self.padding_value for i in range(16 - padding))
iv = urandom(self._iv_length)
self._cipher.mode = modes.CBC(iv)
encryptor = self._cipher.encryptor()
ct = encryptor.update(content.encode('utf-8')) + encryptor.finalize()
return iv + ct
def decrypt(self, content):
"""
Decrypt string using (key, iv) pair.
Removes padding_value from the end.
:param str content: encrypted string.
:returns: Unencrypted string.
"""
iv = content[:self._iv_length]
self._cipher.mode = modes.CBC(iv)
decryptor = self._cipher.decryptor()
content = decryptor.update(content[self._iv_length:]) + decryptor.finalize()
content = content.decode('utf-8')
return content.rstrip(self.padding_value)
def encrypt_file(self, in_filename):
"""
Encrypt file content using (key, iv) pair.
Uses padding_value if content has wrong padding.
:param str in_filename: unencrypted data file name.
:returns: Encrypted string.
"""
with open(in_filename, "rb") as file:
content = file.read()
return self.encrypt(content)
def decrypt_file(self, in_filename):
"""
Decrypt file using (key, iv) pair.
Removes padding_value from the end.
:param str in_filename: encrypted data file name.
:returns: Unencrypted string.
"""
with open(in_filename, "rb") as file:
content = file.read()
return self.decrypt(content)
def encrypt_file_save_file(self, in_filename, out_filename):
"""
Encrypt file using (key, iv) pair and save result in a file.
Uses padding_value if content has wrong padding.
:param str in_filename: unencrypted data file name.
:param str out_filename: encrypted data file name.
"""
content = self.encrypt_file(in_filename)
with open(out_filename, "wb+") as out:
out.write(content)
def decrypt_file_save_file(self, in_filename, out_filename):
"""
Decrypt file using (key, iv) pair and save result in a file.
Removes padding_value from the end.
:param str in_filename: encrypted data file name.
:param str out_filename: unencrypted data file name.
"""
content = self.decrypt_file(in_filename)
with open(out_filename, "wb+") as out:
out.write(content)
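# A minimal usage sketch (not part of the original module, values are illustrative):
# a random AES-256 key with a 16-byte IV length, relying on encrypt()/decrypt() as
# defined above; the round trip only suits text that does not end in the padding char.
if __name__ == "__main__":
    cipher = AESCipher(urandom(32), 16)
    token = cipher.encrypt("attack at dawn")
    print(cipher.decrypt(token))  # -> "attack at dawn"
|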
the-stack_0_5550 | import os
import gmplot
import requests
from requests import RequestException
from numpy import random
class CoordinatesPlotter:
@staticmethod
def plot_coordinates_on_map():
apikey = ''
try:
response = requests.get("")
response.raise_for_status()
print(response)
response_json = response.json()
feeds = response_json['feeds']
lat_list = []
lon_list = []
for feed in feeds:
lat = float(feed['field2'])
lon = float(feed['field1'])
lat_list.append(lat)
lon_list.append(lon)
curr_lat = lat_list[-1]
curr_lon = lon_list[-1]
origin_lat = lat_list[0]
origin_lon = lon_list[0]
zoom_lvl = 16
gmap = gmplot.GoogleMapPlotter(origin_lat, origin_lon, zoom_lvl, apikey=apikey)
for i in range(100):
curr_lat += (random.rand() - 0.5) / 10000.0
lat_list.append(curr_lat)
curr_lon += (random.rand() - 0.5) / 10000.0
lon_list.append(curr_lon)
gmap.plot(lat_list, lon_list, edge_width=7, color='blue')
print(lat_list[0:5])
print(lon_list[0:5])
gmap.draw('map.html')
os.system('map.html')
except RequestException:
print('Request not satisfied!')
|
the-stack_0_5552 | #
# Copyright 2020 Intellivoid Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ziproto import Headmap
from ziproto.ValueType import ValueType
import struct
class MapIter:
def __init__(self, decoder_obj):
if not decoder_obj.is_map():
raise TypeError('is not map')
self.bytedata = decoder_obj.bytedata
def __iter__(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
count, offset = value(self.bytedata[1:])
body = self.bytedata[1+offset:]
x = 0
current = None
while x<count:
if current is None:
current = Decoder(body)
else:
current = v.next()
v = current.next()
yield current, v
x+=1
class Decoder:
__slots__ = ('bytedata', 'filled')
def __init__(self, bytedata, filled=True):
self.bytedata = memoryview(bytedata)
self.filled = filled
def get_type(self):
head = self.bytedata[0]
t, _ = Headmap.HEAD_MAP[head]
return t
def get(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.ARRAY:
return [x.get() for x in self]
elif t == ValueType.MAP:
return {k.get(): v.get() for k, v in self.items()}
else:
x, _ = value(self.bytedata[1:])
if t == ValueType.STR:
return x.tobytes().decode('utf-8')
else:
return x
def is_nil(self):
head = self.bytedata[0]
t, _ = Headmap.HEAD_MAP[head]
if t == ValueType.NIL:
return True
return False
def is_array(self):
head = self.bytedata[0]
t, _ = Headmap.HEAD_MAP[head]
return t == ValueType.ARRAY
def is_map(self):
head = self.bytedata[0]
t, _ = Headmap.HEAD_MAP[head]
return t == ValueType.MAP
def get_bool(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.BOOL:
x, _ = value(self.bytedata[1:])
return x
raise ValueError('is not bool. %s' % t)
def get_int(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.INT:
x, _ = value(self.bytedata[1:])
return x
raise ValueError('is not int. %s' % t)
def get_float(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.FLOAT:
x, _ = value(self.bytedata[1:])
return x
raise ValueError('is not float. %s' % t)
def get_number(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.INT or t == ValueType.FLOAT:
x, _ = value(self.bytedata[1:])
return x
raise ValueError('is not number. %s' % t)
def get_str(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.STR:
x, _ = value(self.bytedata[1:])
return x.tobytes().decode('utf-8')
raise ValueError('is not str. %s' % t)
def get_bin(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.BIN:
x, _ = value(self.bytedata[1:])
return x
raise ValueError('is not bin. %s' % t)
def __len__(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.ARRAY or t == ValueType.MAP:
count, _ = value(self.bytedata[1:])
return count
raise ValueError('is not array or map. %s' % t)
def __getitem__(self, index):
if isinstance(index, int):
for i, x in enumerate(self):
if i == index:
return x
else:
for k, v in self.items():
if k.get() == index:
return v
def __iter__(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.ARRAY:
count, offset = value(self.bytedata[1:])
x = 0
current = None
while x<count:
if current is None:
current = Decoder(self.bytedata[1+offset:])
else:
current = current.next()
yield current
x += 1
else:
raise ValueError('is not array. %s' % t)
def get_bytes(self):
if self.filled:
return self.bytedata
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
count, offset = value(self.bytedata[1:])
if t == ValueType.ARRAY:
x = 0
pos = 1+offset
body = self.bytedata[pos:]
current = Decoder(body)
while x<count:
pos += len(current.get_bytes())
current = current.next()
x+=1
return self.bytedata[0:pos]
elif t == ValueType.MAP:
x = 0
pos = 1+offset
body = self.bytedata[pos:]
current = Decoder(body)
while x<count:
v = current.next()
pos += len(current.get_bytes())
pos += len(v.get_bytes())
current = v.next()
x+=1
return self.bytedata[0:pos]
else:
return self.bytedata[0:1+offset]
def next(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
count, offset = value(self.bytedata[1:])
body = self.bytedata[1+offset:]
if t == ValueType.ARRAY:
x = 0
current = Decoder(body)
while x<count:
current
current = current.next()
x+=1
return current
elif t == ValueType.MAP:
x = 0
current = Decoder(body)
while x<count:
v = current.next()
current, v
current = v.next()
x+=1
return current
else:
return Decoder(body)
def items(self):
return MapIter(self)
def decode(bytes):
Decoder_proto = Decoder(bytes, True)
return(Decoder_proto.get())
|
the-stack_0_5553 | from django import forms
from django.contrib import admin
from django.contrib.admin.utils import unquote
from django.http import (
JsonResponse, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
)
from django.utils import timezone
from django.urls import re_path
from experiments import conf
from experiments.admin_utils import get_result_context
from experiments.models import Experiment
from experiments.utils import participant
class ExperimentAdmin(admin.ModelAdmin):
list_display = ('name', 'start_date', 'end_date', 'state')
list_filter = ('state', 'start_date', 'end_date')
ordering = ('-start_date',)
search_fields = ('=name',)
actions = None
readonly_fields = ['start_date', 'end_date']
def get_fieldsets(self, request, obj=None):
"""
Slightly different fields are shown for Add and Change:
- default_alternative can only be changed
- name can only be set on Add
"""
main_fields = ('description', 'start_date', 'end_date', 'state')
if obj:
main_fields += ('default_alternative',)
else:
main_fields = ('name',) + main_fields
return (
(None, {
'fields': main_fields,
}),
('Relevant Goals', {
'classes': ('collapse', 'hidden-relevant-goals'),
'fields': ('relevant_chi2_goals', 'relevant_mwu_goals'),
})
)
# --------------------------------------- Default alternative
def get_form(self, request, obj=None, **kwargs):
"""
Add the default alternative dropdown with appropriate choices
"""
if obj:
if obj.alternatives:
choices = [(alternative, alternative) for alternative in obj.alternatives.keys()]
else:
choices = [(conf.CONTROL_GROUP, conf.CONTROL_GROUP)]
class ExperimentModelForm(forms.ModelForm):
default_alternative = forms.ChoiceField(choices=choices,
initial=obj.default_alternative,
required=False)
kwargs['form'] = ExperimentModelForm
return super(ExperimentAdmin, self).get_form(request, obj=obj, **kwargs)
def save_model(self, request, obj, form, change):
if change:
obj.set_default_alternative(form.cleaned_data['default_alternative'])
obj.save()
# --------------------------------------- Overriding admin views
class Media:
css = {
"all": (
'experiments/dashboard/css/admin.css',
),
}
js = (
'https://www.google.com/jsapi', # used for charts
'experiments/dashboard/js/csrf.js',
'experiments/dashboard/js/admin.js',
)
def _admin_view_context(self, extra_context=None):
context = {}
if extra_context:
context.update(extra_context)
context.update({
'all_goals': conf.ALL_GOALS,
'control_group': conf.CONTROL_GROUP,
})
return context
def add_view(self, request, form_url='', extra_context=None):
return super(ExperimentAdmin, self).add_view(request,
form_url=form_url,
extra_context=self._admin_view_context(extra_context=extra_context))
def change_view(self, request, object_id, form_url='', extra_context=None):
experiment = self.get_object(request, unquote(object_id))
context = self._admin_view_context(extra_context=extra_context)
context.update(get_result_context(request, experiment))
return super(ExperimentAdmin, self).change_view(request, object_id, form_url=form_url, extra_context=context)
# --------------------------------------- Views for ajax functionality
def get_urls(self):
experiment_urls = [
re_path(r'^set-alternative/$', self.admin_site.admin_view(self.set_alternative_view), name='experiment_admin_set_alternative'),
re_path(r'^set-state/$', self.admin_site.admin_view(self.set_state_view), name='experiment_admin_set_state'),
]
return experiment_urls + super(ExperimentAdmin, self).get_urls()
def set_alternative_view(self, request):
"""
Allows the admin user to change their assigned alternative
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
experiment_name = request.POST.get("experiment")
alternative_name = request.POST.get("alternative")
if not (experiment_name and alternative_name):
return HttpResponseBadRequest()
participant(request).set_alternative(experiment_name, alternative_name)
return JsonResponse({
'success': True,
'alternative': participant(request).get_alternative(experiment_name)
})
def set_state_view(self, request):
"""
Changes the experiment state
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
try:
state = int(request.POST.get("state", ""))
except ValueError:
return HttpResponseBadRequest()
try:
experiment = Experiment.objects.get(name=request.POST.get("experiment"))
except Experiment.DoesNotExist:
return HttpResponseBadRequest()
experiment.state = state
if state == 0:
experiment.end_date = timezone.now()
else:
experiment.end_date = None
experiment.save()
return HttpResponse()
admin.site.register(Experiment, ExperimentAdmin)
|
the-stack_0_5554 |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Base Widget class. Allows user to create widgets in the back-end that render
in the IPython notebook front-end.
"""
from contextlib import contextmanager
from collections.abc import Iterable
from IPython.core.getipython import get_ipython
from ipykernel.comm import Comm
from traitlets import (
HasTraits, Unicode, Dict, Instance, List, Int, Set, Bytes, observe, default, Container,
Undefined)
from IPython.display import display
from json import loads as jsonloads, dumps as jsondumps
from base64 import standard_b64encode
from .._version import __protocol_version__, __jupyter_widgets_base_version__
PROTOCOL_VERSION_MAJOR = __protocol_version__.split('.')[0]
def _widget_to_json(x, obj):
if isinstance(x, dict):
return {k: _widget_to_json(v, obj) for k, v in x.items()}
elif isinstance(x, (list, tuple)):
return [_widget_to_json(v, obj) for v in x]
elif isinstance(x, Widget):
return "IPY_MODEL_" + x.model_id
else:
return x
def _json_to_widget(x, obj):
if isinstance(x, dict):
return {k: _json_to_widget(v, obj) for k, v in x.items()}
elif isinstance(x, (list, tuple)):
return [_json_to_widget(v, obj) for v in x]
elif isinstance(x, str) and x.startswith('IPY_MODEL_') and x[10:] in Widget.widgets:
return Widget.widgets[x[10:]]
else:
return x
widget_serialization = {
'from_json': _json_to_widget,
'to_json': _widget_to_json
}
_binary_types = (memoryview, bytearray, bytes)
def _put_buffers(state, buffer_paths, buffers):
"""The inverse of _remove_buffers, except here we modify the existing dict/lists.
Modifying should be fine, since this is used when state comes from the wire.
"""
for buffer_path, buffer in zip(buffer_paths, buffers):
# we'd like to set say sync_data['x'][0]['y'] = buffer
# where buffer_path in this example would be ['x', 0, 'y']
obj = state
for key in buffer_path[:-1]:
obj = obj[key]
obj[buffer_path[-1]] = buffer
def _separate_buffers(substate, path, buffer_paths, buffers):
"""For internal, see _remove_buffers"""
# remove binary types from dicts and lists, but keep track of their paths
# any part of the dict/list that needs modification will be cloned, so the original stays untouched
# e.g. {'x': {'ar': ar}, 'y': [ar2, ar3]}, where ar/ar2/ar3 are binary types
# will result in {'x': {}, 'y': [None, None]}, [ar, ar2, ar3], [['x', 'ar'], ['y', 0], ['y', 1]]
# instead of removing elements from the list, this will make replacing the buffers on the js side much easier
if isinstance(substate, (list, tuple)):
is_cloned = False
for i, v in enumerate(substate):
if isinstance(v, _binary_types):
if not is_cloned:
substate = list(substate) # shallow clone list/tuple
is_cloned = True
substate[i] = None
buffers.append(v)
buffer_paths.append(path + [i])
elif isinstance(v, (dict, list, tuple)):
vnew = _separate_buffers(v, path + [i], buffer_paths, buffers)
if v is not vnew: # only assign when value changed
if not is_cloned:
substate = list(substate) # clone list/tuple
is_cloned = True
substate[i] = vnew
elif isinstance(substate, dict):
is_cloned = False
for k, v in substate.items():
if isinstance(v, _binary_types):
if not is_cloned:
substate = dict(substate) # shallow clone dict
is_cloned = True
del substate[k]
buffers.append(v)
buffer_paths.append(path + [k])
elif isinstance(v, (dict, list, tuple)):
vnew = _separate_buffers(v, path + [k], buffer_paths, buffers)
if v is not vnew: # only assign when value changed
if not is_cloned:
substate = dict(substate) # clone list/tuple
is_cloned = True
substate[k] = vnew
else:
raise ValueError("expected state to be a list or dict, not %r" % substate)
return substate
def _remove_buffers(state):
"""Return (state_without_buffers, buffer_paths, buffers) for binary message parts
A binary message part is a memoryview, bytearray, or python 3 bytes object.
As an example:
>>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}}
>>> _remove_buffers(state)
({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']],
[<memory at 0x107ffec48>, <memory at 0x107ffed08>])
"""
buffer_paths, buffers = [], []
state = _separate_buffers(state, [], buffer_paths, buffers)
return state, buffer_paths, buffers
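# A minimal round-trip sketch (not part of the original module): _remove_buffers and
# _put_buffers invert each other; the toy state below is made up, and the __main__
# guard keeps the demonstration from running on import.
if __name__ == "__main__":
    _demo_buf = bytearray(b'\x00\x01\x02')
    _demo_state = {'plain': [0, 'text'], 'x': {'ar': _demo_buf}}
    _clean, _paths, _bufs = _remove_buffers(_demo_state)
    # _clean == {'plain': [0, 'text'], 'x': {}} and _paths == [['x', 'ar']]
    _put_buffers(_clean, _paths, _bufs)
    assert _clean['x']['ar'] is _demo_buf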
def _buffer_list_equal(a, b):
"""Compare two lists of buffers for equality.
Used to decide whether two sequences of buffers (memoryviews,
bytearrays, or python 3 bytes) differ, such that a sync is needed.
Returns True if equal, False if unequal
"""
if len(a) != len(b):
return False
if a == b:
return True
for ia, ib in zip(a, b):
# Check byte equality, since bytes are what is actually synced
# NOTE: Simple ia != ib does not always work as intended, as
# e.g. memoryview(np.frombuffer(ia, dtype='float32')) !=
# memoryview(np.frombuffer(b)), since the format info differs.
# Compare without copying.
if memoryview(ia).cast('B') != memoryview(ib).cast('B'):
return False
return True
class LoggingHasTraits(HasTraits):
"""A parent class for HasTraits that log.
Subclasses have a log trait, and the default behavior
is to get the logger from the currently running Application.
"""
log = Instance('logging.Logger')
@default('log')
def _log_default(self):
from traitlets import log
return log.get_logger()
class CallbackDispatcher(LoggingHasTraits):
"""A structure for registering and running callbacks"""
callbacks = List()
def __call__(self, *args, **kwargs):
"""Call all of the registered callbacks."""
value = None
for callback in self.callbacks:
try:
local_value = callback(*args, **kwargs)
except Exception as e:
ip = get_ipython()
if ip is None:
self.log.warning("Exception in callback %s: %s", callback, e, exc_info=True)
else:
ip.showtraceback()
else:
value = local_value if local_value is not None else value
return value
def register_callback(self, callback, remove=False):
"""(Un)Register a callback
Parameters
----------
callback: method handle
Method to be registered or unregistered.
remove=False: bool
Whether to unregister the callback."""
# (Un)Register the callback.
if remove and callback in self.callbacks:
self.callbacks.remove(callback)
elif not remove and callback not in self.callbacks:
self.callbacks.append(callback)
def _show_traceback(method):
"""decorator for showing tracebacks in IPython"""
def m(self, *args, **kwargs):
try:
return(method(self, *args, **kwargs))
except Exception as e:
ip = get_ipython()
if ip is None:
self.log.warning("Exception in widget method %s: %s", method, e, exc_info=True)
else:
ip.showtraceback()
return m
class WidgetRegistry:
def __init__(self):
self._registry = {}
def register(self, model_module, model_module_version_range, model_name, view_module, view_module_version_range, view_name, klass):
"""Register a value"""
model_module = self._registry.setdefault(model_module, {})
model_version = model_module.setdefault(model_module_version_range, {})
model_name = model_version.setdefault(model_name, {})
view_module = model_name.setdefault(view_module, {})
view_version = view_module.setdefault(view_module_version_range, {})
view_version[view_name] = klass
def get(self, model_module, model_module_version, model_name, view_module, view_module_version, view_name):
"""Get a value"""
module_versions = self._registry[model_module]
# The python semver module doesn't work well, for example, it can't do match('3', '*')
# so we just take the first model module version.
#model_names = next(v for k, v in module_versions.items()
# if semver.match(model_module_version, k))
model_names = list(module_versions.values())[0]
view_modules = model_names[model_name]
view_versions = view_modules[view_module]
# The python semver module doesn't work well, so we just take the first view module version
#view_names = next(v for k, v in view_versions.items()
# if semver.match(view_module_version, k))
view_names = list(view_versions.values())[0]
widget_class = view_names[view_name]
return widget_class
def items(self):
for model_module, mm in sorted(self._registry.items()):
for model_version, mv in sorted(mm.items()):
for model_name, vm in sorted(mv.items()):
for view_module, vv in sorted(vm.items()):
for view_version, vn in sorted(vv.items()):
for view_name, widget in sorted(vn.items()):
yield (model_module, model_version, model_name, view_module, view_version, view_name), widget
def register(name=''):
"For backwards compatibility, we support @register(name) syntax."
def reg(widget):
"""A decorator registering a widget class in the widget registry."""
w = widget.class_traits()
Widget.widget_types.register(w['_model_module'].default_value,
w['_model_module_version'].default_value,
w['_model_name'].default_value,
w['_view_module'].default_value,
w['_view_module_version'].default_value,
w['_view_name'].default_value,
widget)
return widget
if isinstance(name, str):
import warnings
warnings.warn("Widget registration using a string name has been deprecated. Widget registration now uses a plain `@register` decorator.", DeprecationWarning)
return reg
else:
return reg(name)
class Widget(LoggingHasTraits):
#-------------------------------------------------------------------------
# Class attributes
#-------------------------------------------------------------------------
_widget_construction_callback = None
# widgets is a dictionary of all active widget objects
widgets = {}
# widget_types is a registry of widgets by module, version, and name:
widget_types = WidgetRegistry()
@classmethod
def close_all(cls):
for widget in list(cls.widgets.values()):
widget.close()
@staticmethod
def on_widget_constructed(callback):
"""Registers a callback to be called when a widget is constructed.
The callback must have the following signature:
callback(widget)"""
Widget._widget_construction_callback = callback
@staticmethod
def _call_widget_constructed(widget):
"""Static method, called when a widget is constructed."""
if Widget._widget_construction_callback is not None and callable(Widget._widget_construction_callback):
Widget._widget_construction_callback(widget)
@staticmethod
def handle_comm_opened(comm, msg):
"""Static method, called when a widget is constructed."""
version = msg.get('metadata', {}).get('version', '')
if version.split('.')[0] != PROTOCOL_VERSION_MAJOR:
raise ValueError("Incompatible widget protocol versions: received version %r, expected version %r"%(version, __protocol_version__))
data = msg['content']['data']
state = data['state']
# Find the widget class to instantiate in the registered widgets
widget_class = Widget.widget_types.get(state['_model_module'],
state['_model_module_version'],
state['_model_name'],
state['_view_module'],
state['_view_module_version'],
state['_view_name'])
widget = widget_class(comm=comm)
if 'buffer_paths' in data:
_put_buffers(state, data['buffer_paths'], msg['buffers'])
widget.set_state(state)
@staticmethod
def get_manager_state(drop_defaults=False, widgets=None):
"""Returns the full state for a widget manager for embedding
:param drop_defaults: when True, it will not include default value
:param widgets: list with widgets to include in the state (or all widgets when None)
:return:
"""
state = {}
if widgets is None:
widgets = Widget.widgets.values()
for widget in widgets:
state[widget.model_id] = widget._get_embed_state(drop_defaults=drop_defaults)
return {'version_major': 2, 'version_minor': 0, 'state': state}
def _get_embed_state(self, drop_defaults=False):
state = {
'model_name': self._model_name,
'model_module': self._model_module,
'model_module_version': self._model_module_version
}
model_state, buffer_paths, buffers = _remove_buffers(self.get_state(drop_defaults=drop_defaults))
state['state'] = model_state
if len(buffers) > 0:
state['buffers'] = [{'encoding': 'base64',
'path': p,
'data': standard_b64encode(d).decode('ascii')}
for p, d in zip(buffer_paths, buffers)]
return state
def get_view_spec(self):
return dict(version_major=2, version_minor=0, model_id=self._model_id)
#-------------------------------------------------------------------------
# Traits
#-------------------------------------------------------------------------
_model_name = Unicode('WidgetModel',
help="Name of the model.", read_only=True).tag(sync=True)
_model_module = Unicode('@jupyter-widgets/base',
help="The namespace for the model.", read_only=True).tag(sync=True)
_model_module_version = Unicode(__jupyter_widgets_base_version__,
help="A semver requirement for namespace version containing the model.", read_only=True).tag(sync=True)
_view_name = Unicode(None, allow_none=True,
help="Name of the view.").tag(sync=True)
_view_module = Unicode(None, allow_none=True,
help="The namespace for the view.").tag(sync=True)
_view_module_version = Unicode('',
help="A semver requirement for the namespace version containing the view.").tag(sync=True)
_view_count = Int(None, allow_none=True,
help="EXPERIMENTAL: The number of views of the model displayed in the frontend. This attribute is experimental and may change or be removed in the future. None signifies that views will not be tracked. Set this to 0 to start tracking view creation/deletion.").tag(sync=True)
comm = Instance('ipykernel.comm.Comm', allow_none=True)
keys = List(help="The traits which are synced.")
@default('keys')
def _default_keys(self):
return [name for name in self.traits(sync=True)]
_property_lock = Dict()
_holding_sync = False
_states_to_send = Set()
_display_callbacks = Instance(CallbackDispatcher, ())
_msg_callbacks = Instance(CallbackDispatcher, ())
#-------------------------------------------------------------------------
# (Con/de)structor
#-------------------------------------------------------------------------
def __init__(self, **kwargs):
"""Public constructor"""
self._model_id = kwargs.pop('model_id', None)
super().__init__(**kwargs)
Widget._call_widget_constructed(self)
self.open()
def __del__(self):
"""Object disposal"""
self.close()
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
def open(self):
"""Open a comm to the frontend if one isn't already open."""
if self.comm is None:
state, buffer_paths, buffers = _remove_buffers(self.get_state())
args = dict(target_name='jupyter.widget',
data={'state': state, 'buffer_paths': buffer_paths},
buffers=buffers,
metadata={'version': __protocol_version__}
)
if self._model_id is not None:
args['comm_id'] = self._model_id
self.comm = Comm(**args)
@observe('comm')
def _comm_changed(self, change):
"""Called when the comm is changed."""
if change['new'] is None:
return
self._model_id = self.model_id
self.comm.on_msg(self._handle_msg)
Widget.widgets[self.model_id] = self
@property
def model_id(self):
"""Gets the model id of this widget.
If a Comm doesn't exist yet, a Comm will be created automagically."""
return self.comm.comm_id
#-------------------------------------------------------------------------
# Methods
#-------------------------------------------------------------------------
def close(self):
"""Close method.
Closes the underlying comm.
When the comm is closed, all of the widget views are automatically
removed from the front-end."""
if self.comm is not None:
Widget.widgets.pop(self.model_id, None)
self.comm.close()
self.comm = None
self._ipython_display_ = None
def send_state(self, key=None):
"""Sends the widget state, or a piece of it, to the front-end, if it exists.
Parameters
----------
key : unicode, or iterable (optional)
A single property's name or iterable of property names to sync with the front-end.
"""
state = self.get_state(key=key)
if len(state) > 0:
if self._property_lock: # we need to keep this dict up to date with the front-end values
for name, value in state.items():
if name in self._property_lock:
self._property_lock[name] = value
state, buffer_paths, buffers = _remove_buffers(state)
msg = {'method': 'update', 'state': state, 'buffer_paths': buffer_paths}
self._send(msg, buffers=buffers)
def get_state(self, key=None, drop_defaults=False):
"""Gets the widget state, or a piece of it.
Parameters
----------
key : unicode or iterable (optional)
A single property's name or iterable of property names to get.
Returns
-------
state : dict of states
metadata : dict
metadata for each field: {key: metadata}
"""
if key is None:
keys = self.keys
elif isinstance(key, str):
keys = [key]
elif isinstance(key, Iterable):
keys = key
else:
raise ValueError("key must be a string, an iterable of keys, or None")
state = {}
traits = self.traits()
for k in keys:
to_json = self.trait_metadata(k, 'to_json', self._trait_to_json)
value = to_json(getattr(self, k), self)
if not drop_defaults or not self._compare(value, traits[k].default_value):
state[k] = value
return state
def _is_numpy(self, x):
return x.__class__.__name__ == 'ndarray' and x.__class__.__module__ == 'numpy'
def _compare(self, a, b):
if self._is_numpy(a) or self._is_numpy(b):
import numpy as np
return np.array_equal(a, b)
else:
return a == b
def set_state(self, sync_data):
"""Called when a state is received from the front-end."""
# The order of these context managers is important. Properties must
# be locked when the hold_trait_notification context manager is
# released and notifications are fired.
with self._lock_property(**sync_data), self.hold_trait_notifications():
for name in sync_data:
if name in self.keys:
from_json = self.trait_metadata(name, 'from_json',
self._trait_from_json)
self.set_trait(name, from_json(sync_data[name], self))
def send(self, content, buffers=None):
"""Sends a custom msg to the widget model in the front-end.
Parameters
----------
content : dict
Content of the message to send.
buffers : list of binary buffers
Binary buffers to send with message
"""
self._send({"method": "custom", "content": content}, buffers=buffers)
def on_msg(self, callback, remove=False):
"""(Un)Register a custom msg receive callback.
Parameters
----------
callback: callable
callback will be passed three arguments when a message arrives::
callback(widget, content, buffers)
remove: bool
True if the callback should be unregistered."""
self._msg_callbacks.register_callback(callback, remove=remove)
def on_displayed(self, callback, remove=False):
"""(Un)Register a widget displayed callback.
Parameters
----------
callback: method handler
Must have a signature of::
callback(widget, **kwargs)
kwargs from display are passed through without modification.
remove: bool
True if the callback should be unregistered."""
self._display_callbacks.register_callback(callback, remove=remove)
def add_traits(self, **traits):
"""Dynamically add trait attributes to the Widget."""
super().add_traits(**traits)
for name, trait in traits.items():
if trait.get_metadata('sync'):
self.keys.append(name)
self.send_state(name)
def notify_change(self, change):
"""Called when a property has changed."""
# Send the state to the frontend before the user-registered callbacks
# are called.
name = change['name']
if self.comm is not None and self.comm.kernel is not None:
# Make sure this isn't information that the front-end just sent us.
if name in self.keys and self._should_send_property(name, getattr(self, name)):
# Send new state to front-end
self.send_state(key=name)
super().notify_change(change)
def __repr__(self):
return self._gen_repr_from_keys(self._repr_keys())
#-------------------------------------------------------------------------
# Support methods
#-------------------------------------------------------------------------
@contextmanager
def _lock_property(self, **properties):
"""Lock a property-value pair.
The value should be the JSON state of the property.
NOTE: This, in addition to the single lock for all state changes, is
flawed. In the future we may want to look into buffering state changes
back to the front-end."""
self._property_lock = properties
try:
yield
finally:
self._property_lock = {}
@contextmanager
def hold_sync(self):
"""Hold syncing any state until the outermost context manager exits"""
if self._holding_sync is True:
yield
else:
try:
self._holding_sync = True
yield
finally:
self._holding_sync = False
self.send_state(self._states_to_send)
self._states_to_send.clear()
def _should_send_property(self, key, value):
"""Check the property lock (property_lock)"""
to_json = self.trait_metadata(key, 'to_json', self._trait_to_json)
if key in self._property_lock:
# model_state, buffer_paths, buffers
split_value = _remove_buffers({ key: to_json(value, self)})
split_lock = _remove_buffers({ key: self._property_lock[key]})
# A roundtrip conversion through json in the comparison takes care of
# idiosyncrasies of how python data structures map to json, for example
# tuples get converted to lists.
if (jsonloads(jsondumps(split_value[0])) == split_lock[0]
and split_value[1] == split_lock[1]
and _buffer_list_equal(split_value[2], split_lock[2])):
return False
if self._holding_sync:
self._states_to_send.add(key)
return False
else:
return True
# Event handlers
@_show_traceback
def _handle_msg(self, msg):
"""Called when a msg is received from the front-end"""
data = msg['content']['data']
method = data['method']
if method == 'update':
if 'state' in data:
state = data['state']
if 'buffer_paths' in data:
_put_buffers(state, data['buffer_paths'], msg['buffers'])
self.set_state(state)
# Handle a state request.
elif method == 'request_state':
self.send_state()
# Handle a custom msg from the front-end.
elif method == 'custom':
if 'content' in data:
self._handle_custom_msg(data['content'], msg['buffers'])
# Catch remainder.
else:
self.log.error('Unknown front-end to back-end widget msg with method "%s"' % method)
def _handle_custom_msg(self, content, buffers):
"""Called when a custom msg is received."""
self._msg_callbacks(self, content, buffers)
def _handle_displayed(self, **kwargs):
"""Called when a view has been displayed for this widget instance"""
self._display_callbacks(self, **kwargs)
@staticmethod
def _trait_to_json(x, self):
"""Convert a trait value to json."""
return x
@staticmethod
def _trait_from_json(x, self):
"""Convert json values to objects."""
return x
def _ipython_display_(self, **kwargs):
"""Called when `IPython.display.display` is called on the widget."""
plaintext = repr(self)
if len(plaintext) > 110:
plaintext = plaintext[:110] + '…'
data = {
'text/plain': plaintext,
}
if self._view_name is not None:
# The 'application/vnd.jupyter.widget-view+json' mimetype has not been registered yet.
# See the registration process and naming convention at
# http://tools.ietf.org/html/rfc6838
# and the currently registered mimetypes at
# http://www.iana.org/assignments/media-types/media-types.xhtml.
data['application/vnd.jupyter.widget-view+json'] = {
'version_major': 2,
'version_minor': 0,
'model_id': self._model_id
}
display(data, raw=True)
if self._view_name is not None:
self._handle_displayed(**kwargs)
def _send(self, msg, buffers=None):
"""Sends a message to the model in the front-end."""
if self.comm is not None and self.comm.kernel is not None:
self.comm.send(data=msg, buffers=buffers)
def _repr_keys(self):
traits = self.traits()
for key in sorted(self.keys):
# Exclude traits that start with an underscore
if key[0] == '_':
continue
# Exclude traits who are equal to their default value
value = getattr(self, key)
trait = traits[key]
if self._compare(value, trait.default_value):
continue
elif (isinstance(trait, (Container, Dict)) and
trait.default_value == Undefined and
(value is None or len(value) == 0)):
# Empty container, and dynamic default will be empty
continue
yield key
def _gen_repr_from_keys(self, keys):
class_name = self.__class__.__name__
signature = ', '.join(
'{}={!r}'.format(key, getattr(self, key))
for key in keys
)
return '{}({})'.format(class_name, signature)
|
the-stack_0_5556 | #
# Copyright 2017-2018 Stanislav Pidhorskyi. All rights reserved.
# License: https://raw.githubusercontent.com/podgorskiy/impy/master/LICENSE.txt
#
from setuptools import setup, Extension, find_packages
from distutils.errors import *
from distutils.dep_util import newer_group
from distutils import log
from distutils.command.build_ext import build_ext
from codecs import open
import os
import sys
import platform
import re
sys._argv = sys.argv[:]
sys.argv=[sys.argv[0], '--root', 'gl3w/']
try:
from gl3w import gl3w_gen
except:
sys.path.insert(0, './gl3w')
import gl3w_gen
sys.argv = sys._argv
target_os = 'none'
if sys.platform == 'darwin':
target_os = 'darwin'
elif os.name == 'posix':
target_os = 'posix'
elif platform.system() == 'Windows':
target_os = 'win32'
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
def filter_sources(sources):
"""Filters sources into c, cpp and objc"""
cpp_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
c_ext_match = re.compile(r'.*[.](c|C)\Z', re.I).match
objc_ext_match = re.compile(r'.*[.]m\Z', re.I).match
c_sources = []
cpp_sources = []
objc_sources = []
other_sources = []
for source in sources:
if c_ext_match(source):
c_sources.append(source)
elif cpp_ext_match(source):
cpp_sources.append(source)
elif objc_ext_match(source):
objc_sources.append(source)
else:
other_sources.append(source)
return c_sources, cpp_sources, objc_sources, other_sources
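# A small illustration (not part of the original build script): how filter_sources
# groups a mixed source list by extension; the file names are only illustrative, and
# the expected grouping is shown in the trailing comment.
_demo_groups = filter_sources(
    ['gl3w/src/gl3w.c', 'imgui/imgui.cpp', 'glfw/src/cocoa_init.m', 'README.rst'])
# -> (['gl3w/src/gl3w.c'], ['imgui/imgui.cpp'], ['glfw/src/cocoa_init.m'], ['README.rst'])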
def build_extension(self, ext):
"""Modified version of build_extension method from distutils.
Can handle compiler args for different files"""
sources = ext.sources
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'ext_modules' option (extension '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % ext.name)
sources = list(sources)
ext_path = self.get_ext_fullpath(ext.name)
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_path, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
sources = self.swig_sources(sources, ext)
extra_args = ext.extra_compile_args or []
extra_c_args = getattr(ext, "extra_compile_c_args", [])
extra_cpp_args = getattr(ext, "extra_compile_cpp_args", [])
extra_objc_args = getattr(ext, "extra_compile_objc_args", [])
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
c_sources, cpp_sources, objc_sources, other_sources = filter_sources(sources)
def _compile(src, args):
return self.compiler.compile(src,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args + args,
depends=ext.depends)
objects = []
objects += _compile(c_sources, extra_c_args)
objects += _compile(cpp_sources, extra_cpp_args)
objects += _compile(objc_sources, extra_objc_args)
objects += _compile(other_sources, [])
self._built_objects = objects[:]
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
language = ext.language or self.compiler.detect_language(sources)
self.compiler.link_shared_object(
objects, ext_path,
libraries=self.get_libraries(ext),
library_dirs=ext.library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=language)
# patching
build_ext.build_extension = build_extension
glfw = [
"glfw/src/context.c"
,"glfw/src/init.c"
,"glfw/src/input.c"
,"glfw/src/monitor.c"
,"glfw/src/vulkan.c"
,"glfw/src/window.c"
]
glfw_platform = {
'darwin': [
"glfw/src/cocoa_init.m"
,"glfw/src/cocoa_joystick.m"
,"glfw/src/cocoa_monitor.m"
,"glfw/src/cocoa_window.m"
,"glfw/src/cocoa_time.c"
,"glfw/src/posix_thread.c"
,"glfw/src/nsgl_context.m"
,"glfw/src/egl_context.c"
,"glfw/src/osmesa_context.c"
],
'posix': [
"glfw/src/x11_init.c"
,"glfw/src/x11_monitor.c"
,"glfw/src/x11_window.c"
,"glfw/src/xkb_unicode.c"
,"glfw/src/posix_time.c"
,"glfw/src/posix_thread.c"
,"glfw/src/glx_context.c"
,"glfw/src/egl_context.c"
,"glfw/src/osmesa_context.c"
,"glfw/src/linux_joystick.c"
],
'win32': [
"glfw/src/win32_init.c"
,"glfw/src/win32_joystick.c"
,"glfw/src/win32_monitor.c"
,"glfw/src/win32_time.c"
,"glfw/src/win32_thread.c"
,"glfw/src/win32_window.c"
,"glfw/src/wgl_context.c"
,"glfw/src/egl_context.c"
,"glfw/src/osmesa_context.c"
]
}
imgui = [
"imgui/imgui.cpp"
,"imgui/imgui_demo.cpp"
,"imgui/imgui_draw.cpp"
,"imgui/imgui_widgets.cpp"
]
definitions = {
'darwin': [("_GLFW_COCOA", 1)],
'posix': [("GLFW_USE_OSMESA", 0), ("GLFW_USE_WAYLAND", 0), ("GLFW_USE_MIR", 0), ("_GLFW_X11", 1)],
'win32': [("GLFW_USE_HYBRID_HPG", 0), ("_GLFW_WIN32", 1), ("_CRT_SECURE_NO_WARNINGS", 1), ("NOMINMAX", 1)],
}
libs = {
'darwin': [],
'posix': ["rt", "m", "X11"],
'win32': ["gdi32", "opengl32", "Shell32"],
}
extra_link = {
'darwin': ["-framework", "Cocoa","-framework", "IOKit","-framework", "Cocoa","-framework", "CoreFoundation","-framework", "CoreVideo"],
'posix': [],
'win32': [],
}
extra_compile_args = {
'darwin': [],
'posix': [],
'win32': ['/MT', '/fp:fast', '/GL', '/GR-'],
}
extra_compile_cpp_args = {
'darwin': ['-std=c++11'],
'posix': ['-std=c++11'],
'win32': [],
}
extension = Extension("_bimpy",
imgui + glfw + glfw_platform[target_os] + ['bimpy.cpp', "imgui_glfw.cpp", "gl3w/src/gl3w.c"],
define_macros = definitions[target_os],
include_dirs=["glfw/include", "imgui", "pybind11/include", "gl3w/include"],
extra_compile_args=extra_compile_args[target_os],
extra_link_args=extra_link[target_os],
libraries = libs[target_os])
extension.extra_compile_cpp_args = extra_compile_cpp_args[target_os]
setup(
name='bimpy',
version='0.0.11',
description='bimpy - bundled imgui for python',
long_description=long_description,
url='https://github.com/podgorskiy/bimpy',
author='Stanislav Pidhorskyi',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
keywords='imgui ui',
packages=['bimpy'],
ext_modules=[extension],
)
|
the-stack_0_5558 | import argparse
import threading
import time
# import uuid
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Tuple, Union
import consul
import yaml
from consul.base import Check
from logger import create_info_logger
from utils.network import find_open_port, get_ip_address
logger = create_info_logger("registry", "registry.log")
config = None
current_service_id = None
class EndpointConfig:
Port: int
Host: str
Scheme: str
def __str__(self):
return self.Scheme + "://" + self.Host + ":" + str(self.Port)
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.next_call = time.time()
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self.next_call += self.interval
self._timer = threading.Timer(self.next_call - time.time(), self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
class DebugWebServer(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes("<html><head><title>Registry info</title></head>", "utf-8"))
self.wfile.write(bytes("<p>Request: %s</p>" % self.path, "utf-8"))
self.wfile.write(bytes("<body>", "utf-8"))
self.wfile.write(bytes("<p>Service status.</p>", "utf-8"))
self.wfile.write(bytes("</body></html>", "utf-8"))
def start_webserver(hostName, serverPort):
webServer = HTTPServer((hostName, serverPort), DebugWebServer)
logger.info("Server started http://%s:%s" % (hostName, serverPort))
try:
webServer.serve_forever()
except KeyboardInterrupt:
pass
webServer.server_close()
logger.info("Server stopped.")
def verify_connection(cfg: EndpointConfig) -> bool:
"""
Verify consul connection
Exceptions throw such as ConnectionError will be captured
"""
if cfg is None:
raise Exception("Configuration is required")
port = cfg.Port
host = cfg.Host
logger.debug('Verifying Consul connection to %s:%s', host, port)
try:
client = consul.Consul(host=host, port=port)
client.agent.self()
return True
except Exception:
pass
return False
def createClient(cfg: EndpointConfig, verify: bool = True) -> Tuple[consul.Consul, bool]:
"""
Create new consul client
"""
if cfg is None:
raise Exception("Configuration is required but got None")
try:
port = cfg.Port
host = cfg.Host
logger.info('Consul Host: %s Port: %s ', host, port)
client = consul.Consul(host=host, port=port)
online = False
if verify:
online = verify_connection(cfg)
logger.debug('Consul online : %s', online)
return client, online
except Exception:
pass
return None, False
def driver_version():
return consul.__version__
def getServiceByNameAndId(service_name, service_id):
c, online = createClient(config, True)
if not online:
return None
index, nodes = c.health.service(service_name)
for node in nodes:
if node['Service']['ID'] == service_id:
return node
return None
def register(service_host, service_port, service_id=None) -> Union[None, str]:
"""
Register new service in consul
"""
logger.info('Registering ServiceHost: %s Port: %s ',
service_host, service_port)
c, online = createClient(config, True)
if not online:
logger.debug('Consul service is offline')
return None
service_name = 'traefik-system-ingress'
service_url = f'http://{service_host}:{service_port}/api'
# TODO : Service ID generation needs to be configurable
# Create new service id, otherwise we will re-register same id
if service_id is None:
# service_id = f'{service_name}@{service_port}#{uuid.uuid4()}'
host = get_ip_address()
service_id = f'{service_name}@{host}:{service_port}'
# service_id = f'{service_name}@{service_port}'
logger.info('Service url: %s', service_url)
logger.info('Service id: %s', service_id)
# TODO: De-registration needs to be configurable
c.agent.service.register(
name=service_name,
service_id=service_id,
port=service_port,
address=service_host,
# check=Check.http(service_url, '10s', deregister='10m'),
check=Check.http(service_url, '10s'),
tags=[
"traefik.enable=true",
"traefik.consulcatalog.connect=false",
"traefik.http.routers.traefik-system-ingress.entrypoints=marie",
"traefik.http.routers.traefik-system-ingress.service=traefik-system-ingress",
"traefik.http.routers.traefik-system-ingress.rule=HostRegexp(`{host:.+}`)",
"traefik.http.services.traefik-system-ingress.loadbalancer.server.scheme=http",
])
return service_id
def start_watchdog(interval, service_host, service_port):
sid = current_service_id
def _register(_service_host, _service_port):
nonlocal sid
logger.info("watchdog:Host, Port, ServiceId : %s, %s, %s", _service_host, _service_port, sid)
online = verify_connection(config)
logger.info('watchdog:consul online : %s', online)
service_name = 'traefik-system-ingress'
if online:
node = getServiceByNameAndId(service_name, sid)
if node is None:
sid = register(service_host=_service_host, service_port=_service_port, service_id=sid)
logger.info('watchdog:Re-registered service: %s', sid)
logger.info("watchdog:starting with interval : %s", interval)
return RepeatedTimer(interval, _register, service_host, service_port)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# parser.add_argument('--debug-server', type=bool, default=False, required=False, help='Should we start debug webserver')
# parser.add_argument('--port', type=int, default=-1, help='Port number to export (-1 dynamic)')
# parser.add_argument('--ip', type=str, default='127.0.0.1', help='Service IP to expose, blank for dynamic')
# parser.add_argument('--watchdog-interval', type=int, default=60, help='watchdog interval checkin seconds')
parser.add_argument('--config', type=str, default='./config/marie-debug.yml', help='Configuration file')
opt = parser.parse_args()
# Load config
with open(opt.config, "r") as yamlfile:
data = yaml.load(yamlfile, Loader=yaml.FullLoader)
logger.info(f"Config read successfully : {opt.config}")
print(data)
enabled = bool(data['RegistryEnabled'])
if not enabled:
logger.info("registry not enabled, exiting...")
exit()
config = EndpointConfig()
config.Host = data['ConsulEndpoint']['Host']
config.Port = int(data['ConsulEndpoint']['Port'])
config.Scheme = data['ConsulEndpoint']['Scheme']
hostName = data['ServiceEndpoint']['Host']
serverPort = int(data['ServiceEndpoint']['Port'])
watchdog_interval = int(data['WatchdogInterval'])
debug_server = bool(data['DebugWebserver'])
if hostName is None or hostName == '':
hostName = get_ip_address()
if serverPort == -1:
serverPort = find_open_port()
current_service_id = register(
service_host=hostName, service_port=serverPort, service_id=None)
logger.info('Registered service: %s', current_service_id)
def _target():
return start_watchdog(watchdog_interval,
service_host=hostName, service_port=serverPort)
watchdog_task = threading.Thread(target=_target, daemon=debug_server)
watchdog_task.start()
if debug_server:
start_webserver(hostName, serverPort)
the-stack_0_5561
import os, sys, time, random, argparse, math
import numpy as np
from copy import deepcopy
from collections import defaultdict
import torch
import torch.nn as nn
import wandb
from tqdm import tqdm
from pathlib import Path
from hessian_eigenthings import compute_hessian_eigenthings
lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config, configure2str
from datasets import get_datasets, get_nas_search_loaders
from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
from log_utils import AverageMeter, time_string, convert_secs2time
from utils import count_parameters_in_MB, obtain_accuracy
from utils.sotl_utils import _hessian, analyze_grads, eval_archs_on_batch, summarize_results_by_dataset, avg_nested_dict
from typing import *
from models.cell_searchs.generic_model import ArchSampler
import collections
def sample_arch_and_set_mode_search(args, outer_iter, sampled_archs, api, network, algo, arch_sampler,
step, logger, epoch, supernets_decomposition, all_archs, arch_groups_brackets, placement=None):
parsed_algo = algo.split("_")
sampling_done, lowest_loss_arch, lowest_loss = False, None, 10000 # Used for GreedyNAS online search space pruning - might have to resample many times until we find an architecture below the required threshold
sampled_arch = None
branch = None
if algo.startswith('setn'):
branch = "setn"
sampled_arch = network.dync_genotype(True)
network.set_cal_mode('dynamic', sampled_arch)
elif algo.startswith('gdas'):
branch = "gdas"
network.set_cal_mode('gdas', None)
if sampled_archs is not None and (not args.refresh_arch_oneshot or (args.refresh_arch_oneshot == "train_real" and placement in ["inner_sandwich", "outer"])):
assert placement in ["inner_sandwich", "outer", None]
network.last_gumbels = sampled_archs[outer_iter]
network.refresh_arch_oneshot = False
if epoch < 2 and step < 3:
logger.log(f"Set Gumbels at epoch={epoch}, outer_iter={outer_iter} = {network.last_gumbels}")
sampled_arch = network.genotype
elif algo.startswith('darts'):
branch = "darts"
network.set_cal_mode('joint', None)
sampled_arch = network.genotype
elif "random_" in algo and len(parsed_algo) > 1 and ("perf" in algo or "size" in algo):
branch = "random1"
if args.search_space_paper == "nats-bench":
sampled_arch = arch_sampler.sample()[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
network.set_cal_mode('urs')
elif "random" in algo and args.evenly_split is not None: # TODO should just sample outside of the function and pass it in as all_archs?
branch = "random2"
sampled_arch = arch_sampler.sample(mode="evenly_split", candidate_num = args.eval_candidate_num)[0]
network.set_cal_mode('dynamic', sampled_arch)
elif "random" in algo and args.sandwich is not None and args.sandwich > 1:
branch = "random_quartiles"
if args.search_space_paper == "nats-bench":
assert args.sandwich == 4 or args.sandwich_mode != "quartiles" # 4 corresponds to using quartiles
if step < 2 and epoch is not None and epoch < 2:
logger.log(f"Sampling from the Sandwich branch with sandwich={args.sandwich} and sandwich_mode={args.sandwich_mode}")
logger.log(f"Sampled archs = {[api.archstr2index[x.tostr()] for x in sampled_archs]}, cur arch = {sampled_archs[outer_iter]}")
sampled_arch = sampled_archs[outer_iter] # Pick the corresponding quartile architecture for this iteration
network.set_cal_mode('dynamic', sampled_arch)
else:
sampled_arch = sampled_archs[outer_iter]
network.set_cal_mode('dynamic', sampled_arch)
elif "random" in algo and args.sandwich is not None and args.sandwich > 1 and args.sandwich_mode == "fairnas":
branch = "random_fairnas"
assert args.sandwich == len(network._op_names)
sampled_arch = sampled_archs[outer_iter] # Pick the corresponding quartile architecture for this iteration
if step < 2 and epoch is not None and epoch < 2:
logger.log(f"Sampling from the FairNAS branch with sandwich={args.sandwich} and sandwich_mode={args.sandwich_mode}, arch={sampled_arch}")
logger.log(f"Sampled archs = {[api.archstr2index[x.tostr()] for x in sampled_archs]}, cur arch = {sampled_archs[outer_iter]}")
network.set_cal_mode('dynamic', sampled_arch)
elif "random_" in algo and "grad" in algo:
network.set_cal_mode('urs')
elif algo == 'random': # NOTE the original branch needs to be last so that it is fall-through for all the special 'random' branches
branch = "random"
if supernets_decomposition or all_archs is not None or arch_groups_brackets is not None:
branch = "random_weird"
if all_archs is not None:
sampled_arch = random.sample(all_archs, 1)[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
if args.search_space_paper == "nats-bench":
sampled_arch = arch_sampler.sample(mode="random")[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
sampled_arch = network.sample_arch()
network.set_cal_mode('dynamic', sampled_arch)
else:
if args.search_space_paper == "nats-bench":
branch="random_basic"
network.set_cal_mode('urs', None)
else:
sampled_arch = network.sample_arch()
network.set_cal_mode('dynamic', sampled_arch)
elif algo == 'enas':
with torch.no_grad():
network.controller.eval()
_, _, sampled_arch = network.controller()
network.set_cal_mode('dynamic', sampled_arch)
else:
raise ValueError('Invalid algo name : {:}'.format(algo))
# if step < 2:
# print(f"Sample_arch through branch={branch}")
return sampled_arch
def sample_new_arch(network, algo, arch_sampler, sandwich_archs, all_archs, base_inputs, base_targets, arch_overview, loss_threshold, outer_iter, step, logger, supernets_decomposition, arch_groups_brackets, args, criterion=None):
# Need to sample a new architecture (considering it as a meta-batch dimension)
parsed_algo = algo.split("_")
sampling_done = False # Used for GreedyNAS online search space pruning - might have to resample many times until we find an architecture below the required threshold
lowest_loss_arch = None
lowest_loss = 10000
while not sampling_done: # TODO the sampling_done should be useful for like online sampling with rejections maybe
if algo == 'setn':
sampled_arch = network.dync_genotype(True)
network.set_cal_mode('dynamic', sampled_arch)
elif algo == 'gdas':
network.set_cal_mode('gdas', None)
sampled_arch = network.genotype
elif algo.startswith('darts'):
network.set_cal_mode('joint', None)
sampled_arch = network.genotype
elif "random_" in algo and len(parsed_algo) > 1 and ("perf" in algo or "size" in algo):
if args.search_space_paper == "nats-bench":
sampled_arch = arch_sampler.sample()[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
network.set_cal_mode('urs')
# elif "random" in algo and args.evenly_split is not None: # TODO should just sample outside of the function and pass it in as all_archs?
# sampled_arch = arch_sampler.sample(mode="evenly_split", candidate_num = args.eval_candidate_num)[0]
# network.set_cal_mode('dynamic', sampled_arch)
elif "random" in algo and args.sandwich is not None and args.sandwich > 1 and args.sandwich_computation == "parallel":
assert args.sandwich_mode != "quartiles", "Not implemented yet"
sampled_arch = sandwich_archs[outer_iter]
network.set_cal_mode('dynamic', sampled_arch)
elif "random" in algo and args.sandwich is not None and args.sandwich > 1 and args.sandwich_mode == "quartiles":
if args.search_space_paper == "nats-bench":
assert args.sandwich == 4 # 4 corresponds to using quartiles
if step == 0:
logger.log(f"Sampling from the Sandwich branch with sandwich={args.sandwich} and sandwich_mode={args.sandwich_mode}")
sampled_archs = arch_sampler.sample(mode = "quartiles", subset = all_archs, candidate_num=args.sandwich) # Always samples 4 new archs but then we pick the one from the right quartile
sampled_arch = sampled_archs[outer_iter] # Pick the corresponding quartile architecture for this iteration
network.set_cal_mode('dynamic', sampled_arch)
else:
network.set_cal_mode('urs')
elif "random_" in algo and "grad" in algo:
network.set_cal_mode('urs')
elif algo == 'random': # NOTE the original branch needs to be last so that it is fall-through for all the special 'random' branches
if supernets_decomposition or all_archs is not None or arch_groups_brackets is not None:
if all_archs is not None:
sampled_arch = random.sample(all_archs, 1)[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
if args.search_space_paper == "nats-bench":
sampled_arch = arch_sampler.sample(mode="random")[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
network.set_cal_mode('urs', None)
else:
network.set_cal_mode('urs', None)
elif algo == 'enas':
with torch.no_grad():
network.controller.eval()
_, _, sampled_arch = network.controller()
network.set_cal_mode('dynamic', sampled_arch)
else:
raise ValueError('Invalid algo name : {:}'.format(algo))
if loss_threshold is not None:
with torch.no_grad():
_, logits = network(base_inputs)
base_loss = criterion(logits, base_targets) * (1 if args.sandwich is None else 1/args.sandwich)
if base_loss.item() < lowest_loss:
lowest_loss = base_loss.item()
lowest_loss_arch = sampled_arch
if base_loss.item() < loss_threshold:
sampling_done = True
else:
sampling_done = True
if sampling_done:
arch_overview["cur_arch"] = sampled_arch
arch_overview["all_archs"].append(sampled_arch)
arch_overview["all_cur_archs"].append(sampled_arch)
return sampled_arch
def format_input_data(base_inputs, base_targets, arch_inputs, arch_targets, search_loader_iter, inner_steps, args, loader_type="train-val"):
# base_inputs, arch_inputs = base_inputs.cuda(non_blocking=True), arch_inputs.cuda(non_blocking=True)
# base_targets, arch_targets = base_targets.cuda(non_blocking=True), arch_targets.cuda(non_blocking=True)
base_inputs, base_targets = base_inputs.cuda(non_blocking=True), base_targets.cuda(non_blocking=True)
arch_inputs, arch_targets = arch_inputs.cuda(non_blocking=True), arch_targets.cuda(non_blocking=True)
if args.higher_method == "sotl":
arch_inputs, arch_targets = None, None
all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets = [base_inputs], [base_targets], [arch_inputs], [arch_targets]
for extra_step in range(inner_steps-1):
if args.inner_steps_same_batch:
all_base_inputs.append(base_inputs)
all_base_targets.append(base_targets)
all_arch_inputs.append(arch_inputs)
all_arch_targets.append(arch_targets)
continue # If using the same batch, we should not try to query the search_loader_iter for more samples
try:
if loader_type == "train-val" or loader_type == "train-train":
extra_base_inputs, extra_base_targets, extra_arch_inputs, extra_arch_targets = next(search_loader_iter)
else:
extra_base_inputs, extra_base_targets = next(search_loader_iter)
extra_arch_inputs, extra_arch_targets = None, None
except:
continue
# extra_base_inputs, extra_arch_inputs = extra_base_inputs.cuda(non_blocking=True), extra_arch_inputs.cuda(non_blocking=True)
# extra_base_targets, extra_arch_targets = extra_base_targets.cuda(non_blocking=True), extra_arch_targets.cuda(non_blocking=True)
extra_base_inputs, extra_base_targets = extra_base_inputs.cuda(non_blocking=True), extra_base_targets.cuda(non_blocking=True)
if extra_arch_inputs is not None and extra_arch_targets is not None:
extra_arch_inputs, extra_arch_targets = extra_arch_inputs.cuda(non_blocking=True), extra_arch_targets.cuda(non_blocking=True)
all_base_inputs.append(extra_base_inputs)
all_base_targets.append(extra_base_targets)
all_arch_inputs.append(extra_arch_inputs)
all_arch_targets.append(extra_arch_targets)
return all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets
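# Note: the four returned lists are aligned; entry i holds the CUDA-resident batch used at inner step i.
# With args.inner_steps_same_batch the first batch is repeated instead of drawing new batches, and the
# lists can be shorter than `inner_steps` if the loader iterator runs out mid-way.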
def update_brackets(supernet_train_stats_by_arch, supernet_train_stats, supernet_train_stats_avgmeters, arch_groups_brackets, arch_overview, items, all_brackets, sampled_arch, args):
if arch_overview["cur_arch"] is not None:
if type(arch_groups_brackets) is dict:
cur_bracket = arch_groups_brackets[arch_overview["cur_arch"].tostr()]
for key, val in items:
supernet_train_stats_by_arch[sampled_arch.tostr()][key].append(val)
for bracket in all_brackets:
if bracket == cur_bracket:
supernet_train_stats[key]["sup"+str(cur_bracket)].append(val)
supernet_train_stats_avgmeters[key+"AVG"]["sup"+str(cur_bracket)].update(val)
supernet_train_stats[key+"AVG"]["sup"+str(cur_bracket)].append(supernet_train_stats_avgmeters[key+"AVG"]["sup"+str(cur_bracket)].avg)
else:
item_to_add = supernet_train_stats[key]["sup"+str(bracket)][-1] if len(supernet_train_stats[key]["sup"+str(bracket)]) > 0 else 3.14159
supernet_train_stats[key]["sup"+str(bracket)].append(item_to_add)
avg_to_add = supernet_train_stats_avgmeters[key+"AVG"]["sup"+str(bracket)].avg if supernet_train_stats_avgmeters[key+"AVG"]["sup"+str(bracket)].avg > 0 else 3.14159
supernet_train_stats[key+"AVG"]["sup"+str(bracket)].append(avg_to_add)
def get_finetune_scheduler(scheduler_type, config, xargs, network2, epochs=None, logger=None, best_lr=None):
if scheduler_type in ['linear_warmup', 'linear']:
config = config._replace(scheduler=scheduler_type, warmup=1, eta_min=0, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type == "cos_reinit":
# In practice, this leads to constant LR = 0.025 since the original Cosine LR is annealed over 100 epochs and our training schedule is very short
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type == "cos_restarts":
config = config._replace(scheduler='cos_restarts', warmup=0, epochs=epochs, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config, xargs)
elif scheduler_type in ['cos_adjusted']:
config = config._replace(scheduler='cos', warmup=0, epochs=epochs, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type in ['cos_fast']:
config = config._replace(scheduler='cos', warmup=0, LR=0.001 if xargs.lr is None else xargs.lr, epochs=epochs, eta_min=0, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type in ['cos_warmup']:
config = config._replace(scheduler='cos', warmup=1, LR=0.001 if xargs.lr is None else xargs.lr, epochs=epochs, eta_min=0, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type in ["scratch"]:
config_opt = load_config('./configs/nas-benchmark/hyper-opts/200E.config', None, logger)
config_opt = config_opt._replace(LR=0.1 if xargs.lr is None else xargs.lr, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config_opt)
elif scheduler_type in ["scratch12E"]:
config_opt = load_config('./configs/nas-benchmark/hyper-opts/12E.config', None, logger)
config_opt = config_opt._replace(LR=0.1 if xargs.lr is None else xargs.lr, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config_opt)
elif scheduler_type in ["scratch1E"]:
config_opt = load_config('./configs/nas-benchmark/hyper-opts/01E.config', None, logger)
config_opt = config_opt._replace(LR=0.1 if xargs.lr is None else xargs.lr, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config_opt)
elif (xargs.lr is not None or (xargs.lr is None and bool(xargs.adaptive_lr) == True)) and scheduler_type == 'constant':
config = config._replace(scheduler='constant', constant_lr=xargs.lr if not xargs.adaptive_lr else best_lr,
decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type == "constant":
config = config._replace(scheduler='constant', constant_lr=xargs.lr if not xargs.adaptive_lr else best_lr, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
else:
print(f"Unrecognized scheduler at {scheduler_type}")
raise NotImplementedError
return w_optimizer2, w_scheduler2, criterion
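# Usage sketch (argument values are illustrative):
#   w_opt, w_sched, crit = get_finetune_scheduler("cos_fast", config, xargs, network2, epochs=3, logger=logger)
# Returns a fresh optimizer/scheduler/criterion triple for finetuning network2.weights.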
def find_best_lr(xargs, network2, train_loader, config, arch_idx):
lr_counts = {}
if xargs.adaptive_lr == "1cycle":
from torch_lr_finder import LRFinder
network3 = deepcopy(network2)
network3.logits_only = True
w_optimizer3, _, criterion = get_optim_scheduler(network3.weights, config, attach_scheduler=False)
lr_finder = LRFinder(network3, w_optimizer3, criterion, device="cuda")
lr_finder.range_test(train_loader, start_lr=0.0001, end_lr=1, num_iter=100)
best_lr = lr_finder.history["lr"][(np.gradient(np.array(lr_finder.history["loss"]))).argmin()]
try:
lr_plot_ax, weird_lr = lr_finder.plot(suggest_lr=True) # to inspect the loss-learning rate graph
except:
lr_plot_ax = lr_finder.plot(suggest_lr=False)
lr_finder.reset() # to reset the model and optimizer to their initial state
wandb.log({"lr_plot": lr_plot_ax}, commit=False)
elif xargs.adaptive_lr == "custom":
lrs = np.geomspace(1, 0.001, 10)
lr_results = {}
avg_of_avg_loss = AverageMeter()
for lr in tqdm(lrs, desc="Searching LRs"):
network3 = deepcopy(network2)
print(str(list(network3.parameters()))[0:100])
config = config._replace(scheduler='constant', constant_lr=lr)
w_optimizer3, _, criterion = get_optim_scheduler(network3.weights, config)
avg_loss = AverageMeter()
for batch_idx, data in tqdm(enumerate(train_loader), desc = f"Training in order to find the best LR for arch_idx={arch_idx}", disable=True):
if batch_idx > 20:
break
network3.zero_grad()
inputs, targets = data
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
_, logits = network3(inputs)
train_acc_top1, train_acc_top5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
loss = criterion(logits, targets)
avg_loss.update(loss.item())
loss.backward()
w_optimizer3.step()
lr_results[lr] = avg_loss.avg
avg_of_avg_loss.update(avg_loss.avg)
best_lr = min(lr_results, key = lambda k: lr_results[k])
print(lr_results)
lr_counts[best_lr] = lr_counts.get(best_lr, 0) + 1
else:
best_lr = None
return best_lr
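# Usage sketch: returns None unless xargs.adaptive_lr is "1cycle" or "custom".
#   best_lr = find_best_lr(xargs, network2, train_loader, config, arch_idx)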
def sample_arch_and_set_mode(network, algo, arch_sampler, all_archs, parsed_algo, args, step, logger, sampled_archs, outer_iter):
sampled_arch = None
if algo.startswith('setn'):
sampled_arch = network.dync_genotype(True)
network.set_cal_mode('dynamic', sampled_arch)
elif algo.startswith('gdas'):
network.set_cal_mode('gdas', None)
sampled_arch = network.genotype
elif algo.startswith('darts'):
network.set_cal_mode('joint', None)
sampled_arch = network.genotype
elif "random_" in algo and len(parsed_algo) > 1 and ("perf" in algo or "size" in algo):
if args.search_space_paper == "nats-bench":
sampled_arch = arch_sampler.sample()[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
network.set_cal_mode('urs')
elif "random" in algo and args.sandwich is not None and args.sandwich > 1 and args.sandwich_mode == "quartiles":
if args.search_space_paper == "nats-bench":
assert args.sandwich == 4 # 4 corresponds to using quartiles
if step == 0:
logger.log(f"Sampling from the Sandwich branch with sandwich={args.sandwich} and sandwich_mode={args.sandwich_mode}")
sampled_arch = sampled_archs[outer_iter] # Pick the corresponding quartile architecture for this iteration
network.set_cal_mode('dynamic', sampled_arch)
else:
network.set_cal_mode('urs')
elif "random" in algo and args.sandwich is not None and args.sandwich > 1 and args.sandwich_mode == "fairnas":
assert args.sandwich == len(network._op_names)
sampled_arch = sampled_archs[outer_iter] # Pick the corresponding quartile architecture for this iteration
if step == 0:
logger.log(f"Sampling from the FairNAS branch with sandwich={args.sandwich} and sandwich_mode={args.sandwich_mode}, arch={sampled_arch}")
network.set_cal_mode('dynamic', sampled_arch)
elif "random_" in algo and "grad" in algo:
network.set_cal_mode('urs')
elif algo == 'random': # NOTE the original branch needs to be last so that it is fall-through for all the special 'random' branches
if all_archs is not None:
sampled_arch = random.sample(all_archs, 1)[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
if args.search_space_paper == "nats-bench":
sampled_arch = arch_sampler.sample(mode="random")[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
sampled_arch = network.sample_arch()
network.set_cal_mode('dynamic', sampled_arch)
elif algo == 'enas':
with torch.no_grad():
network.controller.eval()
_, _, sampled_arch = network.controller()
network.set_cal_mode('dynamic', sampled_arch)
else:
raise ValueError('Invalid algo name : {:}'.format(algo))
return sampled_arch
def valid_func(xloader, network, criterion, algo, logger, steps=None, grads=False):
data_time, batch_time = AverageMeter(), AverageMeter()
loss, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter()
end = time.time()
with torch.set_grad_enabled(grads):
network.eval()
for step, (arch_inputs, arch_targets) in enumerate(xloader):
if steps is not None and step >= steps:
break
arch_targets = arch_targets.cuda(non_blocking=True)
# prediction
_, logits = network(arch_inputs.cuda(non_blocking=True))
arch_loss = criterion(logits, arch_targets)
if grads:
arch_loss.backward()
# record
arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
loss.update(arch_loss.item(), arch_inputs.size(0))
top1.update (arch_prec1.item(), arch_inputs.size(0))
top5.update (arch_prec5.item(), arch_inputs.size(0))
network.train()
return loss.avg, top1.avg, top5.avg
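# Usage sketch (evaluates at most `steps` minibatches when given, optionally accumulating grads):
#   val_loss, val_top1, val_top5 = valid_func(valid_loader, network, criterion, algo, logger, steps=10)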
def train_controller(xloader, network, criterion, optimizer, prev_baseline, epoch_str, print_freq, logger, xargs, w_optimizer=None, train_loader=None):
# config: contains the necessary hyper-parameters for controller training
# prev_baseline: the baseline score (i.e. average val_acc) from the previous epoch
# NOTE: xloader is typically the validation loader
data_time, batch_time = AverageMeter(), AverageMeter()
GradnormMeter, LossMeter, ValAccMeter, EntropyMeter, BaselineMeter, RewardMeter, xend = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), time.time()
controller_num_aggregate = 20
controller_train_steps = 50
controller_bl_dec = 0.99
controller_entropy_weight = 0.0001
network.eval()
network.controller.train()
network.controller.zero_grad()
loader_iter = iter(xloader)
for step in tqdm(range(controller_train_steps * controller_num_aggregate), desc = "Training controller", total=controller_train_steps*controller_num_aggregate):
try:
inputs, targets = next(loader_iter)
except:
loader_iter = iter(xloader)
inputs, targets = next(loader_iter)
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
# measure data loading time
data_time.update(time.time() - xend)
log_prob, entropy, sampled_arch = network.controller()
if xargs.discrete_diffnas_method in [None, "val"]:
with torch.no_grad():
network.set_cal_mode('dynamic', sampled_arch)
_, logits = network(inputs)
loss = criterion(logits, targets)
reward_metric, val_top5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
reward_metric = reward_metric.view(-1) / 100
elif xargs.discrete_diffnas_method in ["sotl"]:
if step == 0: print(f"ENAS train controller - supernet weight sample before finetune: {str(list(network.parameters())[1])[0:80]}")
eval_metrics, finetune_metrics = eval_archs_on_batch(xloader=xloader, archs=[sampled_arch], network=network, criterion=criterion, metric="loss",
train_steps=xargs.discrete_diffnas_steps, w_optimizer=w_optimizer, train_loader=train_loader,
progress_bar=False)
if step == 0: print(f"ENAS train controller - supernet weight sample after finetune (should be the same to make sure we do not change the original network): {str(list(network.parameters())[1])[0:80]}")
reward_metric = torch.tensor(finetune_metrics[sampled_arch]["sotl"][-1]) # Take the SOTL over all training steps as the reward
else:
raise NotImplementedError
reward = reward_metric + controller_entropy_weight * entropy
if prev_baseline is None:
baseline = reward_metric
else:
baseline = prev_baseline - (1 - controller_bl_dec) * (prev_baseline - reward)
loss = -1 * log_prob * (reward - baseline)
# account
RewardMeter.update(reward.item())
BaselineMeter.update(baseline.item())
ValAccMeter.update(reward_metric.item()*100)
LossMeter.update(loss.item())
EntropyMeter.update(entropy.item())
# Average gradient over controller_num_aggregate samples
loss = loss / controller_num_aggregate
loss.backward(retain_graph=True)
# measure elapsed time
batch_time.update(time.time() - xend)
xend = time.time()
if (step+1) % controller_num_aggregate == 0:
grad_norm = torch.nn.utils.clip_grad_norm_(network.controller.parameters(), 5.0)
GradnormMeter.update(grad_norm)
optimizer.step()
network.controller.zero_grad()
if step % print_freq == 0:
Sstr = '*Train-Controller* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, controller_train_steps * controller_num_aggregate)
Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
Wstr = '[Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Reward {reward.val:.2f} ({reward.avg:.2f})] Baseline {basel.val:.2f} ({basel.avg:.2f})'.format(loss=LossMeter, top1=ValAccMeter, reward=RewardMeter, basel=BaselineMeter)
Estr = 'Entropy={:.4f} ({:.4f})'.format(EntropyMeter.val, EntropyMeter.avg)
logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Estr)
return LossMeter.avg, ValAccMeter.avg, BaselineMeter.avg, RewardMeter.avg
def regularized_evolution_ws(network, train_loader, population_size, sample_size, mutate_arch, cycles, arch_sampler, api, config, xargs, train_steps=15, train_epochs=1, metric="loss"):
"""Algorithm for regularized evolution (i.e. aging evolution).
Follows "Algorithm 1" in Real et al. "Regularized Evolution for Image
Classifier Architecture Search".
Args:
cycles: the number of cycles the algorithm should run for.
population_size: the number of individuals to keep in the population.
sample_size: the number of individuals that should participate in each tournament.
train_steps / train_epochs: how long each sampled architecture is finetuned with shared weights before its score is recorded (when cycles is None, the overall budget is derived from xargs.rea_epochs).
Returns:
history: a list of `Model` instances, representing all the models computed
during the evolution experiment.
"""
# init_model = deepcopy(network.state_dict())
# init_optim = deepcopy(w_optimizer.state_dict())
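# High-level flow (matches the code below): (1) seed the population with `population_size` random
# architectures, each scored after a short weight-sharing finetune; (2) each cycle, sample
# `sample_size` members, mutate the best of the sample into a child and score the child the same way;
# (3) age the population by dropping its oldest member.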
population = collections.deque()
api.reset_time()
history = [] # Not used by the algorithm, only used to report results.
cur_best_arch = []
stats = {"pop":{"mean":[], "std":[]}}
top_ns = [1, 5, 10]
total_time = 0
model_init = deepcopy(network)
cycle_len = train_epochs if train_steps is None else train_steps/len(train_loader)*train_epochs
if cycles is None:
assert xargs.rea_epochs is not None
cycles = xargs.rea_epochs / cycle_len # Just super large number because we are using epoch budget
print(f"Converted cycles=None to cycles={cycles} since rea_epochs={xargs.rea_epochs} and each cycle has cycle_len={cycle_len}")
# Initialize the population with random models.
while len(population) < population_size:
model = deepcopy(network)
w_optimizer, w_scheduler, criterion = get_finetune_scheduler(xargs.scheduler, config, xargs, model, None)
cur_arch = arch_sampler.random_topology_func()
model.set_cal_mode("dynamic", cur_arch)
metrics, sum_metrics = eval_archs_on_batch(xloader=train_loader, archs=[cur_arch], network = model, criterion=criterion,
train_steps=train_steps, epochs=train_epochs, same_batch=True, metric=metric, train_loader=train_loader, w_optimizer=w_optimizer, progress_bar=False)
if xargs.rea_metric in ['loss', 'acc']:
decision_metric, decision_lambda = metrics[0], lambda x: x[metric][0]
elif xargs.rea_metric in ['sotl']:
decision_metric, decision_lambda = sum_metrics["loss"], lambda x: x["sum"]["loss"][-1]
elif xargs.rea_metric in ['soacc']:
decision_metric, decision_lambda = sum_metrics["acc"], lambda x: x["sum"]["acc"]
model.metric = decision_metric
model.arch = cur_arch
ground_truth = summarize_results_by_dataset(cur_arch, api=api, iepoch=199, hp='200')
history_stats = {"model":model, metric: metrics[0], "sum": sum_metrics, "arch": cur_arch, "ground_truth": ground_truth}
# Append the info
population.append(history_stats)
history.append(history_stats)
total_time += cycle_len
print(history)
top_n_perfs = sorted(history, key = decision_lambda, reverse=True) # Should start with the best and end with the worst
# Reformatting history into top-N logging
top_perfs = {}
for top in top_ns:
top_perf = {nth_top: top_n_perfs[min(nth_top, len(top_n_perfs)-1)]["ground_truth"]
for nth_top in range(top)}
top_perf = avg_nested_dict(top_perf)
top_perfs["top"+str(top)] = top_perf
cur_best_arch.append(top_n_perfs[0]["arch"].tostr())
wandb.log({"ground_truth":top_perfs, "total_time": total_time})
# Carry out evolution in cycles. Each cycle produces a model and removes another.
for i in tqdm(range(round(cycles)), desc = "Cycling in REA"):
# Sample randomly chosen models from the current population.
if total_time >= xargs.rea_epochs:
print("Breaking REA early because the total budget was reached")
break
start_time, sample = time.time(), []
while len(sample) < sample_size:
# Inefficient, but written this way for clarity. In the case of neural
# nets, the efficiency of this line is irrelevant because training neural
# nets is the rate-determining step.
candidate = random.choice(list(population))
sample.append(candidate)
# The parent is the best model in the sample.
parent = max(sample, key=lambda i: i["model"].metric)
# Create the child model and store it.
child = deepcopy(network)
w_optimizer, w_scheduler, criterion = get_finetune_scheduler(xargs.scheduler, config, xargs, child, None)
cur_arch = mutate_arch(parent["model"].arch)
child.arch = cur_arch
child.set_cal_mode("dynamic", cur_arch)
metrics, sum_metrics = eval_archs_on_batch(xloader=train_loader, archs=[cur_arch], network = child, criterion=criterion, train_steps=train_steps, epochs=train_epochs, same_batch=True, metric=metric, train_loader=train_loader, w_optimizer=w_optimizer)
if xargs.rea_metric in ['loss', 'acc']:
decision_metric, decision_lambda = metrics[0], lambda x: x[metric][0]
elif xargs.rea_metric in ['sotl']:
decision_metric, decision_lambda = sum_metrics["loss"], lambda x: x["sum"]["loss"]
elif xargs.rea_metric in ['soacc']:
decision_metric, decision_lambda = sum_metrics["acc"], lambda x: x["sum"]["acc"]
child.metric = decision_metric
child.arch = cur_arch
ground_truth = summarize_results_by_dataset(cur_arch, api=api, iepoch=199, hp='200')
history_stats = {"model":child, metric: metrics[0], "sum": sum_metrics, "arch": cur_arch, "ground_truth": ground_truth}
# Append the info
population.append(history_stats)
history.append(history_stats)
total_time += cycle_len
top_n_perfs = sorted(history, key = decision_lambda, reverse=True) # Should start with the best and end with the worst
# Reformatting history into top-N logging
top_perfs = {}
for top in top_ns:
top_perf = {nth_top: top_n_perfs[nth_top]["ground_truth"]
for nth_top in range(top)}
top_perf = avg_nested_dict(top_perf)
top_perfs["top"+str(top)] = top_perf
cur_best_arch.append(top_n_perfs[0]["arch"].tostr())
if i % 50 == 0:
print(f"REA best perf at iter={i} is {top_n_perfs[0]['ground_truth']}")
wandb.log({"ground_truth":top_perfs, "total_time": total_time})
# Remove the oldest model.
population.popleft()
return history, cur_best_arch, total_time
def search_func_bare(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, algo, logger, args=None, epoch=None, smoke_test=False,
meta_learning=False, api=None, supernets_decomposition=None, arch_groups_quartiles=None, arch_groups_brackets: Dict=None,
all_archs=None, grad_metrics_percentiles=None, metrics_percs=None, percentiles=None, loss_threshold=None, replay_buffer = None, checkpoint_freq=3, val_loader=None, train_loader=None, meta_optimizer=None):
data_time, batch_time = AverageMeter(), AverageMeter()
base_losses, base_top1, base_top5 = AverageMeter(track_std=True), AverageMeter(track_std=True), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(track_std=True), AverageMeter(track_std=True), AverageMeter()
end = time.time()
network.train()
parsed_algo = algo.split("_")
if args.search_space_paper == "nats-bench":
if (len(parsed_algo) == 3 and ("perf" in algo or "size" in algo)): # Can be used with algo=random_size_highest etc. so that it gets parsed correctly
arch_sampler = ArchSampler(api=api, model=network, mode=parsed_algo[1], prefer=parsed_algo[2], op_names=network._op_names, max_nodes = args.max_nodes, search_space = args.search_space_paper)
else:
arch_sampler = ArchSampler(api=api, model=network, mode="random", prefer="random", op_names=network._op_names, max_nodes = args.max_nodes, search_space = args.search_space_paper) # TODO mode=perf is a placeholder so that it loads the perf_all_dict, but then we do sample(mode=random) so it does not actually exploit the perf information
else:
arch_sampler = None
arch_overview = {"cur_arch": None, "all_cur_archs": [], "all_archs": [], "top_archs_last_epoch": [], "train_loss": [], "train_acc": [], "val_acc": [], "val_loss": []}
search_loader_iter = iter(xloader)
if args.inner_steps is not None:
inner_steps = args.inner_steps
else:
inner_steps = 1 # SPOS equivalent
logger.log(f"Starting search with batch_size={next(iter(xloader))[0].size(0)}, len={len(xloader)}")
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in tqdm(enumerate(search_loader_iter), desc = "Iterating over SearchDataset", total = round(len(xloader)/(inner_steps if not args.inner_steps_same_batch else 1))): # Accumulate gradients over backward for sandwich rule
all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets = format_input_data(base_inputs, base_targets, arch_inputs, arch_targets, search_loader_iter, inner_steps, args)
network.zero_grad()
if smoke_test and step >= 3:
break
if step == 0:
logger.log(f"New epoch (len={len(search_loader_iter)}) of arch; for debugging, these are the targets of the first minibatch in the epoch: {base_targets[0:10]}")
scheduler.update(None, 1.0 * step / len(xloader))
# measure data loading time
data_time.update(time.time() - end)
if (args.sandwich is None or args.sandwich == 1):
outer_iters = 1
else:
outer_iters = args.sandwich
if args.sandwich_mode in ["quartiles", "fairnas"]:
sampled_archs = arch_sampler.sample(mode = args.sandwich_mode, subset = all_archs, candidate_num=args.sandwich) # Always samples 4 new archs but then we pick the one from the right quartile
for outer_iter in range(outer_iters):
# Update the weights
# sampled_arch = sample_arch_and_set_mode(network, algo, arch_sampler)
sampled_arch = None
network.set_cal_mode("urs", None)
if sampled_arch is not None:
arch_overview["cur_arch"] = sampled_arch
arch_overview["all_archs"].append(sampled_arch)
arch_overview["all_cur_archs"].append(sampled_arch)
fnetwork = network
fnetwork.zero_grad()
diffopt = w_optimizer
for inner_step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(zip(all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets)):
if step in [0, 1] and inner_step < 3 and epoch % 5 == 0:
logger.log(f"Base targets in the inner loop at inner_step={inner_step}, step={step}: {base_targets[0:10]}")
# if algo.startswith("gdas"): # NOTE seems the forward pass doesnt explicitly change the genotype? The gumbels are always resampled in forward_gdas but it does not show up here
# logger.log(f"GDAS genotype at step={step}, inner_step={inner_step}, epoch={epoch}: {sampled_arch}")
_, logits = fnetwork(base_inputs)
base_loss = criterion(logits, base_targets) * (1 if args.sandwich is None else 1/args.sandwich)
base_loss.backward()
w_optimizer.step()
network.zero_grad()
base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
base_losses.update(base_loss.item() / (1 if args.sandwich is None else 1/args.sandwich), base_inputs.size(0))
base_top1.update (base_prec1.item(), base_inputs.size(0))
base_top5.update (base_prec5.item(), base_inputs.size(0))
arch_loss = torch.tensor(10) # Placeholder in case it never gets updated here. It is not very useful in any case
# Preprocess the hypergradients into desired form
if algo == 'setn':
network.set_cal_mode('joint')
elif algo.startswith('gdas'):
network.set_cal_mode('gdas', None)
elif algo.startswith('darts'):
network.set_cal_mode('joint', None)
elif 'random' in algo:
network.set_cal_mode('urs', None)
elif algo != 'enas':
raise ValueError('Invalid algo name : {:}'.format(algo))
network.zero_grad()
if algo == 'darts-v2' and not args.meta_algo:
arch_loss, logits = backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets, meta_learning=meta_learning)
a_optimizer.step()
elif (algo == 'random' or algo == 'enas' or 'random' in algo ) and not args.meta_algo:
if algo == "random" and args.merge_train_val_supernet:
arch_loss = torch.tensor(10) # Makes it slower and does not return anything useful anyways
else:
arch_loss = torch.tensor(10)
# with torch.no_grad():
# _, logits = network(arch_inputs)
# arch_loss = criterion(logits, arch_targets)
else:
# The Darts-V1/FOMAML/GDAS/who knows what else branch
network.zero_grad()
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
arch_loss.backward()
a_optimizer.step()
arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update (arch_prec1.item(), arch_inputs.size(0))
arch_top5.update (arch_prec5.item(), arch_inputs.size(0))
arch_overview["val_acc"].append(arch_prec1)
arch_overview["val_loss"].append(arch_loss.item())
arch_overview["all_cur_archs"] = [] #Cleanup
network.zero_grad()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if step % print_freq == 0 or step + 1 == len(xloader):
Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader))
Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)
Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)
logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)
if step == print_freq:
logger.log(network.show_alphas())
eigenvalues=None
search_metric_stds, supernet_train_stats, supernet_train_stats_by_arch = {}, {}, {}
search_metric_stds = {"train_loss.std": base_losses.std, "train_loss_arch.std": arch_losses.std, "train_acc.std": base_top1.std, "train_acc_arch.std": arch_top1.std}
return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg, supernet_train_stats, supernet_train_stats_by_arch, arch_overview, search_metric_stds, eigenvalues
def train_epoch(train_loader, network, w_optimizer, criterion, algo, logger):
data_time, batch_time = AverageMeter(), AverageMeter()
loss, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter()
network.train()
if algo.startswith('setn'):
sampled_arch = network.dync_genotype(True)
network.set_cal_mode('dynamic', sampled_arch)
elif algo.startswith('gdas'):
network.set_cal_mode('gdas', None)
sampled_arch = network.genotype
elif algo.startswith('darts'):
network.set_cal_mode('joint', None)
sampled_arch = network.genotype
elif "random" in algo: # TODO REMOVE SOON
network.set_cal_mode('urs')
start = time.time()
for step, (inputs, targets) in tqdm(enumerate(train_loader), desc = "Iterating over batches while training weights only", total = len(train_loader)):
targets = targets.cuda(non_blocking=True)
_, logits = network(inputs.cuda(non_blocking=True))
train_loss = criterion(logits, targets)
train_loss.backward()
w_optimizer.step()
network.zero_grad()
prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
loss.update(train_loss.item(), inputs.size(0))
top1.update (prec1.item(), inputs.size(0))
top5.update (prec5.item(), inputs.size(0))
end = time.time()
logger.log(f"Trained epoch in {end-start} time, avg loss = {loss.avg}, avg acc = {top1.avg}")
return loss.avg, top1.avg, top5.avg
def evenify_training(network2, train_loader, criterion, w_optimizer2, logger, arch_idx, epoch_eqs, sampled_arch):
# Train each architecture until they all reach the same amount of training as a preprocessing step before recording the training statistics for correlations
cur_epoch, target_loss = epoch_eqs[sampled_arch.tostr()]["epoch"], epoch_eqs[sampled_arch.tostr()]["val"]
max_epoch_attained = max([x["val"] for x in epoch_eqs.values()])
done = False
iter_count=0
while not done:
avg_loss = AverageMeter()
for batch_idx, data in tqdm(enumerate(train_loader), desc = f"Evenifying training for arch_idx={arch_idx}"):
if avg_loss.avg < target_loss and batch_idx >= 15 and avg_loss.avg != 0:
done = True
break
network2.zero_grad()
inputs, targets = data
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
_, logits = network2(inputs)
train_acc_top1, train_acc_top5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
loss = criterion(logits, targets)
avg_loss.update(loss.item())
loss.backward()
w_optimizer2.step()
iter_count += 1
logger.log(f"Trained arch_idx for {iter_count} iterations to make it match up to {max_epoch_attained}")
def exact_hessian(network, val_loader, criterion, xloader, epoch, logger, args):
labels = []
for i in range(network._max_nodes):
for n in network._op_names:
labels.append(n + "_" + str(i))
network.logits_only=True
val_x, val_y = next(iter(val_loader))
val_loss = criterion(network(val_x.to('cuda')), val_y.to('cuda'))
try:
train_x, train_y, _, _ = next(iter(xloader))
except:
train_x, train_y = next(iter(xloader))
train_loss = criterion(network(train_x.to('cuda')), train_y.to('cuda'))
val_hessian_mat = _hessian(val_loss, network.arch_params())
if epoch == 0:
print(f"Example architecture Hessian: {val_hessian_mat}")
val_eigenvals, val_eigenvecs = torch.eig(val_hessian_mat)
try:
if not args.merge_train_val_supernet:
train_hessian_mat = _hessian(train_loss, network.arch_params())
train_eigenvals, train_eigenvecs = torch.eig(train_hessian_mat)
else:
train_eigenvals = val_eigenvals
except:
train_eigenvals = val_eigenvals
val_eigenvals = val_eigenvals[:, 0] # Drop the imaginary components
if epoch == 0:
print(f"Example architecture eigenvals: {val_eigenvals}")
train_eigenvals = train_eigenvals[:, 0]
val_dom_eigenvalue = torch.max(val_eigenvals)
train_dom_eigenvalue = torch.max(train_eigenvals)
eigenvalues = {"max":{}, "spectrum": {}}
eigenvalues["max"]["train"] = train_dom_eigenvalue
eigenvalues["max"]["val"] = val_dom_eigenvalue
eigenvalues["spectrum"]["train"] = {k:v for k,v in zip(labels, train_eigenvals)}
eigenvalues["spectrum"]["val"] = {k:v for k,v in zip(labels, val_eigenvals)}
network.logits_only=False
return eigenvalues
def approx_hessian(network, val_loader, criterion, xloader, args):
network.logits_only=True
val_eigenvals, val_eigenvecs = compute_hessian_eigenthings(network, val_loader, criterion, 1, mode="power_iter",
power_iter_steps=50, arch_only=True, max_samples=64, full_dataset=True)
val_dom_eigenvalue = val_eigenvals[0]
try:
if hasattr(args, "merge_train_val_supernet") and not args.merge_train_val_supernet:
train_eigenvals, train_eigenvecs = compute_hessian_eigenthings(network, xloader, criterion, 1, mode="power_iter",
power_iter_steps=50, arch_only=True, max_samples=64, full_dataset=True)
train_dom_eigenvalue = train_eigenvals[0]
else:
train_eigenvals, train_eigenvecs = None, None
train_dom_eigenvalue = None
except:
train_eigenvals, train_eigenvecs, train_dom_eigenvalue = None, None, None
eigenvalues = {"max":{}, "spectrum": {}}
eigenvalues["max"]["val"] = val_dom_eigenvalue
eigenvalues["max"]["train"] = train_dom_eigenvalue
network.logits_only=False
network.zero_grad()
return eigenvalues
# The following three functions are used for DARTS-V2
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
def _hessian_vector_product(vector, network, criterion, base_inputs, base_targets, r=1e-2):
R = r / _concat(vector).norm()
for p, v in zip(network.weights, vector):
p.data.add_(R, v)
_, logits = network(base_inputs)
loss = criterion(logits, base_targets)
grads_p = torch.autograd.grad(loss, network.alphas)
for p, v in zip(network.weights, vector):
p.data.sub_(2*R, v)
_, logits = network(base_inputs)
loss = criterion(logits, base_targets)
grads_n = torch.autograd.grad(loss, network.alphas)
for p, v in zip(network.weights, vector):
p.data.add_(R, v)
return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]
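# The function above is the DARTS finite-difference approximation of a Hessian-vector product:
# with R = r / ||v||, it returns
#   (d L_train(w + R*v, alpha)/d alpha - d L_train(w - R*v, alpha)/d alpha) / (2R)
# which approximates (d^2 L_train / d alpha d w) @ v without materializing the Hessian.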
def backward_step_unrolled_darts(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets):
# _compute_unrolled_model
_, logits = network(base_inputs)
loss = criterion(logits, base_targets)
start=time.time()
LR, WD, momentum = w_optimizer.param_groups[0]['lr'], w_optimizer.param_groups[0]['weight_decay'], w_optimizer.param_groups[0]['momentum']
with torch.no_grad():
theta = _concat(network.weights)
try:
moment = _concat(w_optimizer.state[v]['momentum_buffer'] for v in network.weights)
moment = moment.mul_(momentum)
except:
moment = torch.zeros_like(theta)
dtheta = _concat(torch.autograd.grad(loss, network.weights)) + WD*theta
params = theta.sub(LR, moment+dtheta)
# print(f"Time of momentum whatever: {time.time()-start}")
start=time.time()
unrolled_model = deepcopy(network)
model_dict = unrolled_model.state_dict()
new_params, offset = {}, 0
start2=time.time()
for k, v in network.named_parameters():
if 'arch' in k or 'alpha' in k: continue
v_length = np.prod(v.size())
model_dict[k] = params[offset: offset+v_length].view(v.size())
offset += v_length
# print(f"Loading shit subroutine : {time.time()-start2}")
# model_dict.update(new_params)
# unrolled_model.load_state_dict(model_dict)
# print(f"Loading shit {time.time()-start}")
start=time.time()
unrolled_model.zero_grad()
_, unrolled_logits = unrolled_model(arch_inputs)
unrolled_loss = criterion(unrolled_logits, arch_targets)
unrolled_loss.backward()
# print(f"Model forward: {time.time()-start}")
dalpha = [p.grad for p in unrolled_model.arch_parameters]
vector = [v.grad.data for v in unrolled_model.weights]
start=time.time()
implicit_grads = _hessian_vector_product(vector, network, criterion, base_inputs, base_targets)
# print(f"time of hvp: {time.time()-start}")
for g, ig in zip(dalpha, implicit_grads):
# dalpha.data.sub_(LR, implicit_grads.data)
g.data.sub_(LR, ig.data)
for p, da in zip(network.arch_parameters, dalpha):
if p.grad is None:
p.grad = deepcopy( da )
else:
p.data.copy_( da.data )
return unrolled_loss.detach(), unrolled_logits.detach()
def backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets, meta_learning=False):
# _compute_unrolled_model
if meta_learning in ['all', 'arch_only']:
base_inputs = arch_inputs
base_targets = arch_targets
_, logits = network(base_inputs)
loss = criterion(logits, base_targets)
LR, WD, momentum = w_optimizer.param_groups[0]['lr'], w_optimizer.param_groups[0]['weight_decay'], w_optimizer.param_groups[0]['momentum']
with torch.no_grad():
theta = _concat(network.weights)
try:
moment = _concat(w_optimizer.state[v]['momentum_buffer'] for v in network.weights)
moment = moment.mul_(momentum)
except:
moment = torch.zeros_like(theta)
dtheta = _concat(torch.autograd.grad(loss, network.weights)) + WD*theta
params = theta.sub(LR, moment+dtheta)
unrolled_model = deepcopy(network)
model_dict = unrolled_model.state_dict()
new_params, offset = {}, 0
for k, v in network.named_parameters():
if 'arch_parameters' in k: continue
v_length = np.prod(v.size())
new_params[k] = params[offset: offset+v_length].view(v.size())
offset += v_length
model_dict.update(new_params)
unrolled_model.load_state_dict(model_dict)
unrolled_model.zero_grad()
_, unrolled_logits = unrolled_model(arch_inputs)
unrolled_loss = criterion(unrolled_logits, arch_targets)
unrolled_loss.backward()
dalpha = unrolled_model.arch_parameters.grad
vector = [v.grad.data for v in unrolled_model.weights]
[implicit_grads] = _hessian_vector_product(vector, network, criterion, base_inputs, base_targets)
dalpha.data.sub_(LR, implicit_grads.data)
if network.arch_parameters.grad is None:
network.arch_parameters.grad = deepcopy( dalpha )
else:
network.arch_parameters.grad.data.copy_( dalpha.data )
return unrolled_loss.detach(), unrolled_logits.detach()
def update_supernets_decomposition(supernets_decomposition, arch_groups_quartiles, losses_percs, grad_metrics_percentiles, base_loss, data_step, epoch, xloader, sampled_arch,
fnetwork):
# TODO need to fix the logging here I think. The normal logging is much better now
cur_quartile = arch_groups_quartiles[sampled_arch.tostr()]
with torch.no_grad():
dw = [p.grad.detach().to('cpu') if p.grad is not None else torch.zeros_like(p).to('cpu') for p in
fnetwork.parameters()]
cur_supernet = supernets_decomposition[cur_quartile]
for decomp_w, g in zip(cur_supernet.parameters(), dw):
if decomp_w.grad is not None:
decomp_w.grad.copy_(g)
else:
decomp_w.grad = g
analyze_grads(cur_supernet, grad_metrics_percentiles["perc" + str(cur_quartile)]["supernet"],
true_step=data_step + epoch * len(xloader), total_steps=data_step + epoch * len(xloader))
if type(arch_groups_quartiles) is dict:
for quartile in arch_groups_quartiles.keys():
if quartile == cur_quartile:
losses_percs["perc" + str(quartile)].update(base_loss.item()) # TODO this doesnt make any sense
def bracket_tracking_setup(arch_groups_brackets, brackets_cond, arch_sampler):
all_brackets = set(arch_groups_brackets.values()) if brackets_cond else set()
supernet_train_stats = {"train_loss":{"sup"+str(percentile): [] for percentile in all_brackets},
"val_loss": {"sup"+str(percentile): [] for percentile in all_brackets},
"val_acc": {"sup"+str(percentile): [] for percentile in all_brackets},
"train_acc": {"sup"+str(percentile): [] for percentile in all_brackets}}
supernet_train_stats_by_arch = {arch: {"train_loss": [], "val_loss": [], "train_acc": [], "val_acc": []} for arch in (arch_sampler.archs if arch_sampler is not None else {})}
supernet_train_stats_avgmeters = {}
for k in list(supernet_train_stats.keys()):
supernet_train_stats[k+str("AVG")] = {"sup"+str(percentile): [] for percentile in all_brackets}
supernet_train_stats_avgmeters[k+str("AVG")] = {"sup"+str(percentile): AverageMeter() for percentile in all_brackets}
return all_brackets, supernet_train_stats, supernet_train_stats_by_arch, supernet_train_stats_avgmeters
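# The returned supernet_train_stats maps each metric name to {"sup<bracket>": [values]}, plus a
# "<metric>AVG" entry per metric whose lists hold running averages (filled in by update_brackets).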
def update_running(running, valid_loss=None, valid_acc = None, valid_acc_top5=None, loss=None, train_acc_top1=None,
train_acc_top5=None, sogn=None, sogn_norm=None, total_train_loss_for_sotl_aug=None):
if valid_loss is not None:
running["sovl"] -= valid_loss
if valid_acc is not None:
running["sovalacc"] += valid_acc
# if valid_acc_top5 is not None:
# running["sovalacc_top5"] += valid_acc_top5
# if train_acc_top5 is not None:
# running["sotrainacc_top5"] += train_acc_top5
if loss is not None:
running["sotl"] -= loss # Need to have negative loss so that the ordering is consistent with val acc
if train_acc_top1 is not None:
running["sotrainacc"] += train_acc_top1
if sogn is not None:
# running["sogn"] += grad_metrics["train"]["sogn"]
running["sogn"] += sogn
if sogn_norm is not None:
# running["sogn_norm"] += grad_metrics["train"]["grad_normalized"]
running["sogn_norm"] += sogn_norm
if total_train_loss_for_sotl_aug is not None:
# running["sotl_aug"] = running["sotl"] + total_metrics_dict["total_train_loss"]
running["sotl_aug"] = running["sotl"] + total_train_loss_for_sotl_aug
if valid_loss is not None and loss is not None:
running["sobothl"] -= (valid_loss + loss)
return running
def update_base_metrics(metrics, running, metrics_keys=None, grad_metrics=None, drop_fancy=False, grads_analysis=None,
valid_acc=None, train_acc=None, loss=None, valid_loss=None, arch_str=None, epoch_idx = None):
if metrics_keys is None:
metrics_keys = metrics.keys()
for k in running.keys():
metrics[k][arch_str][epoch_idx].append(running[k])
metrics["val_acc"][arch_str][epoch_idx].append(valid_acc)
metrics["train_acc"][arch_str][epoch_idx].append(train_acc)
metrics["train_loss"][arch_str][epoch_idx].append(-loss)
metrics["val_loss"][arch_str][epoch_idx].append(-valid_loss)
metrics["gap_loss"][arch_str][epoch_idx].append(-valid_loss + (loss - valid_loss))
# if arch_str is not None and epoch_idx is not None:
# if len(metrics["train_loss"][arch_str][epoch_idx]) >= 3:
# loss_normalizer = sum(metrics["train_loss"][arch_str][epoch_idx][-3:])/3
# elif epoch_idx >= 1:
# loss_normalizer = sum(metrics["train_loss"][arch_str][epoch_idx-1][-3:])/3
# else:
# loss_normalizer = 1
# metrics["train_loss_pct"][arch_str][epoch_idx].append(1-loss/loss_normalizer)
data_types = ["train"] if not grads_analysis else ["train", "val", "total_train", "total_val"]
grad_log_keys = ["gn", "gnL1", "sogn", "sognL1", "grad_normalized", "grad_accum", "grad_accum_singleE", "grad_accum_decay", "grad_mean_accum", "grad_mean_sign", "grad_var_accum", "grad_var_decay_accum"]
if not drop_fancy and grad_metrics is not None:
for data_type in data_types:
for log_key in grad_log_keys:
val = grad_metrics[data_type][log_key]
metrics[data_type+"_"+log_key][arch_str][epoch_idx].append(grad_metrics[data_type][log_key])
return metrics
def load_my_state_dict(model, state_dict):
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state or 'classifier' in name:
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
own_state[name].copy_(param)
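# Usage sketch (illustrative, not part of the original code): selectively load
# pretrained supernet weights while keeping the current classifier head, which
# load_my_state_dict skips by design. The checkpoint path below is hypothetical;
# the 'search_model' key matches the checkpoints loaded elsewhere in this file.
#   checkpoint = torch.load('seed-1-basic.pth')
#   load_my_state_dict(search_model, checkpoint['search_model'])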
def resolve_higher_conds(xargs):
use_higher_cond = xargs.meta_algo and xargs.meta_algo not in ['reptile', 'metaprox']
if xargs.meta_algo is not None and 'darts' in xargs.meta_algo and xargs.higher_method == "joint" and (xargs.sandwich is None or xargs.sandwich == 1): # Special case for single-level DARTS training
print("Set use_higher_cond to False because using single-level DARTS most likely")
use_higher_cond = False
diffopt_higher_grads_cond = True if (xargs.meta_algo not in ['reptile', 'metaprox', 'reptile_higher'] and xargs.higher_order != "first") else False
monkeypatch_higher_grads_cond = True if (xargs.meta_algo not in ['reptile', 'metaprox', 'reptile_higher'] and (xargs.higher_order != "first" or xargs.higher_method == "val")) else False
first_order_grad_for_free_cond = xargs.higher_order == "first" and xargs.higher_method in ["sotl", "sotl_v2"]
first_order_grad_concurrently_cond = xargs.higher_order == "first" and xargs.higher_method.startswith("val")
second_order_grad_optimization_cond = xargs.higher_order == "second" and xargs.higher_method in ["sotl", "sotl_v2"]
print(f"Resolved higher conds as use_higher_cond={use_higher_cond}, diffopt_higher_grads_cond={diffopt_higher_grads_cond}, monkeypatch_higher_grads_cond={monkeypatch_higher_grads_cond}, first_order_grad_for_free_cond={first_order_grad_for_free_cond}, first_order_grad_concurrently_cond={first_order_grad_concurrently_cond}, second_order_grad_optimization_cond={second_order_grad_optimization_cond}")
return use_higher_cond, diffopt_higher_grads_cond, monkeypatch_higher_grads_cond, first_order_grad_for_free_cond, first_order_grad_concurrently_cond, second_order_grad_optimization_cond
def init_search_from_checkpoint(search_model, logger, xargs):
# The supernet init path can have form like '1,2,3' or 'darts_1,darts_2,darts_3' or 'cifar10_random_1, cifar10_random_2, cifar10_random_3'
split_path = xargs.supernet_init_path.split(",")
whole_path = split_path[xargs.rand_seed % len(split_path)]
logger.log(f"Picked {xargs.rand_seed % len(split_path)}-th seed from {xargs.supernet_init_path}")
if os.path.exists(xargs.supernet_init_path):
pass
else:
try:
dataset, algo = "cifar10", "random" # Defaults
parsed_init_path = whole_path.split("_") # Should be algo followed by seed number, eg. darts_1 or random_30 or cifar100_random_50
logger.log(f"Parsed init path into {parsed_init_path}")
if len(parsed_init_path) == 2:
seed_num = int(parsed_init_path[1])
seed_algo = parsed_init_path[0]
if len(parsed_init_path) == 3:
seed_num = int(parsed_init_path[2])
seed_algo = parsed_init_path[1]
dataset = parsed_init_path[0]
whole_path = f'./output/search-tss/{dataset}/{seed_algo}-affine0_BN0-None/checkpoint/seed-{seed_num}-basic.pth'
except Exception as e:
logger.log(f"Supernet init path does not seem to be formatted as seed number - it is {xargs.supernet_init_path}, error was {e}")
logger.log(f'Was given supernet checkpoint to use as initialization at {xargs.supernet_init_path}, decoded into {whole_path} and loaded its weights into search model')
checkpoint = torch.load(whole_path)
  # The remaining things that are usually contained in a checkpoint are reset to empty a bit further down
search_model.load_state_dict(checkpoint['search_model'], strict=False)
# load_my_state_dict(model, checkpoint["search_model"])
def init_supernets_decomposition(xargs, logger, checkpoint, network):
percentiles = [0, 25, 50, 75, 100]
  empty_network = deepcopy(network).to('cpu') # TODO don't actually need to use these networks in the end? Can just use grad_metrics I think
with torch.no_grad():
for p in empty_network.parameters():
p.multiply_(0.)
  supernets_decomposition = {percentiles[i+1]: deepcopy(empty_network) for i in range(len(percentiles)-1)} # Each bracket needs its own copy so that per-quartile gradients accumulate separately
supernets_decomposition["init"] = deepcopy(network)
logger.log(f'Initialized {len(percentiles)} supernets because supernet_decomposition={xargs.supernets_decomposition}')
arch_groups_quartiles = arch_percentiles(percentiles=percentiles, mode=xargs.supernets_decomposition_mode)
if (last_info_orig.exists() and "grad_metrics_percs" not in checkpoint.keys()) or not last_info_orig.exists():
# TODO what is the point of this archs_subset?
archs_subset = network.return_topK(-1 if xargs.supernets_decomposition_topk is None else xargs.supernets_decomposition_topk, use_random=False) # Should return all archs for negative K
grad_metrics_percs = {"perc"+str(percentiles[i+1]):init_grad_metrics(keys=["supernet"]) for i in range(len(percentiles)-1)}
else:
grad_metrics_percs = checkpoint["grad_metrics_percs"]
archs_subset = checkpoint["archs_subset"]
metrics_factory = {"perc"+str(percentile):[[] for _ in range(total_epoch)] for percentile in percentiles}
metrics_percs = DefaultDict_custom()
metrics_percs.set_default_item(metrics_factory)
return percentiles, supernets_decomposition, arch_groups_quartiles, archs_subset, grad_metrics_percs, metrics_factory, metrics_percs
def scheduler_step(w_scheduler2, epoch_idx, batch_idx, train_loader, steps_per_epoch, scheduler_type):
if scheduler_type in ["linear", "linear_warmup"]:
w_scheduler2.update(epoch_idx, 1.0 * batch_idx / min(len(train_loader), steps_per_epoch))
elif scheduler_type == "cos_adjusted":
w_scheduler2.update(epoch_idx , batch_idx/min(len(train_loader), steps_per_epoch))
elif scheduler_type == "cos_reinit":
w_scheduler2.update(epoch_idx, 0.0)
elif scheduler_type in ['cos_fast', 'cos_warmup']:
w_scheduler2.update(epoch_idx , batch_idx/min(len(train_loader), steps_per_epoch))
else:
w_scheduler2.update(epoch_idx, 1.0 * batch_idx / len(train_loader))
def count_ops(arch):
ops = ['none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3']
arch_str = str(arch)
counts = {op: arch_str.count(op) for op in ops}
return counts
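# Example (hypothetical NAS-Bench-201 style genotype string, for illustration only).
# Note this is a plain substring count over str(arch):
#   count_ops('|nor_conv_3x3~0|+|skip_connect~0|nor_conv_1x1~1|')
#   -> {'none': 0, 'skip_connect': 1, 'nor_conv_1x1': 1, 'nor_conv_3x3': 1, 'avg_pool_3x3': 0}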
def grad_drop(params, p=0.0, arch_param_count=None, p_method=None):
if p == 0:
pass
else:
# NB201 param avg: 0.3985MB
for param in params:
if param.requires_grad and param.grad is not None:
if p_method is None:
torch.nn.functional.dropout(param.grad, p, training = True, inplace = True)
elif p_method == "adaptive":
p = None
else:
raise NotImplementedError
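# Usage sketch (illustrative): thin out gradients after the backward pass and before
# the optimizer step. torch.nn.functional.dropout rescales the surviving entries by
# 1/(1-p), so this is inverted dropout applied in-place to the gradients.
#   loss.backward()
#   grad_drop(network.parameters(), p=0.2)
#   w_optimizer.step()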
def search_func_old(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, algo, logger):
data_time, batch_time = AverageMeter(), AverageMeter()
base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
end = time.time()
network.train()
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
scheduler.update(None, 1.0 * step / len(xloader))
base_inputs = base_inputs.cuda(non_blocking=True)
arch_inputs = arch_inputs.cuda(non_blocking=True)
base_targets = base_targets.cuda(non_blocking=True)
arch_targets = arch_targets.cuda(non_blocking=True)
# measure data loading time
data_time.update(time.time() - end)
# Update the weights
if algo == 'setn':
sampled_arch = network.dync_genotype(True)
network.set_cal_mode('dynamic', sampled_arch)
elif algo == 'gdas':
network.set_cal_mode('gdas', None)
elif algo.startswith('darts'):
network.set_cal_mode('joint', None)
elif algo == 'random':
network.set_cal_mode('urs', None)
elif algo == 'enas':
with torch.no_grad():
network.controller.eval()
_, _, sampled_arch = network.controller()
network.set_cal_mode('dynamic', sampled_arch)
else:
raise ValueError('Invalid algo name : {:}'.format(algo))
network.zero_grad()
_, logits = network(base_inputs)
base_loss = criterion(logits, base_targets)
base_loss.backward()
w_optimizer.step()
# record
base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
base_losses.update(base_loss.item(), base_inputs.size(0))
base_top1.update (base_prec1.item(), base_inputs.size(0))
base_top5.update (base_prec5.item(), base_inputs.size(0))
# update the architecture-weight
if algo == 'setn':
network.set_cal_mode('joint')
elif algo == 'gdas':
network.set_cal_mode('gdas', None)
elif algo.startswith('darts'):
network.set_cal_mode('joint', None)
elif algo == 'random':
network.set_cal_mode('urs', None)
elif algo != 'enas':
raise ValueError('Invalid algo name : {:}'.format(algo))
network.zero_grad()
if algo == 'darts-v2':
arch_loss, logits = backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets)
a_optimizer.step()
elif algo == 'random' or algo == 'enas':
with torch.no_grad():
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
else:
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
arch_loss.backward()
a_optimizer.step()
# record
arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update (arch_prec1.item(), arch_inputs.size(0))
arch_top5.update (arch_prec5.item(), arch_inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if step % print_freq == 0 or step + 1 == len(xloader):
Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader))
Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)
Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)
logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)
return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg
def train_real(xargs, use_higher_cond, network, fnetwork, criterion, before_rollout_state, logger, all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets, w_optimizer, epoch, data_step, outer_iter, outer_iters):
if use_higher_cond and xargs.higher_loop == "bilevel" and xargs.higher_params == "arch" and xargs.sandwich_computation == "serial" and xargs.meta_algo not in ["reptile", "metaprox"]:
if xargs.refresh_arch_oneshot in ["always", "train_real"]: network.refresh_arch_oneshot = True
for inner_step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(zip(all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets)):
      if inner_step == 1 and xargs.inner_steps_same_batch: # TODO Don't need more than one step of finetuning when using a single batch for the bilevel rollout I think?
break
if xargs.bilevel_train_steps is not None and inner_step >= xargs.bilevel_train_steps:
break
if data_step in [0, 1] and inner_step < 3 and epoch < 5:
logger.log(f"Doing weight training for real in higher_loop={xargs.higher_loop} at inner_step={inner_step}, step={data_step}: target={base_targets[0:10]}")
logger.log(f"Weight-training-for-real check: Original net: {str(list(before_rollout_state['model_init'].parameters())[1])[0:80]}, after-rollout net: {str(list(network.parameters())[1])[0:80]}")
logger.log(f"Arch check: Original net: {str(list(before_rollout_state['model_init'].alphas))[0:80]}, after-rollout net: {str(list(network.alphas))[0:80]}")
_, logits = network(base_inputs)
base_loss = criterion(logits, base_targets) * (1 if xargs.sandwich is None else 1/xargs.sandwich)
network.zero_grad()
base_loss.backward()
w_optimizer.step()
if xargs.refresh_arch_oneshot in ["train_real"]: network.refresh_arch_oneshot = True
elif use_higher_cond and xargs.higher_loop == "joint" and xargs.higher_loop_joint_steps is None and xargs.higher_params == "arch" and xargs.sandwich_computation == "serial" and outer_iter == outer_iters - 1 and xargs.meta_algo not in ["reptile", "metaprox"]:
if epoch == 0 and data_step < 3:
logger.log(f"Updating meta-weights by copying from the rollout model")
with torch.no_grad():
for (n1, p1), p2 in zip(network.named_parameters(), fnetwork.parameters()):
if ('arch' not in n1 and 'alpha' not in n1): # Want to copy weights only - the architecture update was done on the original network
p1.data = p2.data
elif use_higher_cond and xargs.higher_loop == "joint" and xargs.higher_loop_joint_steps is not None and xargs.higher_params == "arch" and xargs.sandwich_computation == "serial" and outer_iter == outer_iters - 1 and xargs.meta_algo not in ["reptile", "metaprox"]:
# This branch can be used for GDAS with unrolled SOTL
for inner_step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(zip(all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets)):
if inner_step >= xargs.higher_loop_joint_steps:
break
if data_step < 2 and inner_step < 3 and epoch < 5:
logger.log(f"Doing weight training for real in higher_loop={xargs.higher_loop} with higher_loop_joint_steps={xargs.higher_loop_joint_steps} at inner_step={inner_step}, step={data_step}: {base_targets[0:10]}")
logger.log(f"Arch check: Original net: {str(list(before_rollout_state['model_init'].alphas))[0:80]}, after-rollout net: {str(list(network.alphas))[0:80]}")
_, logits = network(base_inputs)
base_loss = criterion(logits, base_targets) * (1 if xargs.sandwich is None else 1/xargs.sandwich)
network.zero_grad()
base_loss.backward()
w_optimizer.step()
def get_best_arch_old(train_loader, valid_loader, network, n_samples, algo, logger,
additional_training=True, api=None, style:str='sotl', w_optimizer=None, w_scheduler=None,
config: Dict=None, epochs:int=1, steps_per_epoch:int=100,
val_loss_freq:int=1, overwrite_additional_training:bool=False,
scheduler_type:str=None, xargs=None):
with torch.no_grad():
network.eval()
if algo == 'random':
archs, decision_metrics = network.return_topK(n_samples, True), []
elif algo == 'setn':
archs, decision_metrics = network.return_topK(n_samples, False), []
elif algo.startswith('darts') or algo == 'gdas':
arch = network.genotype
archs, decision_metrics = [arch], []
elif algo == 'enas':
archs, decision_metrics = [], []
for _ in range(n_samples):
_, _, sampled_arch = network.controller()
archs.append(sampled_arch)
else:
raise ValueError('Invalid algorithm name : {:}'.format(algo))
# The true rankings are used to calculate correlations later
true_rankings, final_accs = get_true_rankings(archs, api)
corr_funs = {"kendall": lambda x,y: scipy.stats.kendalltau(x,y).correlation,
"spearman":lambda x,y: scipy.stats.spearmanr(x,y).correlation,
"pearson":lambda x, y: scipy.stats.pearsonr(x,y)[0]}
if steps_per_epoch is not None and steps_per_epoch != "None":
steps_per_epoch = int(steps_per_epoch)
elif steps_per_epoch in [None, "None"]:
steps_per_epoch = len(train_loader)
else:
raise NotImplementedError
if style == 'val_acc':
decision_metrics = calculate_valid_accs(xloader=valid_loader, archs=archs, network=network)
corr_per_dataset = calc_corrs_val(archs=archs, valid_accs=decision_metrics, final_accs=final_accs, true_rankings=true_rankings, corr_funs=corr_funs)
wandb.log(corr_per_dataset)
if style == 'sotl' or style == "sovl":
# Simulate short training rollout to compute SoTL for candidate architectures
cond = logger.path('corr_metrics').exists() and not overwrite_additional_training
metrics_keys = ["sotl", "val", "sovl", "sovalacc", "sotrainacc", "sovalacc_top5", "sotrainacc_top5", "train_losses", "val_losses", "total_val"]
must_restart = False
start_arch_idx = 0
if cond:
logger.log("=> loading checkpoint of the last-checkpoint '{:}' start".format(logger.path('corr_metrics')))
checkpoint = torch.load(logger.path('corr_metrics'))
checkpoint_config = checkpoint["config"] if "config" in checkpoint.keys() else {}
try:
if type(list(checkpoint["metrics"]["sotl"].keys())[0]) is not str:
must_restart = True # will need to restart metrics because using the old checkpoint format
metrics = {k:checkpoint["metrics"][k] if k in checkpoint["metrics"] else {} for k in metrics_keys}
prototype = metrics[metrics_keys[0]]
first_arch = next(iter(metrics[metrics_keys[0]].keys()))
for metric_key in metrics_keys:
if not (len(metrics[metric_key]) == len(prototype) and len(metrics[metric_key][first_arch]) == len(prototype[first_arch])):
must_restart = True
except:
must_restart = True
decision_metrics = checkpoint["decision_metrics"] if "decision_metrics" in checkpoint.keys() else []
start_arch_idx = checkpoint["start_arch_idx"]
cond1={k:v for k,v in checkpoint_config.items() if ('path' not in k and 'dir' not in k and k not in ["dry_run"])}
cond2={k:v for k,v in vars(xargs).items() if ('path' not in k and 'dir' not in k and k not in ["dry_run"])}
logger.log(f"Checkpoint config: {cond1}")
logger.log(f"Newly input config: {cond2}")
if (cond1 == cond2):
logger.log("Both configs are equal.")
else:
logger.log("Checkpoint and current config are not the same! need to restart")
different_items = {k: cond1[k] for k in cond1 if k in cond2 and cond1[k] != cond2[k]}
logger.log(f"Different items are : {different_items}")
if (not cond) or must_restart or (xargs is None) or (cond1 != cond2) or any([len(x) == 0 for x in metrics.values()]): #config should be an ArgParse Namespace
if not cond:
logger.log(f"Did not find a checkpoint for supernet post-training at {logger.path('corr_metrics')}")
else:
logger.log(f"Starting postnet training with fresh metrics")
metrics = {k:{arch.tostr():[[] for _ in range(epochs)] for arch in archs} for k in metrics_keys}
decision_metrics = []
start_arch_idx = 0
train_start_time = time.time()
train_stats = [[] for _ in range(epochs*steps_per_epoch+1)]
for arch_idx, sampled_arch in tqdm(enumerate(archs[start_arch_idx:], start_arch_idx), desc="Iterating over sampled architectures", total = n_samples-start_arch_idx):
network2 = deepcopy(network)
network2.set_cal_mode('dynamic', sampled_arch)
if xargs.lr is not None and scheduler_type is None:
scheduler_type = "constant"
if scheduler_type in ['linear_warmup', 'linear']:
config = config._replace(scheduler=scheduler_type, warmup=1, eta_min=0)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type == "cos_reinit":
# In practice, this leads to constant LR = 0.025 since the original Cosine LR is annealed over 100 epochs and our training schedule is very short
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type in ['cos_adjusted']:
config = config._replace(scheduler='cos', warmup=0, epochs=epochs)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type in ['cos_fast']:
config = config._replace(scheduler='cos', warmup=0, LR=0.001 if xargs.lr is None else xargs.lr, epochs=epochs, eta_min=0)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type in ['cos_warmup']:
config = config._replace(scheduler='cos', warmup=1, LR=0.001 if xargs.lr is None else xargs.lr, epochs=epochs, eta_min=0)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif xargs.lr is not None and scheduler_type == 'constant':
config = config._replace(scheduler='constant', constant_lr=xargs.lr)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
else:
# NOTE in practice, since the Search function uses Cosine LR with T_max that finishes at end of search_func training, this switches to a constant 1e-3 LR.
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
w_optimizer2.load_state_dict(w_optimizer.state_dict())
w_scheduler2.load_state_dict(w_scheduler.state_dict())
if arch_idx == start_arch_idx: #Should only print it once at the start of training
logger.log(f"Optimizers for the supernet post-training: {w_optimizer2}, {w_scheduler2}")
    running_sotl = 0 # TODO implement better SOTL class to make it more adjustable and get rid of this repeated garbage everywhere
running_sovl = 0
running_sovalacc = 0
running_sotrainacc = 0
running_sovalacc_top5 = 0
running_sotrainacc_top5 = 0
_, val_acc_total, _ = valid_func(xloader=valid_loader, network=network2, criterion=criterion, algo=algo, logger=logger)
true_step = 0
arch_str = sampled_arch.tostr()
if steps_per_epoch is None or steps_per_epoch=="None":
steps_per_epoch = len(train_loader)
# q = mp.Queue()
# # This reporting process is necessary due to WANDB technical difficulties. It is used to continuously report train stats from a separate process
# # Otherwise, when a Run is intiated from a Sweep, it is not necessary to log the results to separate training runs. But that it is what we want for the individual arch stats
# p=mp.Process(target=train_stats_reporter, kwargs=dict(queue=q, config=vars(xargs),
# sweep_group=f"Search_Cell_{algo}_arch", sweep_run_name=wandb.run.name or wandb.run.id or "unknown", arch=sampled_arch.tostr()))
# p.start()
for epoch_idx in range(epochs):
if epoch_idx == 0:
metrics["total_val"][arch_str][epoch_idx] = [val_acc_total]*(len(train_loader)-1)
else:
metrics["total_val"][arch_str][epoch_idx] = [metrics["total_val"][arch_str][epoch_idx-1][-1]]*(len(train_loader)-1)
valid_loader_iter = iter(valid_loader) if not additional_training else None # This causes deterministic behavior for validation data since the iterator gets passed in to each function
for batch_idx, data in enumerate(train_loader):
if (steps_per_epoch is not None and steps_per_epoch != "None") and batch_idx > steps_per_epoch:
break
with torch.set_grad_enabled(mode=additional_training):
if scheduler_type in ["linear", "linear_warmup"]:
w_scheduler2.update(epoch_idx, 1.0 * batch_idx / min(len(train_loader), steps_per_epoch))
elif scheduler_type == "cos_adjusted":
w_scheduler2.update(epoch_idx , batch_idx/min(len(train_loader), steps_per_epoch))
elif scheduler_type == "cos_reinit":
w_scheduler2.update(epoch_idx, 0.0)
elif scheduler_type in ['cos_fast', 'cos_warmup']:
w_scheduler2.update(epoch_idx , batch_idx/min(len(train_loader), steps_per_epoch))
else:
w_scheduler2.update(None, 1.0 * batch_idx / len(train_loader))
network2.zero_grad()
inputs, targets = data
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
_, logits = network2(inputs)
train_acc_top1, train_acc_top5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
loss = criterion(logits, targets)
if additional_training:
loss.backward()
w_optimizer2.step()
true_step += 1
if batch_idx == 0 or (batch_idx % val_loss_freq == 0):
valid_acc, valid_acc_top5, valid_loss = calculate_valid_acc_single_arch(valid_loader=valid_loader, arch=sampled_arch, network=network2, criterion=criterion, valid_loader_iter=valid_loader_iter)
batch_train_stats = {"lr":w_scheduler2.get_lr()[0], "true_step":true_step, "train_loss":loss.item(), "train_acc_top1":train_acc_top1.item(), "train_acc_top5":train_acc_top5.item(),
"valid_loss":valid_loss, "valid_acc":valid_acc, "valid_acc_top5":valid_acc_top5}
# q.put(batch_train_stats)
train_stats[epoch_idx*steps_per_epoch+batch_idx].append(batch_train_stats)
running_sovl -= valid_loss
running_sovalacc += valid_acc
running_sovalacc_top5 += valid_acc_top5
running_sotl -= loss.item() # Need to have negative loss so that the ordering is consistent with val acc
running_sotrainacc += train_acc_top1.item()
running_sotrainacc_top5 += train_acc_top5.item()
metrics["sotl"][arch_str][epoch_idx].append(running_sotl)
metrics["val"][arch_str][epoch_idx].append(valid_acc)
metrics["sovl"][arch_str][epoch_idx].append(running_sovl)
metrics["sovalacc"][arch_str][epoch_idx].append(running_sovalacc)
metrics["sotrainacc"][arch_str][epoch_idx].append(running_sotrainacc)
metrics["sovalacc_top5"][arch_str][epoch_idx].append(running_sovalacc_top5)
metrics["sotrainacc_top5"][arch_str][epoch_idx].append(running_sotrainacc_top5)
metrics["train_losses"][arch_str][epoch_idx].append(-loss.item())
metrics["val_losses"][arch_str][epoch_idx].append(-valid_loss)
if additional_training:
_, val_acc_total, _ = valid_func(xloader=valid_loader, network=network2, criterion=criterion, algo=algo, logger=logger)
metrics["total_val"][arch_str][epoch_idx].append(val_acc_total)
final_metric = None # Those final/decision metrics are not very useful apart from being a compatibility layer with how get_best_arch worked in the base repo
if style == "sotl":
final_metric = running_sotl
elif style == "sovl":
final_metric = running_sovl
decision_metrics.append(final_metric)
corr_metrics_path = save_checkpoint({"corrs":{}, "metrics":metrics,
"archs":archs, "start_arch_idx": arch_idx+1, "config":vars(xargs), "decision_metrics":decision_metrics},
logger.path('corr_metrics'), logger, quiet=True)
# q.put("SENTINEL") # This lets the Reporter process know it should quit
train_total_time = time.time()-train_start_time
print(f"Train total time: {train_total_time}")
wandb.run.summary["train_total_time"] = train_total_time
original_metrics = deepcopy(metrics)
metrics_FD = {k+"FD": {arch.tostr():SumOfWhatever(measurements=metrics[k][arch.tostr()], e=1).get_time_series(chunked=True, mode="fd") for arch in archs} for k,v in metrics.items() if k in ['val', 'train_losses', 'val_losses']}
metrics.update(metrics_FD)
if epochs > 1:
interim = {} # We need an extra dict to avoid changing the dict's keys during iteration for the R metrics
for key in metrics.keys():
if key in ["train_losses", "train_lossesFD", "val_losses", "val"]:
interim[key+"R"] = {}
for arch in archs:
arr = []
for epoch_idx in range(len(metrics[key][arch.tostr()])):
epoch_arr = []
for batch_metric in metrics[key][arch.tostr()][epoch_idx]:
if key in ["train_losses", "train_lossesFD", "val_losses"]:
sign = -1
else:
sign = 1
epoch_arr.append(sign*batch_metric if epoch_idx == 0 else -1*sign*batch_metric)
arr.append(epoch_arr)
interim[key+"R"][arch.tostr()] = SumOfWhatever(measurements=arr, e=epochs+1, mode='last').get_time_series(chunked=True)
# interim[key+"R"][arch.tostr()] = SumOfWhatever(measurements=[[[batch_metric if epoch_idx == 0 else -batch_metric for batch_metric in batch_metrics] for batch_metrics in metrics[key][arch.tostr()][epoch_idx]]] for epoch_idx in range(len(metrics[key][arch.tostr()])), e=epochs+1).get_time_series(chunked=True)
# print(interim)
# print(metrics["train_lossesFD"])
# print(metrics["train_losses"])
metrics.update(interim)
metrics_E1 = {k+"E1": {arch.tostr():SumOfWhatever(measurements=metrics[k][arch.tostr()], e=1).get_time_series(chunked=True) for arch in archs} for k,v in metrics.items()}
metrics.update(metrics_E1)
else:
# We only calculate Sum-of-FD metrics in this case
metrics_E1 = {k+"E1": {arch.tostr():SumOfWhatever(measurements=metrics[k][arch.tostr()], e=1).get_time_series(chunked=True) for arch in archs} for k,v in metrics.items() if "FD" in k}
metrics.update(metrics_E1)
for key in metrics_FD.keys(): # Remove the pure FD metrics because they are useless anyways
metrics.pop(key, None)
start=time.time()
corrs = {}
to_logs = []
for k,v in tqdm(metrics.items(), desc="Calculating correlations"):
    # We cannot do logging synchronously with training because we need to know the results of all archs for the i-th epoch before we can log correlations for that epoch
corr, to_log = calc_corrs_after_dfs(epochs=epochs, xloader=train_loader, steps_per_epoch=steps_per_epoch, metrics_depth_dim=v,
final_accs = final_accs, archs=archs, true_rankings = true_rankings, corr_funs=corr_funs, prefix=k, api=api, wandb_log=False)
corrs["corrs_"+k] = corr
to_logs.append(to_log)
print(f"Calc corrs time: {time.time()-start}")
if n_samples-start_arch_idx > 0: #If there was training happening - might not be the case if we just loaded checkpoint
# We reshape the stored train statistics so that it is a Seq[Dict[k: summary statistics across all archs for a timestep]] instead of Seq[Seq[Dict[k: train stat for a single arch]]]
processed_train_stats = []
stats_keys = batch_train_stats.keys()
for idx, stats_across_time in tqdm(enumerate(train_stats), desc="Processing train stats"):
agg = {k: np.array([single_train_stats[k] for single_train_stats in stats_across_time]) for k in stats_keys}
agg = {k: {"mean":np.mean(v), "std": np.std(v)} for k,v in agg.items()}
agg["true_step"] = idx
processed_train_stats.append(agg)
for epoch_idx in range(len(to_logs[0])):
relevant_epochs = [to_logs[i][epoch_idx] for i in range(len(to_logs))]
for batch_idx in range(len(relevant_epochs[0])):
relevant_batches = [relevant_epoch[batch_idx] for relevant_epoch in relevant_epochs]
all_batch_data = {}
for batch in relevant_batches:
all_batch_data.update(batch)
# Here we log both the aggregated train statistics and the correlations
if n_samples-start_arch_idx > 0: #If there was training happening - might not be the case if we just loaded checkpoint
all_data_to_log = {**all_batch_data, **processed_train_stats[epoch_idx*steps_per_epoch+batch_idx]}
else:
all_data_to_log = all_batch_data
wandb.log(all_data_to_log)
if style in ["sotl", "sovl"] and n_samples-start_arch_idx > 0: # otherwise, we are just reloading the previous checkpoint so should not save again
corr_metrics_path = save_checkpoint({"metrics":original_metrics, "corrs": corrs,
"archs":archs, "start_arch_idx":arch_idx+1, "config":vars(xargs), "decision_metrics":decision_metrics},
logger.path('corr_metrics'), logger)
try:
wandb.save(str(corr_metrics_path.absolute()))
except:
print("Upload to WANDB failed")
best_idx = np.argmax(decision_metrics)
try:
best_arch, best_valid_acc = archs[best_idx], decision_metrics[best_idx]
except:
logger.log("Failed to get best arch via decision_metrics")
logger.log(f"Decision metrics: {decision_metrics}")
logger.log(f"Best idx: {best_idx}, length of archs: {len(archs)}")
best_arch,best_valid_acc = archs[0], decision_metrics[0]
return best_arch, best_valid_acc
|
the-stack_0_5562 | """
VHDL Mode for Sublime Text 3
This package attempts to recreate to some level of fidelity the features
in the vhdl-mode in Emacs.
"""
import os
import time
import re
import textwrap
import sublime
import sublime_plugin
#from threading import Thread
from . import vhdl_lang as vhdl
from . import vhdl_util as util
#-------------------------------------------------------------------------------
class vhdlModeVersionCommand(sublime_plugin.TextCommand):
"""
Prints the version to the console.
"""
def run(self, edit):
print("vhdl-mode: VHDL Mode Version 1.8.0")
#-------------------------------------------------------------------------------
class vhdlModeInsertHeaderCommand(sublime_plugin.TextCommand):
"""
This command is used to insert a predefined header into the
current text file.
"""
def run(self, edit):
# Assigning this to a string to keep command shorter later.
template = "Packages/VHDL Mode/Snippets/vhdl-header.sublime-snippet"
# Looking for a name, first the buffer name, then the file name,
# then finally a default value.
buffname = self.view.name()
longname = self.view.file_name()
if buffname:
filename = buffname
elif longname:
# Convert Windows slashes to Unix slashes (if any)
longname = re.sub(r'\\', '/', longname)
namechunks = longname.split('/')
filename = namechunks[len(namechunks)-1]
else:
filename = '<filename>'
# Get the other fields out of settings.
linesize = util.get_vhdl_setting(self, 'vhdl-line-length')
project = util.get_vhdl_setting(self, 'vhdl-project-name')
author = util.get_vhdl_setting(self, 'vhdl-user')
company = util.get_vhdl_setting(self, 'vhdl-company')
platform = util.get_vhdl_setting(self, 'vhdl-platform')
standard = util.get_vhdl_setting(self, 'vhdl-standard')
mtime_prefix = util.get_vhdl_setting(self, 'vhdl-modified-time-string')
use_copyright = util.get_vhdl_setting(self, 'vhdl-use-copyright-block')
use_revision = util.get_vhdl_setting(self, 'vhdl-use-revision-block')
copyright_list = util.get_vhdl_setting(self, 'vhdl-copyright-block')
revision_list = util.get_vhdl_setting(self, 'vhdl-revision-block')
# Set the string to dynamically replace the line field to the chosen
# line length.
linestr = '-'*linesize
# Get the current time and create the modified time string.
date = time.ctime(time.time())
year = time.strftime("%Y",time.localtime())
mod_time = mtime_prefix + date
# Create the copyright block and revision block. Both need
# prefixed newlines because they are optional and the
# snippet field is at the end of the preceding line.
if use_copyright:
copyright = '\n'.join(copyright_list)
copyright = re.sub(r'\${YEAR}', year, copyright)
copyright = re.sub(r'\${COMPANY}', company, copyright)
copyright = re.sub(r'\${LINE}', linestr, copyright)
copyright = '\n' + copyright
else:
copyright = ''
if use_revision:
revision = '\n'.join(revision_list)
revision = re.sub(r'\${LINE}', linestr, revision)
revision = '\n' + revision
else:
revision = ''
# Moving insertion point to the beginning of the file.
bof = self.view.text_point(0,0)
self.view.sel().clear()
self.view.sel().add(sublime.Region(bof))
self.view.show(bof)
# Inserting template/snippet
self.view.run_command("insert_snippet",
{
"name" : template,
"PROJECT" : project,
"FILENAME" : filename,
"AUTHOR" : author,
"COMPANY" : company,
"CDATE" : date,
"MODIFIED_TIME_STRING" : mod_time,
"MDATE" : date,
"YEAR" : year,
"PLATFORM" : platform,
"STANDARD" : standard,
"COPYRIGHT_BLOCK" : copyright,
"REVISION_BLOCK" : revision,
"LINE" : linestr
})
print('vhdl-mode: Inserted header template.')
#-------------------------------------------------------------------------------
class vhdlModeToggleCommentRegionCommand(sublime_plugin.TextCommand):
"""
The command analyzes the block delivered to the command
and attempts to find the leftmost point and uses that for
the location of the commenting characters so that it provides
an even margin and eases removal later.
If the starting line of the region begins with a comment,
the command attempts to remove the comment from that and
each subsequent line.
"""
def run(self, edit):
# This could theoretically run on multiple regions but
# it's not a likely application and I'm just going to
# worry about the first one for now.
region = self.view.sel()[0]
# The line method when applied to a region says it
# returns a new region that is blocked to the
# beginning of the line and the end of the line.
# Exactly what I want, so let's try it.
region = self.view.line(region)
block = self.view.substr(region)
lines = block.split('\n')
# Setting the value to an absurd value for
# comparison. Search for the first non-
# whitespace character to determine the
# left margin.
margin = 1000
for line in lines:
s = re.search(r'\S', line)
if s:
if s.start() < margin:
margin = s.start()
# Check for comment on first line. This
# determines if we're commenting or
# uncommenting.
comment = True
s = re.search(r'^\s*--', lines[0])
if s:
comment = False
# Process lines.
for index, line in enumerate(lines):
if comment:
lines[index] = lines[index][0:margin] + \
'--' + \
lines[index][margin:]
else:
# Assuming this is a commented block, we replace
# only the first comment designator. Weird things
# will happen if there are uncommented lines in the
                # block and there are also inline comments.
lines[index] = re.sub('--', '', lines[index], 1)
# Put together into big string
block = '\n'.join(lines)
#print(block)
# Replace the current region with the new region
self.view.replace(edit, region, block)
#-------------------------------------------------------------------------------
class vhdlModeBeautifyBufferCommand(sublime_plugin.TextCommand):
"""
This is a Sublime Text variation of the standalone beautify
code program. Sets the region to the entire buffer, obtains
the lines, then processes them and writes them back.
"""
def run(self, edit):
# Finding the current view and location of the point.
x, y = self.view.viewport_position()
row, col = self.view.rowcol(self.view.sel()[0].begin())
#print('vhdl-mode: x={}, y={}, row={}, col={}'.format(x, y, row, col))
# Create points for a region that define beginning and end.
begin = 0
end = self.view.size()-1
# Slurp up entire buffer and create CodeBlock object
whole_region = sublime.Region(begin, end)
buffer_str = self.view.substr(whole_region)
cb = vhdl.CodeBlock.from_block(buffer_str)
# Get the scope for each line. There's commented out code here for
# which scope to get first column of the line, and first character of
# the line. The first column seems to give the best results, though
# there are anomalies (like a when <choice> => followed by a line that
# uses => as a discrete member group assignment).
point = 0
scope_list = []
while not util.is_end_line(self, point):
#point = util.move_to_1st_char(self, point)
scope_list.append(self.view.scope_name(point))
#point = util.move_to_bol(self, point)
point = util.move_down(self, point)
scope_list.append(self.view.scope_name(point))
# Process the block of code. Prep pads symbols and removes extra
# spaces.
cb.prep()
cb.left_justify()
# Do the initial alignment after justification.
print('vhdl-mode: Pre-indent symbol alignment.')
cb.align_symbol(r':(?!=)', 'pre', scope_list)
cb.align_symbol(r':(?!=)\s?(?:in\b|out\b|inout\b|buffer\b)?\s*', 'post', scope_list)
cb.align_symbol(r'<(?==)|:(?==)', 'pre', scope_list)
cb.align_symbol(r'=>', 'pre', scope_list)
# Indent! Get some settings first.
use_spaces = util.get_vhdl_setting(self, 'translate_tabs_to_spaces')
tab_size = util.get_vhdl_setting(self, 'tab_size')
print('vhdl-mode: Indenting.')
cb.indent_vhdl(0, tab_size, use_spaces)
# Post indent alignment
print('vhdl-mode: Post-indent symbol alignment.')
cb.align_symbol(r'\bwhen\b', 'pre', scope_list)
print('vhdl-mode: Aligning comments.')
cb.align_comments(tab_size, use_spaces)
# Recombine into one big blobbed string.
buffer_str = cb.to_block()
# Annnd if all went well, write it back into the buffer
self.view.replace(edit, whole_region, buffer_str)
# New replacement routine that does not trigger Sublime's
# repainting mechanism that seems to be triggered by using
# self.view.replace()
#self.view.run_command("select_all")
#self.view.run_command("left_delete")
#self.view.run_command("append", {"characters": buffer_str})
# Restore the view.
original_point = self.view.text_point(row, col)
util.set_cursor(self, original_point)
# Trying out another method for handling the viewport. You can have
# a zero value for set_timeout() delay so this executes after the
# command exits.
restore = lambda: self.view.set_viewport_position((x, y), False)
sublime.set_timeout(restore, 0)
#self.view.set_viewport_position((x, y), False)
#-------------------------------------------------------------------------------
class vhdlModeUpdateLastUpdatedCommand(sublime_plugin.TextCommand):
"""
Finds the last updated field in the header and updates the time
in the field.
"""
def run(self, edit):
"""Sublime Text plugin run method."""
# Note, if one changes the header, this might need to change too.
pattern = util.get_vhdl_setting(self, 'vhdl-modified-time-string')
region = self.view.find(pattern, 0)
#print('Region Diagnostics')
#print('------------------')
#print('Begin: {}'.format(region.begin()))
#print('End: {}'.format(region.end()))
#print('Empty? {}'.format(region.empty()))
if not region.empty():
region = self.view.line(region)
date = time.ctime(time.time())
new_mtime = pattern + '{}'.format(date)
self.view.replace(edit, region, new_mtime)
print('vhdl-mode: Updated last modified time.')
else:
print('vhdl-mode: No last modified time field found.')
#-------------------------------------------------------------------------------
class vhdlModeUpdateModifiedTimeOnSave(sublime_plugin.EventListener):
"""
Watches for a save event and updates the Last update
field in the header.
"""
def on_pre_save(self, view):
"""
Gets passed the view that is being saved and scans for the
Last updated field.
"""
# MUST CHECK FOR VHDL FILE TYPE (otherwise it
# starts executing on this very source file which
# is problematic!)
if util.is_vhdl_file(view.scope_name(0)):
view.run_command("vhdl_mode_update_last_updated")
#-------------------------------------------------------------------------------
class vhdlModeScopeSnifferCommand(sublime_plugin.TextCommand):
"""
My own scope sniffing command that prints to
console instead of a popup window.
"""
def run(self, edit):
"""ST3 Run Method"""
region = self.view.sel()[0]
sniff_point = region.begin()
print('vhdl-mode: {}'.format(self.view.scope_name(sniff_point)))
#-------------------------------------------------------------------------------
class vhdlModeInsertCommentLine(sublime_plugin.TextCommand):
"""
This should insert a line out to the margin (80 characters)
starting where the point is. This is intended to run after
the user types '---' (see keybindings)
"""
def run(self, edit):
"""Standard TextCommand Run method"""
# Get the current point.
region = self.view.sel()[0]
original_point = region.begin()
point_r, point_c = self.view.rowcol(original_point)
# Figure out if any tab characters were used.
line = self.view.substr(self.view.line(original_point))
numtabs = line.count('\t')
# Get the current tab size and line length.
tabsize = util.get_vhdl_setting(self, 'tab_size')
linesize = util.get_vhdl_setting(self, 'vhdl-line-length')
        # Create a string of the correct number of dashes. A tab counts as one
        # character in the column index but displays as tabsize characters, so
        # subtract an extra tabsize-1 per tab.
numdash = linesize-point_c-(tabsize-1)*numtabs
if numdash <= 2:
print('vhdl-mode: Warning: Line length setting violation. Setting number of dashes to 2.')
numdash = 2
line = '-'*numdash
num_chars = self.view.insert(edit, original_point, line)
print('vhdl-mode: Inserted comment line.')
#-------------------------------------------------------------------------------
class vhdlModeInsertCommentBox(sublime_plugin.TextCommand):
"""
This should insert a box out to the margin (80 characters)
starting where the point is, and taking into account tabs.
This is intended to run after the user types '----' (see
keybindings)
"""
def run(self, edit):
"""Standard TextCommand Run method"""
# Get the current point.
region = self.view.sel()[0]
original_point = region.begin()
point_r, point_c = self.view.rowcol(original_point)
# Figure out if any tab characters were used.
line = self.view.substr(self.view.line(original_point))
numtabs = line.count('\t')
# Get the current tab size
tabsize = util.get_vhdl_setting(self, 'tab_size')
linesize = util.get_vhdl_setting(self, 'vhdl-line-length')
        # Create a string of the correct number of dashes. A tab counts as one
        # character in the column index but displays as tabsize characters, so
        # subtract an extra tabsize-1 per tab.
numdash = linesize-point_c-(tabsize-1)*numtabs
if numdash <= 2:
print('vhdl-mode: Warning: Line length setting violation. Setting number of dashes to 2.')
numdash = 2
line = '-'*numdash
# Create snippet object.
snippet = line + '\n' + '-- $0' + '\n' + line + '\n'
# Inserting template/snippet
self.view.run_command("insert_snippet",
{
"contents" : snippet
})
print('vhdl-mode: Inserted comment box.')
#-------------------------------------------------------------------------------
class vhdlModeSettingSniffer(sublime_plugin.TextCommand):
'''
Creating a command to check settings in various
contexts
'''
def run(self, edit):
'''
Standard TextCommand Run Method
'''
print('Preference Settings')
print('vhdl-mode: {}: {}'.format('tab_size', util.get_vhdl_setting(self, 'tab_size')))
print('vhdl-mode: {}: {}'.format('translate_tabs_to_spaces', util.get_vhdl_setting(self, 'translate_tabs_to_spaces')))
vhdl_settings = sublime.load_settings('vhdl_mode.sublime-settings')
keys = ['vhdl-line-length',
'vhdl-user',
'vhdl-company',
'vhdl-project-name',
'vhdl-platform',
'vhdl-standard',
'vhdl-modified-time-string',
'vhdl-use-copyright-block',
'vhdl-use-revision-block',
'vhdl-copyright-block',
'vhdl-revision-block']
print('Package Settings')
for key in keys:
print('vhdl-mode: {}: "{}"'.format(key, vhdl_settings.get(key)))
print('View Settings')
for key in keys:
print('vhdl-mode: {}: {}'.format(key, util.get_vhdl_setting(self, key)))
#-------------------------------------------------------------------------------
class vhdlModeViewportSniffer(sublime_plugin.TextCommand):
def run(self, edit):
x, y = self.view.viewport_position()
print('vhdl-mode: Viewport X: {} Y: {}'.format(x,y))
#self.view.set_viewport_position((0, y), False)
|
the-stack_0_5563 | """
Data structures required for our testing.
"""
import os
import shutil
from wsgi_intercept import httplib2_intercept
import wsgi_intercept
from tiddlyweb.web.serve import load_app
from tiddlyweb.model.collections import Tiddlers
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.model.recipe import Recipe
from tiddlyweb.config import config
from tiddlyweb.store import Store
config['server_host'] = {
'scheme': 'http',
'host': 'our_test_domain',
'port': '8001',
}
def initialize_app():
app = load_app()
def app_fn():
return app
httplib2_intercept.install()
wsgi_intercept.add_wsgi_intercept('our_test_domain', 8001, app_fn)
TiddlerOne = Tiddler('TiddlerOne')
TiddlerOne.modifier = 'AuthorOne'
TiddlerOne.text = u'c tiddler one content'
TiddlerOne.tags = ['tagone', 'tagtwo']
TiddlerTwo = Tiddler('TiddlerTwo')
TiddlerTwo.modifier = u'AuthorTwo'
TiddlerTwo.text = u'b tiddler two content'
TiddlerThree = Tiddler('TiddlerThree')
TiddlerThree.modifier = u'AuthorThree'
TiddlerThree.text = u'a tiddler three content'
TiddlerThree.tags = [u'tagone', u'tagthree']
tiddlers = [TiddlerOne, TiddlerTwo, TiddlerThree]
bagone = Bag(name='bagone')
container = Tiddlers()
container.add(tiddlers[0])
bagone.tiddlers = container
bagtwo = Bag(name='bagtwo')
container = Tiddlers()
container.add(tiddlers[1])
bagtwo.tiddlers = container
bagthree = Bag(name='bagthree')
container = Tiddlers()
container.add(tiddlers[2])
bagthree.tiddlers = container
bagfour = Bag(name='bagfour')
container = Tiddlers()
for tiddler in tiddlers:
container.add(tiddler)
bagfour.tiddlers = container
tiddler_collection = Tiddlers()
for tiddler in tiddlers:
tiddler.bag = u'bagfour'
tiddler_collection.add(tiddler)
recipe_list = [
(bagone, u'select=title:TiddlerOne'),
(bagtwo, u'select=title:TiddlerTwo'),
(bagthree, u'select=tag:tagone;select=tag:tagthree')
]
recipe_list_string = [
[u'bagone', u'select=title:TiddlerOne'],
[u'bagtwo', u'select=title:TiddlerTwo'],
[u'bagthree', u'select=tag:tagone;select=tag:tagthree']
]
def _teststore():
return Store(config['server_store'][0], config['server_store'][1],
environ={'tiddlyweb.config': config})
def reset_textstore():
if os.path.exists('store'):
shutil.rmtree('store')
def muchdata(store):
for bag_numeral in range(30):
bag = create_bag(store, bag_numeral)
for tiddler_numeral in range(10):
tiddler = create_tiddler(store, bag, tiddler_numeral)
recipe = Recipe('long')
recipe_list = [(u'bag1', '')]
for numeral in range(0, 30, 2):
bag_name = u'bag%s' % numeral
filter_string = u'select=title:tiddler%s' % (numeral % 10)
if not (numeral % 10) % 3:
filter_string = filter_string + u';select=tag:tag three'
recipe_list.append([bag_name, filter_string])
recipe.set_recipe(recipe_list)
store.put(recipe)
def create_tiddler(store, bag, numeral):
tiddler = Tiddler('tiddler%s' % numeral)
tiddler.bag = bag.name
tiddler.text = u'i am tiddler %s' % numeral
tags = [u'basic tag']
if not numeral % 2:
tags.append(u'tagtwo')
if not numeral % 3:
tags.append(u'tagthree')
if not numeral % 4:
tags.append(u'tagfour')
tiddler.tags = tags
if tiddler.title == 'tiddler8':
tiddler.modified = '200805230303'
store.put(tiddler)
def create_bag(store, numeral):
bag = Bag('bag%s' % numeral)
store.put(bag)
return bag
|
the-stack_0_5564 | #!/Users/marc/miniconda3/bin/python3
import math
import numpy as np
def sphere_vertices( n ):
phistep = math.pi / n
thetastep = 2*math.pi / n
vertices = []
for i in range( n+1 ):
phi = - math.pi/2 + i * phistep
if i == 0:
tb = 'bottom'
elif i==n:
tb = 'top'
else:
tb = False
for j in range( n ):
theta = j * thetastep
face = sphere_face( phi, theta, phi+phistep, theta+thetastep, tb )
vertices.extend( face )
#vertices = [item for sublist in vertices for item in sublist]
return np.array( vertices, dtype=np.float32)
def sphere_face( phi0, theta0, phi1, theta1, tb ):
x0 = .5*math.cos(theta0) * math.cos(phi0)
x1 = .5*math.cos(theta0) * math.cos(phi1)
x2 = .5*math.cos(theta1) * math.cos(phi1)
x3 = .5*math.cos(theta1) * math.cos(phi0)
y0 = .5*math.sin(theta0) * math.cos(phi0)
y1 = .5*math.sin(theta0) * math.cos(phi1)
y2 = .5*math.sin(theta1) * math.cos(phi1)
y3 = .5*math.sin(theta1) * math.cos(phi0)
z0 = .5*math.sin(phi0)
z1 = .5*math.sin(phi1)
if tb == 'bottom':
return [ x0,y0,z0, theta0/(2*math.pi), (phi0+math.pi/2)/math.pi,
x1,y1,z1, theta0/(2*math.pi), (phi1+math.pi/2)/math.pi,
x2,y2,z1, theta1/(2*math.pi), (phi1+math.pi/2)/math.pi, ]
elif tb == 'top':
return [ x0,y0,z0, theta0/(2*math.pi), (phi0+math.pi/2)/math.pi,
x3,y3,z0, theta1/(2*math.pi), (phi0+math.pi/2)/math.pi,
x2,y2,z1, theta1/(2*math.pi), (phi1+math.pi/2)/math.pi ]
else:
return [x0,y0,z0, theta0/(2*math.pi), (phi0+math.pi/2)/math.pi,
x1,y1,z1, theta0/(2*math.pi), (phi1+math.pi/2)/math.pi,
x2,y2,z1, theta1/(2*math.pi), (phi1+math.pi/2)/math.pi,
x0,y0,z0, theta0/(2*math.pi), (phi0+math.pi/2)/math.pi,
x3,y3,z0, theta1/(2*math.pi), (phi0+math.pi/2)/math.pi,
x2,y2,z1, theta1/(2*math.pi), (phi1+math.pi/2)/math.pi ]
if __name__ == "__main__":
sphere = sphere_vertices( 3 )
np.set_printoptions(precision=3, suppress=True, linewidth=110)
print(sphere)
print("Faces: ", len(sphere)/3)
|
the-stack_0_5565 | #!/usr/bin/env python
import os
import subprocess
import re
import time
import json
from charmhelpers.core import hookenv
from charmhelpers.core.host import get_nic_mtu, service_start, service_running
from charmhelpers.fetch import apt_install
class Lldp():
lldp_out = '/home/ubuntu/lldp_output.json'
enabled = False
parsed_data = None
def install(self):
apt_install("lldpd")
def disable_i40e_lldp_agent(self):
path = '/sys/kernel/debug/i40e'
if os.path.isdir(path):
hookenv.log('Disabling NIC internal LLDP agent','INFO')
for r,dirs,files in os.walk(path):
for d in dirs:
with open("{}/{}/command".format(path,d),"w") as fh:
fh.write('lldp stop')
def enable(self):
if not service_running('lldpd'):
self.disable_i40e_lldp_agent()
service_start('lldpd')
hookenv.log('Waiting to collect LLDP data','INFO')
time.sleep(30)
            self.enabled = True
def collect_data(self):
cmd = "lldpcli show neighbors details -f json | tee " + self.lldp_out
os.system(cmd)
def data(self):
if not self.parsed_data:
with open(self.lldp_out, 'r') as f:
self.parsed_data = json.load(f)
return self.parsed_data
def get_interface(self,iface):
for i in self.data()['lldp']['interface']:
if iface in i:
return i[iface]
return None
def get_interface_vlan(self,iface):
try:
return self.get_interface(iface)['vlan']['vlan-id']
except (KeyError,TypeError):
hookenv.log('No LLDP data for {}'.format(iface),'INFO')
return None
def get_interface_port_descr(self,iface):
try:
return self.get_interface(iface)['port']['descr']
except (KeyError,TypeError):
hookenv.log('No LLDP data for {}'.format(iface),'INFO')
return None
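# Usage sketch (assumes lldpd can be installed on the unit; the interface name
# below is illustrative):
#   lldp = Lldp()
#   lldp.install()
#   lldp.enable()          # also disables the i40e NIC-internal LLDP agent if present
#   lldp.collect_data()
#   vlan = lldp.get_interface_vlan('eth0')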
class Iperf():
"""
Install and start a server automatically
"""
iperf_out = '/home/ubuntu/iperf_output.txt'
def install_iperf(self):
apt_install("iperf")
def listen(self):
cmd = "iperf -s -m -fm | tee " + self.iperf_out + " &"
os.system(cmd)
def mtu(self):
with open(self.iperf_out) as f:
for line in f.readlines():
if "MTU" in line:
match = line
try:
return match.split('MTU', 4)[1].split(' ')[1]
except UnboundLocalError:
return "no iperf test results: failed"
def speed(self):
with open(self.iperf_out) as f:
for line in f.readlines():
if "bits" in line:
match = line
try:
return match.rsplit(' ', 2)[1]
except UnboundLocalError:
return "no iperf test results: failed"
def selfcheck(self):
subprocess.check_output(["iperf", "-c", "localhost", "-t", "1"])
def hostcheck(self, nodes):
# Wait for other nodes to start their servers...
for node in nodes:
msg = "checking iperf on {}".format(node[1])
hookenv.log(msg)
cmd = "iperf -t1 -c {}".format(node[1])
os.system(cmd)
def safe_status(workload, status):
cfg = hookenv.config()
if not cfg.get('supress_status'):
hookenv.status_set(workload, status)
def ping(input, ping_time, ping_tries):
ping_string = "ping -c {} -w {} {} > /dev/null 2>&1"\
.format(ping_tries, ping_time, input)
hookenv.log('Ping command: {}'.format(ping_string), 'DEBUG')
response = os.system(ping_string)
if response == 0:
return 0
else:
return 1
def check_local_hostname():
local_hostname = subprocess.check_output('hostname', shell=True)\
.decode('utf-8').rstrip()
lookup_cmd = "getent hosts {}".format(local_hostname)
hookenv.log('Looking up local hostname: {}'.format(local_hostname))
try:
result = subprocess.check_output(lookup_cmd, shell=True)\
.decode('utf-8').rstrip()
result = ''
stderr = 0
except subprocess.CalledProcessError as exc:
result = local_hostname
stderr = exc.returncode
return result, stderr
def check_local_mtu(required_mtu, iface_mtu):
if required_mtu == 0:
return 0
elif 0 <= (int(iface_mtu) - int(required_mtu)) <= 12:
return 100
else:
return 200
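# Return-code convention shared with check_min_speed below: 0 means the check is
# disabled (threshold set to 0), 100 means pass, 200 means fail. For example:
#   check_local_mtu(9000, 9000) -> 100
#   check_local_mtu(9000, 1500) -> 200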
def check_min_speed(min_speed, iperf_speed):
if min_speed == 0:
return 0
elif min_speed <= iperf_speed:
return 100
elif min_speed > iperf_speed:
return 200
def check_port_description(lldp):
iface_dir = "/sys/class/net"
status=None
local_hostname = subprocess.check_output('hostname', shell=True)\
.decode('utf-8').rstrip()
for r,dirs,files in os.walk(iface_dir):
for d in dirs:
if d == 'lo':
continue
if d.startswith('vnet'):
continue
if d.startswith('veth'):
continue
if check_iface_type(d) == 'eth':
if not check_iface_down(d):
desc = lldp.get_interface_port_descr(d)
hookenv.log("Port {} description {}".format(d,desc),
'INFO')
if desc:
if not re.search(local_hostname,desc):
if status:
status="{} {}:{}"\
.format(status,d,desc)
else:
status="{}:{}".format(d,desc)
if status:
return "ports failed: {}".format(status)
else:
return "ports ok"
def check_iface_type(iface):
iface_dir = "/sys/class/net/{}".format(iface)
with open("{}/uevent".format(iface_dir)) as fos:
content = fos.read()
if re.search('DEVTYPE', content):
return "complex"
return 'eth'
def check_iface_down(iface):
iface_dir = "/sys/class/net/{}".format(iface)
with open("{}/operstate".format(iface_dir)) as fos:
content = fos.read()
if not re.search('up', content):
return "down"
with open("{}/carrier".format(iface_dir)) as fos:
content = fos.read()
if not re.search('1', content):
return "down"
return None
def check_bond(bond,lldp=None):
bond_path = "/sys/class/net/{}".format(bond)
if not os.path.isdir( bond_path ):
return "missing"
if check_iface_down(bond):
return "down"
with open("{}/bonding/slaves".format(bond_path)) as fos:
content = fos.read()
vlan=None
for slave in content.split():
if check_iface_down(slave):
return "{} down".format(slave)
if lldp:
if vlan:
if not vlan == lldp.get_interface_vlan(slave):
return "vlan mismatch"
else:
vlan = lldp.get_interface_vlan(slave)
return None
def check_bonds(bonds,lldp=None):
bonds_status=None
for bond in [b.strip() for b in bonds.split(',')]:
bond_status = check_bond(bond,lldp)
if bond_status:
if bonds_status:
bonds_status="{} {}:{}".format(bonds_status,bond,bond_status)
else:
bonds_status="{}:{}".format(bond,bond_status)
if bonds_status:
return "bonds failed: {}".format(bonds_status)
else:
return "bonds ok"
def check_nodes(nodes, iperf_client=False):
cfg = hookenv.config()
local_ip = hookenv.unit_private_ip()
iface_lines = subprocess.check_output(["ip", "route", "show", "to", "match", local_ip]).decode()
iface_lines = iface_lines.split('\n')
for line in iface_lines:
if re.match('.* via .*', line) is None:
break
primary_iface = str(line).split('dev')[1].split(' ')[1]
iface_mtu = get_nic_mtu(primary_iface)
required_mtu = cfg.get('required_mtu')
min_speed = cfg.get('min_speed')
msg = "MTU for iface: {} is {}".format(primary_iface, iface_mtu)
hookenv.log(msg, 'INFO')
#if required_mtu != 0 and not 0 <= (int(iface_mtu) - int(required_mtu)) <= 12:
# iperf_status = ", local mtu check failed, required_mtu: {}, iface mtu: {}".format(required_mtu, iface_mtu)
#elif required_mtu == 0 or 0 <= (int(iface_mtu) - int(required_mtu)) <= 12:
port_status=""
lldp = None
if cfg.get('use_lldp'):
lldp = Lldp()
lldp.enable()
lldp.collect_data()
if cfg.get('check_port_description'):
port_status = "{}, ".format(check_port_description(lldp))
    cfg_check_bonds = cfg.get('check_bonds')
bond_status=""
if cfg_check_bonds:
bond_status = "{}, ".format(check_bonds(cfg_check_bonds,lldp))
cfg_check_iperf = cfg.get('check_iperf')
if cfg_check_iperf:
hookenv.log("Running iperf test", 'INFO')
if not iperf_client:
iperf = Iperf()
mtu = iperf.mtu()
speed = iperf.speed()
# Make space for 8 or 12 byte variable overhead (TCP options)
if "failed" not in mtu:
if 0 <= (int(iface_mtu) - int(mtu)) <= 12:
iperf_status = ", net mtu ok: {}".format(iface_mtu)
else:
iperf_status = ", net mtu failed, mismatch: {} packet vs {} on iface {}".format(
mtu, iface_mtu, primary_iface)
else:
iperf_status = ", network mtu check failed"
if "failed" not in speed:
if check_min_speed(min_speed, float(speed)) == 0:
iperf_status = iperf_status + ", {} mbit/s".format(speed)
if check_min_speed(min_speed, float(speed)) == 100:
iperf_status = iperf_status + ", speed ok: {} mbit/s".format(speed)
if check_min_speed(min_speed, float(speed)) == 200:
iperf_status = iperf_status + ", speed failed: {} < {} mbit/s".format(speed, str(min_speed))
else:
iperf_status = iperf_status + ", iperf speed check failed"
elif iperf_client:
iperf_status = ", iperf leader, mtu: {}".format(iface_mtu)
iperf = Iperf()
iperf.hostcheck(nodes)
else:
iperf_status = ""
if check_local_mtu(required_mtu, iface_mtu) == 100:
iperf_status = iperf_status + ", local mtu ok, required: {}".format(required_mtu)
elif check_local_mtu(required_mtu, iface_mtu) == 200:
iperf_status = iperf_status + ", local mtu failed, required: {}, iface: {}".format(required_mtu, iface_mtu)
hookenv.log('doing other things after iperf', 'INFO')
cfg_check_local_hostname = cfg.get('check_local_hostname')
if cfg_check_local_hostname:
no_hostname = check_local_hostname()
if no_hostname[0] == '':
no_hostname = ', local hostname ok'
hookenv.log('Local hostname lookup OK: {}'.format(
str(no_hostname)), 'INFO')
else:
no_hostname = ', local hostname failed'
hookenv.log('Local hostname lookup FAILED: {}'.format(
str(no_hostname)), 'ERROR')
no_ping = check_ping(nodes)
cfg_check_dns = cfg.get('check_dns')
if cfg_check_dns:
no_dns = check_dns(nodes)
hookenv.log("Units with DNS problems: " + str(no_dns))
try:
dns_status
except NameError:
dns_status = ''
else:
dns_status = ''
no_dns = ([], [], [])
try:
dns_status
except NameError:
dns_status = ''
if not no_ping:
no_ping = 'icmp ok'
else:
no_ping = 'icmp failed: ' + str(no_ping)
if no_dns == ([], [], []):
dns_status = ', dns ok'
else:
no_rev = no_dns[0]
no_fwd = no_dns[1]
no_match = no_dns[2]
if no_match != []:
dns_status = ', match dns failed: ' + str(no_match)
else:
if no_rev:
no_rev = ', rev dns failed: ' + str(no_rev)
if no_fwd:
no_fwd = ', fwd dns failed: ' + str(no_fwd)
if no_rev == []:
no_rev = ''
if no_fwd == []:
no_fwd = ''
dns_status = '{}{}{}'\
.format(dns_status, str(no_rev), str(no_fwd))
if cfg_check_local_hostname:
check_status = '{}{}{}{}{}{}'.format(
port_status,bond_status,no_ping,
str(no_hostname), str(dns_status), str(iperf_status))
else:
check_status = '{}{}{}{}{}'.format(
port_status,bond_status,no_ping,
str(dns_status), str(iperf_status))
if 'failed' in check_status:
workload = 'blocked'
else:
workload = 'active'
safe_status(workload, check_status)
reactive_state = {'icmp': no_ping, 'dns': dns_status}
return reactive_state
def check_ping(nodes):
cfg = hookenv.config()
ping_time = cfg.get('ping_timeout')
ping_tries = cfg.get('ping_tries')
try:
unreachable
except NameError:
unreachable = []
for node in nodes:
unit_id = node[0].split('/')[1]
hookenv.log('Pinging unit_id: ' + str(unit_id), 'INFO')
if ping(node[1], ping_time, ping_tries) == 1:
hookenv.log('Ping FAILED for unit_id: ' + str(unit_id), 'ERROR')
if unit_id not in unreachable:
unreachable.append(unit_id)
else:
hookenv.log('Ping OK for unit_id: ' + str(unit_id), 'INFO')
if unit_id in unreachable:
unreachable.remove(unit_id)
return unreachable
def check_dns(nodes):
cfg = hookenv.config()
dns_server = cfg.get('dns_server')
dns_tries = cfg.get('dns_tries')
dns_time = cfg.get('dns_time')
try:
norev
except NameError:
norev = []
try:
nofwd
except NameError:
nofwd = []
try:
nomatch
except NameError:
nomatch = []
hookenv.log("DNS (ALL NODES): {}".format(nodes))
for node in nodes:
ip = node[1]
if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
hookenv.log("private-address appears to be a hostname: {},"
" attempting forward lookup...", 'WARN')
ip = forward_dns(ip, dns_server, dns_tries, dns_time)[0]
else:
hookenv.log('private-address appears to be an IP', 'INFO')
unit_id = node[0].split('/')[1]
hookenv.log("Reverse lookup for ip: {}, node: {},"
" unit_id: {}".format(ip, node[0], unit_id), 'INFO')
reverse, r_stderr = reverse_dns(ip, dns_server, dns_tries, dns_time)
hookenv.log("Reverse result for unit_id: {}, hostname: {},"
" exitcode: {}".format(unit_id, str(reverse),
str(r_stderr)))
if r_stderr:
hookenv.log("Reverse FAILED for"
" unit_id: {}".format(unit_id), 'ERROR')
if unit_id not in norev:
norev.append(unit_id)
continue
else:
hookenv.log("Reverse OK for unit_id: {}".format(unit_id), 'INFO')
if unit_id in norev:
norev.remove(unit_id)
hookenv.log("Forward lookup for hostname: {}, node: {},"
" unit_id: {}".format(str(reverse), node[0], unit_id),
'INFO')
for rev in reverse.split():
forward, f_stderr = forward_dns(rev, dns_server,
dns_tries, dns_time)
hookenv.log("Forward result for unit_id: {}, ip: {},"
" exitcode: {}".format(unit_id, forward,
str(f_stderr)))
if f_stderr:
hookenv.log("Forward FAILED for"
" unit_id: {}".format(unit_id), 'ERROR')
if unit_id not in nofwd:
nofwd.append(unit_id)
else:
hookenv.log("Forward OK for"
" unit_id: {}".format(unit_id), 'INFO')
if unit_id in nofwd:
nofwd.remove(unit_id)
if ip != forward:
if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$",
forward):
forward = "Can not resolve hostname to IP {}"\
.format(repr(forward))
hookenv.log("Original IP and Forward MATCH FAILED for"
" unit_id: {}, Original: {}, Forward: {}"
.format(unit_id, ip, forward), 'ERROR')
if unit_id not in nomatch:
nomatch.append(unit_id)
else:
hookenv.log("Original IP and Forward MATCH OK for unit_id:"
" {}, Original: {}, Forward: {}"
.format(unit_id, ip, forward),
'INFO')
if unit_id in nomatch:
nomatch.remove(unit_id)
break
return norev, nofwd, nomatch
def reverse_dns(input, dns_server, tries, timeout):
cmd = '/usr/bin/dig -x ' + input + ' +short +tries={} +time={}'\
.format(tries, timeout)
if dns_server:
cmd = '{} @{}'.format(cmd, dns_server)
hookenv.log('DNS Reverse command: {}'.format(cmd), 'DEBUG')
try:
result = subprocess.check_output(cmd, shell=True)\
.decode('utf-8').rstrip()
stderr = 0
except subprocess.CalledProcessError as exc:
result = "Reverse DNS lookup error: " + str(exc.output)
stderr = exc.returncode
if result == '':
result = 'No reverse response'
stderr = 1
return result, stderr
def forward_dns(input, dns_server, tries, timeout):
cmd = '/usr/bin/dig ' + input + ' +short +tries={} +time={}'\
.format(tries, timeout)
if dns_server:
cmd = '{} @{}'.format(cmd, dns_server)
hookenv.log('DNS Forward command: {}'.format(cmd), 'DEBUG')
try:
result = subprocess.check_output(cmd, shell=True)\
.decode('utf-8').rstrip()
stderr = 0
except subprocess.CalledProcessError as exc:
result = "Forward DNS lookup error: " + str(exc.output)
stderr = exc.returncode
if result == '':
result = 'No forward response'
stderr = 1
return result, stderr
|
the-stack_0_5566 | # -*- coding: utf-8 -*-
import pytest
import datetime
from web.processors.event import create_or_update_event
@pytest.mark.django_db
def test_unknown_URL(db, client):
response = client.get('/bar-foo/')
assert response.status_code == 404
@pytest.mark.django_db
def test_country_redirect(db, client):
# Test without a slash in the end
response = client.get('/AB')
assert response.status_code == 301
assert response['Location'][-5:] == '/#!AB'
# and with one
response = client.get('/AB/')
assert response.status_code == 301
assert response['Location'][-5:] == '/#!AB'
@pytest.mark.django_db
def test_view_just_id(admin_user, db, client):
event_data = {
'audience': [3],
'theme': [1, 2],
'contact_person': u'[email protected]',
'country': u'SI',
'description': u'Lorem ipsum dolor sit amet',
'event_url': u'',
'location': u'Ljubljana, Slovenia',
'organizer': u'CodeCatz test',
"creator": admin_user,
'start_date': datetime.datetime.now(),
'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
'tags': [u'css', u'html', u'web'],
'title': u'Redirect Test',
}
test_event = create_or_update_event(event_id=None, **event_data)
# Test without a slash in the end
response = client.get('/view/1')
assert response.status_code == 301
# Test with a slash in the end
response = client.get('/view/1/')
assert response.status_code == 302
|
the-stack_0_5567 | import json
import os
from typing import Union
from pathlib import Path
from jsonschema import RefResolver, Draft7Validator
from aqt import mw
from aqt.qt import QWidget, QLabel, Qt
from ...lib.config import serialize_setting, deserialize_setting
# serialize_script / deserialize_script are used further below; they are
# assumed to be exported from the same config helper module.
from ...lib.config import serialize_script, deserialize_script
from ...lib.config_types import TWConcrScript, TWMetaScript
from ...lib.registrar import get_interface
from ..tw_script_tab_ui import Ui_TWWrapTab
from .tw_setting_add_replace import TWSettingAddReplace
from .tw_script_config import TWScriptConfig
from .util import mapTruthValueToIcon
class TWScriptTab(QWidget):
def __init__(self, main):
super().__init__()
self.ui = Ui_TWWrapTab()
self.ui.setupUi(self)
self.ui.addPushButton.clicked.connect(self.addScript)
self.ui.deletePushButton.clicked.connect(self.deleteScript)
self.ui.downPushButton.clicked.connect(self.moveDown)
self.ui.upPushButton.clicked.connect(self.moveUp)
self.ui.importButton.clicked.connect(self.importDialog)
self.ui.scriptsTable.currentCellChanged.connect(self.updateButtonsForCurrentCell)
self.ui.scriptsTable.cellDoubleClicked.connect(self.editScript)
self.ui.scriptsTable.setColumnWidth(1, 75)
self.ui.scriptsTable.setColumnWidth(2, 55)
self.ui.scriptsTable.setColumnWidth(3, 55)
def setupUi(self, setting):
self.modelName = setting.model_name
self.ui.enableCheckBox.setChecked(setting.enabled),
self.ui.insertStubCheckBox.setChecked(setting.insert_stub),
self.scr = setting.scripts
self.drawScripts()
self.updateButtons(False)
def drawScripts(self):
self.ui.scriptsTable.clearContents()
self.ui.scriptsTable.setRowCount(len(self.scr))
headerLabels = []
for idx, scr in enumerate(self.scr):
headerLabels.append(f'Script {idx}')
if isinstance(scr, TWConcrScript):
self.setRowMod(
idx,
scr.name,
scr.version,
mapTruthValueToIcon(scr.enabled),
mapTruthValueToIcon(False),
json.dumps(scr.conditions),
)
else:
iface = get_interface(scr.tag)
script = iface.getter(scr.id, scr.storage)
self.setRowMod(
idx,
script.name,
script.version,
mapTruthValueToIcon(script.enabled),
mapTruthValueToIcon(True),
json.dumps(script.conditions),
)
self.ui.scriptsTable.setVerticalHeaderLabels(headerLabels)
def setRowMod(self, row, *args):
for i, text in enumerate(args):
label = QLabel()
label.setText(text)
label.setAlignment(Qt.AlignCenter)
self.ui.scriptsTable.setCellWidget(row, i, label)
def editScript(self, row, column):
def saveScript(newScript):
self.scr[row] = newScript
self.drawScripts()
a = TWScriptConfig(mw, self.modelName, saveScript)
a.setupUi(self.scr[row])
a.exec_()
###########
def updateButtonsForCurrentCell(self, currentRow, currentColumn, previousRow, previousColumn):
self.updateButtons(currentRow != -1)
def updateButtons(self, state=True):
self.ui.deletePushButton.setEnabled(state)
self.ui.downPushButton.setEnabled(state)
self.ui.upPushButton.setEnabled(state)
def addScript(self):
newScript = deserialize_script(self.modelName, {
'name': 'New Script',
'description': '',
'enabled': True,
'conditions': [],
'statements': [],
})
self.scr.append(newScript)
self.drawScripts()
def deleteScript(self):
current_scr: Union[TWConcrScript, TWMetaScript] = self.scr[self.ui.scriptsTable.currentRow()]
def show_nondeletable():
from aqt.utils import showInfo # not to be deleted!
showInfo(
'This script does not allow for deletion.\n'
'You might have to uninstall the add-on which inserted this script.'
)
if isinstance(current_scr, TWConcrScript):
del self.scr[self.ui.scriptsTable.currentRow()] # gotta delete within dict
else:
iface = get_interface(current_scr.tag)
if iface.deletable:
is_deletable = iface.deletable(current_scr.id, current_scr.storage)
if is_deletable:
del self.scr[self.ui.scriptsTable.currentRow()] # gotta delete within dict
else:
show_nondeletable()
else:
show_nondeletable()
self.drawScripts()
self.updateButtons(False)
def moveDown(self):
i = self.ui.scriptsTable.currentRow()
if len(self.scr) != 1 and i < len(self.scr) - 1:
self.scr[i], self.scr[i + 1] = self.scr[i + 1], self.scr[i]
self.drawScripts()
self.ui.scriptsTable.setCurrentCell(i + 1, 0)
def moveUp(self):
i = self.ui.scriptsTable.currentRow()
if len(self.scr) != 1 and i > 0:
self.scr[i], self.scr[i - 1] = self.scr[i - 1], self.scr[i]
self.drawScripts()
self.ui.scriptsTable.setCurrentCell(i - 1, 0)
###########
def exportData(self):
result = deserialize_setting(self.modelName, {
"enabled": self.ui.enableCheckBox.isChecked(),
"insertStub": self.ui.insertStubCheckBox.isChecked(),
"scripts": self.scr,
})
return result
def importDialog(self):
def addAfterImport(scripts_new):
self.setupUi(self.scr + [deserialize_script(self.modelName, scr) for scr in scripts_new])
def replaceAfterImport(scripts_new):
self.setupUi([deserialize_script(self.modelName, scr) for scr in scripts_new])
dirpath = Path(f'{os.path.dirname(os.path.realpath(__file__))}', '../../json_schemas/scripts.json')
schema_path = dirpath.absolute().as_uri()
with dirpath.open('r') as jsonfile:
schema = json.load(jsonfile)
resolver = RefResolver(
schema_path,
schema,
)
validator = Draft7Validator(schema, resolver=resolver, format_checker=None)
dial = TWSettingAddReplace(mw)
dial.setupUi(
json.dumps([serialize_script(scr) for scr in self.scr], sort_keys=True, indent=4),
validator,
addAfterImport,
replaceAfterImport,
)
dial.exec_()
|
the-stack_0_5568 | r"""
Polynomial Regression
=====================
This example shows how to use the :py:class:`pylops.Regression` operator
to perform *Polynomial regression analysis*.
In short, polynomial regression is the problem of finding the best fitting
coefficients for the following equation:
.. math::
y_i = \sum_{n=0}^{order} x_n t_i^n \qquad \forall i=1,2,...,N
As we can express this problem in a matrix form:
.. math::
\mathbf{y}= \mathbf{A} \mathbf{x}
our solution can be obtained by solving the following optimization problem:
.. math::
J= ||\mathbf{y} - \mathbf{A} \mathbf{x}||_2
See documentation of :py:class:`pylops.Regression` for more detailed
definition of the forward problem.
"""
import numpy as np
import matplotlib.pyplot as plt
import pylops
plt.close('all')
np.random.seed(10)
###############################################################################
# Define the input parameters: number of samples along the t-axis (``N``),
# order (``order``), regression coefficients (``x``), and standard deviation
# of noise to be added to data (``sigma``).
N = 30
order = 3
x = np.array([1., .05, 0., -.01])
sigma = 1
###############################################################################
# Let's create the time axis and initialize the
# :py:class:`pylops.Regression` operator
t = np.arange(N, dtype='float64') - N//2
PRop = pylops.Regression(t, order=order, dtype='float64')
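###############################################################################
# Under the hood the operator applies a Vandermonde-like matrix with entries
# :math:`A_{i,n} = t_i^n`. As a quick sanity check (an illustrative sketch
# added here, not part of the original example) the same matrix can be built
# explicitly with ``np.vander`` and compared against the operator.
A = np.vander(t, N=order + 1, increasing=True)
print('Dense matrix matches PRop:', np.allclose(A @ x, PRop * x))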
###############################################################################
# We can then apply the operator in forward mode to compute our data points
# along the x-axis (``y``). We will also generate some random gaussian noise
# and create a noisy version of the data (``yn``).
y = PRop*x
yn = y + np.random.normal(0, sigma, N)
###############################################################################
# We are now ready to solve our problem. As we are using an operator from the
# :py:class:`pylops.LinearOperator` family, we can simply use ``/``,
# which in this case will solve the system by means of an iterative solver
# (i.e., :py:func:`scipy.sparse.linalg.lsqr`).
xest = PRop / y
xnest = PRop / yn
###############################################################################
# Let's plot the best fitting curve for the case of noise free and noisy data
plt.figure(figsize=(5, 7))
plt.plot(t, PRop*x, 'k', lw=4,
label=r'true: $x_0$ = %.2f, $x_1$ = %.2f, '
r'$x_2$ = %.2f, $x_3$ = %.2f' % (x[0], x[1], x[2], x[3]))
plt.plot(t, PRop*xest, '--r', lw=4,
label='est noise-free: $x_0$ = %.2f, $x_1$ = %.2f, '
r'$x_2$ = %.2f, $x_3$ = %.2f' %
(xest[0], xest[1], xest[2], xest[3]))
plt.plot(t, PRop*xnest, '--g', lw=4,
label='est noisy: $x_0$ = %.2f, $x_1$ = %.2f, '
r'$x_2$ = %.2f, $x_3$ = %.2f' %
(xnest[0], xnest[1], xnest[2], xnest[3]))
plt.scatter(t, y, c='r', s=70)
plt.scatter(t, yn, c='g', s=70)
plt.legend(fontsize='x-small')
###############################################################################
# We consider now the case where some of the observations have large errors.
# Such elements are generally referred to as *outliers* and can affect the
# quality of the least-squares solution if not treated with care. In this
# example we will see how using an L1 solver such as
# :py:func:`pylops.optimization.sparsity.IRLS` can dramatically improve the
# quality of the estimation of intercept and gradient.
# Add outliers
yn[1] += 40
yn[N-2] -= 20
# IRLS
nouter = 20
epsR = 1e-2
epsI = 0
tolIRLS = 1e-2
xnest = PRop / yn
xirls, nouter, xirls_hist, rw_hist = \
pylops.optimization.sparsity.IRLS(PRop, yn, nouter, threshR=False,
epsR=epsR, epsI=epsI,
tolIRLS=tolIRLS, returnhistory=True)
print('IRLS converged at %d iterations...' % nouter)
plt.figure(figsize=(5, 7))
plt.plot(t, PRop*x, 'k', lw=4,
label=r'true: $x_0$ = %.2f, $x_1$ = %.2f, '
r'$x_2$ = %.2f, $x_3$ = %.2f' % (x[0], x[1], x[2], x[3]))
plt.plot(t, PRop*xnest, '--r', lw=4,
label=r'L2: $x_0$ = %.2f, $x_1$ = %.2f, '
r'$x_2$ = %.2f, $x_3$ = %.2f' % (xnest[0], xnest[1], xnest[2], xnest[3]))
plt.plot(t, PRop*xirls, '--g', lw=4,
label=r'IRLS: $x_0$ = %.2f, $x_1$ = %.2f, '
r'$x_2$ = %.2f, $x_3$ = %.2f' % (xirls[0], xirls[1], xirls[2], xirls[3]))
plt.scatter(t, y, c='r', s=70)
plt.scatter(t, yn, c='g', s=70)
plt.legend(fontsize='x-small')
|
the-stack_0_5570 | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
from bkuser_core.departments.models import Department
from bkuser_core.profiles.models import Profile
from bkuser_core.user_settings.models import Setting
pytestmark = pytest.mark.django_db
class TestCategory:
def test_delete(self, test_ldap_category, test_profile, test_department, test_setting):
test_ldap_category.delete()
assert not test_ldap_category.enabled
assert test_ldap_category.inactive
assert (
Profile.objects.filter(category_id=test_ldap_category.id, enabled=False).count()
== Profile.objects.filter(category_id=test_ldap_category.id).count()
)
assert (
Profile.objects.filter(category_id=test_ldap_category.id, status="DELETED").count()
== Profile.objects.filter(category_id=test_ldap_category.id).count()
)
assert (
Department.objects.filter(category_id=test_ldap_category.id, enabled=False).count()
== Department.objects.filter(category_id=test_ldap_category.id).count()
)
assert (
Setting.objects.filter(category_id=test_ldap_category.id, enabled=False).count()
== Setting.objects.filter(category_id=test_ldap_category.id).count()
)
|
the-stack_0_5572 | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'getTotalX' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER_ARRAY a
# 2. INTEGER_ARRAY b
#
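#
# A worked example (added for illustration, mirroring the usual sample case):
# for a = [2, 6] and b = [24, 36], the integers "between" the two sets are the
# multiples of lcm(a) = 6 that also divide gcd(b) = 12, i.e. 6 and 12, so
# getTotalX(a, b) should return 2.
#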
def isCommonFactor(b, num):
for e in b:
if (e % num) != 0:
return False
return True
def isCommonMultiple(a, num):
for e in a:
if (num % e) != 0:
return False
return True
debug = False
def debug_print(*args):
global debug
if debug:
for arg in args:
sys.stdout.write(str(arg))
sys.stdout.write(' ')
sys.stdout.write('\n')
def findMultiplesFactors(a, b):
ismultiple = (b % a) == 0
result = []
if not ismultiple:
return result
result.append(a)
multiple = 1
while a * multiple < b:
multiple += 1
if multiple * a == b:
break
ismultiple = ((b % (a * multiple)) == 0)
if ismultiple:
debug_print("adds", a * multiple)
result.append(a * multiple)
else:
debug_print("skips", a * multiple)
return result + [b]
def findMultiplesFactors2(a, b):
result = []
tmp = b // a
if a * tmp != b:
return []
max_multiple = (b + a) // 2
result.append(a)
multiple = 1
a_mult = a
while a_mult < max_multiple:
multiple += 1
a_mult = a * multiple
tmp = b // a_mult
if a_mult == b or a_mult * tmp != b:
debug_print("skips", a_mult)
continue
debug_print("adds", a_mult)
result.append(a_mult)
result.append(b)
return sorted(result)
if debug:
for a in range(1, 200):
for b in range(a, 200):
ref = findMultiplesFactors(a, b)
cand = findMultiplesFactors2(a, b)
if ref != cand:
print('findMultiplesFactors(%d, %d) returned %s' % (a, b, ref))
print('findMultiplesFactors2(%d, %d) returned %s' % (a, b, cand))
assert(False)
def getTotalX(a, b):
a.sort()
b.sort()
if a[-1] > b[0]:
# No solution here
return 0
elif a[-1] == b[0]:
# only possible solution is b[0]
if isCommonFactor(b, b[0]) and isCommonMultiple(a, b[0]):
return 1
return 0
# Find the possible solutions
solutions = 0
mults = findMultiplesFactors2(a[-1], b[0])
for mult in mults:
if isCommonFactor(b, mult) and isCommonMultiple(a, mult):
solutions += 1
return solutions
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
m = int(first_multiple_input[1])
arr = list(map(int, input().rstrip().split()))
brr = list(map(int, input().rstrip().split()))
total = getTotalX(arr, brr)
fptr.write(str(total) + '\n')
fptr.close()
|
the-stack_0_5574 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants and types shared by tf.Transform Beam package."""
import collections
import enum
import os
import uuid
import apache_beam as beam
from apache_beam.typehints import Union
from tensorflow_transform import nodes
from tfx_bsl.telemetry import util
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple
NUMERIC_TYPE = Union[float, int]
PRIMITIVE_TYPE = Union[NUMERIC_TYPE, str, bytes]
METRICS_NAMESPACE = util.MakeTfxNamespace(['Transform'])
# Depending on the environment (TF 1.x vs 2.x, for example) we may want to
# register different implementations for the TFT beam nodes. These
# tags are used to identify the implementation to use under the current
# environment.
class EnvironmentTags(enum.Enum):
TF_COMPAT_V1 = 'tf_compat_v1'
TF_V2_ONLY = 'tf_v2_only'
_ALLOWED_PTRANSFORM_TAGS = [tag.value for tag in EnvironmentTags]
def get_unique_temp_path(base_temp_dir):
"""Return a path to a unique temp dir from given base temp dir.
Note this doesn't create the path that it returns.
Args:
base_temp_dir: A base directory
Returns:
The path name of a subdirectory of base_temp_dir, where the subdirectory is
unique.
"""
return os.path.join(base_temp_dir, uuid.uuid4().hex)
class _PtransformWrapper:
"""A wrapper around registered implementations of beam nodes."""
_GENERAL_ENVIRONMENT_TAG = object()
def __init__(self):
self._ptransform_by_tag = {}
def add_ptransform(self, ptransform_class, tags):
"""Add `ptransform_class` for all `tags`."""
# Many tags can refer to the same ptransform_class, but each
# ptransform_class should be registered only once.
tags = {self._GENERAL_ENVIRONMENT_TAG} if tags is None else tags
    assert all(tag not in self._ptransform_by_tag for tag in tags)
for tag in tags:
self._ptransform_by_tag[tag] = ptransform_class
def get_ptransform(self, tag):
"""Retrieves ptransform for `tag`.
Args:
tag: A string key (or None) to retrieve corresponding ptransform.
Returns:
A tuple of a registered beam.PTransform implementation and the tag it was
registered with.
Raises:
KeyError: If no registered PTransform implementation could be found.
"""
if tag is None or tag not in self._ptransform_by_tag:
return self._ptransform_by_tag[self._GENERAL_ENVIRONMENT_TAG], None
return self._ptransform_by_tag[tag], tag.value
_PTRANSFORM_BY_OPERATION_DEF_SUBCLASS = (
collections.defaultdict(_PtransformWrapper))
def register_ptransform(operation_def_subclass, tags=None):
"""Decorator to register a PTransform as the implementation for an analyzer.
This function is used to define implementations of the analyzers defined in
tensorflow_transform/analyzer_nodes.py and also the internal operations
defined in tensorflow_transform/beam/beam_nodes.py. The registered PTransform
will be invoked as follows:
outputs = inputs | operation.label >> MyPTransform(operation, extra_args)
  where operation is the instance of the subclass that was registered,
  extra_args are global arguments available to each PTransform (see
  ConstructBeamPipelineVisitor.extra_args) and `inputs` is a tuple of
  PCollections corresponding to the inputs of the OperationNode being
  implemented. The return value `outputs` should be a tuple of PCollections
corresponding to the outputs of the OperationNode. If the OperationNode has
a single output then the return value can also be a PCollection instead of a
tuple.
In some cases the implementation cannot be a PTransform and so instead the
value being registered may also be a function. The registered function will
be invoked as follows:
outputs = my_function(inputs, operation, extra_args)
where inputs, operation, extra_args and outputs are the same as for the
PTransform case.
Args:
operation_def_subclass: The class of attributes that is being registered.
Should be a subclass of `tensorflow_transform.nodes.OperationDef`.
tags: A set of string tags belonging to `EnvironmentTags`. If
provided, the PTransform will be registered against all of them.
Returns:
A class decorator that registers a PTransform or function as an
implementation of the OperationDef subclass.
"""
def register(ptransform_class):
assert isinstance(ptransform_class, type)
assert issubclass(ptransform_class, beam.PTransform)
    assert tags is None or all(tag in _ALLOWED_PTRANSFORM_TAGS for tag in tags)
_PTRANSFORM_BY_OPERATION_DEF_SUBCLASS[
operation_def_subclass].add_ptransform(ptransform_class, tags)
return ptransform_class
return register
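# A hypothetical registration sketch (FooCombine and _FooCombineImpl are
# illustrative names, not real TFT operations):
#
#   @register_ptransform(FooCombine, tags={EnvironmentTags.TF_V2_ONLY})
#   class _FooCombineImpl(beam.PTransform):
#
#     def __init__(self, operation, extra_args):
#       self._operation = operation
#       self._extra_args = extra_args
#
#     def expand(self, inputs):
#       pcoll, = inputs
#       return pcoll | 'Combine' >> beam.CombineGlobally(sum)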
class ConstructBeamPipelineVisitor(nodes.Visitor):
"""Visitor that constructs the beam pipeline from the node graph."""
ExtraArgs = tfx_namedtuple.namedtuple( # pylint: disable=invalid-name
'ExtraArgs', [
'base_temp_dir',
'pipeline',
'flat_pcollection',
'pcollection_dict',
'tf_config',
'graph',
'input_signature',
'input_specs',
'input_tensor_adapter_config',
'use_tf_compat_v1',
'cache_pcoll_dict',
'preprocessing_fn',
])
def __init__(self, extra_args):
self._extra_args = extra_args
def visit(self, operation, inputs):
try:
ptransform_wrapper = (
_PTRANSFORM_BY_OPERATION_DEF_SUBCLASS[operation.__class__])
environment_tag = (
EnvironmentTags.TF_COMPAT_V1
if self._extra_args.use_tf_compat_v1 else EnvironmentTags.TF_V2_ONLY)
ptransform, tag = ptransform_wrapper.get_ptransform(environment_tag)
except KeyError:
raise ValueError('No implementation for {} was registered'.format(
operation))
# TODO(zoyahav): Consider extracting a single PCollection before passing to
# ptransform if len(inputs) == 1.
if tag is None:
tagged_label = operation.label
else:
tagged_label = '{label}[{tag}]'.format(label=operation.label, tag=tag)
outputs = ((inputs or beam.pvalue.PBegin(self._extra_args.pipeline))
| tagged_label >> ptransform(operation, self._extra_args))
if isinstance(outputs, beam.pvalue.PCollection):
return (outputs,)
else:
return outputs
def validate_value(self, value):
if not isinstance(value, beam.pvalue.PCollection):
raise TypeError('Expected a PCollection, got {} of type {}'.format(
value, type(value)))
class IncrementCounter(beam.PTransform):
"""A PTransform that increments a counter once per PCollection.
The output PCollection is the same as the input PCollection.
"""
def __init__(self, counter_name):
self._counter_name = counter_name
def _make_and_increment_counter(self, unused_element):
del unused_element
beam.metrics.Metrics.counter(METRICS_NAMESPACE, self._counter_name).inc()
return None
def expand(self, pcoll):
_ = (
pcoll.pipeline
| 'CreateSole' >> beam.Create([None])
| 'Count' >> beam.Map(self._make_and_increment_counter))
return pcoll
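# Example wiring (illustrative only): tagging a PCollection with a telemetry
# counter is a pass-through, e.g.
#   counted = pcoll | 'CountAnalyzers' >> IncrementCounter('num_analyzers')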
|
the-stack_0_5575 | import torch.nn as nn
import torch.optim as optim
import argparse
import os
from utils import process_all_files,load_GloVe,accuracy_cal
from model import GA_Reader
from data_loader import DataLoader,TestLoader
def train(epochs,iterations,loader_train,loader_val,
model,optimizer,loss_function):
for epoch in range(epochs):
for iteration in range(iterations):
model.train()
optimizer.zero_grad()
doc,doc_char,doc_mask,query,query_char,query_mask, \
char_type,char_type_mask,answer,cloze,cand, \
cand_mask,qe_comm=loader_train.__load_next__()
output=model( doc,doc_char,doc_mask,query,query_char,query_mask,
char_type,char_type_mask,answer,cloze,cand,
cand_mask,qe_comm)
loss=loss_function(output,answer)
scalar=loss.item()
loss.backward()
optimizer.step()
valid_loss,valid_acc=validate(loader_val,model,loss_function)
print('epoch=',epoch+1,'iteration=',iteration+1,'training loss=',scalar,
'validation loss=',valid_loss,'validation accuracy=',valid_acc)
if epoch>=2:
optimizer=optim.Adam(model.parameters(),lr=optimizer.param_groups[0]['lr']/2)
def validate(loader_val,model,loss_function):
model.eval()
return_loss=0
accuracy=0
for _ in range(loader_val.examples//loader_val.batch_size):
doc,doc_char,doc_mask,query,query_char,query_mask, \
char_type,char_type_mask,answer,cloze,cand, \
cand_mask,qe_comm=loader_val.__load_next__()
output=model( doc,doc_char,doc_mask,query,query_char,query_mask,
char_type,char_type_mask,answer,cloze,cand,
cand_mask,qe_comm)
accuracy+=accuracy_cal(output,answer)
loss=loss_function(output,answer)
return_loss+=loss.item()
return_loss/=(loader_val.examples//loader_val.batch_size)
accuracy=100*accuracy/loader_val.examples
return return_loss,accuracy
def test(loader_test,model):
model.eval()
accuracy=0
for _ in range(loader_test.examples//loader_test.batch_size):
doc,doc_char,doc_mask,query,query_char,query_mask, \
char_type,char_type_mask,answer,cloze,cand, \
cand_mask,qe_comm=loader_test.__load_next__()
output=model( doc,doc_char,doc_mask,query,query_char,query_mask,
char_type,char_type_mask,answer,cloze,cand,
cand_mask,qe_comm)
accuracy+=accuracy_cal(output,answer)
accuracy=100*accuracy/loader_test.examples
print('test accuracy=',accuracy)
def main(args):
word_to_int,int_to_word,char_to_int,int_to_char, \
training_data=process_all_files(args.train_file)
glove_embeddings=load_GloVe(args.embed_file,word_to_int,args.embed_size)
loss_function=nn.CrossEntropyLoss()
model=GA_Reader(len(char_to_int),args.char_size,args.embed_size,
args.char_hidden_size,args.hidden_size,len(word_to_int),
glove_embeddings,args.gru_layers,args.use_features,args.use_char)
optimizer=optim.Adam(model.parameters(),lr=args.lr)
    # setup() defines --train_size, so use args.train_size consistently; the
    # test split starts right after the train and dev examples.
    data_loader_train=DataLoader(training_data[:args.train_size],args.batch_size)
    data_loader_validate=TestLoader(training_data[args.train_size:args. \
                    train_size+args.dev_size],args.dev_size)
    data_loader_test=TestLoader(training_data[args.train_size+args.dev_size:args. \
                    train_size+args.dev_size+args.test_size],args.test_size)
train(args.epochs,args.iterations,data_loader_train,
data_loader_validate,model,optimizer,loss_function)
test(data_loader_test,model)
def setup():
parser=argparse.ArgumentParser('argument parser')
parser.add_argument('--lr',type=float,default=0.00005)
parser.add_argument('--epochs',type=int,default=12)
parser.add_argument('--iterations',type=int,default=120)
parser.add_argument('--hidden_size',type=int,default=256)
parser.add_argument('--char_hidden_size',type=int,default=50)
parser.add_argument('--char_size',type=int,default=25)
parser.add_argument('--embed_size',type=int,default=100)
parser.add_argument('--use_features',type=bool,default=True)
parser.add_argument('--use_char',type=bool,default=True)
parser.add_argument('--batch_size',type=int,default=32)
parser.add_argument('--gru_layers',type=int,default=3)
parser.add_argument('--embed_file',type=str,default=os.getcwd()+'/word2vec_glove.text')
parser.add_argument('--train_file',type=str,default=os.getcwd()+'/train/')
parser.add_argument('--train_size',type=int,default=380298)
parser.add_argument('--dev_size',type=int,default=3924)
parser.add_argument('--test_size',type=int,default=3198)
args=parser.parse_args()
return args
if __name__=='__main__':
args=setup()
main(args) |
the-stack_0_5577 | '''
Merge Sort
Time Complexity: O(N*log(N))
Space Complexity: N
'''
from algorithms.Algorithm import Algorithm
class MergeSort(Algorithm):
def __init__(self):
super().__init__("Merge Sort")
def algorithm(self, temp_array = [], index = 0):
if temp_array == []:
temp_array = self.array.copy()
if len(temp_array) > 1:
m = len(temp_array)//2
left = temp_array[:m]
right = temp_array[m:]
self.algorithm(left, index)
self.algorithm(right, index+m)
#i - index of left array, j - index of right array, k - index of temp merged array
i = j = k = 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
if self.array[index] != left[i]:
self.array[index], self.array[index-j+m] = left[i], self.array[index]
self.update(index, index-j+m)
else:
self.array[index] = left[i]
self.update(index)
temp_array[k] = left[i]
i += 1
else:
self.array[index], self.array[index-i+m] = right[j], self.array[index]
self.update(index, index-i+m)
temp_array[k] = right[j]
j += 1
#visualise the sortingm+k
index += 1
k += 1
while i < len(left):
self.array[index] = left[i]
temp_array[k] = left[i]
#visualise the sorting
self.update(index)
index += 1
i += 1
k += 1
while j < len(right):
self.array[index] = right[j]
temp_array[k] = right[j]
#visualise the sorting
self.update(index)
index += 1
j += 1
k += 1
|
the-stack_0_5578 | import json
import re
from lxml import html
import HTMLInfo
import sys
class JDPrice(object):
def __init__(self, url):
self.url = url
HTMLInfo.REFERER = url
r = HTMLInfo.get_html(url)
self.html = r.text
self.info = self.get_product()
def get_url_page(self):
tree = html.fromstring(self.html)
page = tree.xpath('//div[@id="J_filter"]//div[@id="J_topPage"]//span[@class="fp-text"]/i/text()')
if page:
page = page[0]
else:
print("Error: Cannot get the pages")
sys.exit()
return int(page) if int(page) < 2 else 2
def create_url(self, url_list):
page = self.get_url_page()
for i in range(1, int(page) + 1):
url_list.append(self.url + str(i))
def get_itemlist(self, itemlist):
tree = html.fromstring(self.html)
status = tree.xpath('//div[@id="J_goodsList"]//div[@class="p-img"]//@href')
for item in status:
if re.search('^//item.jd.com', item):
item = re.sub('//', 'https://', item)
if item not in itemlist:
itemlist.append(item)
def get_product(self):
product_pattern = re.compile(r'compatible: true,(.*?)};', re.S)
product_info = re.findall(product_pattern, self.html)
if product_info:
return product_info[0]
return None
def get_product_jpg(self):
jpg_pattern = re.compile(r"src: '(.*?)',")
jpg = "http://img10.360buyimg.com/n1/" + re.findall(jpg_pattern, self.info)[0]
return jpg
def get_product_skuid(self):
sku_id_pattern = re.compile(r'skuid: (.*?),')
sku_id = re.findall(sku_id_pattern, self.info)[0]
return sku_id
def get_product_cate(self):
cat_pattern = re.compile(r"cat: \[(.*?)\],")
cat = re.findall(cat_pattern, self.info)[0]
return cat
def get_vendorId(self):
vid_pattern = re.compile(r'venderId:(.*?),')
vid = re.findall(vid_pattern, self.info)[0]
return vid
def get_shopId(self):
sid_pattern = re.compile(r"shopId:'(.*?)',")
sid = re.findall(sid_pattern, self.info)[0]
return sid
def get_product_promotion(self):
discount = {}
content = ""
vip = ""
sku_id = self.get_product_skuid()
cat = self.get_product_cate()
vender_id = self.get_vendorId()
shop_id = self.get_shopId()
# 2_2813_51976_0 stands for Shanghai; 1_72_2799_0 means Beijing
url = "http://cd.jd.com/promotion/v2?&skuId=" + sku_id + "&area=2_2813_51976_0&shopId=" + shop_id + "&venderId=" + vender_id + "&cat=" + cat
prom = HTMLInfo.get_html(url).content.decode('gbk')
try:
if prom.find('You have triggered an abuse') < 0:
prom = json.loads(prom)
if "skuCoupon" in prom.keys():
if prom["skuCoupon"]:
for i in prom["skuCoupon"]:
discount[i["discount"]] = i["quota"]
if "prom" in prom.keys():
if "tags" in prom["prom"].keys():
if prom["prom"]["tags"]:
if prom["prom"]["tags"][0]["name"] == u'会员特价':
vip = prom["prom"]["tags"][0]["name"]
if "pickOneTag" in prom["prom"].keys():
if prom["prom"]["pickOneTag"]:
content = prom["prom"]["pickOneTag"][0]["content"]
except Exception as ex:
print('get_product_promotion ', ex)
sale = ""
gift = ""
if discount:
for i in discount.keys():
sale += u'满减:满' + str(discount[i]) + u'减' + str(i) + "<br />"
if vip:
vip = str(vip) + "<br />"
if content:
gift = u'满赠:' + str(content) + "<br />"
promotion = vip + sale + gift
return promotion
def get_product_name(self):
name = ""
try:
name_pattern = re.compile(r"name: '(.*?)',")
name = re.findall(name_pattern, self.info)[0]
except Exception as ex:
print(ex)
return bytes(name.encode()).decode('unicode-escape')
def get_product_price(self):
price = ""
plus_price = ""
date = {}
sku_id = self.get_product_skuid()
r = HTMLInfo.get_html("https://d.jd.com/lab/get?callback=lab")
match_pattern = re.compile(r"lab\(\[(.*?)\]\)")
try:
json_data = json.loads(re.findall(match_pattern, r.text)[0])
except Exception as ex:
print('get_product_price Ex:', ex)
if re.match('www.jd.com', json_data['url']):
date = json_data["startOn"]
date = str(date) + "1608370126"
# this url to get the price for JD
url = "http://p.3.cn/prices/mgets?&type=1&pduid=" + date + "&skuIds=J_" + sku_id
# response.json() can return the json-encoded content of a response
status = HTMLInfo.get_html(url).json()[0]
if status:
if 'tpp' in status:
plus_price = u"PLUS价:<br />" + status['tpp']
if 'p' in status:
price = u"京东价:<br />" + status['p']
return price + "<br />" + plus_price
if __name__ == '__main__':
jd = JDPrice("https://item.jd.com/4488334.html")
print(jd.get_product_price())
|
the-stack_0_5580 | # Modelliere eine Warteschlange von Autos beim TÜV
# Tasks: read in the licence plate number of a new customer
# Append the new plate number to the existing queue
# Print the plate number of the next car in line
# Then remove that plate number from the queue
# Exit the program
from module_Queue import Queue
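# module_Queue is a local teaching helper that is not shown here; the script
# only assumes a FIFO queue with roughly this interface (illustrative sketch):
#
#   class Queue:
#       def __init__(self): self._items = []
#       def enqueue(self, item): self._items.append(item)   # add at the back
#       def dequeue(self): return self._items.pop(0)         # remove the front
#       def front(self): return self._items[0]               # peek at the front
#       def empty(self): return len(self._items) == 0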
print("""Warten beim TÜV
---------------""")
warteschlange = Queue()  # creates a "warteschlange" (queue) object of class "Queue"
warteschlange.__menuetext = """
(N)euer Kunde
(A)bfertigen des nächsten Kunden
(E)nde
"""
wahl = "x"
while not (wahl in "eE" and warteschlange.empty()):
print(warteschlange.__menuetext)
wahl = input("Auswahl: ")
if wahl in ["n", "N"]:
kennzeichen = input("Kennzeichen: ")
warteschlange.enqueue(kennzeichen)
elif wahl in ["a","A"]:
if not warteschlange.empty():
print("Der Nächste ist: ",
warteschlange.dequeue())
else:
print("Die Warteschlange ist leer")
elif (wahl in "eE") and not warteschlange.empty():
print("Es warten noch Kunden!")
print("Kfz-Kennzeichen: ", warteschlange.front())
print("Ich wünsche einen schönen Feierabend!")
|
the-stack_0_5583 | import json
import logging
from os import execv, unlink
import subprocess
from threading import Thread
from time import sleep
import netifaces
from fiotest.api import API
from fiotest.spec import Reboot, Sequence, Test, TestSpec
log = logging.getLogger()
class SpecStopped(Exception):
pass
class SpecRunner:
reboot_state = "/var/lib/fiotest/reboot.state"
def __init__(self, spec: TestSpec):
self.spec = spec
self.running = False
self.thread = Thread(target=self.run)
def start(self):
self.running = True
self.thread.start()
def run(self):
completed = 0
try:
with open(self.reboot_state) as f:
data = json.load(f)
completed = data["seq_idx"]
log.warning(
"Detectected rebooted sequence, continuing after sequence %d",
completed,
)
unlink(self.reboot_state)
API("/var/sota", False).complete_test(data["test_id"], {})
except FileNotFoundError:
pass # This is the "normal" case - no reboot has occurred
try:
for i, seq in enumerate(self.spec.sequence):
self._assert_running()
if i < completed:
log.debug("Skipping seq %d", i)
continue
log.info("Executing seq %d", i)
if seq.reboot:
self._reboot(i, seq.reboot)
else:
# run_tests recursively decrements seq.repeat.total
# we need to keep a copy of this value so that testing
# can be repeated
if seq.repeat:
total = seq.repeat.total
self._run_tests(seq)
if seq.repeat:
seq.repeat.total = total
except SpecStopped:
log.warning("Sequence has been stopped before completion")
log.info("Testing complete")
def stop(self):
log.info("Stopping run")
self.running = False
def join(self):
self.thread.join()
def _assert_running(self):
if not self.running:
raise SpecStopped()
def _reboot(self, seq_idx: int, reboot: Reboot):
log.warning("rebooting!!!!")
test_id = API("/var/sota", False).start_test("reboot")
with open(self.reboot_state, "w") as f:
state = {"seq_idx": seq_idx + 1, "test_id": test_id}
json.dump(state, f)
execv(reboot.command[0], reboot.command)
def _run_test(self, test: Test):
host_ip = netifaces.gateways()["default"][netifaces.AF_INET][0]
args = ["/usr/local/bin/fio-test-wrap", test.name]
if test.on_host:
args.extend(
[
"sshpass",
"-pfio",
"ssh",
"-o",
"StrictHostKeyChecking no",
"fio@" + host_ip,
]
)
args.extend(test.command)
with open("/tmp/tmp.log", "wb") as f:
p = subprocess.Popen(args, stderr=f, stdout=f)
while p.poll() is None:
if not self.running:
log.info("Killing test")
p.kill()
return
sleep(1)
rc = p.wait()
if rc != 0:
log.error("Test exited with %d", rc)
def _run_tests(self, seq: Sequence):
if seq.tests:
for test in seq.tests:
self._assert_running()
log.info("Executing test: %s", test.name)
self._run_test(test)
if seq.repeat and seq.repeat.total != 1:
if seq.repeat.total > 0:
seq.repeat.total -= 1
self._assert_running()
log.info("Repeating sequence in %d seconds", seq.repeat.delay_seconds)
sleep(seq.repeat.delay_seconds)
self._run_tests(seq)
|
the-stack_0_5584 | from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.core.paginator import Paginator
from django.db.models import Count
from django.contrib import messages
from .models import Poll, Choice, Vote
from .forms import PollAddForm, EditPollForm, ChoiceAddForm
@login_required()
def polls_list(request):
all_polls = Poll.objects.all()
search_term = ''
if 'name' in request.GET:
all_polls = all_polls.order_by('text')
if 'date' in request.GET:
all_polls = all_polls.order_by('pub_date')
if 'vote' in request.GET:
all_polls = all_polls.annotate(Count('vote')).order_by('vote__count')
if 'search' in request.GET:
search_term = request.GET['search']
all_polls = all_polls.filter(text__icontains=search_term)
paginator = Paginator(all_polls, 6) # Show 6 contacts per page
page = request.GET.get('page')
polls = paginator.get_page(page)
get_dict_copy = request.GET.copy()
params = get_dict_copy.pop('page', True) and get_dict_copy.urlencode()
print(params)
context = {
'polls': polls,
'params': params,
'search_term': search_term,
}
return render(request, 'polls/polls_list.html', context)
@login_required()
def list_by_user(request):
all_polls = Poll.objects.filter(owner=request.user)
paginator = Paginator(all_polls, 7) # Show 7 contacts per page
page = request.GET.get('page')
polls = paginator.get_page(page)
context = {
'polls': polls,
}
return render(request, 'polls/polls_list.html', context)
@login_required()
def polls_add(request):
if request.user.has_perm('polls.add_poll'):
if request.method == 'POST':
form = PollAddForm(request.POST)
            if form.is_valid():
poll = form.save(commit=False)
poll.owner = request.user
poll.save()
new_choice1 = Choice(
poll=poll, choice_text=form.cleaned_data['choice1']).save()
new_choice2 = Choice(
poll=poll, choice_text=form.cleaned_data['choice2']).save()
messages.success(
request, "Poll & Choices added successfully", extra_tags='alert alert-success alert-dismissible fade show')
return redirect('polls:list')
else:
form = PollAddForm()
context = {
'form': form,
}
return render(request, 'polls/add_poll.html', context)
else:
return HttpResponse("Sorry but you don't have permission to do that!")
@login_required
def polls_edit(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
if request.user != poll.owner:
return redirect('home')
if request.method == 'POST':
form = EditPollForm(request.POST, instance=poll)
        if form.is_valid():
form.save()
messages.success(request, "Poll Updated successfully",
extra_tags='alert alert-success alert-dismissible fade show')
return redirect("polls:list")
else:
form = EditPollForm(instance=poll)
return render(request, "polls/poll_edit.html", {'form': form, 'poll': poll})
@login_required
def polls_delete(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
if request.user != poll.owner:
return redirect('home')
poll.delete()
messages.success(request, "Poll Deleted successfully",
extra_tags='alert alert-success alert-dismissible fade show')
return redirect("polls:list")
@login_required
def add_choice(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
if request.user != poll.owner:
return redirect('home')
if request.method == 'POST':
form = ChoiceAddForm(request.POST)
        if form.is_valid():
new_choice = form.save(commit=False)
new_choice.poll = poll
new_choice.save()
messages.success(
request, "Choice added successfully", extra_tags='alert alert-success alert-dismissible fade show')
return redirect('polls:edit', poll.id)
else:
form = ChoiceAddForm()
context = {
'form': form,
}
return render(request, 'polls/add_choice.html', context)
@login_required
def choice_edit(request, choice_id):
choice = get_object_or_404(Choice, pk=choice_id)
poll = get_object_or_404(Poll, pk=choice.poll.id)
if request.user != poll.owner:
return redirect('home')
if request.method == 'POST':
form = ChoiceAddForm(request.POST, instance=choice)
        if form.is_valid():
new_choice = form.save(commit=False)
new_choice.poll = poll
new_choice.save()
messages.success(
request, "Choice Updated successfully", extra_tags='alert alert-success alert-dismissible fade show')
return redirect('polls:edit', poll.id)
else:
form = ChoiceAddForm(instance=choice)
context = {
'form': form,
'edit_choice': True,
'choice': choice,
}
return render(request, 'polls/add_choice.html', context)
@login_required
def choice_delete(request, choice_id):
choice = get_object_or_404(Choice, pk=choice_id)
poll = get_object_or_404(Poll, pk=choice.poll.id)
if request.user != poll.owner:
return redirect('home')
choice.delete()
messages.success(
request, "Choice Deleted successfully", extra_tags='alert alert-success alert-dismissible fade show')
return redirect('polls:edit', poll.id)
def poll_detail(request, poll_id):
poll = get_object_or_404(Poll, id=poll_id)
if not poll.active:
return render(request, 'polls/poll_result.html', {'poll': poll})
loop_count = poll.choice_set.count()
context = {
'poll': poll,
'loop_time': range(0, loop_count),
}
return render(request, 'polls/poll_detail.html', context)
@login_required
def poll_vote(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
# choice_id = request.POST.get('choice')
choice_ids = request.POST.getlist('choice')
    print(f'choice_ids is {choice_ids}')
if not poll.user_can_vote(request.user):
messages.error(
request, "You already voted this poll", extra_tags='alert alert-warning alert-dismissible fade show')
return redirect("polls:list")
if len(choice_ids) > 0:
        # with checkboxes, several choice_ids need to be passed through
# -----------
for choice_item in choice_ids:
            print(f'choice_item is {choice_item}, type {type(choice_item)}')
choice = Choice.objects.get(id=choice_item)
vote = Vote(user=request.user, poll=poll, choice=choice)
vote.save()
            print(f'Voted: {vote}')
return render(request, 'polls/poll_result.html', {'poll': poll})
# -----------
# choice = Choice.objects.get(id=choice_id)
# vote = Vote(user=request.user, poll=poll, choice=choice)
# vote.save()
# return render(request, 'polls/poll_result.html', {'poll': poll})
# -----------
else:
messages.error(
request, "No choice selected", extra_tags='alert alert-warning alert-dismissible fade show')
return redirect("polls:detail", poll_id)
return render(request, 'polls/poll_result.html', {'poll': poll})
@login_required
def endpoll(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
if request.user != poll.owner:
return redirect('home')
if poll.active is True:
poll.active = False
poll.save()
return render(request, 'polls/poll_result.html', {'poll': poll})
else:
return render(request, 'polls/poll_result.html', {'poll': poll})
|
the-stack_0_5586 | # Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import dpctl
import base_bs_erf
import numba_dppy
from math import log, sqrt, exp, erf
@numba_dppy.kernel
def black_scholes( nopt, price, strike, t, rate, vol, call, put):
mr = -rate
sig_sig_two = vol * vol * 2
i = numba_dppy.get_global_id(0)
P = price[i]
S = strike [i]
T = t [i]
a = log(P / S)
b = T * mr
z = T * sig_sig_two
c = 0.25 * z
y = 1./sqrt(z)
w1 = (a - b + c) * y
w2 = (a - b - c) * y
d1 = 0.5 + 0.5 * erf(w1)
d2 = 0.5 + 0.5 * erf(w2)
Se = exp(b) * S
r = P * d1 - Se * d2
call [i] = r
put [i] = r - P + Se
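# The kernel above evaluates the closed-form Black-Scholes prices:
#   w1, w2 correspond to d1, d2 scaled by 1/sqrt(2), so 0.5 + 0.5*erf(w) = N(d),
#   call = P*N(d1) - S*exp(-r*T)*N(d2) and put = call - P + S*exp(-r*T)
# (put-call parity); see the base_bs_erf.run(...) call below for how the
# driver is invoked.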
def black_scholes_driver(nopt, price, strike, t, rate, vol, call, put):
with dpctl.device_context("opencl:gpu"):
black_scholes[nopt,numba_dppy.DEFAULT_LOCAL_SIZE]( nopt, price, strike, t, rate, vol, call, put )
base_bs_erf.run("Numba@jit-loop-par", black_scholes_driver, nparr=True, pass_args=True)
|
the-stack_0_5587 | from db import connection
cnx = connection()
cursor = cnx.cursor()
def execute(names, query, cursor=cursor):
print(query)
cursor.execute(query)
print('\t'.join(names))
for tpl in cursor:
print('\t'.join(str(s) for s in tpl))
print()
def where_clauses(no_forks=False, language=None):
where_clauses = []
if no_forks:
where_clauses.append("is_fork_project=0")
if language is not None:
where_clauses.append(f"main_language='{language}'")
if bool(where_clauses):
conjunction = ' AND '.join(where_clauses)
return f" WHERE {conjunction} "
else:
return ""
def q_count(no_forks=False, language=None):
q = "SELECT COUNT(*) FROM repo ";
q += where_clauses(no_forks, language)
return ["count"], q
def q_license_count(no_forks=False, language=None):
q = "SELECT license, COUNT(*) AS license_count FROM repo "
q += where_clauses(no_forks, language)
q += "GROUP BY license ORDER BY license_count DESC"
return ["license", "license_count"], q
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--language")
args = parser.parse_args()
print("-" * 10)
print("including forks")
print("-" * 10)
execute(*q_count(language=args.language))
execute(*q_license_count(language=args.language))
print("-" * 10)
print("no forks")
print("-" * 10)
execute(*q_count(no_forks=True, language=args.language))
execute(*q_license_count(no_forks=True, language=args.language))
cursor.close()
cnx.close()
|
the-stack_0_5588 | import pandas as pd
import requests
import us
from bs4 import BeautifulSoup
from can_tools.scrapers.base import CMU
from can_tools.scrapers.official.base import CountyDashboard
class ArizonaMaricopaVaccine(CountyDashboard):
"""
Fetch county level Covid-19 vaccination data from official Maricopa county website
"""
source = "https://www.maricopa.gov/5641/COVID-19-Vaccine"
source_name = "Maricopa County"
has_location = False
location_type = "county"
state_fips = int(us.states.lookup("Arizona").fips)
def fetch(self):
# Set url of website
url = "https://www.maricopa.gov/5641/COVID-19-Vaccine"
request = requests.get(url)
if not request.ok:
message = f"Could not request data from {url}"
raise ValueError(message)
return request.content
def normalize(self, data) -> pd.DataFrame:
# Read data into Beautiful Soup
bs = BeautifulSoup(data, "html.parser")
# Find the doses given
doses = bs.find_all("h2", class_="dataNumber")[1::1][0].text.replace(",", "")
# Create data frame
df = pd.DataFrame(
{
"location_name": ["Maricopa"],
"total_vaccine_doses_administered": pd.to_numeric(doses),
}
)
# Create dictionary for columns to map
crename = {
"total_vaccine_doses_administered": CMU(
category="total_vaccine_doses_administered",
measurement="cumulative",
unit="doses",
),
}
# Move things into long format
df = df.melt(id_vars=["location_name"], value_vars=crename.keys()).dropna()
# Determine the category of each observation
out = self.extract_CMU(df, crename)
# Add rows that don't change
out["vintage"] = self._retrieve_vintage()
out["dt"] = self._retrieve_dt("US/Arizona")
return out
|
the-stack_0_5589 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Util class or function."""
from mindspore.train.serialization import load_checkpoint
import mindspore.nn as nn
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', tb_writer=None):
self.name = name
self.fmt = fmt
self.reset()
self.tb_writer = tb_writer
self.cur_step = 1
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if self.tb_writer is not None:
self.tb_writer.add_scalar(self.name, self.val, self.cur_step)
self.cur_step += 1
def __str__(self):
fmtstr = '{name}:{avg' + self.fmt + '}'
return fmtstr.format(**self.__dict__)
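# Typical AverageMeter usage (an illustrative sketch, not called in this module):
#
#   loss_meter = AverageMeter('loss', ':.4f')
#   for batch_loss in (0.9, 0.7, 0.5):
#       loss_meter.update(batch_loss, n=1)   # n weights the running average
#   print(loss_meter)                        # -> loss:0.7000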
def load_backbone(net, ckpt_path, args):
"""Load darknet53 backbone checkpoint."""
param_dict = load_checkpoint(ckpt_path)
yolo_backbone_prefix = 'feature_map.backbone'
darknet_backbone_prefix = 'network.backbone'
find_param = []
not_found_param = []
for name, cell in net.cells_and_names():
if name.startswith(yolo_backbone_prefix):
name = name.replace(yolo_backbone_prefix, darknet_backbone_prefix)
if isinstance(cell, (nn.Conv2d, nn.Dense)):
darknet_weight = '{}.weight'.format(name)
darknet_bias = '{}.bias'.format(name)
if darknet_weight in param_dict:
cell.weight.default_input = param_dict[darknet_weight].data
find_param.append(darknet_weight)
else:
not_found_param.append(darknet_weight)
if darknet_bias in param_dict:
cell.bias.default_input = param_dict[darknet_bias].data
find_param.append(darknet_bias)
else:
not_found_param.append(darknet_bias)
elif isinstance(cell, (nn.BatchNorm2d, nn.BatchNorm1d)):
darknet_moving_mean = '{}.moving_mean'.format(name)
darknet_moving_variance = '{}.moving_variance'.format(name)
darknet_gamma = '{}.gamma'.format(name)
darknet_beta = '{}.beta'.format(name)
if darknet_moving_mean in param_dict:
cell.moving_mean.default_input = param_dict[darknet_moving_mean].data
find_param.append(darknet_moving_mean)
else:
not_found_param.append(darknet_moving_mean)
if darknet_moving_variance in param_dict:
cell.moving_variance.default_input = param_dict[darknet_moving_variance].data
find_param.append(darknet_moving_variance)
else:
not_found_param.append(darknet_moving_variance)
if darknet_gamma in param_dict:
cell.gamma.default_input = param_dict[darknet_gamma].data
find_param.append(darknet_gamma)
else:
not_found_param.append(darknet_gamma)
if darknet_beta in param_dict:
cell.beta.default_input = param_dict[darknet_beta].data
find_param.append(darknet_beta)
else:
not_found_param.append(darknet_beta)
args.logger.info('================found_param {}========='.format(len(find_param)))
args.logger.info(find_param)
args.logger.info('================not_found_param {}========='.format(len(not_found_param)))
args.logger.info(not_found_param)
args.logger.info('=====load {} successfully ====='.format(ckpt_path))
return net
def default_wd_filter(x):
"""default weight decay filter."""
parameter_name = x.name
if parameter_name.endswith('.bias'):
# all bias not using weight decay
return False
if parameter_name.endswith('.gamma'):
        # BN gamma/beta do not use weight decay; note that for now x does not include BN parameters
return False
if parameter_name.endswith('.beta'):
        # BN gamma/beta do not use weight decay; note that for now x does not include BN parameters
return False
return True
def get_param_groups(network):
"""Param groups for optimizer."""
decay_params = []
no_decay_params = []
for x in network.trainable_params():
parameter_name = x.name
if parameter_name.endswith('.bias'):
# all bias not using weight decay
no_decay_params.append(x)
elif parameter_name.endswith('.gamma'):
            # BN gamma/beta do not use weight decay; note that for now x does not include BN parameters
no_decay_params.append(x)
elif parameter_name.endswith('.beta'):
            # BN gamma/beta do not use weight decay; note that for now x does not include BN parameters
no_decay_params.append(x)
else:
decay_params.append(x)
return [{'params': no_decay_params, 'weight_decay': 0.0}, {'params': decay_params}]
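# Hedged sketch (not part of the original utilities): get_param_groups() returns
# group parameters in the shape MindSpore optimizers accept; the optimizer choice
# and hyper-parameters below are assumptions for illustration only.
def _example_build_optimizer(network, lr=0.01):
    # Weight decay in the group dicts overrides the optimizer-level value.
    return nn.Momentum(params=get_param_groups(network), learning_rate=lr,
                       momentum=0.9, weight_decay=1e-4)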
class ShapeRecord:
"""Log image shape."""
def __init__(self):
self.shape_record = {
320: 0,
352: 0,
384: 0,
416: 0,
448: 0,
480: 0,
512: 0,
544: 0,
576: 0,
608: 0,
'total': 0
}
def set(self, shape):
if len(shape) > 1:
shape = shape[0]
shape = int(shape)
self.shape_record[shape] += 1
self.shape_record['total'] += 1
def show(self, logger):
for key in self.shape_record:
rate = self.shape_record[key] / float(self.shape_record['total'])
logger.info('shape {}: {:.2f}%'.format(key, rate*100))
|
the-stack_0_5591 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.db import migrations
from pipeline.models import PipelineInstance, PipelineTemplate
from pipeline.contrib.statistics.models import InstanceInPipeline, TemplateInPipeline
def load_data(apps, schema_editor):
    # Clear any existing statistics data
TemplateInPipeline.objects.all().delete()
InstanceInPipeline.objects.all().delete()
template_list = PipelineTemplate.objects.filter(is_deleted=False)
template_data = []
for template in template_list:
template_id = template.template_id
try:
result = statistics_total(template.data)
data = TemplateInPipeline(
template_id=template_id,
atom_total=result["atom_total"],
subprocess_total=result["subprocess_total"],
gateways_total=result["gateways_total"],
)
template_data.append(data)
except Exception:
pass
TemplateInPipeline.objects.bulk_create(template_data)
instance_list = PipelineInstance.objects.filter(is_deleted=False)
instance_data = []
for instance in instance_list:
instance_id = instance.instance_id
try:
result = statistics_total(instance.execution_data)
data = InstanceInPipeline(
instance_id=instance_id,
atom_total=result["atom_total"],
subprocess_total=result["subprocess_total"],
gateways_total=result["gateways_total"],
)
instance_data.append(data)
except Exception:
pass
InstanceInPipeline.objects.bulk_create(instance_data)
def statistics_total(pipeline_tree):
atom_total = 0
subprocess_total = 0
tree_activities = pipeline_tree["activities"]
    # Get the number of gateways
gateways_total = len(pipeline_tree["gateways"])
    # Iterate over the activities nodes
for activity in tree_activities:
activity_type = tree_activities[activity]["type"]
if activity_type == "ServiceActivity":
atom_total += 1
elif activity_type == "SubProcess":
subprocess_total += 1
return {"atom_total": atom_total, "subprocess_total": subprocess_total, "gateways_total": gateways_total}
class Migration(migrations.Migration):
dependencies = [("statistics", "0006_auto_20181115_1208")]
operations = [migrations.RunPython(load_data)]
|
the-stack_0_5592 | #!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# [TEST_TAGS]
# eviction:prepare
# [END_TAGS]
import wttest
from wtscenario import make_scenarios
# test_prepare12.py
# Test update restore of a page with prepared update.
class test_prepare12(wttest.WiredTigerTestCase):
conn_config = 'cache_size=2MB'
format_values = [
('column', dict(key_format='r', value_format='S')),
('column_fix', dict(key_format='r', value_format='8t')),
('row_integer', dict(key_format='i', value_format='S')),
]
scenarios = make_scenarios(format_values)
def test_prepare_update_restore(self):
uri = "table:test_prepare12"
format = 'key_format={},value_format={}'.format(self.key_format, self.value_format)
self.session.create(uri, format)
if self.value_format == '8t':
value_a = 97
value_b = 98
value_aaa = 65
else:
value_a = 'a'
value_b = 'b'
value_aaa = 'a' * 500
# Prepare a transaction
cursor = self.session.open_cursor(uri, None)
self.session.begin_transaction()
cursor[1] = value_a
self.session.prepare_transaction('prepare_timestamp=' + self.timestamp_str(1))
# Insert an uncommitted key
session2 = self.conn.open_session(None)
cursor2 = session2.open_cursor(uri, None)
session2.begin_transaction()
cursor2[2] = value_b
# Insert a bunch of other content to fill the database to trigger eviction.
session3 = self.conn.open_session(None)
cursor3 = session3.open_cursor(uri, None)
for i in range(3, 101):
session3.begin_transaction()
cursor3[i] = value_aaa
session3.commit_transaction()
# Commit the prepared update
self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(1) + ',durable_timestamp=' + self.timestamp_str(2))
# Read the prepared update
self.session.begin_transaction('read_timestamp=' + self.timestamp_str(2))
self.assertEqual(cursor[1], value_a)
|
the-stack_0_5596 | import collections
import glob
import itertools
import logging
import os
from dxtbx.imageset import ImageSequence
from dxtbx.model.experiment_list import (
BeamComparison,
DetectorComparison,
ExperimentList,
ExperimentListFactory,
GoniometerComparison,
)
from dxtbx.sequence_filenames import locate_files_matching_template_string
from dials.command_line.dials_import import ManualGeometryUpdater
from dials.util.options import geometry_phil_scope
from scitbx.array_family import flex
from xia2.Handlers.Phil import PhilIndex
logger = logging.getLogger("xia2.Schema")
class _ImagesetCache(dict):
pass
imageset_cache = _ImagesetCache()
def longest_common_substring(s1, s2):
m = [[0] * (1 + len(s2)) for i in range(1 + len(s1))]
longest, x_longest = 0, 0
for x in range(1, 1 + len(s1)):
for y in range(1, 1 + len(s2)):
if s1[x - 1] == s2[y - 1]:
m[x][y] = m[x - 1][y - 1] + 1
if m[x][y] > longest:
longest = m[x][y]
x_longest = x
else:
m[x][y] = 0
return s1[x_longest - longest : x_longest]
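# Illustrative check (not in the original module): the helper returns the longest
# run of characters shared by two strings; load_imagesets() uses it below to match
# an image template against candidate *_master.h5 paths. File names are hypothetical.
def _example_longest_common_substring():
    return longest_common_substring("scan_0001.h5", "scan_master.h5")  # "scan_"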
def load_imagesets(
template,
directory,
id_image=None,
image_range=None,
use_cache=True,
reversephi=False,
):
global imageset_cache
from xia2.Applications.xia2setup import known_hdf5_extensions
full_template_path = os.path.join(directory, template)
if full_template_path not in imageset_cache or not use_cache:
params = PhilIndex.params.xia2.settings
compare_beam = BeamComparison(
wavelength_tolerance=params.input.tolerance.beam.wavelength,
direction_tolerance=params.input.tolerance.beam.direction,
polarization_normal_tolerance=params.input.tolerance.beam.polarization_normal,
polarization_fraction_tolerance=params.input.tolerance.beam.polarization_fraction,
)
compare_detector = DetectorComparison(
fast_axis_tolerance=params.input.tolerance.detector.fast_axis,
slow_axis_tolerance=params.input.tolerance.detector.slow_axis,
origin_tolerance=params.input.tolerance.detector.origin,
)
compare_goniometer = GoniometerComparison(
rotation_axis_tolerance=params.input.tolerance.goniometer.rotation_axis,
fixed_rotation_tolerance=params.input.tolerance.goniometer.fixed_rotation,
setting_rotation_tolerance=params.input.tolerance.goniometer.setting_rotation,
)
scan_tolerance = params.input.tolerance.scan.oscillation
# If diamond anvil cell data, always use dynamic shadowing
high_pressure = PhilIndex.params.dials.high_pressure.correction
format_kwargs = {
"dynamic_shadowing": params.input.format.dynamic_shadowing or high_pressure,
"multi_panel": params.input.format.multi_panel,
}
if os.path.splitext(full_template_path)[-1] in known_hdf5_extensions:
# if we are passed the correct file, use this, else look for a master
# file (i.e. something_master.h5)
if os.path.exists(full_template_path) and os.path.isfile(
full_template_path
):
master_file = full_template_path
else:
g = glob.glob(os.path.join(directory, "*_master.h5"))
master_file = None
for p in g:
substr = longest_common_substring(template, p)
if substr:
if master_file is None or (
len(substr)
> len(longest_common_substring(template, master_file))
):
master_file = p
if master_file is None:
raise RuntimeError("Can't find master file for %s" % full_template_path)
unhandled = []
experiments = ExperimentListFactory.from_filenames(
[master_file],
unhandled=unhandled,
compare_beam=compare_beam,
compare_detector=compare_detector,
compare_goniometer=compare_goniometer,
scan_tolerance=scan_tolerance,
format_kwargs=format_kwargs,
)
assert len(unhandled) == 0, (
"unhandled image files identified: %s" % unhandled
)
else:
params = PhilIndex.get_python_object()
read_all_image_headers = params.xia2.settings.read_all_image_headers
if read_all_image_headers:
paths = sorted(
locate_files_matching_template_string(full_template_path)
)
unhandled = []
experiments = ExperimentListFactory.from_filenames(
paths,
unhandled=unhandled,
compare_beam=compare_beam,
compare_detector=compare_detector,
compare_goniometer=compare_goniometer,
scan_tolerance=scan_tolerance,
format_kwargs=format_kwargs,
)
assert len(unhandled) == 0, (
"unhandled image files identified: %s" % unhandled
)
else:
from xia2.Handlers.CommandLine import CommandLine
experiments = ExperimentList()
start_ends = CommandLine.get_start_ends(full_template_path)
if not start_ends:
start_ends.append(None)
for start_end in start_ends:
experiments.extend(
ExperimentList.from_templates(
[full_template_path],
format_kwargs=format_kwargs,
image_range=start_end,
)
)
imagesets = [
iset for iset in experiments.imagesets() if isinstance(iset, ImageSequence)
]
assert len(imagesets) > 0, "no imageset found"
imageset_cache[full_template_path] = collections.OrderedDict()
if reversephi:
for imageset in imagesets:
goniometer = imageset.get_goniometer()
goniometer.set_rotation_axis(
tuple(-g for g in goniometer.get_rotation_axis())
)
reference_geometry = PhilIndex.params.xia2.settings.input.reference_geometry
if reference_geometry is not None and len(reference_geometry) > 0:
update_with_reference_geometry(imagesets, reference_geometry)
# Update the geometry
params = PhilIndex.params.xia2.settings
update_geometry = []
# Then add manual geometry
work_phil = geometry_phil_scope.format(params.input)
diff_phil = geometry_phil_scope.fetch_diff(source=work_phil)
if diff_phil.as_str() != "":
update_geometry.append(ManualGeometryUpdater(params.input))
imageset_list = []
for imageset in imagesets:
for updater in update_geometry:
imageset = updater(imageset)
imageset_list.append(imageset)
imagesets = imageset_list
for imageset in imagesets:
scan = imageset.get_scan()
exposure_times = scan.get_exposure_times()
epochs = scan.get_epochs()
if exposure_times.all_eq(0) or exposure_times[0] == 0:
exposure_times = flex.double(exposure_times.size(), 1)
scan.set_exposure_times(exposure_times)
elif not exposure_times.all_gt(0):
exposure_times = flex.double(exposure_times.size(), exposure_times[0])
scan.set_exposure_times(exposure_times)
if epochs.size() > 1 and not epochs.all_gt(0):
if epochs[0] == 0:
epochs[0] = 1
for i in range(1, epochs.size()):
epochs[i] = epochs[i - 1] + exposure_times[i - 1]
scan.set_epochs(epochs)
_id_image = scan.get_image_range()[0]
imageset_cache[full_template_path][_id_image] = imageset
if id_image is not None:
return [imageset_cache[full_template_path][id_image]]
elif image_range is not None:
for imageset in imageset_cache[full_template_path].values():
scan = imageset.get_scan()
scan_image_range = scan.get_image_range()
if (
image_range[0] >= scan_image_range[0]
and image_range[1] <= scan_image_range[1]
):
b0 = scan.get_batch_offset()
i0 = image_range[0] - scan_image_range[0] + b0
i1 = image_range[1] - scan_image_range[0] + b0
imagesets = [imageset[i0 : i1 + 1]]
assert len(imagesets[0]) == image_range[1] - image_range[0] + 1, len(
imagesets[0]
)
return imagesets
return list(imageset_cache[full_template_path].values())
def update_with_reference_geometry(imagesets, reference_geometry_list):
assert reference_geometry_list is not None
assert len(reference_geometry_list) >= 1
reference_components = load_reference_geometries(reference_geometry_list)
for imageset in imagesets:
reference_geometry = find_relevant_reference_geometry(
imageset, reference_components
)
imageset.set_beam(reference_geometry["beam"])
imageset.set_detector(reference_geometry["detector"])
def load_reference_geometries(geometry_file_list):
logger.debug("Collecting reference instrument models.")
ref_geoms = {
# Note that 'index' is the index of the experiment in the expt list file,
# as per dials.show, rather than the UID string of the experiment.
(expt.detector, expt.beam, f, index)
for f in geometry_file_list
for index, expt in enumerate(ExperimentList.from_file(f, check_format=False))
}
logger.debug("Removing duplicate reference geometries.")
duplicates = set()
for a, b in filter(duplicates.isdisjoint, itertools.combinations(ref_geoms, 2)):
if compare_geometries(a[0], b[0]):
logger.debug(f"Experiment {b[3]} of {b[2]} is a duplicate.")
duplicates.add(b)
ref_geoms -= duplicates
n = len(ref_geoms)
logger.debug(f"Found {n} unique reference geometr{'ies' if n != 1 else 'y'}.")
for geometry in ref_geoms:
logger.debug(f"Experiment {geometry[3]} of {geometry[2]} is unique.")
return [{"detector": geometry[0], "beam": geometry[1]} for geometry in ref_geoms]
def compare_geometries(detectorA, detectorB):
return detectorA.is_similar_to(
detectorB,
fast_axis_tolerance=0.1,
slow_axis_tolerance=0.1,
origin_tolerance=10,
ignore_trusted_range=True,
)
def find_relevant_reference_geometry(imageset, geometry_list):
for geometry in geometry_list:
if compare_geometries(geometry["detector"], imageset.get_detector()):
break
else:
raise Exception("No appropriate reference geometry found")
return geometry
|
the-stack_0_5597 | '''
Homebrew for Mac OS X
'''
# Import salt libs
import salt
from salt.modules.yumpkg import _compare_versions
def __virtual__():
'''
Confine this module to Mac OS with Homebrew.
'''
if salt.utils.which('brew') and __grains__['os'] == 'MacOS':
return 'pkg'
def list_pkgs(*args):
'''
List the packages currently installed in a dict::
{'<package_name>': '<version>'}
CLI Example::
salt '*' pkg.list_pkgs
'''
cmd = 'brew list --versions {0}'.format(' '.join(args))
result_dict = {}
for line in __salt__['cmd.run'](cmd).splitlines():
(pkg, version) = line.split(' ')[0:2]
result_dict[pkg] = version
return result_dict
def version(name):
'''
Returns a version if the package is installed, else returns an empty string
CLI Example::
salt '*' pkg.version <package name>
'''
pkgs = list_pkgs(name)
if name in pkgs:
return pkgs[name]
else:
return ''
def remove(pkgs):
'''
Removes packages with ``brew uninstall``
Return a list containing the removed packages:
CLI Example::
salt '*' pkg.remove <package,package,package>
'''
formulas = ' '.join(pkgs.split(','))
cmd = 'brew uninstall {0}'.format(formulas)
return __salt__['cmd.run'](cmd)
def install(pkgs, refresh=False, repo='', skip_verify=False, **kwargs):
'''
Install the passed package(s) with ``brew install``
pkgs
The names of the packages to be installed
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>']}
CLI Example::
salt '*' pkg.install 'package package package'
'''
if ',' in pkgs:
pkgs = pkgs.split(',')
else:
pkgs = pkgs.split(' ')
old = list_pkgs(*pkgs)
formulas = ' '.join(pkgs)
homebrew_prefix = __salt__['cmd.run']('brew --prefix')
user = __salt__['file.get_user'](homebrew_prefix)
cmd = 'brew install {0}'.format(formulas)
if user != __opts__['user']:
__salt__['cmd.run'](cmd, runas=user)
else:
__salt__['cmd.run'](cmd)
new = list_pkgs(*pkgs)
return _compare_versions(old, new)
def list_upgrades():
'''
Check whether or not an upgrade is available for all packages
CLI Example::
salt '*' pkg.list_upgrades
'''
cmd = 'brew outdated'
return __salt__['cmd.run'](cmd).splitlines()
def upgrade_available(pkg):
'''
Check whether or not an upgrade is available for a given package
CLI Example::
salt '*' pkg.upgrade_available <package name>
'''
return pkg in list_upgrades()
|
the-stack_0_5599 | from datetime import datetime
SKILLS = ['overall', 'attack', 'defence', 'strength', 'hitpoints',
'ranged', 'prayer', 'magic', 'cooking', 'woodcutting',
'fletching', 'fishing', 'firemaking', 'crafting', 'smithing',
          'mining', 'herblore', 'agility', 'thieving', 'slayer',
'farming', 'hunter']
class Skill:
def __init__(self, rank: int, level: int, experience: int):
self.rank = int(rank)
self.level = int(level)
self.experience = int(experience)
def get_encodable(self):
return {
'rank': self.rank,
'level': self.level,
'experience': self.experience,
}
class HighScore:
def __init__(self, account_id: str, id: str = None,
created_at: datetime = None,
**kwargs: Skill):
self.account_id = account_id
self.id = id
self.created_at = created_at
self._skills = dict()
for name, skill in kwargs.items():
if name not in SKILLS:
raise AttributeError('{key} is not a valid skill'.format(
key=name
))
setattr(self, name, skill)
@property
def skills(self):
return {skill: getattr(self, skill) for skill in SKILLS}
def __setattr__(self, key: str, value):
if key in SKILLS:
if not isinstance(value, Skill):
raise AttributeError('A skill must be an instance of {}'
.format(Skill.__name__))
self._skills[key] = value
super().__setattr__(key, value)
def __getattr__(self, item: str):
if item in SKILLS:
if item not in self._skills:
return None
return self._skills[item]
return super().__getattribute__(item)
def get_encodable(self):
skills = {name: skill.get_encodable() for name, skill in
self.skills.items() if skill is not None}
return {
'account_id': self.account_id,
'id': self.id,
'created_at': self.created_at.isoformat() \
if self.created_at else None,
'skills': skills,
}
def calc_xp_sum(self):
total = 0
for name, skill in self.skills.items():
if skill is None:
continue
if skill.experience < 1:
continue
total += skill.experience
return total
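# Usage sketch (not part of the original module): builds a HighScore with one
# hypothetical skill and sums the experience; the account ID and numbers are
# illustrative assumptions.
def _example_highscore():
    score = HighScore(account_id="abc123",
                      attack=Skill(rank=1000, level=60, experience=273742))
    return score.calc_xp_sum()  # 273742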
|
the-stack_0_5603 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class DialignTx(MakefilePackage):
"""DIALIGN-TX: greedy and progressive approaches for segment-based
multiple sequence alignment"""
homepage = "http://dialign-tx.gobics.de/"
url = "http://dialign-tx.gobics.de/DIALIGN-TX_1.0.2.tar.gz"
version('1.0.2', '8ccfb1d91136157324d1e513f184ca29')
build_directory = 'source'
conflicts('%gcc@6:')
def edit(self, spec, prefix):
with working_dir(self.build_directory):
makefile = FileFilter('Makefile')
makefile.filter(' -march=i686 ', ' ')
makefile.filter('CC=gcc', 'CC=%s' % spack_cc)
def install(self, spec, prefix):
mkdirp(prefix.bin)
with working_dir(self.build_directory):
install('dialign-tx', prefix.bin)
# t-coffee recognizes as dialign-t
install('dialign-tx', join_path(prefix.bin, 'dialign-t'))
|
the-stack_0_5604 | #!/usr/bin/env python
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function
import os
import glob
import json
import numpy as np
from PIL import Image
import math
from tqdm import tqdm
from collections import OrderedDict
import ingest_utils as util
from neon.util.persist import get_data_cache_or_nothing
def get_ssd_config(img_reshape, inference=False):
ssd_config = OrderedDict()
ssd_config['batch_size'] = 32
if inference:
ssd_config['batch_size'] = 1
ssd_config['block_size'] = 50
ssd_config['cache_directory'] = get_data_cache_or_nothing(subdir='kitti_cache')
ssd_config["etl"] = [{
"type": "localization_ssd",
"height": img_reshape[0],
"width": img_reshape[1],
"max_gt_boxes": 500,
"class_names": ['__background__', 'Car', 'Van', 'Truck', 'Pedestrian',
'Person_sitting', 'Cyclist', 'Tram', 'Misc', 'DontCare']
}, {
"type": "image",
"height": img_reshape[0],
"width": img_reshape[1],
"channels": 3
}]
if not inference:
ssd_config["augmentation"] = [{
"type": "image",
"batch_samplers":
[
{
"max_sample": 1,
"max_trials": 1
},
{
"max_sample": 1,
"max_trials": 50,
"sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
"sample_constraint": {"min_jaccard_overlap": 0.1}
},
{
"max_sample": 1,
"max_trials": 50,
"sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
"sample_constraint": {"min_jaccard_overlap": 0.3}
},
{
"max_sample": 1,
"max_trials": 50,
"sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
"sample_constraint": {"min_jaccard_overlap": 0.5}
},
{
"max_sample": 1,
"max_trials": 50,
"sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
"sample_constraint": {"min_jaccard_overlap": 0.7}
},
{
"max_sample": 1,
"max_trials": 50,
"sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
"sample_constraint": {"min_jaccard_overlap": 0.9}
},
{
"max_sample": 1,
"max_trials": 50,
"sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]},
"sample_constraint": {"max_jaccard_overlap": 1.0, "min_jaccard_overlap": 0.1}
}
]
}]
ssd_config['ssd_config'] = OrderedDict(
[('conv4_3', {'min_sizes': 30.0, 'max_sizes': 60.0,
'aspect_ratios': 2.0, 'step': 8, 'normalize': True}),
('fc7', {'min_sizes': 60.0, 'max_sizes': 111.0,
'aspect_ratios': (2.0, 3.0), 'step': 16}),
('conv6_2', {'min_sizes': 111.0, 'max_sizes': 162.0,
'aspect_ratios': (2.0, 3.0), 'step': 32}),
('conv7_2', {'min_sizes': 162.0, 'max_sizes': 213.0,
'aspect_ratios': (2.0, 3.0), 'step': 64}),
('conv8_2', {'min_sizes': 213.0, 'max_sizes': 264.0,
'aspect_ratios': 2.0, 'step': 100}),
('conv9_2', {'min_sizes': 264.0, 'max_sizes': 315.0,
'aspect_ratios': 2.0, 'step': {'step_h': 300, 'step_w': 100}})])
return ssd_config
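# Illustrative call (not part of the original ingest script): builds the
# inference-time aeon/SSD configuration for the 300x994 KITTI reshape, which is
# the script's own default further below rather than a new assumption.
def _example_ssd_config():
    return get_ssd_config((300, 994), inference=True)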
def convert_annot_to_json(path, im_path, out_path, difficult, img_reshape=None):
"""
Converts the KITTI annotations to json file.
Uses the below reference for the KITTI dataset:
OO representation of label format used in Kitti dataset.
    Description of fields from the KITTI dataset dev kit:
The label files contain the following information, which can be read and
written using the matlab tools (readLabels.m, writeLabels.m) provided within
this devkit. All values (numerical or strings) are separated via spaces,
each row corresponds to one object. The 15 columns represent:
#Values Name Description
----------------------------------------------------------------------------
1 type Describes the type of object: 'Car', 'Van', 'Truck',
'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',
'Misc' or 'DontCare'
1 truncated Float from 0 (non-truncated) to 1 (truncated), where
truncated refers to the object leaving image boundaries
1 occluded Integer (0,1,2,3) indicating occlusion state:
0 = fully visible, 1 = partly occluded
2 = largely occluded, 3 = unknown
1 alpha Observation angle of object, ranging [-pi..pi]
4 bbox 2D bounding box of object in the image (0-based index):
contains left, top, right, bottom pixel coordinates
3 dimensions 3D object dimensions: height, width, length (in meters)
3 location 3D object location x,y,z in camera coordinates (in meters)
1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]
1 score Only for results: Float, indicating confidence in
detection, needed for p/r curves, higher is better.
Arguments:
path (string): path to KITTI annotation file
im_path (string): path to image
out_path (string): path to save the json file
difficult (bool): include difficult objects
img_reshape (tuple of int): if a tuple of H,W values is given, image will be reshaped
"""
with open(path) as f:
labels = f.readlines()
# start empty dictionary
annot = {'object': []}
# load image
im = np.array(Image.open(im_path))
scale, (h, w) = util.get_image_scale(im.shape[:2], img_reshape)
c = im.shape[2]
annot['size'] = {'depth': c, 'height': h, 'width': w}
for label in labels:
vals = label.split()
typeid = vals[0]
truncated = float(vals[1])
occluded = int(vals[2])
bbox = [float(x) for x in vals[4:8]]
bbox = util.scale_boxes(bbox, scale)
bbox_int = tuple([int(math.floor(x)) for x in bbox])
if typeid == 'DontCare':
assert truncated == -1
assert occluded == -1
else:
assert occluded in (0, 1, 2, 3)
diff = truncated > 0.5 or occluded == 2
# add object to annotation
obj = {'bndbox': {'xmin': bbox_int[0], 'ymin': bbox_int[1],
'xmax': bbox_int[2], 'ymax': bbox_int[3]},
'difficult': diff,
'name': typeid,
'truncated': truncated > 0.5,
'occluded': occluded
}
if not diff or difficult:
annot['object'].append(obj)
with open(out_path, 'w') as f:
json.dump(annot, f, indent=4)
def ingest_kitti(input_dir, out_dir, img_reshape=(300, 994),
train_percent=90, overwrite=False, skip_unzip=False):
"""
    Ingests the KITTI dataset. Performs the following ops:
    0. Unzips the files into output directory.
    1. Reshapes image to lower resolution (default reshape of 300x994 maintains KITTI image AR)
    2. Converts annotations to json format
    3. Splits the training data into train and validation sets
    4. Writes manifest file
    5. Writes configuration file
Arguments:
input_dir (string): path to folder with KITTI zip files.
out_dir (string): path to unzip KITTI data
img_reshape (tuple of int): size to reshape image (default = (300, 994))
train_percent (float): percent of data to use for training.
overwrite (bool): overwrite existing files
"""
assert img_reshape is not None, "Target image reshape required."
hw = '{}x{}'.format(img_reshape[0], img_reshape[1])
zip_files = ['data_object_image_2.zip', 'data_object_label_2.zip']
root_dir = os.path.join(out_dir, 'kitti')
train_manifest = os.path.join(root_dir, 'train_{}.csv'.format(hw))
val_manifest = os.path.join(root_dir, 'val_{}.csv'.format(hw))
if os.path.exists(train_manifest) and os.path.exists(val_manifest) and not overwrite:
print("Manifest files already found, skipping ingest.")
print("Use --overwrite flag to force re-ingest.")
return
util.make_dir(root_dir)
tags = {'trainval': [], 'test': []}
if skip_unzip is False:
util.unzip_files(zip_files, input_dir, root_dir)
img_folder = os.path.join(root_dir, 'training', 'image_2')
annot_folder = os.path.join(root_dir, 'training', 'label_2')
target_img_folder = os.path.join(root_dir, 'training', 'image_2-converted')
target_annot_folder = os.path.join(root_dir, 'training', 'label_2-json')
tags = glob.glob(os.path.join(img_folder, '*.png'))
tags = [os.path.basename(os.path.splitext(tag)[0]) for tag in tags]
assert len(tags) > 0, "No images found in {}".format(img_folder)
util.make_dir(target_img_folder)
util.make_dir(target_annot_folder)
manifest = []
for tag in tqdm(tags):
image = os.path.join(img_folder, tag + '.png')
annot = os.path.join(annot_folder, tag + '.txt')
assert os.path.exists(image), "{} not found.".format(image)
assert os.path.exists(annot), "{} not found.".format(annot)
target_image = os.path.join(target_img_folder, tag + '.png')
target_annot = os.path.join(target_annot_folder, tag + '.json')
        convert_annot_to_json(annot, image, target_annot, difficult=True, img_reshape=img_reshape)
        util.resize_image(image, target_image, img_reshape=img_reshape)
manifest.append((target_image, target_annot))
# shuffle files and split into training and validation set.
np.random.seed(0)
np.random.shuffle(manifest)
train_count = (len(manifest) * train_percent) // 100
train = manifest[:train_count]
val = manifest[train_count:]
util.create_manifest(train_manifest, train, root_dir)
util.create_manifest(val_manifest, val, root_dir)
# write SSD CONFIG
ssd_config = get_ssd_config(img_reshape)
ssd_config_path = os.path.join(root_dir, 'kitti_ssd_{}.cfg'.format(hw))
util.write_ssd_config(ssd_config, ssd_config_path, True)
# write SSD VAL CONFIG
ssd_config_val = get_ssd_config(img_reshape, True)
ssd_config_path_val = os.path.join(root_dir, 'kitti_ssd_{}_val.cfg'.format(hw))
util.write_ssd_config(ssd_config_val, ssd_config_path_val, True)
config_path = os.path.join(root_dir, 'kitti_{}.cfg'.format(hw))
config = {'manifest': '[train:{}, val:{}]'.format(train_manifest, val_manifest),
'manifest_root': root_dir,
'epochs': 100,
'height': img_reshape[0],
              'width': img_reshape[1],
'ssd_config': '[train:{}, val:{}]'.format(ssd_config_path, ssd_config_path_val)
}
util.write_config(config, config_path)
if __name__ == '__main__':
from configargparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--input_dir', required=True, help='path to dir with KITTI zip files.')
parser.add_argument('--output_dir', required=True, help='path to unzip data.')
parser.add_argument('--overwrite', action='store_true', help='overwrite files')
parser.add_argument('--training_pct', default=90, help='fraction of data used for training.')
parser.add_argument('--skip_unzip', action='store_true', help='skip unzip')
args = parser.parse_args()
ingest_kitti(args.input_dir, args.output_dir, train_percent=args.training_pct,
overwrite=args.overwrite, skip_unzip=args.skip_unzip)
|
the-stack_0_5606 | #!/usr/bin/env python
import argparse
import re
import sys
from os.path import isfile, join
from subprocess import PIPE, TimeoutExpired, run
class BaseTask:
TIME_LIMIT_SECONDS = 1
SPACES_RE = re.compile(r"\s+", re.M)
def __init__(self, continue_on_error=True, only_matching=None):
self.continue_on_error = continue_on_error
self.only_matching = only_matching
self.tested_cases = set()
self.passed_cases = set()
self.tested_units = set()
self.passed_units = set()
self.show_all_errors = False
def strip_spaces(self, text):
return self.SPACES_RE.sub(" ", text.strip())
def read_file_utf8(self, file_name):
assert isfile(file_name), f"Não existe o arquivo {file_name}"
try:
with open(file_name, encoding="utf-8", errors="strict") as f:
return f.read()
except ValueError:
assert False, f"Enconding inválido em {file_name}. Por favor, use UTF-8."
except Exception as e: # noqa
assert False, f"Falha ao ler arquivo {file_name}: {e}"
def compare_stripped(self, left, right):
return self.strip_spaces(left) == self.strip_spaces(right)
def compare_files(self, out, res):
left = self.read_file_utf8(out)
right = self.read_file_utf8(res)
return self.compare_stripped(left, right)
def exists(self, file_name):
assert isfile(file_name), f"você deve criar um arquivo {file_name}"
def run_binary_inner(self, cmd, stdin, stdout, input):
if input is None:
p = run(
cmd,
stdin=stdin,
stdout=stdout,
encoding="utf8",
errors="ignore",
timeout=self.TIME_LIMIT_SECONDS,
)
else:
p = run(
cmd,
input=input,
stdout=stdout,
encoding="utf8",
errors="ignore",
timeout=self.TIME_LIMIT_SECONDS,
)
assert p.returncode == 0, f"código de saída é {p.returncode}"
return p
def run_binary(
self,
cmd,
stdin,
stdout,
input=None,
in_filename=None,
out_filename=None,
):
cmd_str = " ".join([c if " " not in c and c != "" else f'"{c}"' for c in cmd])
if in_filename:
cmd_str += f" < {in_filename}"
if out_filename:
cmd_str += f" > {out_filename}"
if input:
cmd_str += f' com entrada "{input}"'
try:
return self.run_binary_inner(cmd, stdin, stdout, input)
except AssertionError as e:
assert False, f"falha ao executar {cmd_str} : {e}"
except TimeoutExpired:
assert (
False
), f"falha ao executar {cmd_str} : tempo limite de {self.TIME_LIMIT_SECONDS}s excedido"
def test_one_case(self, script, in_filename_name):
out_filename_name = in_filename_name.replace(".in", ".out")
res_file_name = in_filename_name.replace(".in", ".res")
self.exists(script)
with open(in_filename_name) as i, open(out_filename_name, "w") as o:
self.run_binary(
["python3", script],
i,
o,
in_filename=in_filename_name,
out_filename=out_filename_name,
)
assert self.compare_files(
out_filename_name, res_file_name
), f'execute: diff "{out_filename_name}" "{res_file_name}"'
def test_cases(self, script, in_filename_names, folder="testes"):
assert type(in_filename_names) != str, "erro no caso de teste, deveria ser lista de strings"
errors = []
for in_filename_name in in_filename_names:
in_filename_name = join(folder, in_filename_name)
try:
self.tested_cases.add(in_filename_name)
self.test_one_case(script, in_filename_name)
self.passed_cases.add(in_filename_name)
print(f" -> {in_filename_name} passou")
except AssertionError as e:
print(f" -> {in_filename_name} falhou")
errors.append(f"{e}")
if not self.continue_on_error:
break
if errors:
assert False, "\n -> ".join(errors)
def input_output(self, script, input_content, expected_output):
self.exists(script)
p = self.run_binary(["python3", script], None, PIPE, input=input_content)
assert self.compare_stripped(
p.stdout, expected_output
), f'para entrada "{input_content}", a saída é "{p.stdout.strip()}", mas era esperado "{expected_output}"'
def should_test(self, name):
if not name.startswith("teste_"):
return False
if not self.only_matching:
return True
for pattern in self.only_matching:
if pattern in name:
return True
return False
def test_units(self):
for name in sorted(dir(self)):
if not self.should_test(name):
continue
print()
print(f"Executando {name}...")
sys.stderr.flush()
sys.stdout.flush()
try:
test = getattr(self, name)
self.tested_units.add(name)
test()
self.passed_units.add(name)
print(f"{name}: OK")
except AssertionError as e:
print(f"{name}: FALHOU")
if "privado" not in name or self.show_all_errors:
print(f" -> {e}\n")
if not self.continue_on_error:
break
def case_range(self, input_template, start, end):
input_files = []
for i in range(start, end + 1):
input_files.append(input_template.format(i))
return input_files
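# Illustrative sketch (not part of the original harness): compare_stripped()
# collapses any run of whitespace to a single space, so formatting-only
# differences do not fail a test; the strings below are hypothetical.
def _example_compare_stripped():
    return BaseTask().compare_stripped("Bom dia,\n  Antônio.", "Bom dia, Antônio.")  # True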
class Task(BaseTask):
def teste_1_bomdia(self):
script = "bomdia.py"
self.input_output(script, "Antônio", "Bom dia, Antônio.")
self.test_cases(script, ["bomdia.in"])
def teste_2_boanoite(self):
script = "boanoite.py"
self.test_cases(script, self.case_range("boanoite{}.in", 1, 2))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Testa tarefa.")
parser.add_argument("only", nargs="*", help="apenas unidades contendo essa string")
parser.add_argument("-c", action="store_true", help="continua mesmo que anteriores falhem")
args = parser.parse_args()
Task(args.c, args.only).test_units()
|
the-stack_0_5609 | import argparse
import bs4
import json
import io
import os
import requests
import zipfile
class Scraper():
"""A scraper with which to scrape Scratch projects.
Typical usage example:
from ccl_scratch_tools import Scraper
scraper = Scraper()
project = scraper.download_project(555555555)
"""
def __init__(self, studio_url = None, project_url = None, project_meta_url = None, comments_url = None, user_url = None, studio_meta_url = None):
"""Initializes scraper with studio and project URLs."""
if studio_url is None:
self.STUDIO_URL = "https://api.scratch.mit.edu/studios/{0}/projects?limit=40&offset={1}"
else:
self.STUDIO_URL = studio_url
if studio_meta_url is None:
self.STUDIO_META_URL = "https://api.scratch.mit.edu/studios/{0}"
else:
self.STUDIO_META_URL = studio_meta_url
if project_url is None:
self.PROJECT_URL = "https://projects.scratch.mit.edu/{0}"
else:
self.PROJECT_URL = project_url
if project_meta_url is None:
self.PROJECT_META_URL = "https://api.scratch.mit.edu/projects/{0}"
else:
self.PROJECT_META_URL = project_meta_url
if comments_url is None:
self.COMMENTS_URL = "https://scratch.mit.edu/site-api/comments/project/{0}/?page={1}"
else:
self.COMMENTS_URL = comments_url
if user_url is None:
self.USER_URL = "https://api.scratch.mit.edu/users/{0}"
else:
self.USER_URL = user_url
def download_project(self, id):
"""Downloads an individual project JSON and returns it as a Python object.
Args:
id: An integer Scratch project ID.
Returns:
A dictionary object representing the Scratch project JSON.
Raises:
RuntimeError: An error occurred accessing the Scratch API, or
the project couldn't be downloaded in/converted to JSON format.
"""
url = self.PROJECT_URL.format(id)
r = requests.get(url)
if r.status_code != 200:
raise RuntimeError("GET {0} failed with status code {1}".format(url, r.status_code))
project = ""
try:
project = r.json()
        except ValueError:
            # r.json() raises a JSONDecodeError (a ValueError subclass) when a
            # binary archive downloads instead of JSON -- in that case try to
            # read project.json out of the zip archive.
            try:
                f = io.BytesIO(r.content)
                archive = zipfile.ZipFile(f)
                if "project.json" in archive.namelist():
                    proj = archive.read("project.json")
                    project = json.loads(proj.decode("utf-8"))
            except Exception:
                raise RuntimeError("Cannot handle format of project {0}".format(id))
return project
def download_projects(self, ids, projects_to_studio=dict(), output_directory=None, file_name=None):
"""Given project IDs, download the JSON files.
Args:
ids: array-like collection of Scratch project IDs.
projects_to_studio: dictionary mapping project IDs to studio IDs.
If set, creates subdirectories for each studio.
output_directory (str): directory for output; if not set,
defaults to current working directory.
file_name (str): if set, combines projects into one JSON file with file_name;
else creates a separate JSON file for each project.
Returns:
None.
"""
if output_directory is None:
output_directory = os.getcwd()
self.make_dir(output_directory)
projects = list()
for id in ids:
project = self.download_project(id)
if len(project) < 1:
break
if file_name is None:
od = output_directory
if len(projects_to_studio) > 0:
od = "{0}/{1}".format(od, projects_to_studio[id])
self.make_dir(od)
with open("{0}/{1}.json".format(od, id), "w") as f:
json.dump(project, f)
else:
projects.append(project)
# If projects has at least one item, we should write to a single file
if len(projects) > 0 and file_name is not None:
with open("{0}/{1}".format(output_directory, file_name), "w") as f:
json.dump(projects, f)
def get_id(self, url):
"""Returns the integer ID from a string that may be a URL or an ID.
Args:
url: The string representing the URL, or ID, to be extracted.
Returns:
An integer ID of a Scratch object, whether a studio or project.
In case of error, returns None.
"""
url = url.rstrip()
a = url.rstrip("/")
try:
return int(a.split("/")[-1])
except:
return None
def get_ids_from_file(self, filename):
"""Returns a list of IDs from a newline-separated file. Project/studio link agnostic. Works with links and IDs.
Args:
filename: String file name of a text file with line-separated URLs or IDs.
Returns:
A list of integer IDs. Empty if error reading file.
"""
ids = list()
try:
ids = list()
with open(filename) as f:
for l in f.readlines():
ids.append(self.get_id(l))
except:
pass
return ids
def get_project_comments(self, id):
"""Returns the comments on a given Scratch project.
Args:
id (int): a Scratch project ID.
Returns:
A list of dictionaries, each with keys for author, comment, and timestamp.
Raises:
RuntimeError: An error occurred accessing the Scratch API, or the project doesn't exist.
"""
# This is all a remastered version of GSE-CCL/scratch-comments
comments = list()
page = 1
while True:
# Call API
url = self.COMMENTS_URL.format(id, page)
r = requests.get(url)
if r.status_code == 404 and page > 1:
break
elif r.status_code != 200:
raise RuntimeError("GET {0} failed with status code {1}".format(url, r.status_code))
# Use Beautiful Soup to scrape the webpage for comments
soup = bs4.BeautifulSoup(r.content, "html.parser")
all_comments = soup.select(".comment")
if len(all_comments) < 1:
break
# Go through each comment and clean
for comment in all_comments:
content = comment.select_one(".content").get_text().strip()
if content != "[deleted]":
cid = int(comment["data-comment-id"])
user = comment.select_one(".name").get_text().strip()
time = comment.select_one(".time")["title"]
comments.append({"id": cid, "username": user, "comment": content, "timestamp": time})
page += 1
return comments
def get_project_meta(self, id):
"""Returns the publicly-available metadata about a given Scratch project.
Args:
id (int): a Scratch project ID.
Returns:
A dictionary with the entire API response from project meta API endpoint.
None if the studio doesn't exist.
Raises:
RuntimeError: An error occurred accessing the Scratch API.
"""
url = self.PROJECT_META_URL.format(id)
r = requests.get(url)
if r.status_code != 200 and r.status_code != 404:
raise RuntimeError("GET {0} failed with status code {1}".format(url, r.status_code))
project = r.json()
if "code" in project and project["code"] == "NotFound":
return None
return project
def get_projects_in_studio(self, id):
"""Returns the set of project IDs contained in a given Scratch studio.
Args:
id: An integer Scratch studio ID.
Returns:
A set of project IDs.
Raises:
RuntimeError: An error occurred accessing the Scratch API.
"""
offset = 0
project_ids = set()
while True:
url = self.STUDIO_URL.format(id, offset)
r = requests.get(url)
if r.status_code != 200:
raise RuntimeError("GET {0} failed with status code {1}".format(url, r.status_code))
# No more projects
projects = r.json()
if len(projects) < 1:
break
else:
for project in projects:
project_ids.add(project["id"])
offset += 40
return project_ids
def get_studio_meta(self, id):
"""Returns the metadata for a given Scratch studio.
Args:
id: An integer Scratch studio ID.
Returns:
A dictionary with the studio's metadata. None if the studio doesn't exist.
Raises:
RuntimeError: An error occurred accessing the Scratch API.
"""
url = self.STUDIO_META_URL.format(id)
r = requests.get(url)
if r.status_code != 200 and r.status_code != 404:
raise RuntimeError("GET {0} failed with status code {1}".format(url, r.status_code))
studio_meta = r.json()
if "code" in studio_meta and studio_meta["code"] == "NotFound":
return None
return studio_meta
def get_user_info(self, username):
"""Gets a Scratch user's publicly-available information.
Args:
username (str): the username to look up.
Returns:
A dictionary with the results of the API call.
Raises:
RuntimeError: An error occurred accessing the Scratch API, or the user doesn't exist.
"""
url = self.USER_URL.format(username)
r = requests.get(url)
if r.status_code != 200:
raise RuntimeError("GET {0} failed with status code {1}".format(url, r.status_code))
return r.json()
def make_dir(self, path):
"""Creates a directory given path.
Args:
path (str): A file path on the current system.
Returns:
True, if directory was successfully created or already existed.
Raises:
RuntimeError: Failed to create the directory.
"""
try:
os.mkdir(path)
        except FileExistsError:
            return True
        except OSError:
            raise RuntimeError("Creation of directory '{0}' failed".format(path))
else:
return True
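# Usage sketch (not part of the original class): get_id() accepts either a full
# project/studio URL or a bare ID string; the URL below is hypothetical.
def _example_get_id():
    return Scraper().get_id("https://scratch.mit.edu/projects/555555555/")  # 555555555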
|
the-stack_0_5610 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project U-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import csv
import numpy as np
from utils import util
from utils.clock_utils import MAX_GLOBAL_CLOCKS
from prjuray.db import Database
def ps8_bufg_pin_map_by_tile():
tiles = {}
with open('../ps8_bufg_pin_map.csv') as f:
for row in csv.DictReader(f):
clock_tiles = row['clock_tiles'].split(' ')
assert len(clock_tiles) == 1, (row['pin'], clock_tiles)
tile = clock_tiles[0]
if tile not in tiles:
tiles[tile] = []
tiles[tile].append(row['pin'].split('/')[1])
return tiles
def get_ps8_pin_map():
with open('../ps8_pin_map.csv') as f:
for row in csv.DictReader(f):
yield row['pin']
def print_top(seed):
np.random.seed(seed)
options_by_tile = {}
with open('../permutations.csv') as f:
for row in csv.DictReader(f):
tile = row['tile']
opt = {}
for bufg_idx in range(MAX_GLOBAL_CLOCKS):
input_idx = row['bufg{}_input'.format(bufg_idx)]
if input_idx == "":
continue
opt[bufg_idx] = int(input_idx)
if tile not in options_by_tile:
options_by_tile[tile] = []
options_by_tile[tile].append(opt)
ps8_pins = sorted(get_ps8_pin_map())
bus_widths = {}
for pin in ps8_pins:
parts = pin.split('/')[-1].split('[')
if len(parts) == 1:
bus_widths[parts[0]] = 1
elif len(parts) == 2:
if parts[0] not in bus_widths:
bus_widths[parts[0]] = 0
width = int(parts[1][:-1]) + 1
if width > bus_widths[parts[0]]:
bus_widths[parts[0]] = width
else:
assert False, pin
tiles = ps8_bufg_pin_map_by_tile()
for tile in tiles:
tiles[tile].sort()
db = Database(util.get_db_root(), util.get_part())
grid = db.grid()
bufgs_by_tile = {}
for tile in tiles:
bufgs_by_tile[tile] = []
gridinfo = grid.gridinfo_at_tilename(tile)
for site, site_type in gridinfo.sites.items():
if site_type == 'BUFG_PS':
bufgs_by_tile[tile].append(site)
for tile in tiles:
bufgs_by_tile[tile].sort()
assert len(bufgs_by_tile[tile]) == MAX_GLOBAL_CLOCKS, tile
opins = []
sinks = []
print('''
module top();
''')
spec_num = util.specn() - 1
for tile in tiles:
opts = options_by_tile[tile]
if spec_num < len(opts):
# Use permutation from permutations.csv
opt = opts[spec_num]
else:
# Use a random permutation.
opt = {}
bufgs = set(range(MAX_GLOBAL_CLOCKS))
for input_idx in range(len(tiles[tile])):
bufg_idx = np.random.choice(sorted(bufgs))
bufgs.remove(bufg_idx)
opt[bufg_idx] = input_idx
for bufg_idx, input_idx in opt.items():
bufg = bufgs_by_tile[tile][bufg_idx]
input_pin = tiles[tile][input_idx]
idx = len(opins)
print("""
wire bufg_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *)
BUFG_PS bufg_{idx} (
.I(bufg_{idx})
);
""".format(loc=bufg, idx=idx))
sinks.append('bufg_{idx}'.format(idx=idx))
opins.append(input_pin)
busses = set()
for pin in opins:
busses.add(pin.split('[')[0])
for bus in busses:
print('wire [{width}-1:0] {bus};'.format(
bus=bus, width=bus_widths[bus]))
print('PS8 ps8 (')
connections = []
for bus in busses:
connections.append(' .{bus}({bus})'.format(bus=bus))
print(',\n'.join(connections))
print('\n);')
for pin, sink in zip(opins, sinks):
print('assign {sink} = {pin};'.format(pin=pin, sink=sink))
print('endmodule')
|
the-stack_0_5613 | from blazingsql import DataType
from Configuration import ExecutionMode
from Configuration import Settings as Settings
from DataBase import createSchema as cs
from pynvml import nvmlInit
from Runner import runTest
from Utils import Execution, gpuMemory, init_context, skip_test
queryType = "Full outer join"
def main(dask_client, drill, dir_data_file, bc, nRals):
start_mem = gpuMemory.capture_gpu_memory_usage()
def executionTest():
tables = ["nation"]
data_types = [
DataType.DASK_CUDF,
DataType.CUDF,
DataType.CSV,
DataType.ORC,
DataType.PARQUET,
] # TODO json
# Create Tables -----------------------------------------------------
for fileSchemaType in data_types:
if skip_test(dask_client, nRals, fileSchemaType, queryType):
continue
cs.create_tables(bc, dir_data_file, fileSchemaType, tables=tables)
# Run Query ------------------------------------------------------
# Parameter to indicate if its necessary to order
# the resulsets before compare them
worder = 1
use_percentage = False
acceptable_difference = 0
print("==============================")
print(queryType)
print("==============================")
queryId = "TEST_01"
query = """select n1.n_nationkey as n1key,
n2.n_nationkey as n2key, n1.n_nationkey + n2.n_nationkey
from nation as n1
full outer join nation as n2
on n1.n_nationkey = n2.n_nationkey + 6"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_02"
query = """select n1.n_nationkey as n1key,
n2.n_nationkey as n2key,
n1.n_nationkey + n2.n_nationkey
from nation as n1
full outer join nation as n2
on n1.n_nationkey = n2.n_nationkey + 6
where n1.n_nationkey < 10"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_03"
query = """select n1.n_nationkey as n1key,
n2.n_nationkey as n2key,
n1.n_nationkey + n2.n_nationkey
from nation as n1
full outer join nation as n2
on n1.n_nationkey = n2.n_nationkey + 6
where n1.n_nationkey < 10 and n1.n_nationkey > 5"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
queryId = "TEST_04"
query = """select n1.n_nationkey as n1key,
n2.n_nationkey as n2key,
n1.n_nationkey + n2.n_nationkey
from nation as n1
full outer join nation as n2
on n1.n_nationkey = n2.n_nationkey + 6
and n1.n_nationkey + 1 = n2.n_nationkey + 7
and n1.n_nationkey + 2 = n2.n_nationkey + 8"""
runTest.run_query(
bc,
drill,
query,
queryId,
queryType,
worder,
"",
acceptable_difference,
use_percentage,
fileSchemaType,
)
if Settings.execution_mode == ExecutionMode.GENERATOR:
print("==============================")
break
executionTest()
end_mem = gpuMemory.capture_gpu_memory_usage()
gpuMemory.log_memory_usage(queryType, start_mem, end_mem)
if __name__ == "__main__":
Execution.getArgs()
nvmlInit()
drill = "drill" # None
compareResults = True
if "compare_results" in Settings.data["RunSettings"]:
compareResults = Settings.data["RunSettings"]["compare_results"]
if ((Settings.execution_mode == ExecutionMode.FULL and
compareResults == "true") or
Settings.execution_mode == ExecutionMode.GENERATOR):
# Create Table Drill ------------------------------------------------
print("starting drill")
from pydrill.client import PyDrill
drill = PyDrill(host="localhost", port=8047)
cs.init_drill_schema(drill,
Settings.data["TestSettings"]["dataDirectory"])
# Create Context For BlazingSQL
bc, dask_client = init_context()
nRals = Settings.data["RunSettings"]["nRals"]
main(dask_client, drill,
Settings.data["TestSettings"]["dataDirectory"], bc, nRals)
if Settings.execution_mode != ExecutionMode.GENERATOR:
runTest.save_log()
gpuMemory.print_log_gpu_memory()
|
the-stack_0_5614 | """
This file offers the methods to automatically retrieve the graph G54.
The graph is automatically retrieved from the NetworkRepository repository.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def G54(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/networkrepository",
version: str = "latest",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the G54 graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "latest"
The version of the graph to retrieve.
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of G54 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="G54",
repository="networkrepository",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
the-stack_0_5616 | # Local imports
from gmprocess.metrics.imt.imt import IMT
class PGA(IMT):
"""Class defining steps and invalid imts, for peak ground acceleration."""
# making invalid IMCs a class variable because
# 1) it doesn't change with instances
# 2) information can now be retrieved without
# instantiating first
_invalid_imcs = []
def __init__(self, imt, imc, period=None):
"""
Args:
imt (string): Intensity measurement type.
imc (string): Intensity measurement component.
period (float): Period for fourier amplitude spectra and
spectral amplitudes. Default is None. Not used by PGA.
"""
super().__init__(imt, imc, period=None)
self._steps = {
'Transform2': 'null_transform',
'Transform3': 'null_transform',
'Combination1': 'null_combination',
'Reduction': 'max',
}
|
the-stack_0_5618 | import frappe
from frappe import _
def execute(filters=None):
	columns = get_columns(filters)
	# Initialise so the report never returns unbound names when no rows match.
	data = []
	chart = {}
	if filters.summary_based_on_month:
		month_summary, chart = get_summary_based_on_month(filters)
		if month_summary:
			data = month_summary
	else:
		fee_data = get_fees(filters)
		if fee_data:
			data = fee_data
	return columns, data, chart
def get_filter_condtions(filters):
conditions = ""
if filters.get("company"):
conditions += " AND fe.company = %(company)s "
if filters.get("academic_year"):
conditions += " AND fe.academic_year = %(academic_year)s "
conditions += " AND p_en.academic_year = %(academic_year)s "
conditions += " AND sg.academic_year = %(academic_year)s "
if filters.get("program"):
conditions += " AND fe.program = %(program)s "
conditions += " AND p_en.program = %(program)s "
conditions += " AND sg.program = %(program)s "
return conditions
def get_columns(filters):
if filters.summary_based_on_month:
columns = [
{"fieldname": "year", "fieldtype": "Data", "label": _("Year of Due Date")},
{"fieldname": "month", "fieldtype": "Data", "label": _("Month")},
{"fieldname": "program", "fieldtype": "Data", "label": _("Program")},
{"fieldname": "total_amount_to_be_paid", "fieldtype": "Currency", "label": _("Total Fee to be Collected")},
{"fieldname": "total_paid_amount", "fieldtype": "Currency", "label": _("Total Fee Collected")},
{"fieldname": "outstanding_amount", "fieldtype": "Currency", "label": _("Outstanding Amount")}
]
else:
columns = [
{"fieldname": "academic_year", "fieldtype": "Data", "label": _("Academic Year")},
{"fieldname": "student_name", "fieldtype": "Data", "label": _("Student Name")},
{"fieldname": "program", "fieldtype": "Data", "label": _("Program")},
{"fieldname": "class_name", "fieldtype": "Data", "label": _("Class Name")},
{"fieldname": "student_category", "fieldtype": "Data", "label": _("Fee Category")},
{"fieldname": "total_amount_to_be_paid", "fieldtype": "Currency", "label": _("Fee to be Collected")},
{"fieldname": "1st_installment_paid_amount", "fieldtype": "Currency", "label": _("1st Installment Paid")},
{"fieldname": "2nd_installment_paid_amount", "fieldtype": "Currency", "label": _("2nd Installment Paid")},
{"fieldname": "3rd_installment_paid_amount", "fieldtype": "Currency", "label":_("3rd Installment Paid")},
{"fieldname": "4th_installment_paid_amount", "fieldtype": "Currency", "label": _("4th Installment Paid")},
{"fieldname": "5th_installment_paid_amount", "fieldtype": "Currency", "label": _("5th Installment Paid")},
{"fieldname": "total_paid_amount", "fieldtype": "Currency", "label": _("Total Fee Collected")},
{"fieldname": "outstanding_amount", "fieldtype": "Currency", "label": _("Outstanding Amount")}
]
return columns
def get_fees(filters):
fees_record = []
student_records = []
if not filters.summary_based_on_program:
student_details, student_name_list = get_fee_details(filters)
first_installment_list = []
second_installment_list = []
third_installment_list = []
fourth_installment_list = []
fifth_installment_list = []
for st_name in student_name_list:
total_amount_to_be_paid = total_unpaid_amount = 0
for student_row in student_details:
if (
st_name["student"] == student_row["student"] and
st_name["program"] == student_row["program"] and
st_name["class_name"] == student_row["class_name"]
):
total_amount_to_be_paid += student_row["grand_total"]
total_unpaid_amount += student_row["outstanding_amount"]
if st_name not in first_installment_list:
st_name.update({
"paid_amount1": student_row["grand_total"] - student_row["outstanding_amount"]
})
first_installment_list.append(st_name)
elif st_name not in second_installment_list:
st_name["paid_amount2"] = student_row["grand_total"] - student_row["outstanding_amount"]
second_installment_list.append(st_name)
elif st_name not in third_installment_list:
st_name.update({
"paid_amount3": student_row["grand_total"] - student_row["outstanding_amount"]
})
third_installment_list.append(st_name)
elif st_name not in fourth_installment_list:
st_name.update({
"paid_amount4": student_row["grand_total"] - student_row["outstanding_amount"]
})
fourth_installment_list.append(st_name)
else:
st_name.update({
"paid_amount5": student_row["grand_total"] - student_row["outstanding_amount"]
})
fifth_installment_list.append(st_name)
st_name.update({
"total_amount_to_be_paid": total_amount_to_be_paid,
"outstanding_amount": total_unpaid_amount
})
student_records.append(st_name)
for record in student_records:
paid_amount = 0
for first in first_installment_list:
if (record["student"] == first["student"] and
record["program"] == first["program"] and
record["class_name"] == first["class_name"]
):
record.update({
"1st_installment_paid_amount": first["paid_amount1"],
})
paid_amount += first["paid_amount1"]
for second in second_installment_list:
if (record["student"] == second["student"] and
record["program"] == second["program"] and
record["class_name"] == second["class_name"]
):
record.update({
"2nd_installment_paid_amount": second["paid_amount2"]
})
paid_amount += second["paid_amount2"]
for third in third_installment_list:
if (record["student"] == third["student"] and
record["program"] == third["program"] and
record["class_name"] == third["class_name"]
):
record.update({
"3rd_installment_paid_amount": third["paid_amount3"]
})
paid_amount += third["paid_amount3"]
for fourth in fourth_installment_list:
if (record["student"] == fourth["student"] and
record["program"] == fourth["program"] and
record["class_name"] == fourth["class_name"]
):
record.update({
"4th_installment_paid_amount": fourth["paid_amount4"]
})
paid_amount += fourth["paid_amount4"]
for fifth in fifth_installment_list:
if (record["student"] == fifth["student"] and
record["program"] == fifth["program"] and
record["class_name"] == fifth["class_name"]
):
record.update({
"5th_installment_paid_amount": fifth["paid_amount5"]
})
paid_amount += fifth["paid_amount5"]
record["total_paid_amount"] = paid_amount
fees_record.append(record)
return fees_record
def get_fee_details(filters):
name_list = []
student_list = []
student_details = []
	conditions = get_filter_conditions(filters)
fee_details = frappe.db.sql("""
SELECT fe.due_date, fe.student, fe.student_name, fe.program,
fe.grand_total, fe.outstanding_amount, p_en.student_category,
sg.academic_year, sgs.parent
FROM `tabFees` fe
INNER JOIN `tabProgram Enrollment` p_en ON fe.student = p_en.student
LEFT JOIN `tabStudent Group Student` sgs ON sgs.student = fe.student AND sgs.active = 1
LEFT JOIN `tabStudent Group` sg ON sgs.parent = sg.name AND sg.disabled = 0
WHERE fe.docstatus = 1
AND p_en.docstatus = 1 {conditions}
ORDER BY fe.due_date asc, fe.student asc
""".format(conditions=conditions), filters, as_dict=1)
for student in fee_details:
txt = student.parent
program_class = ""
if (student.academic_year != 2020 and "FORM" in txt and "TODDLERS" not in txt):
year, stream = txt.split("-")
program_class += stream
elif (student.academic_year != 2020 and "FORM" not in txt and "TODDLERS" in txt):
year, stream = txt.split("-")
program_class += stream
elif (student.academic_year != 2020 and "FORM" not in txt and "TODDLERS" not in txt):
year, pro, stream = txt.split("-")
program_class += pro +' - '+ stream
else:
program_class += txt
student.update({
"class_name": program_class
})
student_details.append(student)
if student.student not in name_list:
name_list.append(student.student)
student_list.append(student)
return student_details, student_list
def get_summary_based_on_month(filters):
if filters.summary_based_on_month:
chart = {}
summary_data = []
conditions = ""
if filters.get("company"):
conditions += " AND fe.company = %(company)s "
if filters.get("academic_year"):
conditions += " AND fe.academic_year = %(academic_year)s "
conditions += " AND p_en.academic_year = %(academic_year)s "
fee_details = frappe.db.sql("""
SELECT YEAR(fe.due_date) as year, MONTHNAME(fe.due_date) AS month, fe.program, SUM(fe.grand_total) AS grand_total,
SUM(fe.outstanding_amount) AS outstanding_amount
FROM `tabFees` fe
INNER JOIN `tabProgram Enrollment` p_en ON fe.student = p_en.student AND fe.program = p_en.program
WHERE fe.docstatus = 1 AND p_en.docstatus = 1 {conditions}
GROUP BY MONTHNAME(fe.due_date), fe.program
ORDER BY YEAR(fe.due_date), MONTHNAME(fe.due_date), fe.program
""".format(conditions=conditions), filters, as_dict=1
)
for fee in fee_details:
summary_data.append({
"year": fee.year,
"month": fee.month,
"program": fee.program,
"total_paid_amount": fee.grand_total - fee.outstanding_amount,
"outstanding_amount": fee.outstanding_amount,
"total_amount_to_be_paid": fee.grand_total
})
chart = get_chart_data(summary_data)
return summary_data, chart
def get_chart_data(summary_data):
if not summary_data:
return
labels = []
fees_collected = []
outstanding_amount = []
fees_to_be_collected = []
for entry in summary_data:
labels.append(entry.get('program'))
fees_collected.append(entry.get('total_paid_amount'))
outstanding_amount.append(entry.get('outstanding_amount'))
fees_to_be_collected.append(entry.get('total_amount_to_be_paid'))
return {
'data': {
'labels': labels,
'datasets': [
{
'name': _('Fee to be Collected'),
'values': fees_to_be_collected
},
{
'name': _('Fees Collected'),
'values': fees_collected
},
{
'name': _('Outstanding Amount'),
'values': outstanding_amount
}
]
},
'type': 'bar'
}
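

# Hedged usage sketch (illustrative; not part of the report). Inside Frappe the
# framework calls execute() with a filters object that supports attribute
# access; frappe._dict and the example filter values below are assumptions based
# on the fields this report reads, and running it requires an initialised
# Frappe site with fee data.
if __name__ == "__main__":
	example_filters = frappe._dict({
		"company": "Example School",
		"academic_year": "2023-2024",
		"summary_based_on_month": 1,
	})
	columns, data, chart = execute(example_filters)
	print(len(columns), len(data))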
|
the-stack_0_5619 | import torch
import torch.nn as nn
class MNIST_Network(nn.Module):
def __init__(self):
super(MNIST_Network, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size = 5, padding=2)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2, stride=2)
self.conv2 = nn.Conv2d(32, 64, kernel_size = 5, padding=2)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2, stride=2)
self.fc3 = nn.Linear(7*7*64, 1024)
self.relu3 = nn.ReLU()
self.fc4 = nn.Linear(1024, 10)
self.softmax4 = nn.Softmax(dim=1)
    # Forward propagation
def forward(self, input1):
x = self.conv1(input1)
x = self.relu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.pool2(x)
x = x.view(x.size()[0], -1)
x = self.fc3(x)
x = self.relu3(x)
x = self.fc4(x)
x = self.softmax4(x)
        return x
|
the-stack_0_5620 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import math
import sys
import random
import numpy as np
import numbers
import types
import collections
import warnings
import traceback
from paddle.utils import try_import
from . import functional as F
if sys.version_info < (3, 3):
Sequence = collections.Sequence
Iterable = collections.Iterable
else:
Sequence = collections.abc.Sequence
Iterable = collections.abc.Iterable
__all__ = []
def _get_image_size(img):
if F._is_pil_image(img):
return img.size
elif F._is_numpy_image(img):
return img.shape[:2][::-1]
elif F._is_tensor_image(img):
return img.shape[1:][::-1] # chw
else:
raise TypeError("Unexpected type {}".format(type(img)))
def _check_input(value,
name,
center=1,
bound=(0, float('inf')),
clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError(
"If {} is a single number, it must be non negative.".format(
name))
value = [center - value, center + value]
if clip_first_on_zero:
value[0] = max(value[0], 0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name,
bound))
else:
raise TypeError(
"{} should be a single number or a list/tuple with lenght 2.".
format(name))
if value[0] == value[1] == center:
value = None
return value
class Compose(object):
"""
    Composes several transforms together. Use it to apply a list of transforms
    to a dataset sample in sequence.
Args:
transforms (list|tuple): List/Tuple of transforms to compose.
Returns:
        A callable Compose object; its __call__ applies each of the given
        :attr:`transforms` sequentially.
Examples:
.. code-block:: python
from paddle.vision.datasets import Flowers
from paddle.vision.transforms import Compose, ColorJitter, Resize
transform = Compose([ColorJitter(), Resize(size=608)])
flowers = Flowers(mode='test', transform=transform)
for i in range(10):
sample = flowers[i]
print(sample[0].size, sample[1])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, data):
for f in self.transforms:
try:
data = f(data)
except Exception as e:
stack_info = traceback.format_exc()
print("fail to perform transform [{}] with error: "
"{} and stack:\n{}".format(f, e, str(stack_info)))
raise e
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class BaseTransform(object):
"""
Base class of all transforms used in computer vision.
calling logic:
if keys is None:
_get_params -> _apply_image()
else:
_get_params -> _apply_*() for * in keys
    If you want to implement a self-defined transform method for images,
    override the corresponding _apply_* method in a subclass.
Args:
keys (list[str]|tuple[str], optional): Input type. Input is a tuple contains different structures,
key is used to specify the type of input. For example, if your input
is image type, then the key can be None or ("image"). if your input
is (image, image) type, then the keys should be ("image", "image").
if your input is (image, boxes), then the keys should be ("image", "boxes").
Current available strings & data type are describe below:
- "image": input image, with shape of (H, W, C)
- "coords": coordinates, with shape of (N, 2)
- "boxes": bounding boxes, with shape of (N, 4), "xyxy" format,
the 1st "xy" represents top left point of a box,
the 2nd "xy" represents right bottom point.
- "mask": map used for segmentation, with shape of (H, W, 1)
You can also customize your data types only if you implement the corresponding
_apply_*() methods, otherwise ``NotImplementedError`` will be raised.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
import paddle.vision.transforms.functional as F
from paddle.vision.transforms import BaseTransform
def _get_image_size(img):
if F._is_pil_image(img):
return img.size
elif F._is_numpy_image(img):
return img.shape[:2][::-1]
else:
raise TypeError("Unexpected type {}".format(type(img)))
class CustomRandomFlip(BaseTransform):
def __init__(self, prob=0.5, keys=None):
super(CustomRandomFlip, self).__init__(keys)
self.prob = prob
def _get_params(self, inputs):
image = inputs[self.keys.index('image')]
params = {}
params['flip'] = np.random.random() < self.prob
params['size'] = _get_image_size(image)
return params
def _apply_image(self, image):
if self.params['flip']:
return F.hflip(image)
return image
# if you only want to transform image, do not need to rewrite this function
def _apply_coords(self, coords):
if self.params['flip']:
w = self.params['size'][0]
coords[:, 0] = w - coords[:, 0]
return coords
# if you only want to transform image, do not need to rewrite this function
def _apply_boxes(self, boxes):
idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten()
coords = np.asarray(boxes).reshape(-1, 4)[:, idxs].reshape(-1, 2)
coords = self._apply_coords(coords).reshape((-1, 4, 2))
minxy = coords.min(axis=1)
maxxy = coords.max(axis=1)
trans_boxes = np.concatenate((minxy, maxxy), axis=1)
return trans_boxes
# if you only want to transform image, do not need to rewrite this function
def _apply_mask(self, mask):
if self.params['flip']:
return F.hflip(mask)
return mask
# create fake inputs
fake_img = Image.fromarray((np.random.rand(400, 500, 3) * 255.).astype('uint8'))
fake_boxes = np.array([[2, 3, 200, 300], [50, 60, 80, 100]])
fake_mask = fake_img.convert('L')
# only transform for image:
flip_transform = CustomRandomFlip(1.0)
converted_img = flip_transform(fake_img)
# transform for image, boxes and mask
flip_transform = CustomRandomFlip(1.0, keys=('image', 'boxes', 'mask'))
(converted_img, converted_boxes, converted_mask) = flip_transform((fake_img, fake_boxes, fake_mask))
print('converted boxes', converted_boxes)
"""
def __init__(self, keys=None):
if keys is None:
keys = ("image", )
elif not isinstance(keys, Sequence):
raise ValueError(
"keys should be a sequence, but got keys={}".format(keys))
for k in keys:
if self._get_apply(k) is None:
raise NotImplementedError(
"{} is unsupported data structure".format(k))
self.keys = keys
# storage some params get from function get_params()
self.params = None
def _get_params(self, inputs):
pass
def __call__(self, inputs):
"""Apply transform on single input data"""
if not isinstance(inputs, tuple):
inputs = (inputs, )
self.params = self._get_params(inputs)
outputs = []
for i in range(min(len(inputs), len(self.keys))):
apply_func = self._get_apply(self.keys[i])
if apply_func is None:
outputs.append(inputs[i])
else:
outputs.append(apply_func(inputs[i]))
if len(inputs) > len(self.keys):
outputs.extend(inputs[len(self.keys):])
if len(outputs) == 1:
outputs = outputs[0]
else:
outputs = tuple(outputs)
return outputs
def _get_apply(self, key):
return getattr(self, "_apply_{}".format(key), None)
def _apply_image(self, image):
raise NotImplementedError
def _apply_boxes(self, boxes):
raise NotImplementedError
def _apply_mask(self, mask):
raise NotImplementedError
class ToTensor(BaseTransform):
"""Convert a ``PIL.Image`` or ``numpy.ndarray`` to ``paddle.Tensor``.
Converts a PIL.Image or numpy.ndarray (H x W x C) to a paddle.Tensor of shape (C x H x W).
    If input is a grayscale image (H x W), it will be converted to an image of shape (H x W x 1).
And the shape of output tensor will be (1 x H x W).
If you want to keep the shape of output tensor as (H x W x C), you can set data_format = ``HWC`` .
Converts a PIL.Image or numpy.ndarray in the range [0, 255] to a paddle.Tensor in the
range [0.0, 1.0] if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr,
RGBA, CMYK, 1) or if the numpy.ndarray has dtype = np.uint8.
In the other cases, tensors are returned without scaling.
Args:
data_format (str, optional): Data format of output tensor, should be 'HWC' or
'CHW'. Default: 'CHW'.
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray): The input image with shape (H x W x C).
- output(np.ndarray): A tensor with shape (C x H x W) or (H x W x C) according option data_format.
Returns:
A callable object of ToTensor.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
import paddle.vision.transforms as T
import paddle.vision.transforms.functional as F
fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
transform = T.ToTensor()
tensor = transform(fake_img)
"""
def __init__(self, data_format='CHW', keys=None):
super(ToTensor, self).__init__(keys)
self.data_format = data_format
def _apply_image(self, img):
"""
Args:
img (PIL.Image|np.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
return F.to_tensor(img, self.data_format)
class Resize(BaseTransform):
"""Resize the input Image to the given size.
Args:
size (int|list|tuple): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int|str, optional): Interpolation method. Default: 'bilinear'.
when use pil backend, support method are as following:
- "nearest": Image.NEAREST,
- "bilinear": Image.BILINEAR,
- "bicubic": Image.BICUBIC,
- "box": Image.BOX,
- "lanczos": Image.LANCZOS,
- "hamming": Image.HAMMING
when use cv2 backend, support method are as following:
- "nearest": cv2.INTER_NEAREST,
- "bilinear": cv2.INTER_LINEAR,
- "area": cv2.INTER_AREA,
- "bicubic": cv2.INTER_CUBIC,
- "lanczos": cv2.INTER_LANCZOS4
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
- output(PIL.Image|np.ndarray|Paddle.Tensor): A resized image.
Returns:
A callable object of Resize.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import Resize
transform = Resize(size=224)
fake_img = Image.fromarray((np.random.rand(100, 120, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
print(fake_img.size)
"""
def __init__(self, size, interpolation='bilinear', keys=None):
super(Resize, self).__init__(keys)
assert isinstance(size, int) or (isinstance(size, Iterable) and
len(size) == 2)
self.size = size
self.interpolation = interpolation
def _apply_image(self, img):
return F.resize(img, self.size, self.interpolation)
class RandomResizedCrop(BaseTransform):
"""Crop the input data to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 1.33) of the original aspect ratio is made.
    After applying the crop transform, the input data will be resized to the given size.
Args:
size (int|list|tuple): Target size of output image, with (height, width) shape.
        scale (list|tuple): Scale range of the cropped image before resizing, relative to the original
            image. Default: (0.08, 1.0)
        ratio (list|tuple): Range of aspect ratios of the crop, relative to the original aspect ratio. Default: (0.75, 1.33)
interpolation (int|str, optional): Interpolation method. Default: 'bilinear'. when use pil backend,
support method are as following:
- "nearest": Image.NEAREST,
- "bilinear": Image.BILINEAR,
- "bicubic": Image.BICUBIC,
- "box": Image.BOX,
- "lanczos": Image.LANCZOS,
- "hamming": Image.HAMMING
when use cv2 backend, support method are as following:
- "nearest": cv2.INTER_NEAREST,
- "bilinear": cv2.INTER_LINEAR,
- "area": cv2.INTER_AREA,
- "bicubic": cv2.INTER_CUBIC,
- "lanczos": cv2.INTER_LANCZOS4
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
- output(PIL.Image|np.ndarray|Paddle.Tensor): A cropped image.
Returns:
A callable object of RandomResizedCrop.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import RandomResizedCrop
transform = RandomResizedCrop(224)
fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
print(fake_img.size)
"""
def __init__(self,
size,
scale=(0.08, 1.0),
ratio=(3. / 4, 4. / 3),
interpolation='bilinear',
keys=None):
super(RandomResizedCrop, self).__init__(keys)
if isinstance(size, int):
self.size = (size, size)
else:
self.size = size
assert (scale[0] <= scale[1]), "scale should be of kind (min, max)"
assert (ratio[0] <= ratio[1]), "ratio should be of kind (min, max)"
self.scale = scale
self.ratio = ratio
self.interpolation = interpolation
def _get_param(self, image, attempts=10):
width, height = _get_image_size(image)
area = height * width
for _ in range(attempts):
target_area = np.random.uniform(*self.scale) * area
log_ratio = tuple(math.log(x) for x in self.ratio)
aspect_ratio = math.exp(np.random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(self.ratio):
w = width
h = int(round(w / min(self.ratio)))
elif in_ratio > max(self.ratio):
h = height
w = int(round(h * max(self.ratio)))
else:
# return whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def _apply_image(self, img):
i, j, h, w = self._get_param(img)
cropped_img = F.crop(img, i, j, h, w)
return F.resize(cropped_img, self.size, self.interpolation)
class CenterCrop(BaseTransform):
"""Crops the given the input data at the center.
Args:
size (int|list|tuple): Target size of output image, with (height, width) shape.
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
- output(PIL.Image|np.ndarray|Paddle.Tensor): A cropped image.
Returns:
A callable object of CenterCrop.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import CenterCrop
transform = CenterCrop(224)
fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
print(fake_img.size)
"""
def __init__(self, size, keys=None):
super(CenterCrop, self).__init__(keys)
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def _apply_image(self, img):
return F.center_crop(img, self.size)
class RandomHorizontalFlip(BaseTransform):
"""Horizontally flip the input data randomly with a given probability.
Args:
prob (float, optional): Probability of the input data being flipped. Should be in [0, 1]. Default: 0.5
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): A horizontally flipped image.
Returns:
A callable object of RandomHorizontalFlip.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import RandomHorizontalFlip
transform = RandomHorizontalFlip(0.5)
fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
print(fake_img.size)
"""
def __init__(self, prob=0.5, keys=None):
super(RandomHorizontalFlip, self).__init__(keys)
assert 0 <= prob <= 1, "probability must be between 0 and 1"
self.prob = prob
def _apply_image(self, img):
if random.random() < self.prob:
return F.hflip(img)
return img
class RandomVerticalFlip(BaseTransform):
"""Vertically flip the input data randomly with a given probability.
Args:
prob (float, optional): Probability of the input data being flipped. Default: 0.5
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): A vertically flipped image.
Returns:
A callable object of RandomVerticalFlip.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import RandomVerticalFlip
transform = RandomVerticalFlip()
fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
print(fake_img.size)
"""
def __init__(self, prob=0.5, keys=None):
super(RandomVerticalFlip, self).__init__(keys)
assert 0 <= prob <= 1, "probability must be between 0 and 1"
self.prob = prob
def _apply_image(self, img):
if random.random() < self.prob:
return F.vflip(img)
return img
class Normalize(BaseTransform):
"""Normalize the input data with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels,
this transform will normalize each channel of the input data.
``output[channel] = (input[channel] - mean[channel]) / std[channel]``
Args:
mean (int|float|list|tuple): Sequence of means for each channel.
std (int|float|list|tuple): Sequence of standard deviations for each channel.
data_format (str, optional): Data format of img, should be 'HWC' or
'CHW'. Default: 'CHW'.
to_rgb (bool, optional): Whether to convert to rgb. Default: False.
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
- output(PIL.Image|np.ndarray|Paddle.Tensor): A normalized array or tensor.
Returns:
A callable object of Normalize.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import Normalize
normalize = Normalize(mean=[127.5, 127.5, 127.5],
std=[127.5, 127.5, 127.5],
data_format='HWC')
fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8))
fake_img = normalize(fake_img)
print(fake_img.shape)
print(fake_img.max, fake_img.max)
"""
def __init__(self,
mean=0.0,
std=1.0,
data_format='CHW',
to_rgb=False,
keys=None):
super(Normalize, self).__init__(keys)
if isinstance(mean, numbers.Number):
mean = [mean, mean, mean]
if isinstance(std, numbers.Number):
std = [std, std, std]
self.mean = mean
self.std = std
self.data_format = data_format
self.to_rgb = to_rgb
def _apply_image(self, img):
return F.normalize(img, self.mean, self.std, self.data_format,
self.to_rgb)
class Transpose(BaseTransform):
"""Transpose input data to a target format.
For example, most transforms use HWC mode image,
while the Neural Network might use CHW mode input tensor.
output image will be an instance of numpy.ndarray.
Args:
order (list|tuple, optional): Target order of input data. Default: (2, 0, 1).
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
- output(np.ndarray|Paddle.Tensor): A transposed array or tensor. If input
is a PIL.Image, output will be converted to np.ndarray automatically.
Returns:
A callable object of Transpose.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import Transpose
transform = Transpose()
fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
print(fake_img.shape)
"""
def __init__(self, order=(2, 0, 1), keys=None):
super(Transpose, self).__init__(keys)
self.order = order
def _apply_image(self, img):
if F._is_tensor_image(img):
return img.transpose(self.order)
if F._is_pil_image(img):
img = np.asarray(img)
if len(img.shape) == 2:
img = img[..., np.newaxis]
return img.transpose(self.order)
class BrightnessTransform(BaseTransform):
"""Adjust brightness of the image.
Args:
value (float): How much to adjust the brightness. Can be any
            non-negative number; 0 gives the original image.
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): An image with a transform in brightness.
Returns:
A callable object of BrightnessTransform.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import BrightnessTransform
transform = BrightnessTransform(0.4)
fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
"""
def __init__(self, value, keys=None):
super(BrightnessTransform, self).__init__(keys)
self.value = _check_input(value, 'brightness')
def _apply_image(self, img):
if self.value is None:
return img
brightness_factor = random.uniform(self.value[0], self.value[1])
return F.adjust_brightness(img, brightness_factor)
class ContrastTransform(BaseTransform):
"""Adjust contrast of the image.
Args:
value (float): How much to adjust the contrast. Can be any
            non-negative number; 0 gives the original image.
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
- output(PIL.Image|np.ndarray|Paddle.Tensor): An image with a transform in contrast.
Returns:
A callable object of ContrastTransform.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import ContrastTransform
transform = ContrastTransform(0.4)
fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
"""
def __init__(self, value, keys=None):
super(ContrastTransform, self).__init__(keys)
if value < 0:
raise ValueError("contrast value should be non-negative")
self.value = _check_input(value, 'contrast')
def _apply_image(self, img):
if self.value is None:
return img
contrast_factor = random.uniform(self.value[0], self.value[1])
return F.adjust_contrast(img, contrast_factor)
class SaturationTransform(BaseTransform):
"""Adjust saturation of the image.
Args:
value (float): How much to adjust the saturation. Can be any
            non-negative number; 0 gives the original image.
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
- output(PIL.Image|np.ndarray|Paddle.Tensor): An image with a transform in saturation.
Returns:
A callable object of SaturationTransform.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import SaturationTransform
transform = SaturationTransform(0.4)
fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
"""
def __init__(self, value, keys=None):
super(SaturationTransform, self).__init__(keys)
self.value = _check_input(value, 'saturation')
def _apply_image(self, img):
if self.value is None:
return img
saturation_factor = random.uniform(self.value[0], self.value[1])
return F.adjust_saturation(img, saturation_factor)
class HueTransform(BaseTransform):
"""Adjust hue of the image.
Args:
value (float): How much to adjust the hue. Can be any number
between 0 and 0.5, 0 gives the original image
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
- output(PIL.Image|np.ndarray|Paddle.Tensor): An image with a transform in hue.
Returns:
A callable object of HueTransform.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import HueTransform
transform = HueTransform(0.4)
fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
"""
def __init__(self, value, keys=None):
super(HueTransform, self).__init__(keys)
self.value = _check_input(
value, 'hue', center=0, bound=(-0.5, 0.5), clip_first_on_zero=False)
def _apply_image(self, img):
if self.value is None:
return img
hue_factor = random.uniform(self.value[0], self.value[1])
return F.adjust_hue(img, hue_factor)
class ColorJitter(BaseTransform):
"""Randomly change the brightness, contrast, saturation and hue of an image.
Args:
        brightness (float): How much to jitter brightness.
            Chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. Should be a non-negative number.
        contrast (float): How much to jitter contrast.
            Chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. Should be a non-negative number.
        saturation (float): How much to jitter saturation.
            Chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. Should be a non-negative number.
        hue (float): How much to jitter hue.
            Chosen uniformly from [-hue, hue]. Should satisfy 0 <= hue <= 0.5.
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
- output(PIL.Image|np.ndarray|Paddle.Tensor): A color jittered image.
Returns:
A callable object of ColorJitter.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import ColorJitter
transform = ColorJitter(0.4, 0.4, 0.4, 0.4)
fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0,
keys=None):
super(ColorJitter, self).__init__(keys)
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def _get_param(self, brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness is not None:
transforms.append(BrightnessTransform(brightness, self.keys))
if contrast is not None:
transforms.append(ContrastTransform(contrast, self.keys))
if saturation is not None:
transforms.append(SaturationTransform(saturation, self.keys))
if hue is not None:
transforms.append(HueTransform(hue, self.keys))
random.shuffle(transforms)
transform = Compose(transforms)
return transform
def _apply_image(self, img):
"""
Args:
img (PIL Image): Input image.
Returns:
PIL Image: Color jittered image.
"""
transform = self._get_param(self.brightness, self.contrast,
self.saturation, self.hue)
return transform(img)
class RandomCrop(BaseTransform):
"""Crops the given CV Image at a random location.
Args:
size (sequence|int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
padding (int|sequence|optional): Optional padding on each border
of the image. If a sequence of length 4 is provided, it is used to pad left,
top, right, bottom borders respectively. Default: 0.
pad_if_needed (boolean|optional): It will pad the image if smaller than the
desired size to avoid raising an exception. Default: False.
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
- output(PIL.Image|np.ndarray|Paddle.Tensor): A random cropped image.
Returns:
A callable object of RandomCrop.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import RandomCrop
transform = RandomCrop(224)
fake_img = Image.fromarray((np.random.rand(324, 300, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
print(fake_img.size)
"""
def __init__(self,
size,
padding=None,
pad_if_needed=False,
fill=0,
padding_mode='constant',
keys=None):
super(RandomCrop, self).__init__(keys)
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
self.pad_if_needed = pad_if_needed
self.fill = fill
self.padding_mode = padding_mode
def _get_param(self, img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = _get_image_size(img)
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def _apply_image(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
if self.padding is not None:
img = F.pad(img, self.padding, self.fill, self.padding_mode)
w, h = _get_image_size(img)
# pad the width if needed
if self.pad_if_needed and w < self.size[1]:
img = F.pad(img, (self.size[1] - w, 0), self.fill,
self.padding_mode)
# pad the height if needed
if self.pad_if_needed and h < self.size[0]:
img = F.pad(img, (0, self.size[0] - h), self.fill,
self.padding_mode)
i, j, h, w = self._get_param(img, self.size)
return F.crop(img, i, j, h, w)
class Pad(BaseTransform):
"""Pads the given CV Image on all sides with the given "pad" value.
Args:
padding (int|list|tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If list/tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a list/tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill (int|list|tuple): Pixel fill value for constant fill. Default is 0. If a list/tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
``constant`` means pads with a constant value, this value is specified with fill.
``edge`` means pads with the last value at the edge of the image.
``reflect`` means pads with reflection of image (without repeating the last value on the edge)
padding ``[1, 2, 3, 4]`` with 2 elements on both sides in reflect mode
will result in ``[3, 2, 1, 2, 3, 4, 3, 2]``.
            ``symmetric`` means pads with reflection of image (repeating the last value on the edge)
padding ``[1, 2, 3, 4]`` with 2 elements on both sides in symmetric mode
will result in ``[2, 1, 1, 2, 3, 4, 4, 3]``.
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): A padded image.
Returns:
A callable object of Pad.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import Pad
transform = Pad(2)
fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
print(fake_img.size)
"""
def __init__(self, padding, fill=0, padding_mode='constant', keys=None):
assert isinstance(padding, (numbers.Number, list, tuple))
assert isinstance(fill, (numbers.Number, str, list, tuple))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
if isinstance(padding, list):
padding = tuple(padding)
if isinstance(fill, list):
fill = tuple(fill)
if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
raise ValueError(
"Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
super(Pad, self).__init__(keys)
self.padding = padding
self.fill = fill
self.padding_mode = padding_mode
def _apply_image(self, img):
"""
Args:
img (PIL Image): Image to be padded.
Returns:
PIL Image: Padded image.
"""
return F.pad(img, self.padding, self.fill, self.padding_mode)
class RandomRotation(BaseTransform):
"""Rotates the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees) clockwise order.
interpolation (str, optional): Interpolation method. If omitted, or if the
image has only one channel, it is set to PIL.Image.NEAREST or cv2.INTER_NEAREST
according the backend. when use pil backend, support method are as following:
- "nearest": Image.NEAREST,
- "bilinear": Image.BILINEAR,
- "bicubic": Image.BICUBIC
when use cv2 backend, support method are as following:
- "nearest": cv2.INTER_NEAREST,
- "bilinear": cv2.INTER_LINEAR,
- "bicubic": cv2.INTER_CUBIC
expand (bool|optional): Optional expansion flag. Default: False.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple|optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
- output(PIL.Image|np.ndarray|Paddle.Tensor): A rotated image.
Returns:
A callable object of RandomRotation.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import RandomRotation
transform = RandomRotation(90)
fake_img = Image.fromarray((np.random.rand(200, 150, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
print(fake_img.size)
"""
def __init__(self,
degrees,
interpolation='nearest',
expand=False,
center=None,
fill=0,
keys=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError(
"If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError(
"If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
super(RandomRotation, self).__init__(keys)
self.interpolation = interpolation
self.expand = expand
self.center = center
self.fill = fill
def _get_param(self, degrees):
angle = random.uniform(degrees[0], degrees[1])
return angle
def _apply_image(self, img):
"""
Args:
img (PIL.Image|np.array): Image to be rotated.
Returns:
PIL.Image or np.array: Rotated image.
"""
angle = self._get_param(self.degrees)
return F.rotate(img, angle, self.interpolation, self.expand,
self.center, self.fill)
class Grayscale(BaseTransform):
"""Converts image to grayscale.
Args:
num_output_channels (int): (1 or 3) number of channels desired for output image
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.
Shape:
- img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
- output(PIL.Image|np.ndarray|Paddle.Tensor): Grayscale version of the input image.
- If output_channels == 1 : returned image is single channel
- If output_channels == 3 : returned image is 3 channel with r == g == b
Returns:
A callable object of Grayscale.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import Grayscale
transform = Grayscale()
fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
fake_img = transform(fake_img)
print(np.array(fake_img).shape)
"""
def __init__(self, num_output_channels=1, keys=None):
super(Grayscale, self).__init__(keys)
self.num_output_channels = num_output_channels
def _apply_image(self, img):
"""
Args:
img (PIL Image): Image to be converted to grayscale.
Returns:
PIL Image: Randomly grayscaled image.
"""
return F.to_grayscale(img, self.num_output_channels)
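

# Hedged end-to-end sketch (added for illustration; not part of the original
# module). It simply chains several of the transforms defined above via
# Compose, as the individual docstrings describe; the input size is an
# arbitrary assumption and running it requires Paddle plus Pillow.
if __name__ == "__main__":
    from PIL import Image

    pipeline = Compose([
        RandomResizedCrop(224),
        RandomHorizontalFlip(0.5),
        ToTensor(),
    ])
    fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8))
    out = pipeline(fake_img)
    print(out.shape)  # CHW paddle tensor produced by ToTensor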
|
the-stack_0_5621 | import numpy as np
import pandas as pd
try:
import cudf.dataframe as gdf
except ImportError as e:
print("Failed to import cuDF: " + str(e))
print("Skipping this test")
from sklearn import datasets
import sys
import unittest
import xgboost as xgb
from regression_test_utilities import run_suite, parameter_combinations, \
assert_results_non_increasing, Dataset
def get_gdf():
rng = np.random.RandomState(199)
n = 50000
m = 20
sparsity = 0.25
X, y = datasets.make_regression(n, m, random_state=rng)
Xy = (np.ascontiguousarray
(np.transpose(np.concatenate((X, np.expand_dims(y, axis=1)), axis=1))))
df = gdf.DataFrame(list(zip(['col%d' % i for i in range(m+1)], Xy)))
all_columns = list(df.columns)
cols_X = all_columns[0:len(all_columns)-1]
cols_y = [all_columns[len(all_columns)-1]]
return df[cols_X], df[cols_y]
class TestGPU(unittest.TestCase):
gdf_datasets = [Dataset("GDF", get_gdf, "reg:linear", "rmse")]
def test_gdf(self):
variable_param = {'n_gpus': [1], 'max_depth': [10], 'max_leaves': [255],
'max_bin': [255],
'grow_policy': ['lossguide']}
for param in parameter_combinations(variable_param):
param['tree_method'] = 'gpu_hist'
gpu_results = run_suite(param, num_rounds=20,
select_datasets=self.gdf_datasets)
assert_results_non_increasing(gpu_results, 1e-2)
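

# Hedged convenience entry point (not in the original test file): it only lets
# the module be executed directly; the upstream suite normally discovers
# TestGPU through its own test runner instead.
if __name__ == "__main__":
    unittest.main()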
|
the-stack_0_5625 | #
# Parse tree nodes
#
from __future__ import absolute_import
import cython
cython.declare(sys=object, os=object, copy=object,
Builtin=object, error=object, warning=object, Naming=object, PyrexTypes=object,
py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object,
StructOrUnionScope=object, PyClassScope=object,
CppClassScope=object, UtilityCode=object, EncodedString=object,
absolute_path_length=cython.Py_ssize_t, error_type=object, _py_int_types=object)
import sys, os, copy
from itertools import chain
from . import Builtin
from .Errors import error, warning, InternalError, CompileError
from . import Naming
from . import PyrexTypes
from . import TypeSlots
from .PyrexTypes import py_object_type, error_type
from .Symtab import (ModuleScope, LocalScope, ClosureScope,
StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope)
from .Code import UtilityCode
from .StringEncoding import EncodedString
from . import Future
from . import Options
from . import DebugFlags
from ..Utils import add_metaclass
absolute_path_length = 0
if sys.version_info[0] >= 3:
_py_int_types = int
else:
_py_int_types = (int, long)
def relative_position(pos):
"""
We embed the relative filename in the generated C file, since we
don't want to have to regenerate and compile all the source code
whenever the Python install directory moves (which could happen,
    e.g., when distributing binaries).
INPUT:
        a position tuple -- (absolute filename, line number, column position)
OUTPUT:
relative filename
line number
AUTHOR: William Stein
"""
global absolute_path_length
if absolute_path_length == 0:
absolute_path_length = len(os.path.abspath(os.getcwd()))
return (pos[0].get_filenametable_entry()[absolute_path_length+1:], pos[1])
def embed_position(pos, docstring):
if not Options.embed_pos_in_docstring:
return docstring
pos_line = u'File: %s (starting at line %s)' % relative_position(pos)
if docstring is None:
# unicode string
return EncodedString(pos_line)
# make sure we can encode the filename in the docstring encoding
# otherwise make the docstring a unicode string
encoding = docstring.encoding
if encoding is not None:
try:
pos_line.encode(encoding)
except UnicodeEncodeError:
encoding = None
if not docstring:
# reuse the string encoding of the original docstring
doc = EncodedString(pos_line)
else:
doc = EncodedString(pos_line + u'\n' + docstring)
doc.encoding = encoding
return doc
def _analyse_signature_annotation(annotation, env):
base_type = None
explicit_pytype = explicit_ctype = False
if annotation.is_dict_literal:
for name, value in annotation.key_value_pairs:
if not name.is_string_literal:
continue
if name.value in ('type', b'type'):
explicit_pytype = True
if not explicit_ctype:
annotation = value
elif name.value in ('ctype', b'ctype'):
explicit_ctype = True
annotation = value
if explicit_pytype and explicit_ctype:
warning(annotation.pos, "Duplicate type declarations found in signature annotation")
arg_type = annotation.analyse_as_type(env)
if arg_type is not None:
if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject:
warning(annotation.pos,
"Python type declaration in signature annotation does not refer to a Python type")
base_type = CAnalysedBaseTypeNode(
annotation.pos, type=arg_type, is_arg=True)
else:
warning(annotation.pos, "Unknown type declaration found in signature annotation")
return base_type, arg_type
def write_func_call(func, codewriter_class):
def f(*args, **kwds):
if len(args) > 1 and isinstance(args[1], codewriter_class):
# here we annotate the code with this function call
# but only if new code is generated
node, code = args[:2]
marker = ' /* %s -> %s.%s %s */' % (
' ' * code.call_level,
node.__class__.__name__,
func.__name__,
node.pos[1:])
pristine = code.buffer.stream.tell()
code.putln(marker)
start = code.buffer.stream.tell()
code.call_level += 4
res = func(*args, **kwds)
code.call_level -= 4
if start == code.buffer.stream.tell():
# no code written => undo writing marker
code.buffer.stream.truncate(pristine)
else:
marker = marker.replace('->', '<-', 1)
code.putln(marker)
return res
else:
return func(*args, **kwds)
return f
class VerboseCodeWriter(type):
# Set this as a metaclass to trace function calls in code.
# This slows down code generation and makes much larger files.
def __new__(cls, name, bases, attrs):
from types import FunctionType
from .Code import CCodeWriter
attrs = dict(attrs)
for mname, m in attrs.items():
if isinstance(m, FunctionType):
attrs[mname] = write_func_call(m, CCodeWriter)
return super(VerboseCodeWriter, cls).__new__(cls, name, bases, attrs)
class CheckAnalysers(type):
"""Metaclass to check that type analysis functions return a node.
"""
methods = set(['analyse_types',
'analyse_expressions',
'analyse_target_types'])
def __new__(cls, name, bases, attrs):
from types import FunctionType
def check(name, func):
def call(*args, **kwargs):
retval = func(*args, **kwargs)
if retval is None:
print('%s %s %s' % (name, args, kwargs))
return retval
return call
attrs = dict(attrs)
for mname, m in attrs.items():
if isinstance(m, FunctionType) and mname in cls.methods:
attrs[mname] = check(mname, m)
return super(CheckAnalysers, cls).__new__(cls, name, bases, attrs)
def _with_metaclass(cls):
if DebugFlags.debug_trace_code_generation:
return add_metaclass(VerboseCodeWriter)(cls)
#return add_metaclass(CheckAnalysers)(cls)
return cls
@_with_metaclass
class Node(object):
# pos (string, int, int) Source file position
# is_name boolean Is a NameNode
# is_literal boolean Is a ConstNode
is_name = 0
is_none = 0
is_nonecheck = 0
is_literal = 0
is_terminator = 0
temps = None
# All descendants should set child_attrs to a list of the attributes
# containing nodes considered "children" in the tree. Each such attribute
# can either contain a single node or a list of nodes. See Visitor.py.
child_attrs = None
cf_state = None
# This may be an additional (or 'actual') type that will be checked when
# this node is coerced to another type. This could be useful to set when
# the actual type to which it can coerce is known, but you want to leave
# the type a py_object_type
coercion_type = None
def __init__(self, pos, **kw):
self.pos = pos
self.__dict__.update(kw)
gil_message = "Operation"
nogil_check = None
def gil_error(self, env=None):
error(self.pos, "%s not allowed without gil" % self.gil_message)
cpp_message = "Operation"
def cpp_check(self, env):
if not env.is_cpp():
self.cpp_error()
def cpp_error(self):
error(self.pos, "%s only allowed in c++" % self.cpp_message)
def clone_node(self):
"""Clone the node. This is defined as a shallow copy, except for member lists
amongst the child attributes (from get_child_accessors) which are also
copied. Lists containing child nodes are thus seen as a way for the node
to hold multiple children directly; the list is not treated as a separate
level in the tree."""
result = copy.copy(self)
for attrname in result.child_attrs:
value = getattr(result, attrname)
if isinstance(value, list):
setattr(result, attrname, [x for x in value])
return result
#
# There are 3 phases of parse tree processing, applied in order to
# all the statements in a given scope-block:
#
# (0) analyse_declarations
# Make symbol table entries for all declarations at the current
# level, both explicit (def, cdef, etc.) and implicit (assignment
# to an otherwise undeclared name).
#
# (1) analyse_expressions
# Determine the result types of expressions and fill in the
# 'type' attribute of each ExprNode. Insert coercion nodes into the
# tree where needed to convert to and from Python objects.
# Allocate temporary locals for intermediate results. Fill
# in the 'result_code' attribute of each ExprNode with a C code
# fragment.
#
# (2) generate_code
# Emit C code for all declarations, statements and expressions.
# Recursively applies the 3 processing phases to the bodies of
# functions.
#
def analyse_declarations(self, env):
pass
def analyse_expressions(self, env):
raise InternalError("analyse_expressions not implemented for %s" % \
self.__class__.__name__)
def generate_code(self, code):
raise InternalError("generate_code not implemented for %s" % \
self.__class__.__name__)
def annotate(self, code):
# mro does the wrong thing
if isinstance(self, BlockNode):
self.body.annotate(code)
def end_pos(self):
try:
return self._end_pos
except AttributeError:
pos = self.pos
if not self.child_attrs:
self._end_pos = pos
return pos
for attr in self.child_attrs:
child = getattr(self, attr)
# Sometimes lists, sometimes nodes
if child is None:
pass
elif isinstance(child, list):
for c in child:
pos = max(pos, c.end_pos())
else:
pos = max(pos, child.end_pos())
self._end_pos = pos
return pos
def dump(self, level=0, filter_out=("pos",), cutoff=100, encountered=None):
"""Debug helper method that returns a recursive string representation of this node.
"""
if cutoff == 0:
return "<...nesting level cutoff...>"
if encountered is None:
encountered = set()
if id(self) in encountered:
return "<%s (0x%x) -- already output>" % (self.__class__.__name__, id(self))
encountered.add(id(self))
def dump_child(x, level):
if isinstance(x, Node):
return x.dump(level, filter_out, cutoff-1, encountered)
elif isinstance(x, list):
return "[%s]" % ", ".join([dump_child(item, level) for item in x])
else:
return repr(x)
attrs = [(key, value) for key, value in self.__dict__.items() if key not in filter_out]
if len(attrs) == 0:
return "<%s (0x%x)>" % (self.__class__.__name__, id(self))
else:
indent = " " * level
res = "<%s (0x%x)\n" % (self.__class__.__name__, id(self))
for key, value in attrs:
res += "%s %s: %s\n" % (indent, key, dump_child(value, level + 1))
res += "%s>" % indent
return res
def dump_pos(self, mark_column=False, marker='(#)'):
"""Debug helper method that returns the source code context of this node as a string.
"""
if not self.pos:
return u''
source_desc, line, col = self.pos
contents = source_desc.get_lines(encoding='ASCII', error_handling='ignore')
# line numbers start at 1
lines = contents[max(0, line-3):line]
current = lines[-1]
if mark_column:
current = current[:col] + marker + current[col:]
lines[-1] = current.rstrip() + u' # <<<<<<<<<<<<<<\n'
lines += contents[line:line+2]
return u'"%s":%d:%d\n%s\n' % (
source_desc.get_escaped_description(), line, col, u''.join(lines))
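    # dump_pos() output looks roughly like this (illustrative):
    #   "module.pyx":12:4
    #   <up to two preceding source lines>
    #   <the line at self.pos>  # <<<<<<<<<<<<<<
    #   <up to two following source lines>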
class CompilerDirectivesNode(Node):
"""
Sets compiler directives for the children nodes
"""
# directives {string:value} A dictionary holding the right value for
# *all* possible directives.
# body Node
child_attrs = ["body"]
def analyse_declarations(self, env):
old = env.directives
env.directives = self.directives
self.body.analyse_declarations(env)
env.directives = old
def analyse_expressions(self, env):
old = env.directives
env.directives = self.directives
self.body = self.body.analyse_expressions(env)
env.directives = old
return self
def generate_function_definitions(self, env, code):
env_old = env.directives
code_old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.generate_function_definitions(env, code)
env.directives = env_old
code.globalstate.directives = code_old
def generate_execution_code(self, code):
old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.generate_execution_code(code)
code.globalstate.directives = old
def annotate(self, code):
old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.annotate(code)
code.globalstate.directives = old
class BlockNode(object):
# Mixin class for nodes representing a declaration block.
def generate_cached_builtins_decls(self, env, code):
entries = env.global_scope().undeclared_cached_builtins
for entry in entries:
code.globalstate.add_cached_builtin_decl(entry)
del entries[:]
def generate_lambda_definitions(self, env, code):
for node in env.lambda_defs:
node.generate_function_definitions(env, code)
class StatListNode(Node):
# stats a list of StatNode
child_attrs = ["stats"]
@staticmethod
def create_analysed(pos, env, *args, **kw):
node = StatListNode(pos, *args, **kw)
return node # No node-specific analysis needed
def analyse_declarations(self, env):
#print "StatListNode.analyse_declarations" ###
for stat in self.stats:
stat.analyse_declarations(env)
def analyse_expressions(self, env):
#print "StatListNode.analyse_expressions" ###
self.stats = [stat.analyse_expressions(env)
for stat in self.stats]
return self
def generate_function_definitions(self, env, code):
#print "StatListNode.generate_function_definitions" ###
for stat in self.stats:
stat.generate_function_definitions(env, code)
def generate_execution_code(self, code):
#print "StatListNode.generate_execution_code" ###
for stat in self.stats:
code.mark_pos(stat.pos)
stat.generate_execution_code(code)
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
class StatNode(Node):
#
# Code generation for statements is split into the following subphases:
#
# (1) generate_function_definitions
# Emit C code for the definitions of any structs,
# unions, enums and functions defined in the current
# scope-block.
#
# (2) generate_execution_code
# Emit C code for executable statements.
#
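    # For example (illustrative only): a cdef function definition emits its C
    # function body in phase (1), while a plain assignment statement emits its
    # code in phase (2).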
def generate_function_definitions(self, env, code):
pass
def generate_execution_code(self, code):
raise InternalError("generate_execution_code not implemented for %s" % \
self.__class__.__name__)
class CDefExternNode(StatNode):
# include_file string or None
# body StatNode
child_attrs = ["body"]
def analyse_declarations(self, env):
if self.include_file:
env.add_include_file(self.include_file)
old_cinclude_flag = env.in_cinclude
env.in_cinclude = 1
self.body.analyse_declarations(env)
env.in_cinclude = old_cinclude_flag
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
def annotate(self, code):
self.body.annotate(code)
class CDeclaratorNode(Node):
# Part of a C declaration.
#
# Processing during analyse_declarations phase:
#
# analyse
# Returns (name, type) pair where name is the
# CNameDeclaratorNode of the name being declared
# and type is the type it is being declared as.
#
# calling_convention string Calling convention of CFuncDeclaratorNode
# for which this is a base
child_attrs = []
calling_convention = ""
def analyse_templates(self):
# Only C++ functions have templates.
return None
class CNameDeclaratorNode(CDeclaratorNode):
# name string The Cython name being declared
# cname string or None C name, if specified
# default ExprNode or None the value assigned on declaration
child_attrs = ['default']
default = None
def analyse(self, base_type, env, nonempty=0):
if nonempty and self.name == '':
# May have mistaken the name for the type.
if base_type.is_ptr or base_type.is_array or base_type.is_buffer:
error(self.pos, "Missing argument name")
elif base_type.is_void:
error(self.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
else:
self.name = base_type.declaration_code("", for_display=1, pyrex=1)
base_type = py_object_type
if base_type.is_fused and env.fused_to_specific:
base_type = base_type.specialize(env.fused_to_specific)
self.type = base_type
return self, base_type
class CPtrDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
child_attrs = ["base"]
def analyse(self, base_type, env, nonempty=0):
if base_type.is_pyobject:
error(self.pos, "Pointer base type cannot be a Python object")
ptr_type = PyrexTypes.c_ptr_type(base_type)
return self.base.analyse(ptr_type, env, nonempty=nonempty)
class CReferenceDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
child_attrs = ["base"]
def analyse(self, base_type, env, nonempty=0):
if base_type.is_pyobject:
error(self.pos, "Reference base type cannot be a Python object")
ref_type = PyrexTypes.c_ref_type(base_type)
return self.base.analyse(ref_type, env, nonempty=nonempty)
class CArrayDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
# dimension ExprNode
child_attrs = ["base", "dimension"]
def analyse(self, base_type, env, nonempty=0):
if (base_type.is_cpp_class and base_type.is_template_type()) or base_type.is_cfunction:
from .ExprNodes import TupleNode
if isinstance(self.dimension, TupleNode):
args = self.dimension.args
else:
args = self.dimension,
values = [v.analyse_as_type(env) for v in args]
if None in values:
ix = values.index(None)
error(args[ix].pos, "Template parameter not a type")
base_type = error_type
else:
base_type = base_type.specialize_here(self.pos, values)
return self.base.analyse(base_type, env, nonempty=nonempty)
if self.dimension:
self.dimension = self.dimension.analyse_const_expression(env)
if not self.dimension.type.is_int:
error(self.dimension.pos, "Array dimension not integer")
size = self.dimension.get_constant_c_result_code()
if size is not None:
try:
size = int(size)
except ValueError:
# runtime constant?
pass
else:
size = None
if not base_type.is_complete():
error(self.pos, "Array element type '%s' is incomplete" % base_type)
if base_type.is_pyobject:
error(self.pos, "Array element cannot be a Python object")
if base_type.is_cfunction:
error(self.pos, "Array element cannot be a function")
array_type = PyrexTypes.c_array_type(base_type, size)
return self.base.analyse(array_type, env, nonempty=nonempty)
class CFuncDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
# args [CArgDeclNode]
# templates [TemplatePlaceholderType]
# has_varargs boolean
# exception_value ConstNode
# exception_check boolean True if PyErr_Occurred check needed
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
# is_const_method boolean Whether this is a const method
child_attrs = ["base", "args", "exception_value"]
overridable = 0
optional_arg_count = 0
is_const_method = 0
templates = None
def analyse_templates(self):
if isinstance(self.base, CArrayDeclaratorNode):
from .ExprNodes import TupleNode, NameNode
template_node = self.base.dimension
if isinstance(template_node, TupleNode):
template_nodes = template_node.args
elif isinstance(template_node, NameNode):
template_nodes = [template_node]
else:
error(template_node.pos, "Template arguments must be a list of names")
return None
self.templates = []
for template in template_nodes:
if isinstance(template, NameNode):
self.templates.append(PyrexTypes.TemplatePlaceholderType(template.name))
else:
error(template.pos, "Template arguments must be a list of names")
self.base = self.base.base
return self.templates
else:
return None
def analyse(self, return_type, env, nonempty=0, directive_locals=None):
if directive_locals is None:
directive_locals = {}
if nonempty:
nonempty -= 1
func_type_args = []
for i, arg_node in enumerate(self.args):
name_declarator, type = arg_node.analyse(
env, nonempty=nonempty,
is_self_arg=(i == 0 and env.is_c_class_scope and 'staticmethod' not in env.directives))
name = name_declarator.name
if name in directive_locals:
type_node = directive_locals[name]
other_type = type_node.analyse_as_type(env)
if other_type is None:
error(type_node.pos, "Not a type")
elif (type is not PyrexTypes.py_object_type
and not type.same_as(other_type)):
error(self.base.pos, "Signature does not agree with previous declaration")
error(type_node.pos, "Previous declaration here")
else:
type = other_type
if name_declarator.cname:
error(self.pos, "Function argument cannot have C name specification")
if i == 0 and env.is_c_class_scope and type.is_unspecified:
# fix the type of self
type = env.parent_type
# Turn *[] argument into **
if type.is_array:
type = PyrexTypes.c_ptr_type(type.base_type)
# Catch attempted C-style func(void) decl
if type.is_void:
error(arg_node.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
func_type_args.append(
PyrexTypes.CFuncTypeArg(name, type, arg_node.pos))
if arg_node.default:
self.optional_arg_count += 1
elif self.optional_arg_count:
error(self.pos, "Non-default argument follows default argument")
exc_val = None
exc_check = 0
if self.exception_check == '+':
env.add_include_file('ios') # for std::ios_base::failure
env.add_include_file('new') # for std::bad_alloc
env.add_include_file('stdexcept')
env.add_include_file('typeinfo') # for std::bad_cast
if (return_type.is_pyobject
and (self.exception_value or self.exception_check)
and self.exception_check != '+'):
error(self.pos, "Exception clause not allowed for function returning Python object")
else:
if self.exception_value:
self.exception_value = self.exception_value.analyse_const_expression(env)
if self.exception_check == '+':
exc_val_type = self.exception_value.type
if (not exc_val_type.is_error
and not exc_val_type.is_pyobject
and not (exc_val_type.is_cfunction
and not exc_val_type.return_type.is_pyobject
and not exc_val_type.args)):
error(self.exception_value.pos,
"Exception value must be a Python exception or cdef function with no arguments.")
exc_val = self.exception_value
else:
self.exception_value = self.exception_value.coerce_to(
return_type, env).analyse_const_expression(env)
exc_val = self.exception_value.get_constant_c_result_code()
if exc_val is None:
raise InternalError(
"get_constant_c_result_code not implemented for %s" %
self.exception_value.__class__.__name__)
if not return_type.assignable_from(self.exception_value.type):
error(self.exception_value.pos,
"Exception value incompatible with function return type")
exc_check = self.exception_check
if return_type.is_cfunction:
error(self.pos, "Function cannot return a function")
func_type = PyrexTypes.CFuncType(
return_type, func_type_args, self.has_varargs,
optional_arg_count=self.optional_arg_count,
exception_value=exc_val, exception_check=exc_check,
calling_convention=self.base.calling_convention,
nogil=self.nogil, with_gil=self.with_gil, is_overridable=self.overridable,
is_const_method=self.is_const_method,
templates=self.templates)
if self.optional_arg_count:
if func_type.is_fused:
# This is a bit of a hack... When we need to create specialized CFuncTypes
# on the fly because the cdef is defined in a pxd, we need to declare the specialized optional arg
# struct
def declare_opt_arg_struct(func_type, fused_cname):
self.declare_optional_arg_struct(func_type, env, fused_cname)
func_type.declare_opt_arg_struct = declare_opt_arg_struct
else:
self.declare_optional_arg_struct(func_type, env)
callspec = env.directives['callspec']
if callspec:
current = func_type.calling_convention
if current and current != callspec:
error(self.pos, "cannot have both '%s' and '%s' "
"calling conventions" % (current, callspec))
func_type.calling_convention = callspec
return self.base.analyse(func_type, env)
def declare_optional_arg_struct(self, func_type, env, fused_cname=None):
"""
Declares the optional argument struct (the struct used to hold the
values for optional arguments). For fused cdef functions, this is
deferred as analyse_declarations is called only once (on the fused
cdef function).
"""
scope = StructOrUnionScope()
arg_count_member = '%sn' % Naming.pyrex_prefix
scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos)
for arg in func_type.args[len(func_type.args) - self.optional_arg_count:]:
scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=1)
struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name)
if fused_cname is not None:
struct_cname = PyrexTypes.get_fused_cname(fused_cname, struct_cname)
op_args_struct = env.global_scope().declare_struct_or_union(
name=struct_cname,
kind='struct',
scope=scope,
typedef_flag=0,
pos=self.pos,
cname=struct_cname)
op_args_struct.defined_in_pxd = 1
op_args_struct.used = 1
func_type.op_arg_struct = PyrexTypes.c_ptr_type(op_args_struct.type)
class CConstDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
child_attrs = ["base"]
def analyse(self, base_type, env, nonempty=0):
if base_type.is_pyobject:
error(self.pos,
"Const base type cannot be a Python object")
const = PyrexTypes.c_const_type(base_type)
return self.base.analyse(const, env, nonempty=nonempty)
class CArgDeclNode(Node):
# Item in a function declaration argument list.
#
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# not_none boolean Tagged with 'not None'
# or_none boolean Tagged with 'or None'
# accept_none boolean Resolved boolean for not_none/or_none
# default ExprNode or None
# default_value PyObjectConst constant for default value
# annotation ExprNode or None Py3 function arg annotation
# is_self_arg boolean Is the "self" arg of an extension type method
# is_type_arg boolean Is the "class" arg of an extension type classmethod
# is_kw_only boolean Is a keyword-only argument
# is_dynamic boolean Non-literal arg stored inside CyFunction
child_attrs = ["base_type", "declarator", "default", "annotation"]
is_self_arg = 0
is_type_arg = 0
is_generic = 1
kw_only = 0
not_none = 0
or_none = 0
type = None
name_declarator = None
default_value = None
annotation = None
is_dynamic = 0
def analyse(self, env, nonempty=0, is_self_arg=False):
if is_self_arg:
self.base_type.is_self_arg = self.is_self_arg = True
if self.type is None:
# The parser may misinterpret names as types. We fix that here.
if isinstance(self.declarator, CNameDeclaratorNode) and self.declarator.name == '':
if nonempty:
if self.base_type.is_basic_c_type:
# char, short, long called "int"
type = self.base_type.analyse(env, could_be_name=True)
arg_name = type.empty_declaration_code()
else:
arg_name = self.base_type.name
self.declarator.name = EncodedString(arg_name)
self.base_type.name = None
self.base_type.is_basic_c_type = False
could_be_name = True
else:
could_be_name = False
self.base_type.is_arg = True
base_type = self.base_type.analyse(env, could_be_name=could_be_name)
if hasattr(self.base_type, 'arg_name') and self.base_type.arg_name:
self.declarator.name = self.base_type.arg_name
# The parser is unable to resolve the ambiguity of [] as part of the
# type (e.g. in buffers) or empty declarator (as with arrays).
        # This only arises for empty multi-dimensional arrays.
if (base_type.is_array
and isinstance(self.base_type, TemplatedTypeNode)
and isinstance(self.declarator, CArrayDeclaratorNode)):
declarator = self.declarator
while isinstance(declarator.base, CArrayDeclaratorNode):
declarator = declarator.base
declarator.base = self.base_type.array_declarator
base_type = base_type.base_type
# inject type declaration from annotations
if self.annotation and env.directives['annotation_typing'] and self.base_type.name is None:
arg_type = self.inject_type_from_annotations(env)
if arg_type is not None:
base_type = arg_type
return self.declarator.analyse(base_type, env, nonempty=nonempty)
else:
return self.name_declarator, self.type
def inject_type_from_annotations(self, env):
annotation = self.annotation
if not annotation:
return None
base_type, arg_type = _analyse_signature_annotation(annotation, env)
if base_type is not None:
self.base_type = base_type
return arg_type
def calculate_default_value_code(self, code):
if self.default_value is None:
if self.default:
if self.default.is_literal:
# will not output any code, just assign the result_code
self.default.generate_evaluation_code(code)
return self.type.cast_code(self.default.result())
self.default_value = code.get_argument_default_const(self.type)
return self.default_value
def annotate(self, code):
if self.default:
self.default.annotate(code)
def generate_assignment_code(self, code, target=None, overloaded_assignment=False):
default = self.default
if default is None or default.is_literal:
return
if target is None:
target = self.calculate_default_value_code(code)
default.generate_evaluation_code(code)
default.make_owned_reference(code)
result = default.result() if overloaded_assignment else default.result_as(self.type)
code.putln("%s = %s;" % (target, result))
if self.type.is_pyobject:
code.put_giveref(default.result())
default.generate_post_assignment_code(code)
default.free_temps(code)
class CBaseTypeNode(Node):
# Abstract base class for C base type nodes.
#
# Processing during analyse_declarations phase:
#
# analyse
# Returns the type.
def analyse_as_type(self, env):
return self.analyse(env)
class CAnalysedBaseTypeNode(Node):
# type type
child_attrs = []
def analyse(self, env, could_be_name=False):
return self.type
class CSimpleBaseTypeNode(CBaseTypeNode):
# name string
# module_path [string] Qualifying name components
# is_basic_c_type boolean
# signed boolean
# longness integer
# complex boolean
# is_self_arg boolean Is self argument of C method
# ##is_type_arg boolean Is type argument of class method
child_attrs = []
arg_name = None # in case the argument name was interpreted as a type
module_path = []
is_basic_c_type = False
complex = False
def analyse(self, env, could_be_name=False):
# Return type descriptor.
#print "CSimpleBaseTypeNode.analyse: is_self_arg =", self.is_self_arg ###
type = None
if self.is_basic_c_type:
type = PyrexTypes.simple_c_type(self.signed, self.longness, self.name)
if not type:
error(self.pos, "Unrecognised type modifier combination")
elif self.name == "object" and not self.module_path:
type = py_object_type
elif self.name is None:
if self.is_self_arg and env.is_c_class_scope:
#print "CSimpleBaseTypeNode.analyse: defaulting to parent type" ###
type = env.parent_type
## elif self.is_type_arg and env.is_c_class_scope:
## type = Builtin.type_type
else:
type = py_object_type
else:
if self.module_path:
# Maybe it's a nested C++ class.
scope = env
for item in self.module_path:
entry = scope.lookup(item)
if entry is not None and entry.is_cpp_class:
scope = entry.type.scope
else:
scope = None
break
if scope is None:
# Maybe it's a cimport.
scope = env.find_imported_module(self.module_path, self.pos)
if scope:
scope.fused_to_specific = env.fused_to_specific
else:
scope = env
if scope:
if scope.is_c_class_scope:
scope = scope.global_scope()
type = scope.lookup_type(self.name)
if type is not None:
pass
elif could_be_name:
if self.is_self_arg and env.is_c_class_scope:
type = env.parent_type
## elif self.is_type_arg and env.is_c_class_scope:
## type = Builtin.type_type
else:
type = py_object_type
self.arg_name = EncodedString(self.name)
else:
if self.templates:
if not self.name in self.templates:
error(self.pos, "'%s' is not a type identifier" % self.name)
type = PyrexTypes.TemplatePlaceholderType(self.name)
else:
error(self.pos, "'%s' is not a type identifier" % self.name)
if self.complex:
if not type.is_numeric or type.is_complex:
error(self.pos, "can only complexify c numeric types")
type = PyrexTypes.CComplexType(type)
type.create_declaration_utility_code(env)
elif type is Builtin.complex_type:
# Special case: optimise builtin complex type into C's
# double complex. The parser cannot do this (as for the
# normal scalar types) as the user may have redeclared the
# 'complex' type. Testing for the exact type here works.
type = PyrexTypes.c_double_complex_type
type.create_declaration_utility_code(env)
self.complex = True
if type:
return type
else:
return PyrexTypes.error_type
class MemoryViewSliceTypeNode(CBaseTypeNode):
name = 'memoryview'
child_attrs = ['base_type_node', 'axes']
def analyse(self, env, could_be_name=False):
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
from . import MemoryView
try:
axes_specs = MemoryView.get_axes_specs(env, self.axes)
except CompileError as e:
error(e.position, e.message_only)
self.type = PyrexTypes.ErrorType()
return self.type
if not MemoryView.validate_axes(self.pos, axes_specs):
self.type = error_type
else:
self.type = PyrexTypes.MemoryViewSliceType(base_type, axes_specs)
self.type.validate_memslice_dtype(self.pos)
self.use_memview_utilities(env)
return self.type
def use_memview_utilities(self, env):
from . import MemoryView
env.use_utility_code(MemoryView.view_utility_code)
class CNestedBaseTypeNode(CBaseTypeNode):
# For C++ classes that live inside other C++ classes.
# name string
# base_type CBaseTypeNode
child_attrs = ['base_type']
def analyse(self, env, could_be_name=None):
base_type = self.base_type.analyse(env)
if base_type is PyrexTypes.error_type:
return PyrexTypes.error_type
if not base_type.is_cpp_class:
error(self.pos, "'%s' is not a valid type scope" % base_type)
return PyrexTypes.error_type
type_entry = base_type.scope.lookup_here(self.name)
if not type_entry or not type_entry.is_type:
error(self.pos, "'%s.%s' is not a type identifier" % (base_type, self.name))
return PyrexTypes.error_type
return type_entry.type
class TemplatedTypeNode(CBaseTypeNode):
# After parsing:
# positional_args [ExprNode] List of positional arguments
# keyword_args DictNode Keyword arguments
# base_type_node CBaseTypeNode
# After analysis:
# type PyrexTypes.BufferType or PyrexTypes.CppClassType ...containing the right options
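    # Source forms that end up here include, for example (illustrative):
    #   vector[int]                  -> templated C++ class
    #   np.ndarray[double, ndim=2]   -> buffer type
    #   <c type>[<dimension>]        -> array, handled via the array_declarator branch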
child_attrs = ["base_type_node", "positional_args",
"keyword_args", "dtype_node"]
dtype_node = None
name = None
def analyse(self, env, could_be_name=False, base_type=None):
if base_type is None:
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
if base_type.is_cpp_class and base_type.is_template_type():
# Templated class
if self.keyword_args and self.keyword_args.key_value_pairs:
error(self.pos, "c++ templates cannot take keyword arguments")
self.type = PyrexTypes.error_type
else:
template_types = []
for template_node in self.positional_args:
type = template_node.analyse_as_type(env)
if type is None:
error(template_node.pos, "unknown type in template argument")
return error_type
template_types.append(type)
self.type = base_type.specialize_here(self.pos, template_types)
elif base_type.is_pyobject:
# Buffer
from . import Buffer
options = Buffer.analyse_buffer_options(
self.pos,
env,
self.positional_args,
self.keyword_args,
base_type.buffer_defaults)
if sys.version_info[0] < 3:
# Py 2.x enforces byte strings as keyword arguments ...
options = dict([(name.encode('ASCII'), value)
for name, value in options.items()])
self.type = PyrexTypes.BufferType(base_type, **options)
else:
# Array
empty_declarator = CNameDeclaratorNode(self.pos, name="", cname=None)
if len(self.positional_args) > 1 or self.keyword_args.key_value_pairs:
error(self.pos, "invalid array declaration")
self.type = PyrexTypes.error_type
else:
# It would be nice to merge this class with CArrayDeclaratorNode,
# but arrays are part of the declaration, not the type...
if not self.positional_args:
dimension = None
else:
dimension = self.positional_args[0]
self.array_declarator = CArrayDeclaratorNode(
self.pos,
base=empty_declarator,
dimension=dimension)
self.type = self.array_declarator.analyse(base_type, env)[1]
if self.type.is_fused and env.fused_to_specific:
self.type = self.type.specialize(env.fused_to_specific)
return self.type
class CComplexBaseTypeNode(CBaseTypeNode):
# base_type CBaseTypeNode
# declarator CDeclaratorNode
child_attrs = ["base_type", "declarator"]
def analyse(self, env, could_be_name=False):
base = self.base_type.analyse(env, could_be_name)
_, type = self.declarator.analyse(base, env)
return type
class CTupleBaseTypeNode(CBaseTypeNode):
# components [CBaseTypeNode]
child_attrs = ["components"]
def analyse(self, env, could_be_name=False):
component_types = []
for c in self.components:
type = c.analyse(env)
if type.is_pyobject:
error(c.pos, "Tuple types can't (yet) contain Python objects.")
return error_type
component_types.append(type)
entry = env.declare_tuple_type(self.pos, component_types)
entry.used = True
return entry.type
class FusedTypeNode(CBaseTypeNode):
"""
Represents a fused type in a ctypedef statement:
ctypedef cython.fused_type(int, long, long long) integral
name str name of this fused type
types [CSimpleBaseTypeNode] is the list of types to be fused
"""
child_attrs = []
def analyse_declarations(self, env):
type = self.analyse(env)
entry = env.declare_typedef(self.name, type, self.pos)
# Omit the typedef declaration that self.declarator would produce
entry.in_cinclude = True
def analyse(self, env, could_be_name=False):
types = []
for type_node in self.types:
type = type_node.analyse_as_type(env)
if not type:
error(type_node.pos, "Not a type")
continue
if type in types:
error(type_node.pos, "Type specified multiple times")
else:
types.append(type)
# if len(self.types) == 1:
# return types[0]
return PyrexTypes.FusedType(types, name=self.name)
class CConstTypeNode(CBaseTypeNode):
# base_type CBaseTypeNode
child_attrs = ["base_type"]
def analyse(self, env, could_be_name=False):
base = self.base_type.analyse(env, could_be_name)
if base.is_pyobject:
error(self.pos,
"Const base type cannot be a Python object")
return PyrexTypes.c_const_type(base)
class CVarDefNode(StatNode):
# C variable definition or forward/extern function declaration.
#
# visibility 'private' or 'public' or 'extern'
# base_type CBaseTypeNode
# declarators [CDeclaratorNode]
# in_pxd boolean
# api boolean
# overridable boolean whether it is a cpdef
# modifiers ['inline']
# decorators [cython.locals(...)] or None
# directive_locals { string : NameNode } locals defined by cython.locals(...)
child_attrs = ["base_type", "declarators"]
decorators = None
directive_locals = None
def analyse_declarations(self, env, dest_scope=None):
if self.directive_locals is None:
self.directive_locals = {}
if not dest_scope:
dest_scope = env
self.dest_scope = dest_scope
if self.declarators:
templates = self.declarators[0].analyse_templates()
else:
templates = None
if templates is not None:
if self.visibility != 'extern':
error(self.pos, "Only extern functions allowed")
if len(self.declarators) > 1:
error(self.declarators[1].pos, "Can't multiply declare template types")
env = TemplateScope('func_template', env)
env.directives = env.outer_scope.directives
for template_param in templates:
env.declare_type(template_param.name, template_param, self.pos)
base_type = self.base_type.analyse(env)
if base_type.is_fused and not self.in_pxd and (env.is_c_class_scope or
env.is_module_scope):
error(self.pos, "Fused types not allowed here")
return error_type
self.entry = None
visibility = self.visibility
for declarator in self.declarators:
if (len(self.declarators) > 1
and not isinstance(declarator, CNameDeclaratorNode)
and env.directives['warn.multiple_declarators']):
warning(
declarator.pos,
"Non-trivial type declarators in shared declaration (e.g. mix of pointers and values). "
"Each pointer declaration should be on its own line.", 1)
create_extern_wrapper = (self.overridable
and self.visibility == 'extern'
and env.is_module_scope)
if create_extern_wrapper:
declarator.overridable = False
if isinstance(declarator, CFuncDeclaratorNode):
name_declarator, type = declarator.analyse(base_type, env, directive_locals=self.directive_locals)
else:
name_declarator, type = declarator.analyse(base_type, env)
if not type.is_complete():
if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice):
error(declarator.pos, "Variable type '%s' is incomplete" % type)
if self.visibility == 'extern' and type.is_pyobject:
error(declarator.pos, "Python object cannot be declared extern")
name = name_declarator.name
cname = name_declarator.cname
if name == '':
error(declarator.pos, "Missing name in declaration.")
return
if type.is_cfunction:
if 'staticmethod' in env.directives:
type.is_static_method = True
self.entry = dest_scope.declare_cfunction(
name, type, declarator.pos,
cname=cname, visibility=self.visibility, in_pxd=self.in_pxd,
api=self.api, modifiers=self.modifiers, overridable=self.overridable)
if self.entry is not None:
self.entry.directive_locals = copy.copy(self.directive_locals)
if create_extern_wrapper:
self.entry.type.create_to_py_utility_code(env)
self.entry.create_wrapper = True
else:
if self.directive_locals:
error(self.pos, "Decorators can only be followed by functions")
self.entry = dest_scope.declare_var(
name, type, declarator.pos,
cname=cname, visibility=visibility, in_pxd=self.in_pxd,
api=self.api, is_cdef=1)
if Options.docstrings:
self.entry.doc = embed_position(self.pos, self.doc)
class CStructOrUnionDefNode(StatNode):
# name string
# cname string or None
# kind "struct" or "union"
# typedef_flag boolean
# visibility "public" or "private"
# api boolean
# in_pxd boolean
# attributes [CVarDefNode] or None
# entry Entry
# packed boolean
child_attrs = ["attributes"]
def declare(self, env, scope=None):
self.entry = env.declare_struct_or_union(
self.name, self.kind, scope, self.typedef_flag, self.pos,
self.cname, visibility=self.visibility, api=self.api,
packed=self.packed)
def analyse_declarations(self, env):
scope = None
if self.attributes is not None:
scope = StructOrUnionScope(self.name)
self.declare(env, scope)
if self.attributes is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for attr in self.attributes:
attr.analyse_declarations(env, scope)
if self.visibility != 'extern':
for attr in scope.var_entries:
type = attr.type
while type.is_array:
type = type.base_type
if type == self.entry.type:
error(attr.pos, "Struct cannot contain itself as a member.")
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class CppClassNode(CStructOrUnionDefNode, BlockNode):
# name string
# cname string or None
# visibility "extern"
# in_pxd boolean
# attributes [CVarDefNode] or None
# entry Entry
# base_classes [CBaseTypeNode]
# templates [(string, bool)] or None
# decorators [DecoratorNode] or None
decorators = None
def declare(self, env):
if self.templates is None:
template_types = None
else:
template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required)
for template_name, required in self.templates]
num_optional_templates = sum(not required for _, required in self.templates)
if num_optional_templates and not all(required for _, required in self.templates[:-num_optional_templates]):
error(self.pos, "Required template parameters must precede optional template parameters.")
self.entry = env.declare_cpp_class(
self.name, None, self.pos, self.cname,
base_classes=[], visibility=self.visibility, templates=template_types)
def analyse_declarations(self, env):
if self.templates is None:
template_types = template_names = None
else:
template_names = [template_name for template_name, _ in self.templates]
template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required)
for template_name, required in self.templates]
scope = None
if self.attributes is not None:
scope = CppClassScope(self.name, env, templates=template_names)
def base_ok(base_class):
if base_class.is_cpp_class or base_class.is_struct:
return True
else:
error(self.pos, "Base class '%s' not a struct or class." % base_class)
base_class_types = filter(base_ok, [b.analyse(scope or env) for b in self.base_classes])
self.entry = env.declare_cpp_class(
self.name, scope, self.pos,
self.cname, base_class_types, visibility=self.visibility, templates=template_types)
if self.entry is None:
return
self.entry.is_cpp_class = 1
if scope is not None:
scope.type = self.entry.type
defined_funcs = []
def func_attributes(attributes):
for attr in attributes:
if isinstance(attr, CFuncDefNode):
yield attr
elif isinstance(attr, CompilerDirectivesNode):
for sub_attr in func_attributes(attr.body.stats):
yield sub_attr
if self.attributes is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for attr in self.attributes:
attr.analyse_declarations(scope)
for func in func_attributes(self.attributes):
defined_funcs.append(func)
if self.templates is not None:
func.template_declaration = "template <typename %s>" % ", typename ".join(template_names)
self.body = StatListNode(self.pos, stats=defined_funcs)
self.scope = scope
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(self.entry.type.scope)
return self
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(self.entry.type.scope, code)
def generate_execution_code(self, code):
self.body.generate_execution_code(code)
def annotate(self, code):
self.body.annotate(code)
class CEnumDefNode(StatNode):
# name string or None
# cname string or None
# items [CEnumDefItemNode]
# typedef_flag boolean
# visibility "public" or "private" or "extern"
# api boolean
# in_pxd boolean
# create_wrapper boolean
# entry Entry
child_attrs = ["items"]
def declare(self, env):
self.entry = env.declare_enum(
self.name, self.pos,
cname=self.cname, typedef_flag=self.typedef_flag,
visibility=self.visibility, api=self.api,
create_wrapper=self.create_wrapper and self.name is None)
def analyse_declarations(self, env):
if self.items is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for item in self.items:
item.analyse_declarations(env, self.entry)
if self.name is not None:
self.entry.type.values = set(item.name for item in self.items)
if self.create_wrapper and self.name is not None:
from .UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(
"EnumType", "CpdefEnums.pyx",
context={"name": self.name,
"items": tuple(item.name for item in self.items)},
outer_module_scope=env.global_scope()))
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
if self.visibility == 'public' or self.api:
code.mark_pos(self.pos)
temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
for item in self.entry.enum_values:
code.putln("%s = PyInt_FromLong(%s); %s" % (
temp,
item.cname,
code.error_goto_if_null(temp, item.pos)))
code.put_gotref(temp)
code.putln('if (PyDict_SetItemString(%s, "%s", %s) < 0) %s' % (
Naming.moddict_cname,
item.name,
temp,
code.error_goto(item.pos)))
code.put_decref_clear(temp, PyrexTypes.py_object_type)
code.funcstate.release_temp(temp)
class CEnumDefItemNode(StatNode):
# name string
# cname string or None
# value ExprNode or None
child_attrs = ["value"]
def analyse_declarations(self, env, enum_entry):
if self.value:
self.value = self.value.analyse_const_expression(env)
if not self.value.type.is_int:
self.value = self.value.coerce_to(PyrexTypes.c_int_type, env)
self.value = self.value.analyse_const_expression(env)
entry = env.declare_const(
self.name, enum_entry.type,
self.value, self.pos, cname=self.cname,
visibility=enum_entry.visibility, api=enum_entry.api,
create_wrapper=enum_entry.create_wrapper and enum_entry.name is None)
enum_entry.enum_values.append(entry)
if enum_entry.name:
enum_entry.type.values.append(entry.cname)
class CTypeDefNode(StatNode):
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# visibility "public" or "private"
# api boolean
# in_pxd boolean
child_attrs = ["base_type", "declarator"]
def analyse_declarations(self, env):
base = self.base_type.analyse(env)
name_declarator, type = self.declarator.analyse(base, env)
name = name_declarator.name
cname = name_declarator.cname
entry = env.declare_typedef(
name, type, self.pos,
cname=cname, visibility=self.visibility, api=self.api)
if type.is_fused:
entry.in_cinclude = True
if self.in_pxd and not env.in_cinclude:
entry.defined_in_pxd = 1
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class FuncDefNode(StatNode, BlockNode):
# Base class for function definition nodes.
#
# return_type PyrexType
# #filename string C name of filename string const
# entry Symtab.Entry
# needs_closure boolean Whether or not this function has inner functions/classes/yield
# needs_outer_scope boolean Whether or not this function requires outer scope
# pymethdef_required boolean Force Python method struct generation
# directive_locals { string : ExprNode } locals defined by cython.locals(...)
# directive_returns [ExprNode] type defined by cython.returns(...)
# star_arg PyArgDeclNode or None * argument
# starstar_arg PyArgDeclNode or None ** argument
#
# is_async_def boolean is a Coroutine function
#
# has_fused_arguments boolean
# Whether this cdef function has fused parameters. This is needed
# by AnalyseDeclarationsTransform, so it can replace CFuncDefNodes
# with fused argument types with a FusedCFuncDefNode
py_func = None
needs_closure = False
needs_outer_scope = False
pymethdef_required = False
is_generator = False
is_generator_body = False
is_async_def = False
modifiers = []
has_fused_arguments = False
star_arg = None
starstar_arg = None
is_cyfunction = False
code_object = None
def analyse_default_values(self, env):
default_seen = 0
for arg in self.args:
if arg.default:
default_seen = 1
if arg.is_generic:
arg.default = arg.default.analyse_types(env)
arg.default = arg.default.coerce_to(arg.type, env)
else:
error(arg.pos, "This argument cannot have a default value")
arg.default = None
elif arg.kw_only:
default_seen = 1
elif default_seen:
error(arg.pos, "Non-default argument following default argument")
def analyse_annotations(self, env):
for arg in self.args:
if arg.annotation:
arg.annotation = arg.annotation.analyse_types(env)
def align_argument_type(self, env, arg):
# @cython.locals()
directive_locals = self.directive_locals
orig_type = arg.type
if arg.name in directive_locals:
type_node = directive_locals[arg.name]
other_type = type_node.analyse_as_type(env)
elif isinstance(arg, CArgDeclNode) and arg.annotation and env.directives['annotation_typing']:
type_node = arg.annotation
other_type = arg.inject_type_from_annotations(env)
if other_type is None:
return arg
else:
return arg
if other_type is None:
error(type_node.pos, "Not a type")
elif orig_type is not py_object_type and not orig_type.same_as(other_type):
error(arg.base_type.pos, "Signature does not agree with previous declaration")
error(type_node.pos, "Previous declaration here")
else:
arg.type = other_type
return arg
def need_gil_acquisition(self, lenv):
return 0
def create_local_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
if self.needs_closure:
lenv = ClosureScope(name=self.entry.name,
outer_scope=genv,
parent_scope=env,
scope_name=self.entry.cname)
else:
lenv = LocalScope(name=self.entry.name,
outer_scope=genv,
parent_scope=env)
lenv.return_type = self.return_type
type = self.entry.type
if type.is_cfunction:
lenv.nogil = type.nogil and not type.with_gil
self.local_scope = lenv
lenv.directives = env.directives
return lenv
def generate_function_body(self, env, code):
self.body.generate_execution_code(code)
def generate_function_definitions(self, env, code):
from . import Buffer
if self.return_type.is_memoryviewslice:
from . import MemoryView
lenv = self.local_scope
if lenv.is_closure_scope and not lenv.is_passthrough:
outer_scope_cname = "%s->%s" % (Naming.cur_scope_cname,
Naming.outer_scope_cname)
else:
outer_scope_cname = Naming.outer_scope_cname
lenv.mangle_closure_cnames(outer_scope_cname)
# Generate closure function definitions
self.body.generate_function_definitions(lenv, code)
# generate lambda function definitions
self.generate_lambda_definitions(lenv, code)
is_getbuffer_slot = (self.entry.name == "__getbuffer__" and
self.entry.scope.is_c_class_scope)
is_releasebuffer_slot = (self.entry.name == "__releasebuffer__" and
self.entry.scope.is_c_class_scope)
is_buffer_slot = is_getbuffer_slot or is_releasebuffer_slot
if is_buffer_slot:
if 'cython_unused' not in self.modifiers:
self.modifiers = self.modifiers + ['cython_unused']
preprocessor_guard = self.get_preprocessor_guard()
profile = code.globalstate.directives['profile']
linetrace = code.globalstate.directives['linetrace']
if profile or linetrace:
code.globalstate.use_utility_code(
UtilityCode.load_cached("Profile", "Profile.c"))
# Generate C code for header and body of function
code.enter_cfunc_scope()
code.return_from_error_cleanup_label = code.new_label()
code.funcstate.gil_owned = not lenv.nogil
# ----- Top-level constants used by this function
code.mark_pos(self.pos)
self.generate_cached_builtins_decls(lenv, code)
# ----- Function header
code.putln("")
if preprocessor_guard:
code.putln(preprocessor_guard)
with_pymethdef = (self.needs_assignment_synthesis(env, code) or
self.pymethdef_required)
if self.py_func:
self.py_func.generate_function_header(
code, with_pymethdef=with_pymethdef, proto_only=True)
self.generate_function_header(code, with_pymethdef=with_pymethdef)
# ----- Local variable declarations
# Find function scope
cenv = env
while cenv.is_py_class_scope or cenv.is_c_class_scope:
cenv = cenv.outer_scope
if self.needs_closure:
code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
code.putln(";")
elif self.needs_outer_scope:
if lenv.is_passthrough:
code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
code.putln(";")
code.put(cenv.scope_class.type.declaration_code(Naming.outer_scope_cname))
code.putln(";")
self.generate_argument_declarations(lenv, code)
for entry in lenv.var_entries:
if not (entry.in_closure or entry.is_arg):
code.put_var_declaration(entry)
# Initialize the return variable __pyx_r
init = ""
if not self.return_type.is_void:
if self.return_type.is_pyobject:
init = " = NULL"
elif self.return_type.is_memoryviewslice:
init = ' = ' + MemoryView.memslice_entry_init
code.putln("%s%s;" % (
self.return_type.declaration_code(Naming.retval_cname),
init))
tempvardecl_code = code.insertion_point()
self.generate_keyword_list(code)
# ----- Extern library function declarations
lenv.generate_library_function_declarations(code)
# ----- GIL acquisition
acquire_gil = self.acquire_gil
# See if we need to acquire the GIL for variable declarations, or for
# refnanny only
# Closures are not currently possible for cdef nogil functions,
# but check them anyway
have_object_args = self.needs_closure or self.needs_outer_scope
for arg in lenv.arg_entries:
if arg.type.is_pyobject:
have_object_args = True
break
used_buffer_entries = [entry for entry in lenv.buffer_entries if entry.used]
acquire_gil_for_var_decls_only = (
lenv.nogil and lenv.has_with_gil_block and
(have_object_args or used_buffer_entries))
acquire_gil_for_refnanny_only = (
lenv.nogil and lenv.has_with_gil_block and not
acquire_gil_for_var_decls_only)
use_refnanny = not lenv.nogil or lenv.has_with_gil_block
if acquire_gil or acquire_gil_for_var_decls_only:
code.put_ensure_gil()
code.funcstate.gil_owned = True
elif lenv.nogil and lenv.has_with_gil_block:
code.declare_gilstate()
if profile or linetrace:
tempvardecl_code.put_trace_declarations()
code_object = self.code_object.calculate_result_code(code) if self.code_object else None
code.put_trace_frame_init(code_object)
# ----- set up refnanny
if use_refnanny:
tempvardecl_code.put_declare_refcount_context()
code.put_setup_refcount_context(
self.entry.name, acquire_gil=acquire_gil_for_refnanny_only)
# ----- Automatic lead-ins for certain special functions
if is_getbuffer_slot:
self.getbuffer_init(code)
# ----- Create closure scope object
if self.needs_closure:
tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
slot_func_cname = TypeSlots.get_slot_function(lenv.scope_class.type.scope, tp_slot)
if not slot_func_cname:
slot_func_cname = '%s->tp_new' % lenv.scope_class.type.typeptr_cname
code.putln("%s = (%s)%s(%s, %s, NULL);" % (
Naming.cur_scope_cname,
lenv.scope_class.type.empty_declaration_code(),
slot_func_cname,
lenv.scope_class.type.typeptr_cname,
Naming.empty_tuple))
code.putln("if (unlikely(!%s)) {" % Naming.cur_scope_cname)
if is_getbuffer_slot:
self.getbuffer_error_cleanup(code)
if use_refnanny:
code.put_finish_refcount_context()
if acquire_gil or acquire_gil_for_var_decls_only:
code.put_release_ensured_gil()
# FIXME: what if the error return value is a Python value?
err_val = self.error_value()
if err_val is None:
if not self.caller_will_check_exceptions():
warning(self.entry.pos,
"Unraisable exception in function '%s'." %
self.entry.qualified_name, 0)
code.put_unraisable(self.entry.qualified_name, lenv.nogil)
#if self.return_type.is_void:
code.putln("return;")
else:
code.putln("return %s;" % err_val)
code.putln("}")
code.put_gotref(Naming.cur_scope_cname)
# Note that it is unsafe to decref the scope at this point.
if self.needs_outer_scope:
if self.is_cyfunction:
code.putln("%s = (%s) __Pyx_CyFunction_GetClosure(%s);" % (
outer_scope_cname,
cenv.scope_class.type.empty_declaration_code(),
Naming.self_cname))
else:
code.putln("%s = (%s) %s;" % (
outer_scope_cname,
cenv.scope_class.type.empty_declaration_code(),
Naming.self_cname))
if lenv.is_passthrough:
code.putln("%s = %s;" % (Naming.cur_scope_cname, outer_scope_cname))
elif self.needs_closure:
# inner closures own a reference to their outer parent
code.put_incref(outer_scope_cname, cenv.scope_class.type)
code.put_giveref(outer_scope_cname)
# ----- Trace function call
if profile or linetrace:
# this looks a bit late, but if we don't get here due to a
            # fatal error beforehand, it's not really worth tracing
if isinstance(self, DefNode) and self.is_wrapper:
trace_name = self.entry.name + " (wrapper)"
else:
trace_name = self.entry.name
code.put_trace_call(
trace_name, self.pos, nogil=not code.funcstate.gil_owned)
code.funcstate.can_trace = True
# ----- Fetch arguments
self.generate_argument_parsing_code(env, code)
# If an argument is assigned to in the body, we must
# incref it to properly keep track of refcounts.
is_cdef = isinstance(self, CFuncDefNode)
for entry in lenv.arg_entries:
if entry.type.is_pyobject:
if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure:
code.put_var_incref(entry)
            # Note: defaults are always incref-ed. For def functions, we
            #       acquire arguments from object conversion, so we have
            #       new references. If we are a cdef function, we need to
            #       incref our arguments.
elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1:
code.put_incref_memoryviewslice(entry.cname, have_gil=code.funcstate.gil_owned)
for entry in lenv.var_entries:
if entry.is_arg and len(entry.cf_assignments) > 1 and not entry.in_closure:
if entry.xdecref_cleanup:
code.put_var_xincref(entry)
else:
code.put_var_incref(entry)
# ----- Initialise local buffer auxiliary variables
for entry in lenv.var_entries + lenv.arg_entries:
if entry.type.is_buffer and entry.buffer_aux.buflocal_nd_var.used:
Buffer.put_init_vars(entry, code)
# ----- Check and convert arguments
self.generate_argument_type_tests(code)
# ----- Acquire buffer arguments
for entry in lenv.arg_entries:
if entry.type.is_buffer:
Buffer.put_acquire_arg_buffer(entry, code, self.pos)
if acquire_gil_for_var_decls_only:
code.put_release_ensured_gil()
code.funcstate.gil_owned = False
# -------------------------
# ----- Function body -----
# -------------------------
self.generate_function_body(env, code)
code.mark_pos(self.pos, trace=False)
code.putln("")
code.putln("/* function exit code */")
# ----- Default return value
if not self.body.is_terminator:
if self.return_type.is_pyobject:
#if self.return_type.is_extension_type:
# lhs = "(PyObject *)%s" % Naming.retval_cname
#else:
lhs = Naming.retval_cname
code.put_init_to_py_none(lhs, self.return_type)
else:
val = self.return_type.default_value
if val:
code.putln("%s = %s;" % (Naming.retval_cname, val))
# ----- Error cleanup
if code.error_label in code.labels_used:
if not self.body.is_terminator:
code.put_goto(code.return_label)
code.put_label(code.error_label)
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type, have_gil=not lenv.nogil)
# Clean up buffers -- this calls a Python function
# so need to save and restore error state
buffers_present = len(used_buffer_entries) > 0
#memslice_entries = [e for e in lenv.entries.values() if e.type.is_memoryviewslice]
if buffers_present:
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;")
code.putln("__Pyx_PyThreadState_declare")
code.putln("__Pyx_PyThreadState_assign")
code.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);")
for entry in used_buffer_entries:
Buffer.put_release_buffer_code(code, entry)
#code.putln("%s = 0;" % entry.cname)
code.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}")
if self.return_type.is_memoryviewslice:
MemoryView.put_init_entry(Naming.retval_cname, code)
err_val = Naming.retval_cname
else:
err_val = self.error_value()
exc_check = self.caller_will_check_exceptions()
if err_val is not None or exc_check:
# TODO: Fix exception tracing (though currently unused by cProfile).
# code.globalstate.use_utility_code(get_exception_tuple_utility_code)
# code.put_trace_exception()
if lenv.nogil and not lenv.has_with_gil_block:
code.putln("{")
code.put_ensure_gil()
code.put_add_traceback(self.entry.qualified_name)
if lenv.nogil and not lenv.has_with_gil_block:
code.put_release_ensured_gil()
code.putln("}")
else:
warning(self.entry.pos,
"Unraisable exception in function '%s'." %
self.entry.qualified_name, 0)
code.put_unraisable(self.entry.qualified_name, lenv.nogil)
default_retval = self.return_type.default_value
if err_val is None and default_retval:
err_val = default_retval
if err_val is not None:
code.putln("%s = %s;" % (Naming.retval_cname, err_val))
if is_getbuffer_slot:
self.getbuffer_error_cleanup(code)
# If we are using the non-error cleanup section we should
        # jump past it if we have an error. The if-test below determines
# whether this section is used.
if buffers_present or is_getbuffer_slot or self.return_type.is_memoryviewslice:
code.put_goto(code.return_from_error_cleanup_label)
# ----- Non-error return cleanup
code.put_label(code.return_label)
for entry in used_buffer_entries:
Buffer.put_release_buffer_code(code, entry)
if is_getbuffer_slot:
self.getbuffer_normal_cleanup(code)
if self.return_type.is_memoryviewslice:
# See if our return value is uninitialized on non-error return
# from . import MemoryView
# MemoryView.err_if_nogil_initialized_check(self.pos, env)
cond = code.unlikely(self.return_type.error_condition(Naming.retval_cname))
code.putln(
'if (%s) {' % cond)
if env.nogil:
code.put_ensure_gil()
code.putln(
'PyErr_SetString(PyExc_TypeError, "Memoryview return value is not initialized");')
if env.nogil:
code.put_release_ensured_gil()
code.putln(
'}')
# ----- Return cleanup for both error and no-error return
code.put_label(code.return_from_error_cleanup_label)
for entry in lenv.var_entries:
if not entry.used or entry.in_closure:
continue
if entry.type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(entry.cname, have_gil=not lenv.nogil)
elif entry.type.is_pyobject:
if not entry.is_arg or len(entry.cf_assignments) > 1:
if entry.xdecref_cleanup:
code.put_var_xdecref(entry)
else:
code.put_var_decref(entry)
# Decref any increfed args
for entry in lenv.arg_entries:
if entry.type.is_pyobject:
if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure:
code.put_var_decref(entry)
elif (entry.type.is_memoryviewslice and
(not is_cdef or len(entry.cf_assignments) > 1)):
# decref slices of def functions and acquired slices from cdef
# functions, but not borrowed slices from cdef functions.
code.put_xdecref_memoryviewslice(entry.cname,
have_gil=not lenv.nogil)
if self.needs_closure:
code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type)
# ----- Return
# This code is duplicated in ModuleNode.generate_module_init_func
if not lenv.nogil:
default_retval = self.return_type.default_value
err_val = self.error_value()
if err_val is None and default_retval:
err_val = default_retval # FIXME: why is err_val not used?
if self.return_type.is_pyobject:
code.put_xgiveref(self.return_type.as_pyobject(Naming.retval_cname))
if self.entry.is_special and self.entry.name == "__hash__":
# Returning -1 for __hash__ is supposed to signal an error
            # We do the same as Python instances and coerce -1 into -2.
code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % (
Naming.retval_cname, Naming.retval_cname))
if profile or linetrace:
code.funcstate.can_trace = False
if self.return_type.is_pyobject:
code.put_trace_return(
Naming.retval_cname, nogil=not code.funcstate.gil_owned)
else:
code.put_trace_return(
"Py_None", nogil=not code.funcstate.gil_owned)
if not lenv.nogil:
# GIL holding function
code.put_finish_refcount_context()
if acquire_gil or (lenv.nogil and lenv.has_with_gil_block):
# release the GIL (note that with-gil blocks acquire it on exit in their EnsureGILNode)
code.put_release_ensured_gil()
code.funcstate.gil_owned = False
if not self.return_type.is_void:
code.putln("return %s;" % Naming.retval_cname)
code.putln("}")
if preprocessor_guard:
code.putln("#endif /*!(%s)*/" % preprocessor_guard)
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
# ----- Python version
code.exit_cfunc_scope()
if self.py_func:
self.py_func.generate_function_definitions(env, code)
self.generate_wrapper_functions(code)
def declare_argument(self, env, arg):
if arg.type.is_void:
error(arg.pos, "Invalid use of 'void'")
elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice):
error(arg.pos, "Argument type '%s' is incomplete" % arg.type)
return env.declare_arg(arg.name, arg.type, arg.pos)
def generate_arg_type_test(self, arg, code):
# Generate type test for one argument.
if arg.type.typeobj_is_available():
code.globalstate.use_utility_code(
UtilityCode.load_cached("ArgTypeTest", "FunctionArguments.c"))
typeptr_cname = arg.type.typeptr_cname
arg_code = "((PyObject *)%s)" % arg.entry.cname
code.putln(
'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, "%s", %s))) %s' % (
arg_code,
typeptr_cname,
arg.accept_none,
arg.name,
arg.type.is_builtin_type,
code.error_goto(arg.pos)))
else:
error(arg.pos, "Cannot test type of extern C class without type object name specification")
def generate_arg_none_check(self, arg, code):
# Generate None check for one argument.
if arg.type.is_memoryviewslice:
cname = "%s.memview" % arg.entry.cname
else:
cname = arg.entry.cname
code.putln('if (unlikely(((PyObject *)%s) == Py_None)) {' % cname)
code.putln('''PyErr_Format(PyExc_TypeError, "Argument '%%.%ds' must not be None", "%s"); %s''' % (
max(200, len(arg.name)), arg.name,
code.error_goto(arg.pos)))
code.putln('}')
def generate_wrapper_functions(self, code):
pass
def generate_execution_code(self, code):
code.mark_pos(self.pos)
# Evaluate and store argument default values
for arg in self.args:
if not arg.is_dynamic:
arg.generate_assignment_code(code)
#
# Special code for the __getbuffer__ function
#
def getbuffer_init(self, code):
info = self.local_scope.arg_entries[1].cname
# Python 3.0 betas have a bug in memoryview which makes it call
# getbuffer with a NULL parameter. For now we work around this;
# the following block should be removed when this bug is fixed.
code.putln("if (%s != NULL) {" % info)
code.putln("%s->obj = Py_None; __Pyx_INCREF(Py_None);" % info)
code.put_giveref("%s->obj" % info) # Do not refnanny object within structs
code.putln("}")
def getbuffer_error_cleanup(self, code):
info = self.local_scope.arg_entries[1].cname
code.putln("if (%s != NULL && %s->obj != NULL) {"
% (info, info))
code.put_gotref("%s->obj" % info)
code.putln("__Pyx_DECREF(%s->obj); %s->obj = NULL;"
% (info, info))
code.putln("}")
def getbuffer_normal_cleanup(self, code):
info = self.local_scope.arg_entries[1].cname
code.putln("if (%s != NULL && %s->obj == Py_None) {" % (info, info))
code.put_gotref("Py_None")
code.putln("__Pyx_DECREF(Py_None); %s->obj = NULL;" % info)
code.putln("}")
def get_preprocessor_guard(self):
if not self.entry.is_special:
return None
name = self.entry.name
slot = TypeSlots.method_name_to_slot.get(name)
if not slot:
return None
if name == '__long__' and not self.entry.scope.lookup_here('__int__'):
return None
if name in ("__getbuffer__", "__releasebuffer__") and self.entry.scope.is_c_class_scope:
return None
return slot.preprocessor_guard_code()
class CFuncDefNode(FuncDefNode):
# C function definition.
#
# modifiers ['inline']
# visibility 'private' or 'public' or 'extern'
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# cfunc_declarator the CFuncDeclarator of this function
# (this is also available through declarator or a
# base thereof)
# body StatListNode
# api boolean
# decorators [DecoratorNode] list of decorators
#
# with_gil boolean Acquire GIL around body
# type CFuncType
# py_func wrapper for calling from Python
# overridable whether or not this is a cpdef function
# inline_in_pxd whether this is an inline function in a pxd file
# template_declaration String or None Used for c++ class methods
# is_const_method whether this is a const method
# is_static_method whether this is a static method
# is_c_class_method whether this is a cclass method
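    #
    # Informal example (an illustrative assumption, not taken from any
    # particular test case): a declaration such as
    #
    #     cdef inline int add(int a, int b) nogil:
    #         return a + b
    #
    # is represented by this node with modifiers == ['inline'] and a nogil
    # CFuncType; a 'cpdef' version would additionally get a Python wrapper
    # in py_func and have overridable set.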
child_attrs = ["base_type", "declarator", "body", "py_func_stat"]
inline_in_pxd = False
decorators = None
directive_locals = None
directive_returns = None
override = None
template_declaration = None
is_const_method = False
py_func_stat = None
def unqualified_name(self):
return self.entry.name
def analyse_declarations(self, env):
self.is_c_class_method = env.is_c_class_scope
if self.directive_locals is None:
self.directive_locals = {}
self.directive_locals.update(env.directives['locals'])
if self.directive_returns is not None:
base_type = self.directive_returns.analyse_as_type(env)
if base_type is None:
error(self.directive_returns.pos, "Not a type")
base_type = PyrexTypes.error_type
else:
base_type = self.base_type.analyse(env)
self.is_static_method = 'staticmethod' in env.directives and not env.lookup_here('staticmethod')
# The 2 here is because we need both function and argument names.
if isinstance(self.declarator, CFuncDeclaratorNode):
name_declarator, type = self.declarator.analyse(
base_type, env, nonempty=2 * (self.body is not None),
directive_locals=self.directive_locals)
else:
name_declarator, type = self.declarator.analyse(
base_type, env, nonempty=2 * (self.body is not None))
if not type.is_cfunction:
error(self.pos, "Suite attached to non-function declaration")
# Remember the actual type according to the function header
# written here, because the type in the symbol table entry
# may be different if we're overriding a C method inherited
# from the base type of an extension type.
self.type = type
type.is_overridable = self.overridable
declarator = self.declarator
while not hasattr(declarator, 'args'):
declarator = declarator.base
self.cfunc_declarator = declarator
self.args = declarator.args
opt_arg_count = self.cfunc_declarator.optional_arg_count
if (self.visibility == 'public' or self.api) and opt_arg_count:
error(self.cfunc_declarator.pos,
"Function with optional arguments may not be declared public or api")
if type.exception_check == '+' and self.visibility != 'extern':
warning(self.cfunc_declarator.pos,
"Only extern functions can throw C++ exceptions.")
for formal_arg, type_arg in zip(self.args, type.args):
self.align_argument_type(env, type_arg)
formal_arg.type = type_arg.type
formal_arg.name = type_arg.name
formal_arg.cname = type_arg.cname
self._validate_type_visibility(type_arg.type, type_arg.pos, env)
if type_arg.type.is_fused:
self.has_fused_arguments = True
if type_arg.type.is_buffer and 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
if type_arg.type.is_buffer:
if self.type.nogil:
error(formal_arg.pos,
"Buffer may not be acquired without the GIL. Consider using memoryview slices instead.")
elif 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
self._validate_type_visibility(type.return_type, self.pos, env)
name = name_declarator.name
cname = name_declarator.cname
type.is_const_method = self.is_const_method
type.is_static_method = self.is_static_method
self.entry = env.declare_cfunction(
name, type, self.pos,
cname=cname, visibility=self.visibility, api=self.api,
defining=self.body is not None, modifiers=self.modifiers,
overridable=self.overridable)
self.entry.inline_func_in_pxd = self.inline_in_pxd
self.return_type = type.return_type
if self.return_type.is_array and self.visibility != 'extern':
error(self.pos, "Function cannot return an array")
if self.return_type.is_cpp_class:
self.return_type.check_nullary_constructor(self.pos, "used as a return value")
if self.overridable and not env.is_module_scope and not self.is_static_method:
if len(self.args) < 1 or not self.args[0].type.is_pyobject:
# An error will be produced in the cdef function
self.overridable = False
self.declare_cpdef_wrapper(env)
self.create_local_scope(env)
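    # Hedged sketch of what the next method synthesises: for something like
    #
    #     cpdef int method(self, x)
    #
    # a same-named Python-level DefNode is created whose body just forwards
    # to the C implementation (built by call_self_node() below), so that
    # Python callers and subclass overrides still work.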
def declare_cpdef_wrapper(self, env):
if self.overridable:
if self.is_static_method:
# TODO(robertwb): Finish this up, perhaps via more function refactoring.
error(self.pos, "static cpdef methods not yet supported")
name = self.entry.name
py_func_body = self.call_self_node(is_module_scope=env.is_module_scope)
if self.is_static_method:
from .ExprNodes import NameNode
decorators = [DecoratorNode(self.pos, decorator=NameNode(self.pos, name='staticmethod'))]
decorators[0].decorator.analyse_types(env)
else:
decorators = []
self.py_func = DefNode(pos=self.pos,
name=self.entry.name,
args=self.args,
star_arg=None,
starstar_arg=None,
doc=self.doc,
body=py_func_body,
decorators=decorators,
is_wrapper=1)
self.py_func.is_module_scope = env.is_module_scope
self.py_func.analyse_declarations(env)
self.py_func_stat = StatListNode(self.pos, stats=[self.py_func])
self.py_func.type = PyrexTypes.py_object_type
self.entry.as_variable = self.py_func.entry
self.entry.used = self.entry.as_variable.used = True
            # Reset the scope entry to the above cfunction
env.entries[name] = self.entry
if (not self.entry.is_final_cmethod and
(not env.is_module_scope or Options.lookup_module_cpdef)):
self.override = OverrideCheckNode(self.pos, py_func=self.py_func)
self.body = StatListNode(self.pos, stats=[self.override, self.body])
def _validate_type_visibility(self, type, pos, env):
"""
Ensure that types used in cdef functions are public or api, or
defined in a C header.
"""
public_or_api = (self.visibility == 'public' or self.api)
entry = getattr(type, 'entry', None)
if public_or_api and entry and env.is_module_scope:
if not (entry.visibility in ('public', 'extern') or
entry.api or entry.in_cinclude):
error(pos, "Function declared public or api may not have private types")
def call_self_node(self, omit_optional_args=0, is_module_scope=0):
from . import ExprNodes
args = self.type.args
if omit_optional_args:
args = args[:len(args) - self.type.optional_arg_count]
arg_names = [arg.name for arg in args]
if is_module_scope:
cfunc = ExprNodes.NameNode(self.pos, name=self.entry.name)
call_arg_names = arg_names
skip_dispatch = Options.lookup_module_cpdef
elif self.type.is_static_method:
class_entry = self.entry.scope.parent_type.entry
class_node = ExprNodes.NameNode(self.pos, name=class_entry.name)
class_node.entry = class_entry
cfunc = ExprNodes.AttributeNode(self.pos, obj=class_node, attribute=self.entry.name)
            # Calling static c(p)def methods on an instance is disallowed.
# TODO(robertwb): Support by passing self to check for override?
skip_dispatch = True
else:
type_entry = self.type.args[0].type.entry
type_arg = ExprNodes.NameNode(self.pos, name=type_entry.name)
type_arg.entry = type_entry
cfunc = ExprNodes.AttributeNode(self.pos, obj=type_arg, attribute=self.entry.name)
skip_dispatch = not is_module_scope or Options.lookup_module_cpdef
c_call = ExprNodes.SimpleCallNode(
self.pos,
function=cfunc,
args=[ExprNodes.NameNode(self.pos, name=n) for n in arg_names],
wrapper_call=skip_dispatch)
return ReturnStatNode(pos=self.pos, return_type=PyrexTypes.py_object_type, value=c_call)
def declare_arguments(self, env):
for arg in self.type.args:
if not arg.name:
error(arg.pos, "Missing argument name")
self.declare_argument(env, arg)
def need_gil_acquisition(self, lenv):
return self.type.with_gil
def nogil_check(self, env):
type = self.type
with_gil = type.with_gil
if type.nogil and not with_gil:
if type.return_type.is_pyobject:
error(self.pos,
"Function with Python return type cannot be declared nogil")
for entry in self.local_scope.var_entries:
if entry.type.is_pyobject and not entry.in_with_gil_block:
error(self.pos, "Function declared nogil has Python locals or temporaries")
def analyse_expressions(self, env):
self.local_scope.directives = env.directives
if self.py_func is not None:
# this will also analyse the default values
self.py_func = self.py_func.analyse_expressions(env)
else:
self.analyse_default_values(env)
self.analyse_annotations(env)
self.acquire_gil = self.need_gil_acquisition(self.local_scope)
return self
def needs_assignment_synthesis(self, env, code=None):
return False
def generate_function_header(self, code, with_pymethdef, with_opt_args=1, with_dispatch=1, cname=None):
scope = self.local_scope
arg_decls = []
type = self.type
for arg in type.args[:len(type.args)-type.optional_arg_count]:
arg_decl = arg.declaration_code()
entry = scope.lookup(arg.name)
if not entry.cf_used:
arg_decl = 'CYTHON_UNUSED %s' % arg_decl
arg_decls.append(arg_decl)
if with_dispatch and self.overridable:
dispatch_arg = PyrexTypes.c_int_type.declaration_code(
Naming.skip_dispatch_cname)
if self.override:
arg_decls.append(dispatch_arg)
else:
arg_decls.append('CYTHON_UNUSED %s' % dispatch_arg)
if type.optional_arg_count and with_opt_args:
arg_decls.append(type.op_arg_struct.declaration_code(Naming.optional_args_cname))
if type.has_varargs:
arg_decls.append("...")
if not arg_decls:
arg_decls = ["void"]
if cname is None:
cname = self.entry.func_cname
entity = type.function_header_code(cname, ', '.join(arg_decls))
if self.entry.visibility == 'private' and '::' not in cname:
storage_class = "static "
else:
storage_class = ""
dll_linkage = None
modifiers = code.build_function_modifiers(self.entry.func_modifiers)
header = self.return_type.declaration_code(entity, dll_linkage=dll_linkage)
#print (storage_class, modifiers, header)
needs_proto = self.is_c_class_method
if self.template_declaration:
if needs_proto:
code.globalstate.parts['module_declarations'].putln(self.template_declaration)
code.putln(self.template_declaration)
if needs_proto:
code.globalstate.parts['module_declarations'].putln(
"%s%s%s; /* proto*/" % (storage_class, modifiers, header))
code.putln("%s%s%s {" % (storage_class, modifiers, header))
def generate_argument_declarations(self, env, code):
scope = self.local_scope
for arg in self.args:
if arg.default:
entry = scope.lookup(arg.name)
if self.override or entry.cf_used:
result = arg.calculate_default_value_code(code)
code.putln('%s = %s;' % (
arg.type.declaration_code(arg.cname), result))
def generate_keyword_list(self, code):
pass
def generate_argument_parsing_code(self, env, code):
i = 0
used = 0
scope = self.local_scope
if self.type.optional_arg_count:
code.putln('if (%s) {' % Naming.optional_args_cname)
for arg in self.args:
if arg.default:
entry = scope.lookup(arg.name)
if self.override or entry.cf_used:
code.putln('if (%s->%sn > %s) {' %
(Naming.optional_args_cname,
Naming.pyrex_prefix, i))
declarator = arg.declarator
while not hasattr(declarator, 'name'):
declarator = declarator.base
code.putln('%s = %s->%s;' %
(arg.cname, Naming.optional_args_cname,
self.type.opt_arg_cname(declarator.name)))
used += 1
i += 1
for _ in range(used):
code.putln('}')
code.putln('}')
# Move arguments into closure if required
def put_into_closure(entry):
if entry.in_closure and not arg.default:
code.putln('%s = %s;' % (entry.cname, entry.original_cname))
code.put_var_incref(entry)
code.put_var_giveref(entry)
for arg in self.args:
put_into_closure(scope.lookup_here(arg.name))
def generate_argument_conversion_code(self, code):
pass
def generate_argument_type_tests(self, code):
# Generate type tests for args whose type in a parent
# class is a supertype of the declared type.
for arg in self.type.args:
if arg.needs_type_test:
self.generate_arg_type_test(arg, code)
elif arg.type.is_pyobject and not arg.accept_none:
self.generate_arg_none_check(arg, code)
def generate_execution_code(self, code):
super(CFuncDefNode, self).generate_execution_code(code)
if self.py_func_stat:
self.py_func_stat.generate_execution_code(code)
def error_value(self):
if self.return_type.is_pyobject:
return "0"
else:
#return None
return self.entry.type.exception_value
def caller_will_check_exceptions(self):
return self.entry.type.exception_check
def generate_wrapper_functions(self, code):
# If the C signature of a function has changed, we need to generate
# wrappers to put in the slots here.
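        # (Informal description of the loop below: every earlier declaration
        #  of this function is reachable via entry.prev_entry, and each one
        #  gets a small forwarding wrapper whose cname ends in
        #  "...__pyx_wrap_<k>" and which simply calls the current function.)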
k = 0
entry = self.entry
func_type = entry.type
while entry.prev_entry is not None:
k += 1
entry = entry.prev_entry
entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k)
code.putln()
self.generate_function_header(
code, 0,
with_dispatch=entry.type.is_overridable,
with_opt_args=entry.type.optional_arg_count,
cname=entry.func_cname)
if not self.return_type.is_void:
code.put('return ')
args = self.type.args
arglist = [arg.cname for arg in args[:len(args)-self.type.optional_arg_count]]
if entry.type.is_overridable:
arglist.append(Naming.skip_dispatch_cname)
elif func_type.is_overridable:
arglist.append('0')
if entry.type.optional_arg_count:
arglist.append(Naming.optional_args_cname)
elif func_type.optional_arg_count:
arglist.append('NULL')
code.putln('%s(%s);' % (self.entry.func_cname, ', '.join(arglist)))
code.putln('}')
class PyArgDeclNode(Node):
# Argument which must be a Python object (used
# for * and ** arguments).
#
# name string
# entry Symtab.Entry
# annotation ExprNode or None Py3 argument annotation
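    #
    # Informal example (assumed, for illustration): in "def f(*args, **kwargs)"
    # both 'args' and 'kwargs' are described by this node and stored as
    # star_arg / starstar_arg on the enclosing DefNode.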
child_attrs = []
is_self_arg = False
is_type_arg = False
def generate_function_definitions(self, env, code):
self.entry.generate_function_definitions(env, code)
class DecoratorNode(Node):
# A decorator
#
# decorator NameNode or CallNode or AttributeNode
child_attrs = ['decorator']
class DefNode(FuncDefNode):
# A Python function definition.
#
# name string the Python name of the function
# lambda_name string the internal name of a lambda 'function'
# decorators [DecoratorNode] list of decorators
# args [CArgDeclNode] formal arguments
# doc EncodedString or None
# body StatListNode
# return_type_annotation
# ExprNode or None the Py3 return type annotation
#
# The following subnode is constructed internally
# when the def statement is inside a Python class definition.
#
# fused_py_func DefNode The original fused cpdef DefNode
# (in case this is a specialization)
# specialized_cpdefs [DefNode] list of specialized cpdef DefNodes
# py_cfunc_node PyCFunctionNode/InnerFunctionNode The PyCFunction to create and assign
#
# decorator_indirection IndirectionNode Used to remove __Pyx_Method_ClassMethod for fused functions
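    #
    # Rough example (an illustrative assumption, not compiler output): a
    # function written as
    #
    #     def greet(name, *args, greeting="hi", **kwargs): ...
    #
    # becomes a DefNode whose args hold 'name' and 'greeting' (the latter
    # marked kw_only), with star_arg/starstar_arg set for *args/**kwargs;
    # __init__ below then counts num_kwonly_args == 1 and num_required_args == 1.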
child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"]
lambda_name = None
reqd_kw_flags_cname = "0"
is_wrapper = 0
no_assignment_synthesis = 0
decorators = None
return_type_annotation = None
entry = None
acquire_gil = 0
self_in_stararg = 0
py_cfunc_node = None
requires_classobj = False
    defaults_struct = None # Dynamic kwds structure name
doc = None
fused_py_func = False
specialized_cpdefs = None
py_wrapper = None
py_wrapper_required = True
func_cname = None
defaults_getter = None
def __init__(self, pos, **kwds):
FuncDefNode.__init__(self, pos, **kwds)
k = rk = r = 0
for arg in self.args:
if arg.kw_only:
k += 1
if not arg.default:
rk += 1
if not arg.default:
r += 1
self.num_kwonly_args = k
self.num_required_kw_args = rk
self.num_required_args = r
def as_cfunction(self, cfunc=None, scope=None, overridable=True, returns=None, modifiers=None):
if self.star_arg:
error(self.star_arg.pos, "cdef function cannot have star argument")
if self.starstar_arg:
error(self.starstar_arg.pos, "cdef function cannot have starstar argument")
if cfunc is None:
cfunc_args = []
for formal_arg in self.args:
name_declarator, type = formal_arg.analyse(scope, nonempty=1)
cfunc_args.append(PyrexTypes.CFuncTypeArg(name=name_declarator.name,
cname=None,
type=py_object_type,
pos=formal_arg.pos))
cfunc_type = PyrexTypes.CFuncType(return_type=py_object_type,
args=cfunc_args,
has_varargs=False,
exception_value=None,
exception_check=False,
nogil=False,
with_gil=False,
is_overridable=overridable)
cfunc = CVarDefNode(self.pos, type=cfunc_type)
else:
if scope is None:
scope = cfunc.scope
cfunc_type = cfunc.type
if len(self.args) != len(cfunc_type.args) or cfunc_type.has_varargs:
error(self.pos, "wrong number of arguments")
error(cfunc.pos, "previous declaration here")
for i, (formal_arg, type_arg) in enumerate(zip(self.args, cfunc_type.args)):
name_declarator, type = formal_arg.analyse(scope, nonempty=1,
is_self_arg=(i == 0 and scope.is_c_class_scope))
if type is None or type is PyrexTypes.py_object_type:
formal_arg.type = type_arg.type
formal_arg.name_declarator = name_declarator
from . import ExprNodes
if cfunc_type.exception_value is None:
exception_value = None
else:
exception_value = ExprNodes.ConstNode(
self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type)
declarator = CFuncDeclaratorNode(self.pos,
base=CNameDeclaratorNode(self.pos, name=self.name, cname=None),
args=self.args,
has_varargs=False,
exception_check=cfunc_type.exception_check,
exception_value=exception_value,
with_gil=cfunc_type.with_gil,
nogil=cfunc_type.nogil)
return CFuncDefNode(self.pos,
modifiers=modifiers or [],
base_type=CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type),
declarator=declarator,
body=self.body,
doc=self.doc,
overridable=cfunc_type.is_overridable,
type=cfunc_type,
with_gil=cfunc_type.with_gil,
nogil=cfunc_type.nogil,
visibility='private',
api=False,
directive_locals=getattr(cfunc, 'directive_locals', {}),
directive_returns=returns)
def is_cdef_func_compatible(self):
"""Determines if the function's signature is compatible with a
cdef function. This can be used before calling
.as_cfunction() to see if that will be successful.
"""
if self.needs_closure:
return False
if self.star_arg or self.starstar_arg:
return False
return True
def analyse_declarations(self, env):
self.is_classmethod = self.is_staticmethod = False
if self.decorators:
for decorator in self.decorators:
func = decorator.decorator
if func.is_name:
self.is_classmethod |= func.name == 'classmethod'
self.is_staticmethod |= func.name == 'staticmethod'
if self.is_classmethod and env.lookup_here('classmethod'):
# classmethod() was overridden - not much we can do here ...
self.is_classmethod = False
if self.is_staticmethod and env.lookup_here('staticmethod'):
# staticmethod() was overridden - not much we can do here ...
self.is_staticmethod = False
if self.name == '__new__' and env.is_py_class_scope:
self.is_staticmethod = 1
self.analyse_argument_types(env)
if self.name == '<lambda>':
self.declare_lambda_function(env)
else:
self.declare_pyfunction(env)
self.analyse_signature(env)
self.return_type = self.entry.signature.return_type()
# if a signature annotation provides a more specific return object type, use it
if self.return_type is py_object_type and self.return_type_annotation:
if env.directives['annotation_typing'] and not self.entry.is_special:
_, return_type = _analyse_signature_annotation(self.return_type_annotation, env)
if return_type and return_type.is_pyobject:
self.return_type = return_type
self.create_local_scope(env)
self.py_wrapper = DefNodeWrapper(
self.pos,
target=self,
name=self.entry.name,
args=self.args,
star_arg=self.star_arg,
starstar_arg=self.starstar_arg,
return_type=self.return_type)
self.py_wrapper.analyse_declarations(env)
def analyse_argument_types(self, env):
self.directive_locals = env.directives['locals']
allow_none_for_extension_args = env.directives['allow_none_for_extension_args']
f2s = env.fused_to_specific
env.fused_to_specific = None
for arg in self.args:
if hasattr(arg, 'name'):
name_declarator = None
else:
base_type = arg.base_type.analyse(env)
name_declarator, type = \
arg.declarator.analyse(base_type, env)
arg.name = name_declarator.name
arg.type = type
if type.is_fused:
self.has_fused_arguments = True
self.align_argument_type(env, arg)
if name_declarator and name_declarator.cname:
error(self.pos, "Python function argument cannot have C name specification")
arg.type = arg.type.as_argument_type()
arg.hdr_type = None
arg.needs_conversion = 0
arg.needs_type_test = 0
arg.is_generic = 1
if arg.type.is_pyobject or arg.type.is_buffer or arg.type.is_memoryviewslice:
if arg.or_none:
arg.accept_none = True
elif arg.not_none:
arg.accept_none = False
elif (arg.type.is_extension_type or arg.type.is_builtin_type
or arg.type.is_buffer or arg.type.is_memoryviewslice):
if arg.default and arg.default.constant_result is None:
# special case: def func(MyType obj = None)
arg.accept_none = True
else:
# default depends on compiler directive
arg.accept_none = allow_none_for_extension_args
else:
# probably just a plain 'object'
arg.accept_none = True
else:
arg.accept_none = True # won't be used, but must be there
if arg.not_none:
error(arg.pos, "Only Python type arguments can have 'not None'")
if arg.or_none:
error(arg.pos, "Only Python type arguments can have 'or None'")
env.fused_to_specific = f2s
def analyse_signature(self, env):
if self.entry.is_special:
if self.decorators:
error(self.pos, "special functions of cdef classes cannot have decorators")
self.entry.trivial_signature = len(self.args) == 1 and not (self.star_arg or self.starstar_arg)
elif not env.directives['always_allow_keywords'] and not (self.star_arg or self.starstar_arg):
# Use the simpler calling signature for zero- and one-argument functions.
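            # (For instance - an informal note - a zero-argument module-level
            #  "def ping()" can then use the METH_NOARGS style signature and a
            #  one-argument method can use the unary slot, so no argument
            #  tuple has to be unpacked at call time.)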
if self.entry.signature is TypeSlots.pyfunction_signature:
if len(self.args) == 0:
self.entry.signature = TypeSlots.pyfunction_noargs
elif len(self.args) == 1:
if self.args[0].default is None and not self.args[0].kw_only:
self.entry.signature = TypeSlots.pyfunction_onearg
elif self.entry.signature is TypeSlots.pymethod_signature:
if len(self.args) == 1:
self.entry.signature = TypeSlots.unaryfunc
elif len(self.args) == 2:
if self.args[1].default is None and not self.args[1].kw_only:
self.entry.signature = TypeSlots.ibinaryfunc
sig = self.entry.signature
nfixed = sig.num_fixed_args()
if (sig is TypeSlots.pymethod_signature and nfixed == 1
and len(self.args) == 0 and self.star_arg):
# this is the only case where a diverging number of
# arguments is not an error - when we have no explicit
# 'self' parameter as in method(*args)
sig = self.entry.signature = TypeSlots.pyfunction_signature # self is not 'really' used
self.self_in_stararg = 1
nfixed = 0
if self.is_staticmethod and env.is_c_class_scope:
nfixed = 0
self.self_in_stararg = True # FIXME: why for staticmethods?
self.entry.signature = sig = copy.copy(sig)
sig.fixed_arg_format = "*"
sig.is_staticmethod = True
sig.has_generic_args = True
if ((self.is_classmethod or self.is_staticmethod) and
self.has_fused_arguments and env.is_c_class_scope):
del self.decorator_indirection.stats[:]
for i in range(min(nfixed, len(self.args))):
arg = self.args[i]
arg.is_generic = 0
if sig.is_self_arg(i) and not self.is_staticmethod:
if self.is_classmethod:
arg.is_type_arg = 1
arg.hdr_type = arg.type = Builtin.type_type
else:
arg.is_self_arg = 1
arg.hdr_type = arg.type = env.parent_type
arg.needs_conversion = 0
else:
arg.hdr_type = sig.fixed_arg_type(i)
if not arg.type.same_as(arg.hdr_type):
if arg.hdr_type.is_pyobject and arg.type.is_pyobject:
arg.needs_type_test = 1
else:
arg.needs_conversion = 1
if arg.needs_conversion:
arg.hdr_cname = Naming.arg_prefix + arg.name
else:
arg.hdr_cname = Naming.var_prefix + arg.name
if nfixed > len(self.args):
self.bad_signature()
return
elif nfixed < len(self.args):
if not sig.has_generic_args:
self.bad_signature()
for arg in self.args:
if arg.is_generic and (arg.type.is_extension_type or arg.type.is_builtin_type):
arg.needs_type_test = 1
def bad_signature(self):
sig = self.entry.signature
expected_str = "%d" % sig.num_fixed_args()
if sig.has_generic_args:
expected_str += " or more"
name = self.name
if name.startswith("__") and name.endswith("__"):
desc = "Special method"
else:
desc = "Method"
error(self.pos, "%s %s has wrong number of arguments (%d declared, %s expected)" % (
desc, self.name, len(self.args), expected_str))
def declare_pyfunction(self, env):
#print "DefNode.declare_pyfunction:", self.name, "in", env ###
name = self.name
entry = env.lookup_here(name)
if entry:
if entry.is_final_cmethod and not env.parent_type.is_final_type:
error(self.pos, "Only final types can have final Python (def/cpdef) methods")
if entry.type.is_cfunction and not entry.is_builtin_cmethod and not self.is_wrapper:
warning(self.pos, "Overriding cdef method with def method.", 5)
entry = env.declare_pyfunction(name, self.pos, allow_redefine=not self.is_wrapper)
self.entry = entry
prefix = env.next_id(env.scope_prefix)
self.entry.pyfunc_cname = Naming.pyfunc_prefix + prefix + name
if Options.docstrings:
entry.doc = embed_position(self.pos, self.doc)
entry.doc_cname = Naming.funcdoc_prefix + prefix + name
if entry.is_special:
if entry.name in TypeSlots.invisible or not entry.doc or (
                        entry.name == '__getattr__' and env.directives['fast_getattr']):
entry.wrapperbase_cname = None
else:
entry.wrapperbase_cname = Naming.wrapperbase_prefix + prefix + name
else:
entry.doc = None
def declare_lambda_function(self, env):
entry = env.declare_lambda_function(self.lambda_name, self.pos)
entry.doc = None
self.entry = entry
self.entry.pyfunc_cname = entry.cname
def declare_arguments(self, env):
for arg in self.args:
if not arg.name:
error(arg.pos, "Missing argument name")
if arg.needs_conversion:
arg.entry = env.declare_var(arg.name, arg.type, arg.pos)
if arg.type.is_pyobject:
arg.entry.init = "0"
else:
arg.entry = self.declare_argument(env, arg)
arg.entry.is_arg = 1
arg.entry.used = 1
arg.entry.is_self_arg = arg.is_self_arg
self.declare_python_arg(env, self.star_arg)
self.declare_python_arg(env, self.starstar_arg)
def declare_python_arg(self, env, arg):
if arg:
if env.directives['infer_types'] != False:
type = PyrexTypes.unspecified_type
else:
type = py_object_type
entry = env.declare_var(arg.name, type, arg.pos)
entry.is_arg = 1
entry.used = 1
entry.init = "0"
entry.xdecref_cleanup = 1
arg.entry = entry
def analyse_expressions(self, env):
self.local_scope.directives = env.directives
self.analyse_default_values(env)
self.analyse_annotations(env)
if self.return_type_annotation:
self.return_type_annotation = self.return_type_annotation.analyse_types(env)
if not self.needs_assignment_synthesis(env) and self.decorators:
for decorator in self.decorators[::-1]:
decorator.decorator = decorator.decorator.analyse_expressions(env)
self.py_wrapper.prepare_argument_coercion(env)
return self
def needs_assignment_synthesis(self, env, code=None):
if self.is_staticmethod:
return True
if self.is_wrapper or self.specialized_cpdefs or self.entry.is_fused_specialized:
return False
if self.no_assignment_synthesis:
return False
        # Should enable for module level as well, but that will require more testing...
if self.entry.is_anonymous:
return True
if env.is_module_scope:
if code is None:
return env.directives['binding']
else:
return code.globalstate.directives['binding']
return env.is_py_class_scope or env.is_closure_scope
def error_value(self):
return self.entry.signature.error_value
def caller_will_check_exceptions(self):
return self.entry.signature.exception_check
def generate_function_definitions(self, env, code):
if self.defaults_getter:
self.defaults_getter.generate_function_definitions(env, code)
# Before closure cnames are mangled
if self.py_wrapper_required:
# func_cname might be modified by @cname
self.py_wrapper.func_cname = self.entry.func_cname
self.py_wrapper.generate_function_definitions(env, code)
FuncDefNode.generate_function_definitions(self, env, code)
def generate_function_header(self, code, with_pymethdef, proto_only=0):
if proto_only:
if self.py_wrapper_required:
self.py_wrapper.generate_function_header(
code, with_pymethdef, True)
return
arg_code_list = []
if self.entry.signature.has_dummy_arg:
self_arg = 'PyObject *%s' % Naming.self_cname
if not self.needs_outer_scope:
self_arg = 'CYTHON_UNUSED ' + self_arg
arg_code_list.append(self_arg)
def arg_decl_code(arg):
entry = arg.entry
if entry.in_closure:
cname = entry.original_cname
else:
cname = entry.cname
decl = entry.type.declaration_code(cname)
if not entry.cf_used:
decl = 'CYTHON_UNUSED ' + decl
return decl
for arg in self.args:
arg_code_list.append(arg_decl_code(arg))
if self.star_arg:
arg_code_list.append(arg_decl_code(self.star_arg))
if self.starstar_arg:
arg_code_list.append(arg_decl_code(self.starstar_arg))
arg_code = ', '.join(arg_code_list)
dc = self.return_type.declaration_code(self.entry.pyfunc_cname)
decls_code = code.globalstate['decls']
preprocessor_guard = self.get_preprocessor_guard()
if preprocessor_guard:
decls_code.putln(preprocessor_guard)
decls_code.putln(
"static %s(%s); /* proto */" % (dc, arg_code))
if preprocessor_guard:
decls_code.putln("#endif")
code.putln("static %s(%s) {" % (dc, arg_code))
def generate_argument_declarations(self, env, code):
pass
def generate_keyword_list(self, code):
pass
def generate_argument_parsing_code(self, env, code):
# Move arguments into closure if required
def put_into_closure(entry):
if entry.in_closure:
code.putln('%s = %s;' % (entry.cname, entry.original_cname))
code.put_var_incref(entry)
code.put_var_giveref(entry)
for arg in self.args:
put_into_closure(arg.entry)
for arg in self.star_arg, self.starstar_arg:
if arg:
put_into_closure(arg.entry)
def generate_argument_type_tests(self, code):
pass
class DefNodeWrapper(FuncDefNode):
# DefNode python wrapper code generator
defnode = None
target = None # Target DefNode
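    #
    # Descriptive note (informal): this node emits the C entry point that
    # CPython invokes through the method table; it unpacks the incoming
    # args/kwargs, runs the argument type tests, and finally calls the
    # target DefNode's implementation function (see generate_function_body).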
def __init__(self, *args, **kwargs):
FuncDefNode.__init__(self, *args, **kwargs)
self.num_kwonly_args = self.target.num_kwonly_args
self.num_required_kw_args = self.target.num_required_kw_args
self.num_required_args = self.target.num_required_args
self.self_in_stararg = self.target.self_in_stararg
self.signature = None
def analyse_declarations(self, env):
target_entry = self.target.entry
name = self.name
prefix = env.next_id(env.scope_prefix)
target_entry.func_cname = Naming.pywrap_prefix + prefix + name
target_entry.pymethdef_cname = Naming.pymethdef_prefix + prefix + name
self.signature = target_entry.signature
def prepare_argument_coercion(self, env):
# This is only really required for Cython utility code at this time,
# everything else can be done during code generation. But we expand
# all utility code here, simply because we cannot easily distinguish
# different code types.
for arg in self.args:
if not arg.type.is_pyobject:
if not arg.type.create_from_py_utility_code(env):
pass # will fail later
elif arg.hdr_type and not arg.hdr_type.is_pyobject:
if not arg.hdr_type.create_to_py_utility_code(env):
pass # will fail later
if self.starstar_arg and not self.starstar_arg.entry.cf_used:
# we will set the kwargs argument to NULL instead of a new dict
# and must therefore correct the control flow state
entry = self.starstar_arg.entry
entry.xdecref_cleanup = 1
for ass in entry.cf_assignments:
if not ass.is_arg and ass.lhs.is_name:
ass.lhs.cf_maybe_null = True
def signature_has_nongeneric_args(self):
argcount = len(self.args)
if argcount == 0 or (
argcount == 1 and (self.args[0].is_self_arg or
self.args[0].is_type_arg)):
return 0
return 1
def signature_has_generic_args(self):
return self.signature.has_generic_args
def generate_function_body(self, code):
args = []
if self.signature.has_dummy_arg:
args.append(Naming.self_cname)
for arg in self.args:
if arg.hdr_type and not (arg.type.is_memoryviewslice or
arg.type.is_struct or
arg.type.is_complex):
args.append(arg.type.cast_code(arg.entry.cname))
else:
args.append(arg.entry.cname)
if self.star_arg:
args.append(self.star_arg.entry.cname)
if self.starstar_arg:
args.append(self.starstar_arg.entry.cname)
args = ', '.join(args)
if not self.return_type.is_void:
code.put('%s = ' % Naming.retval_cname)
code.putln('%s(%s);' % (
self.target.entry.pyfunc_cname, args))
def generate_function_definitions(self, env, code):
lenv = self.target.local_scope
# Generate C code for header and body of function
code.mark_pos(self.pos)
code.putln("")
code.putln("/* Python wrapper */")
preprocessor_guard = self.target.get_preprocessor_guard()
if preprocessor_guard:
code.putln(preprocessor_guard)
code.enter_cfunc_scope()
code.return_from_error_cleanup_label = code.new_label()
with_pymethdef = (self.target.needs_assignment_synthesis(env, code) or
self.target.pymethdef_required)
self.generate_function_header(code, with_pymethdef)
self.generate_argument_declarations(lenv, code)
tempvardecl_code = code.insertion_point()
if self.return_type.is_pyobject:
retval_init = ' = 0'
else:
retval_init = ''
if not self.return_type.is_void:
code.putln('%s%s;' % (
self.return_type.declaration_code(Naming.retval_cname),
retval_init))
code.put_declare_refcount_context()
code.put_setup_refcount_context('%s (wrapper)' % self.name)
self.generate_argument_parsing_code(lenv, code)
self.generate_argument_type_tests(code)
self.generate_function_body(code)
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
code.mark_pos(self.pos)
code.putln("")
code.putln("/* function exit code */")
# ----- Error cleanup
if code.error_label in code.labels_used:
code.put_goto(code.return_label)
code.put_label(code.error_label)
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type)
err_val = self.error_value()
if err_val is not None:
code.putln("%s = %s;" % (Naming.retval_cname, err_val))
# ----- Non-error return cleanup
code.put_label(code.return_label)
for entry in lenv.var_entries:
if entry.is_arg and entry.type.is_pyobject:
code.put_var_decref(entry)
code.put_finish_refcount_context()
if not self.return_type.is_void:
code.putln("return %s;" % Naming.retval_cname)
code.putln('}')
code.exit_cfunc_scope()
if preprocessor_guard:
code.putln("#endif /*!(%s)*/" % preprocessor_guard)
def generate_function_header(self, code, with_pymethdef, proto_only=0):
arg_code_list = []
sig = self.signature
if sig.has_dummy_arg or self.self_in_stararg:
arg_code = "PyObject *%s" % Naming.self_cname
if not sig.has_dummy_arg:
arg_code = 'CYTHON_UNUSED ' + arg_code
arg_code_list.append(arg_code)
for arg in self.args:
if not arg.is_generic:
if arg.is_self_arg or arg.is_type_arg:
arg_code_list.append("PyObject *%s" % arg.hdr_cname)
else:
arg_code_list.append(
arg.hdr_type.declaration_code(arg.hdr_cname))
entry = self.target.entry
if not entry.is_special and sig.method_flags() == [TypeSlots.method_noargs]:
arg_code_list.append("CYTHON_UNUSED PyObject *unused")
if entry.scope.is_c_class_scope and entry.name == "__ipow__":
arg_code_list.append("CYTHON_UNUSED PyObject *unused")
if sig.has_generic_args:
arg_code_list.append(
"PyObject *%s, PyObject *%s" % (
Naming.args_cname, Naming.kwds_cname))
arg_code = ", ".join(arg_code_list)
# Prevent warning: unused function '__pyx_pw_5numpy_7ndarray_1__getbuffer__'
mf = ""
if (entry.name in ("__getbuffer__", "__releasebuffer__")
and entry.scope.is_c_class_scope):
mf = "CYTHON_UNUSED "
with_pymethdef = False
dc = self.return_type.declaration_code(entry.func_cname)
header = "static %s%s(%s)" % (mf, dc, arg_code)
code.putln("%s; /*proto*/" % header)
if proto_only:
if self.target.fused_py_func:
# If we are the specialized version of the cpdef, we still
# want the prototype for the "fused cpdef", in case we're
# checking to see if our method was overridden in Python
self.target.fused_py_func.generate_function_header(
code, with_pymethdef, proto_only=True)
return
if (Options.docstrings and entry.doc and
not self.target.fused_py_func and
not entry.scope.is_property_scope and
(not entry.is_special or entry.wrapperbase_cname)):
# h_code = code.globalstate['h_code']
docstr = entry.doc
if docstr.is_unicode:
docstr = docstr.as_utf8_string()
code.putln(
'static char %s[] = %s;' % (
entry.doc_cname,
docstr.as_c_string_literal()))
if entry.is_special:
code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
code.putln(
"struct wrapperbase %s;" % entry.wrapperbase_cname)
code.putln('#endif')
if with_pymethdef or self.target.fused_py_func:
code.put(
"static PyMethodDef %s = " % entry.pymethdef_cname)
code.put_pymethoddef(self.target.entry, ";", allow_skip=False)
code.putln("%s {" % header)
def generate_argument_declarations(self, env, code):
for arg in self.args:
if arg.is_generic:
if arg.needs_conversion:
code.putln("PyObject *%s = 0;" % arg.hdr_cname)
else:
code.put_var_declaration(arg.entry)
for entry in env.var_entries:
if entry.is_arg:
code.put_var_declaration(entry)
def generate_argument_parsing_code(self, env, code):
# Generate fast equivalent of PyArg_ParseTuple call for
# generic arguments, if any, including args/kwargs
old_error_label = code.new_error_label()
our_error_label = code.error_label
end_label = code.new_label("argument_unpacking_done")
has_kwonly_args = self.num_kwonly_args > 0
has_star_or_kw_args = self.star_arg is not None \
or self.starstar_arg is not None or has_kwonly_args
for arg in self.args:
if not arg.type.is_pyobject:
if not arg.type.create_from_py_utility_code(env):
pass # will fail later
if not self.signature_has_generic_args():
if has_star_or_kw_args:
error(self.pos, "This method cannot have * or keyword arguments")
self.generate_argument_conversion_code(code)
elif not self.signature_has_nongeneric_args():
# func(*args) or func(**kw) or func(*args, **kw)
self.generate_stararg_copy_code(code)
else:
self.generate_tuple_and_keyword_parsing_code(self.args, end_label, code)
code.error_label = old_error_label
if code.label_used(our_error_label):
if not code.label_used(end_label):
code.put_goto(end_label)
code.put_label(our_error_label)
if has_star_or_kw_args:
self.generate_arg_decref(self.star_arg, code)
if self.starstar_arg:
if self.starstar_arg.entry.xdecref_cleanup:
code.put_var_xdecref_clear(self.starstar_arg.entry)
else:
code.put_var_decref_clear(self.starstar_arg.entry)
code.put_add_traceback(self.target.entry.qualified_name)
code.put_finish_refcount_context()
code.putln("return %s;" % self.error_value())
if code.label_used(end_label):
code.put_label(end_label)
def generate_arg_xdecref(self, arg, code):
if arg:
code.put_var_xdecref_clear(arg.entry)
def generate_arg_decref(self, arg, code):
if arg:
code.put_var_decref_clear(arg.entry)
def generate_stararg_copy_code(self, code):
if not self.star_arg:
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
code.putln("if (unlikely(PyTuple_GET_SIZE(%s) > 0)) {" %
Naming.args_cname)
code.put('__Pyx_RaiseArgtupleInvalid("%s", 1, 0, 0, PyTuple_GET_SIZE(%s)); return %s;' % (
self.name, Naming.args_cname, self.error_value()))
code.putln("}")
if self.starstar_arg:
if self.star_arg or not self.starstar_arg.entry.cf_used:
kwarg_check = "unlikely(%s)" % Naming.kwds_cname
else:
kwarg_check = "%s" % Naming.kwds_cname
else:
kwarg_check = "unlikely(%s) && unlikely(PyDict_Size(%s) > 0)" % (
Naming.kwds_cname, Naming.kwds_cname)
code.globalstate.use_utility_code(
UtilityCode.load_cached("KeywordStringCheck", "FunctionArguments.c"))
code.putln(
"if (%s && unlikely(!__Pyx_CheckKeywordStrings(%s, \"%s\", %d))) return %s;" % (
kwarg_check, Naming.kwds_cname, self.name,
bool(self.starstar_arg), self.error_value()))
if self.starstar_arg and self.starstar_arg.entry.cf_used:
if all(ref.node.allow_null for ref in self.starstar_arg.entry.cf_references):
code.putln("if (%s) {" % kwarg_check)
code.putln("%s = PyDict_Copy(%s); if (unlikely(!%s)) return %s;" % (
self.starstar_arg.entry.cname,
Naming.kwds_cname,
self.starstar_arg.entry.cname,
self.error_value()))
code.put_gotref(self.starstar_arg.entry.cname)
code.putln("} else {")
code.putln("%s = NULL;" % (self.starstar_arg.entry.cname,))
code.putln("}")
self.starstar_arg.entry.xdecref_cleanup = 1
else:
code.put("%s = (%s) ? PyDict_Copy(%s) : PyDict_New(); " % (
self.starstar_arg.entry.cname,
Naming.kwds_cname,
Naming.kwds_cname))
code.putln("if (unlikely(!%s)) return %s;" % (
self.starstar_arg.entry.cname, self.error_value()))
self.starstar_arg.entry.xdecref_cleanup = 0
code.put_gotref(self.starstar_arg.entry.cname)
if self.self_in_stararg and not self.target.is_staticmethod:
# need to create a new tuple with 'self' inserted as first item
code.put("%s = PyTuple_New(PyTuple_GET_SIZE(%s)+1); if (unlikely(!%s)) " % (
self.star_arg.entry.cname,
Naming.args_cname,
self.star_arg.entry.cname))
if self.starstar_arg and self.starstar_arg.entry.cf_used:
code.putln("{")
code.put_xdecref_clear(self.starstar_arg.entry.cname, py_object_type)
code.putln("return %s;" % self.error_value())
code.putln("}")
else:
code.putln("return %s;" % self.error_value())
code.put_gotref(self.star_arg.entry.cname)
code.put_incref(Naming.self_cname, py_object_type)
code.put_giveref(Naming.self_cname)
code.putln("PyTuple_SET_ITEM(%s, 0, %s);" % (
self.star_arg.entry.cname, Naming.self_cname))
temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
code.putln("for (%s=0; %s < PyTuple_GET_SIZE(%s); %s++) {" % (
temp, temp, Naming.args_cname, temp))
code.putln("PyObject* item = PyTuple_GET_ITEM(%s, %s);" % (
Naming.args_cname, temp))
code.put_incref("item", py_object_type)
code.put_giveref("item")
code.putln("PyTuple_SET_ITEM(%s, %s+1, item);" % (
self.star_arg.entry.cname, temp))
code.putln("}")
code.funcstate.release_temp(temp)
self.star_arg.entry.xdecref_cleanup = 0
elif self.star_arg:
code.put_incref(Naming.args_cname, py_object_type)
code.putln("%s = %s;" % (
self.star_arg.entry.cname,
Naming.args_cname))
self.star_arg.entry.xdecref_cleanup = 0
def generate_tuple_and_keyword_parsing_code(self, args, success_label, code):
argtuple_error_label = code.new_label("argtuple_error")
positional_args = []
required_kw_only_args = []
optional_kw_only_args = []
for arg in args:
if arg.is_generic:
if arg.default:
if not arg.is_self_arg and not arg.is_type_arg:
if arg.kw_only:
optional_kw_only_args.append(arg)
else:
positional_args.append(arg)
elif arg.kw_only:
required_kw_only_args.append(arg)
elif not arg.is_self_arg and not arg.is_type_arg:
positional_args.append(arg)
# sort required kw-only args before optional ones to avoid special
# cases in the unpacking code
kw_only_args = required_kw_only_args + optional_kw_only_args
min_positional_args = self.num_required_args - self.num_required_kw_args
if len(args) > 0 and (args[0].is_self_arg or args[0].is_type_arg):
min_positional_args -= 1
max_positional_args = len(positional_args)
has_fixed_positional_count = not self.star_arg and \
min_positional_args == max_positional_args
has_kw_only_args = bool(kw_only_args)
if self.num_required_kw_args:
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
if self.starstar_arg or self.star_arg:
self.generate_stararg_init_code(max_positional_args, code)
code.putln('{')
all_args = tuple(positional_args) + tuple(kw_only_args)
code.putln("static PyObject **%s[] = {%s,0};" % (
Naming.pykwdlist_cname,
','.join(['&%s' % code.intern_identifier(arg.name)
for arg in all_args])))
# Before being converted and assigned to the target variables,
# borrowed references to all unpacked argument values are
# collected into a local PyObject* array called "values",
        # regardless of whether they were taken from default arguments,
# positional arguments or keyword arguments. Note that
# C-typed default arguments are handled at conversion time,
# so their array value is NULL in the end if no argument
# was passed for them.
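        #
        # Hedged sketch of the C that this tends to produce (names shortened,
        # actual output may differ) for a signature like "def f(a, b=1)":
        #
        #     PyObject* values[2] = {0,0};
        #     values[1] = ((PyObject *)__pyx_int_1);  /* borrowed default */
        #     /* ... fill values[] from positional args and keywords ... */
        #     __pyx_v_a = values[0];
        #     __pyx_v_b = values[1];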
self.generate_argument_values_setup_code(all_args, code)
# --- optimised code when we receive keyword arguments
code.putln("if (%s(%s)) {" % (
(self.num_required_kw_args > 0) and "likely" or "unlikely",
Naming.kwds_cname))
self.generate_keyword_unpacking_code(
min_positional_args, max_positional_args,
has_fixed_positional_count, has_kw_only_args,
all_args, argtuple_error_label, code)
# --- optimised code when we do not receive any keyword arguments
if (self.num_required_kw_args and min_positional_args > 0) or min_positional_args == max_positional_args:
# Python raises arg tuple related errors first, so we must
# check the length here
if min_positional_args == max_positional_args and not self.star_arg:
compare = '!='
else:
compare = '<'
code.putln('} else if (PyTuple_GET_SIZE(%s) %s %d) {' % (
Naming.args_cname, compare, min_positional_args))
code.put_goto(argtuple_error_label)
if self.num_required_kw_args:
# pure error case: keywords required but not passed
if max_positional_args > min_positional_args and not self.star_arg:
code.putln('} else if (PyTuple_GET_SIZE(%s) > %d) {' % (
Naming.args_cname, max_positional_args))
code.put_goto(argtuple_error_label)
code.putln('} else {')
for i, arg in enumerate(kw_only_args):
if not arg.default:
pystring_cname = code.intern_identifier(arg.name)
# required keyword-only argument missing
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
self.name,
pystring_cname))
code.putln(code.error_goto(self.pos))
break
else:
# optimised tuple unpacking code
code.putln('} else {')
if min_positional_args == max_positional_args:
# parse the exact number of positional arguments from
# the args tuple
for i, arg in enumerate(positional_args):
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
else:
# parse the positional arguments from the variable length
# args tuple and reject illegal argument tuple sizes
code.putln('switch (PyTuple_GET_SIZE(%s)) {' % Naming.args_cname)
if self.star_arg:
code.putln('default:')
reversed_args = list(enumerate(positional_args))[::-1]
for i, arg in reversed_args:
if i >= min_positional_args-1:
code.put('case %2d: ' % (i+1))
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
if min_positional_args == 0:
code.put('case 0: ')
code.putln('break;')
if self.star_arg:
if min_positional_args:
for i in range(min_positional_args-1, -1, -1):
code.putln('case %2d:' % i)
code.put_goto(argtuple_error_label)
else:
code.put('default: ')
code.put_goto(argtuple_error_label)
code.putln('}')
code.putln('}') # end of the conditional unpacking blocks
# Convert arg values to their final type and assign them.
        # Also inject non-Python default arguments, which cannot
        # live in the values[] array.
for i, arg in enumerate(all_args):
self.generate_arg_assignment(arg, "values[%d]" % i, code)
code.putln('}') # end of the whole argument unpacking block
if code.label_used(argtuple_error_label):
code.put_goto(success_label)
code.put_label(argtuple_error_label)
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, PyTuple_GET_SIZE(%s)); ' % (
self.name, has_fixed_positional_count,
min_positional_args, max_positional_args,
Naming.args_cname))
code.putln(code.error_goto(self.pos))
def generate_arg_assignment(self, arg, item, code):
if arg.type.is_pyobject:
# Python default arguments were already stored in 'item' at the very beginning
if arg.is_generic:
item = PyrexTypes.typecast(arg.type, PyrexTypes.py_object_type, item)
entry = arg.entry
code.putln("%s = %s;" % (entry.cname, item))
else:
func = arg.type.from_py_function
if func:
if arg.default:
# C-typed default arguments must be handled here
code.putln('if (%s) {' % item)
rhs = "%s(%s)" % (func, item)
if arg.type.is_enum:
rhs = arg.type.cast_code(rhs)
code.putln("%s = %s; %s" % (
arg.entry.cname,
rhs,
code.error_goto_if(arg.type.error_condition(arg.entry.cname), arg.pos)))
if arg.default:
code.putln('} else {')
code.putln("%s = %s;" % (
arg.entry.cname,
arg.calculate_default_value_code(code)))
if arg.type.is_memoryviewslice:
code.put_incref_memoryviewslice(arg.entry.cname,
have_gil=True)
code.putln('}')
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % arg.type)
def generate_stararg_init_code(self, max_positional_args, code):
if self.starstar_arg:
self.starstar_arg.entry.xdecref_cleanup = 0
code.putln('%s = PyDict_New(); if (unlikely(!%s)) return %s;' % (
self.starstar_arg.entry.cname,
self.starstar_arg.entry.cname,
self.error_value()))
code.put_gotref(self.starstar_arg.entry.cname)
if self.star_arg:
self.star_arg.entry.xdecref_cleanup = 0
code.putln('if (PyTuple_GET_SIZE(%s) > %d) {' % (
Naming.args_cname,
max_positional_args))
code.putln('%s = PyTuple_GetSlice(%s, %d, PyTuple_GET_SIZE(%s));' % (
self.star_arg.entry.cname, Naming.args_cname,
max_positional_args, Naming.args_cname))
code.putln("if (unlikely(!%s)) {" % self.star_arg.entry.cname)
if self.starstar_arg:
code.put_decref_clear(self.starstar_arg.entry.cname, py_object_type)
code.put_finish_refcount_context()
code.putln('return %s;' % self.error_value())
code.putln('}')
code.put_gotref(self.star_arg.entry.cname)
code.putln('} else {')
code.put("%s = %s; " % (self.star_arg.entry.cname, Naming.empty_tuple))
code.put_incref(Naming.empty_tuple, py_object_type)
code.putln('}')
def generate_argument_values_setup_code(self, args, code):
max_args = len(args)
# the 'values' array collects borrowed references to arguments
# before doing any type coercion etc.
code.putln("PyObject* values[%d] = {%s};" % (
max_args, ','.join('0'*max_args)))
if self.target.defaults_struct:
code.putln('%s *%s = __Pyx_CyFunction_Defaults(%s, %s);' % (
self.target.defaults_struct, Naming.dynamic_args_cname,
self.target.defaults_struct, Naming.self_cname))
# assign borrowed Python default values to the values array,
# so that they can be overwritten by received arguments below
for i, arg in enumerate(args):
if arg.default and arg.type.is_pyobject:
default_value = arg.calculate_default_value_code(code)
code.putln('values[%d] = %s;' % (i, arg.type.as_pyobject(default_value)))
def generate_keyword_unpacking_code(self, min_positional_args, max_positional_args,
has_fixed_positional_count, has_kw_only_args,
all_args, argtuple_error_label, code):
code.putln('Py_ssize_t kw_args;')
code.putln('const Py_ssize_t pos_args = PyTuple_GET_SIZE(%s);' % Naming.args_cname)
# copy the values from the args tuple and check that it's not too long
code.putln('switch (pos_args) {')
if self.star_arg:
code.putln('default:')
for i in range(max_positional_args-1, -1, -1):
code.put('case %2d: ' % (i+1))
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (
i, Naming.args_cname, i))
code.putln('case 0: break;')
if not self.star_arg:
code.put('default: ') # more arguments than allowed
code.put_goto(argtuple_error_label)
code.putln('}')
# The code above is very often (but not always) the same as
# the optimised non-kwargs tuple unpacking code, so we keep
# the code block above at the very top, before the following
# 'external' PyDict_Size() call, to make it easy for the C
# compiler to merge the two separate tuple unpacking
# implementations into one when they turn out to be identical.
# If we received kwargs, fill up the positional/required
# arguments with values from the kw dict
code.putln('kw_args = PyDict_Size(%s);' % Naming.kwds_cname)
if self.num_required_args or max_positional_args > 0:
last_required_arg = -1
for i, arg in enumerate(all_args):
if not arg.default:
last_required_arg = i
if last_required_arg < max_positional_args:
last_required_arg = max_positional_args-1
if max_positional_args > 0:
code.putln('switch (pos_args) {')
for i, arg in enumerate(all_args[:last_required_arg+1]):
if max_positional_args > 0 and i <= max_positional_args:
if self.star_arg and i == max_positional_args:
code.putln('default:')
else:
code.putln('case %2d:' % i)
pystring_cname = code.intern_identifier(arg.name)
if arg.default:
if arg.kw_only:
# optional kw-only args are handled separately below
continue
code.putln('if (kw_args > 0) {')
# don't overwrite default argument
code.putln('PyObject* value = PyDict_GetItem(%s, %s);' % (
Naming.kwds_cname, pystring_cname))
code.putln('if (value) { values[%d] = value; kw_args--; }' % i)
code.putln('}')
else:
code.putln('if (likely((values[%d] = PyDict_GetItem(%s, %s)) != 0)) kw_args--;' % (
i, Naming.kwds_cname, pystring_cname))
if i < min_positional_args:
if i == 0:
# special case: we know arg 0 is missing
code.put('else ')
code.put_goto(argtuple_error_label)
else:
# print the correct number of values (args or
# kwargs) that were passed into positional
# arguments up to this point
code.putln('else {')
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, %d); ' % (
self.name, has_fixed_positional_count,
min_positional_args, max_positional_args, i))
code.putln(code.error_goto(self.pos))
code.putln('}')
elif arg.kw_only:
code.putln('else {')
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
self.name, pystring_cname))
code.putln(code.error_goto(self.pos))
code.putln('}')
if max_positional_args > 0:
code.putln('}')
if has_kw_only_args:
# unpack optional keyword-only arguments separately because
# checking for interned strings in a dict is faster than iterating
self.generate_optional_kwonly_args_unpacking_code(all_args, code)
code.putln('if (unlikely(kw_args > 0)) {')
# non-positional/-required kw args left in dict: default args,
# kw-only args, **kwargs or error
#
# This is sort of a catch-all: except for checking required
# arguments, this will always do the right thing for unpacking
# keyword arguments, so that we can concentrate on optimising
# common cases above.
if max_positional_args == 0:
pos_arg_count = "0"
elif self.star_arg:
code.putln("const Py_ssize_t used_pos_args = (pos_args < %d) ? pos_args : %d;" % (
max_positional_args, max_positional_args))
pos_arg_count = "used_pos_args"
else:
pos_arg_count = "pos_args"
code.globalstate.use_utility_code(
UtilityCode.load_cached("ParseKeywords", "FunctionArguments.c"))
code.putln('if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, values, %s, "%s") < 0)) %s' % (
Naming.kwds_cname,
Naming.pykwdlist_cname,
self.starstar_arg and self.starstar_arg.entry.cname or '0',
pos_arg_count,
self.name,
code.error_goto(self.pos)))
code.putln('}')
def generate_optional_kwonly_args_unpacking_code(self, all_args, code):
optional_args = []
first_optional_arg = -1
for i, arg in enumerate(all_args):
if not arg.kw_only or not arg.default:
continue
if not optional_args:
first_optional_arg = i
optional_args.append(arg.name)
if optional_args:
if len(optional_args) > 1:
# if we receive more than the named kwargs, we either have **kwargs
# (in which case we must iterate anyway) or it's an error (which we
# also handle during iteration) => skip this part if there are more
code.putln('if (kw_args > 0 && %s(kw_args <= %d)) {' % (
not self.starstar_arg and 'likely' or '',
len(optional_args)))
code.putln('Py_ssize_t index;')
# not unrolling the loop here reduces the C code overhead
code.putln('for (index = %d; index < %d && kw_args > 0; index++) {' % (
first_optional_arg, first_optional_arg + len(optional_args)))
else:
code.putln('if (kw_args == 1) {')
code.putln('const Py_ssize_t index = %d;' % first_optional_arg)
code.putln('PyObject* value = PyDict_GetItem(%s, *%s[index]);' % (
Naming.kwds_cname, Naming.pykwdlist_cname))
code.putln('if (value) { values[index] = value; kw_args--; }')
if len(optional_args) > 1:
code.putln('}')
code.putln('}')
def generate_argument_conversion_code(self, code):
# Generate code to convert arguments from signature type to
# declared type, if needed. Also copies signature arguments
# into closure fields.
for arg in self.args:
if arg.needs_conversion:
self.generate_arg_conversion(arg, code)
def generate_arg_conversion(self, arg, code):
# Generate conversion code for one argument.
old_type = arg.hdr_type
new_type = arg.type
if old_type.is_pyobject:
if arg.default:
code.putln("if (%s) {" % arg.hdr_cname)
else:
code.putln("assert(%s); {" % arg.hdr_cname)
self.generate_arg_conversion_from_pyobject(arg, code)
code.putln("}")
elif new_type.is_pyobject:
self.generate_arg_conversion_to_pyobject(arg, code)
else:
if new_type.assignable_from(old_type):
code.putln("%s = %s;" % (arg.entry.cname, arg.hdr_cname))
else:
error(arg.pos, "Cannot convert 1 argument from '%s' to '%s'" % (old_type, new_type))
def generate_arg_conversion_from_pyobject(self, arg, code):
new_type = arg.type
func = new_type.from_py_function
# copied from CoerceFromPyTypeNode
if func:
lhs = arg.entry.cname
rhs = "%s(%s)" % (func, arg.hdr_cname)
if new_type.is_enum:
rhs = PyrexTypes.typecast(new_type, PyrexTypes.c_long_type, rhs)
code.putln("%s = %s; %s" % (
lhs,
rhs,
code.error_goto_if(new_type.error_condition(arg.entry.cname), arg.pos)))
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % new_type)
def generate_arg_conversion_to_pyobject(self, arg, code):
old_type = arg.hdr_type
func = old_type.to_py_function
if func:
code.putln("%s = %s(%s); %s" % (
arg.entry.cname,
func,
arg.hdr_cname,
code.error_goto_if_null(arg.entry.cname, arg.pos)))
code.put_var_gotref(arg.entry)
else:
error(arg.pos, "Cannot convert argument of type '%s' to Python object" % old_type)
def generate_argument_type_tests(self, code):
# Generate type tests for args whose signature
# type is PyObject * and whose declared type is
# a subtype thereof.
for arg in self.args:
if arg.needs_type_test:
self.generate_arg_type_test(arg, code)
elif not arg.accept_none and (arg.type.is_pyobject or
arg.type.is_buffer or
arg.type.is_memoryviewslice):
self.generate_arg_none_check(arg, code)
def error_value(self):
return self.signature.error_value
class GeneratorDefNode(DefNode):
# Generator function node that creates a new generator instance when called.
#
# gbody GeneratorBodyDefNode the function implementing the generator
#
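    # Illustrative example of source handled by this node (editorial comment,
    # not part of the original docstring):
    #
    #   def counter(n):
    #       for i in range(n):
    #           yield i
    #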
is_generator = True
is_coroutine = False
needs_closure = True
child_attrs = DefNode.child_attrs + ["gbody"]
def __init__(self, pos, **kwargs):
        # XXX: doesn't actually need a body
kwargs['body'] = StatListNode(pos, stats=[], is_terminator=True)
super(GeneratorDefNode, self).__init__(pos, **kwargs)
def analyse_declarations(self, env):
super(GeneratorDefNode, self).analyse_declarations(env)
self.gbody.local_scope = self.local_scope
self.gbody.analyse_declarations(env)
def generate_function_body(self, env, code):
body_cname = self.gbody.entry.func_cname
name = code.intern_identifier(self.name)
qualname = code.intern_identifier(self.qualname)
code.putln('{')
code.putln('__pyx_CoroutineObject *gen = __Pyx_%s_New('
'(__pyx_coroutine_body_t) %s, (PyObject *) %s, %s, %s); %s' % (
'Coroutine' if self.is_coroutine else 'Generator',
body_cname, Naming.cur_scope_cname, name, qualname,
code.error_goto_if_null('gen', self.pos)))
code.put_decref(Naming.cur_scope_cname, py_object_type)
if self.requires_classobj:
classobj_cname = 'gen->classobj'
code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
classobj_cname, Naming.self_cname))
code.put_incref(classobj_cname, py_object_type)
code.put_giveref(classobj_cname)
code.put_finish_refcount_context()
code.putln('return (PyObject *) gen;')
code.putln('}')
def generate_function_definitions(self, env, code):
env.use_utility_code(UtilityCode.load_cached(
'Coroutine' if self.is_coroutine else 'Generator', "Coroutine.c"))
self.gbody.generate_function_header(code, proto=True)
super(GeneratorDefNode, self).generate_function_definitions(env, code)
self.gbody.generate_function_definitions(env, code)
class AsyncDefNode(GeneratorDefNode):
is_coroutine = True
class GeneratorBodyDefNode(DefNode):
# Main code body of a generator implemented as a DefNode.
#
is_generator_body = True
is_inlined = False
inlined_comprehension_type = None # container type for inlined comprehensions
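    # Editorial note: when is_inlined is set, this body implements an inlined
    # comprehension loop and the return value slot holds the result container
    # (list, set or dict) created up front; see generate_function_definitions()
    # below, which initialises and hands back that container.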
def __init__(self, pos=None, name=None, body=None):
super(GeneratorBodyDefNode, self).__init__(
pos=pos, body=body, name=name, doc=None,
args=[], star_arg=None, starstar_arg=None)
def declare_generator_body(self, env):
prefix = env.next_id(env.scope_prefix)
name = env.next_id('generator')
cname = Naming.genbody_prefix + prefix + name
entry = env.declare_var(None, py_object_type, self.pos,
cname=cname, visibility='private')
entry.func_cname = cname
entry.qualified_name = EncodedString(self.name)
self.entry = entry
def analyse_declarations(self, env):
self.analyse_argument_types(env)
self.declare_generator_body(env)
def generate_function_header(self, code, proto=False):
header = "static PyObject *%s(__pyx_CoroutineObject *%s, PyObject *%s)" % (
self.entry.func_cname,
Naming.generator_cname,
Naming.sent_value_cname)
if proto:
code.putln('%s; /* proto */' % header)
else:
code.putln('%s /* generator body */\n{' % header)
def generate_function_definitions(self, env, code):
lenv = self.local_scope
# Generate closure function definitions
self.body.generate_function_definitions(lenv, code)
# Generate C code for header and body of function
code.enter_cfunc_scope()
code.return_from_error_cleanup_label = code.new_label()
# ----- Top-level constants used by this function
code.mark_pos(self.pos)
self.generate_cached_builtins_decls(lenv, code)
# ----- Function header
code.putln("")
self.generate_function_header(code)
closure_init_code = code.insertion_point()
# ----- Local variables
code.putln("PyObject *%s = NULL;" % Naming.retval_cname)
tempvardecl_code = code.insertion_point()
code.put_declare_refcount_context()
code.put_setup_refcount_context(self.entry.name)
# ----- Resume switch point.
code.funcstate.init_closure_temps(lenv.scope_class.type.scope)
resume_code = code.insertion_point()
first_run_label = code.new_label('first_run')
code.use_label(first_run_label)
code.put_label(first_run_label)
code.putln('%s' %
(code.error_goto_if_null(Naming.sent_value_cname, self.pos)))
# ----- prepare target container for inlined comprehension
if self.is_inlined and self.inlined_comprehension_type is not None:
target_type = self.inlined_comprehension_type
if target_type is Builtin.list_type:
comp_init = 'PyList_New(0)'
elif target_type is Builtin.set_type:
comp_init = 'PySet_New(NULL)'
elif target_type is Builtin.dict_type:
comp_init = 'PyDict_New()'
else:
raise InternalError(
"invalid type of inlined comprehension: %s" % target_type)
code.putln("%s = %s; %s" % (
Naming.retval_cname, comp_init,
code.error_goto_if_null(Naming.retval_cname, self.pos)))
code.put_gotref(Naming.retval_cname)
# ----- Function body
self.generate_function_body(env, code)
# ----- Closure initialization
if lenv.scope_class.type.scope.entries:
closure_init_code.putln('%s = %s;' % (
lenv.scope_class.type.declaration_code(Naming.cur_scope_cname),
lenv.scope_class.type.cast_code('%s->closure' %
Naming.generator_cname)))
code.mark_pos(self.pos)
code.putln("")
code.putln("/* function exit code */")
# on normal generator termination, we do not take the exception propagation
# path: no traceback info is required and not creating it is much faster
if not self.is_inlined and not self.body.is_terminator:
code.putln('PyErr_SetNone(PyExc_StopIteration);')
# ----- Error cleanup
if code.error_label in code.labels_used:
if not self.body.is_terminator:
code.put_goto(code.return_label)
code.put_label(code.error_label)
if self.is_inlined and self.inlined_comprehension_type is not None:
code.put_xdecref_clear(Naming.retval_cname, py_object_type)
if Future.generator_stop in env.global_scope().context.future_directives:
# PEP 479: turn accidental StopIteration exceptions into a RuntimeError
code.globalstate.use_utility_code(UtilityCode.load_cached("pep479", "Coroutine.c"))
code.putln("if (unlikely(PyErr_ExceptionMatches(PyExc_StopIteration))) "
"__Pyx_Generator_Replace_StopIteration();")
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type)
code.put_add_traceback(self.entry.qualified_name)
# ----- Non-error return cleanup
code.put_label(code.return_label)
if self.is_inlined:
code.put_xgiveref(Naming.retval_cname)
else:
code.put_xdecref_clear(Naming.retval_cname, py_object_type)
code.putln('%s->resume_label = -1;' % Naming.generator_cname)
# clean up as early as possible to help breaking any reference cycles
code.putln('__Pyx_Coroutine_clear((PyObject*)%s);' % Naming.generator_cname)
code.put_finish_refcount_context()
code.putln("return %s;" % Naming.retval_cname)
code.putln("}")
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
# ----- Generator resume code
resume_code.putln("switch (%s->resume_label) {" % (
Naming.generator_cname))
resume_code.putln("case 0: goto %s;" % first_run_label)
for i, label in code.yield_labels:
resume_code.putln("case %d: goto %s;" % (i, label))
resume_code.putln("default: /* CPython raises the right error here */")
resume_code.put_finish_refcount_context()
resume_code.putln("return NULL;")
resume_code.putln("}")
code.exit_cfunc_scope()
class OverrideCheckNode(StatNode):
# A Node for dispatching to the def method if it
    # is overridden.
#
# py_func
#
# args
# func_temp
# body
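    #
    # Editorial note: the generated C code looks the method up on the instance
    # at call time and, if the fetched object is not the original PyCFunction
    # wrapper of this function, dispatches to the Python-level override via
    # self.body instead of falling through to the C implementation (see
    # generate_execution_code() below).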
child_attrs = ['body']
body = None
def analyse_expressions(self, env):
self.args = env.arg_entries
if self.py_func.is_module_scope:
first_arg = 0
else:
first_arg = 1
from . import ExprNodes
self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
call_node = ExprNodes.SimpleCallNode(
self.pos, function=self.func_node,
args=[ExprNodes.NameNode(self.pos, name=arg.name)
for arg in self.args[first_arg:]])
if env.return_type.is_void or env.return_type.is_returncode:
self.body = StatListNode(self.pos, stats=[
ExprStatNode(self.pos, expr=call_node),
ReturnStatNode(self.pos, value=None)])
else:
self.body = ReturnStatNode(self.pos, value=call_node)
self.body = self.body.analyse_expressions(env)
return self
def generate_execution_code(self, code):
interned_attr_cname = code.intern_identifier(self.py_func.entry.name)
# Check to see if we are an extension type
if self.py_func.is_module_scope:
self_arg = "((PyObject *)%s)" % Naming.module_cname
else:
self_arg = "((PyObject *)%s)" % self.args[0].cname
code.putln("/* Check if called by wrapper */")
code.putln("if (unlikely(%s)) ;" % Naming.skip_dispatch_cname)
code.putln("/* Check if overridden in Python */")
if self.py_func.is_module_scope:
code.putln("else {")
else:
code.putln("else if (unlikely(Py_TYPE(%s)->tp_dictoffset != 0)) {" % self_arg)
func_node_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.func_node.set_cname(func_node_temp)
# need to get attribute manually--scope would return cdef method
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
err = code.error_goto_if_null(func_node_temp, self.pos)
code.putln("%s = __Pyx_PyObject_GetAttrStr(%s, %s); %s" % (
func_node_temp, self_arg, interned_attr_cname, err))
code.put_gotref(func_node_temp)
is_builtin_function_or_method = "PyCFunction_Check(%s)" % func_node_temp
is_overridden = "(PyCFunction_GET_FUNCTION(%s) != (PyCFunction)%s)" % (
func_node_temp, self.py_func.entry.func_cname)
code.putln("if (!%s || %s) {" % (is_builtin_function_or_method, is_overridden))
self.body.generate_execution_code(code)
code.putln("}")
code.put_decref_clear(func_node_temp, PyrexTypes.py_object_type)
code.funcstate.release_temp(func_node_temp)
code.putln("}")
class ClassDefNode(StatNode, BlockNode):
pass
class PyClassDefNode(ClassDefNode):
# A Python class definition.
#
# name EncodedString Name of the class
# doc string or None
# body StatNode Attribute definition code
# entry Symtab.Entry
# scope PyClassScope
# decorators [DecoratorNode] list of decorators or None
#
# The following subnodes are constructed internally:
#
# dict DictNode Class dictionary or Py3 namespace
# classobj ClassNode Class object
# target NameNode Variable to assign class object to
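    #
    # Illustrative example of source handled by this node (editorial comment):
    #
    #   class Spam(Base, metaclass=Meta):
    #       "docstring"
    #       ...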
child_attrs = ["body", "dict", "metaclass", "mkw", "bases", "class_result",
"target", "class_cell", "decorators"]
decorators = None
class_result = None
is_py3_style_class = False # Python3 style class (kwargs)
metaclass = None
mkw = None
def __init__(self, pos, name, bases, doc, body, decorators=None,
keyword_args=None, force_py3_semantics=False):
StatNode.__init__(self, pos)
self.name = name
self.doc = doc
self.body = body
self.decorators = decorators
self.bases = bases
from . import ExprNodes
if self.doc and Options.docstrings:
doc = embed_position(self.pos, self.doc)
doc_node = ExprNodes.StringNode(pos, value=doc)
else:
doc_node = None
allow_py2_metaclass = not force_py3_semantics
if keyword_args:
allow_py2_metaclass = False
self.is_py3_style_class = True
if keyword_args.is_dict_literal:
if keyword_args.key_value_pairs:
for i, item in list(enumerate(keyword_args.key_value_pairs))[::-1]:
if item.key.value == 'metaclass':
if self.metaclass is not None:
error(item.pos, "keyword argument 'metaclass' passed multiple times")
# special case: we already know the metaclass,
# so we don't need to do the "build kwargs,
# find metaclass" dance at runtime
self.metaclass = item.value
del keyword_args.key_value_pairs[i]
self.mkw = keyword_args
else:
assert self.metaclass is not None
else:
# MergedDictNode
self.mkw = ExprNodes.ProxyNode(keyword_args)
if force_py3_semantics or self.bases or self.mkw or self.metaclass:
if self.metaclass is None:
if keyword_args and not keyword_args.is_dict_literal:
# **kwargs may contain 'metaclass' arg
mkdict = self.mkw
else:
mkdict = None
if (not mkdict and
self.bases.is_sequence_constructor and
not self.bases.args):
pass # no base classes => no inherited metaclass
else:
self.metaclass = ExprNodes.PyClassMetaclassNode(
pos, mkw=mkdict, bases=self.bases)
needs_metaclass_calculation = False
else:
needs_metaclass_calculation = True
self.dict = ExprNodes.PyClassNamespaceNode(
pos, name=name, doc=doc_node,
metaclass=self.metaclass, bases=self.bases, mkw=self.mkw)
self.classobj = ExprNodes.Py3ClassNode(
pos, name=name,
bases=self.bases, dict=self.dict, doc=doc_node,
metaclass=self.metaclass, mkw=self.mkw,
calculate_metaclass=needs_metaclass_calculation,
allow_py2_metaclass=allow_py2_metaclass)
else:
# no bases, no metaclass => old style class creation
self.dict = ExprNodes.DictNode(pos, key_value_pairs=[])
self.classobj = ExprNodes.ClassNode(
pos, name=name,
bases=bases, dict=self.dict, doc=doc_node)
self.target = ExprNodes.NameNode(pos, name=name)
self.class_cell = ExprNodes.ClassCellInjectorNode(self.pos)
def as_cclass(self):
"""
Return this node as if it were declared as an extension class
"""
if self.is_py3_style_class:
error(self.classobj.pos, "Python3 style class could not be represented as C class")
return
bases = self.classobj.bases.args
if len(bases) == 0:
base_class_name = None
base_class_module = None
elif len(bases) == 1:
base = bases[0]
path = []
from .ExprNodes import AttributeNode, NameNode
while isinstance(base, AttributeNode):
path.insert(0, base.attribute)
base = base.obj
if isinstance(base, NameNode):
path.insert(0, base.name)
base_class_name = path[-1]
if len(path) > 1:
base_class_module = u'.'.join(path[:-1])
else:
base_class_module = None
else:
error(self.classobj.bases.args.pos, "Invalid base class")
else:
error(self.classobj.bases.args.pos, "C class may only have one base class")
return None
return CClassDefNode(self.pos,
visibility='private',
module_name=None,
class_name=self.name,
base_class_module=base_class_module,
base_class_name=base_class_name,
decorators=self.decorators,
body=self.body,
in_pxd=False,
doc=self.doc)
def create_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
cenv = self.scope = PyClassScope(name=self.name, outer_scope=genv)
return cenv
def analyse_declarations(self, env):
class_result = self.classobj
if self.decorators:
from .ExprNodes import SimpleCallNode
for decorator in self.decorators[::-1]:
class_result = SimpleCallNode(
decorator.pos,
function=decorator.decorator,
args=[class_result])
self.decorators = None
self.class_result = class_result
self.class_result.analyse_declarations(env)
self.target.analyse_target_declaration(env)
cenv = self.create_scope(env)
cenv.directives = env.directives
cenv.class_obj_cname = self.target.entry.cname
self.body.analyse_declarations(cenv)
def analyse_expressions(self, env):
if self.bases:
self.bases = self.bases.analyse_expressions(env)
if self.metaclass:
self.metaclass = self.metaclass.analyse_expressions(env)
if self.mkw:
self.mkw = self.mkw.analyse_expressions(env)
self.dict = self.dict.analyse_expressions(env)
self.class_result = self.class_result.analyse_expressions(env)
cenv = self.scope
self.body = self.body.analyse_expressions(cenv)
self.target.analyse_target_expression(env, self.classobj)
self.class_cell = self.class_cell.analyse_expressions(cenv)
return self
def generate_function_definitions(self, env, code):
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
code.pyclass_stack.append(self)
cenv = self.scope
if self.bases:
self.bases.generate_evaluation_code(code)
if self.mkw:
self.mkw.generate_evaluation_code(code)
if self.metaclass:
self.metaclass.generate_evaluation_code(code)
self.dict.generate_evaluation_code(code)
cenv.namespace_cname = cenv.class_obj_cname = self.dict.result()
self.class_cell.generate_evaluation_code(code)
self.body.generate_execution_code(code)
self.class_result.generate_evaluation_code(code)
self.class_cell.generate_injection_code(
code, self.class_result.result())
self.class_cell.generate_disposal_code(code)
cenv.namespace_cname = cenv.class_obj_cname = self.classobj.result()
self.target.generate_assignment_code(self.class_result, code)
self.dict.generate_disposal_code(code)
self.dict.free_temps(code)
if self.metaclass:
self.metaclass.generate_disposal_code(code)
self.metaclass.free_temps(code)
if self.mkw:
self.mkw.generate_disposal_code(code)
self.mkw.free_temps(code)
if self.bases:
self.bases.generate_disposal_code(code)
self.bases.free_temps(code)
code.pyclass_stack.pop()
class CClassDefNode(ClassDefNode):
# An extension type definition.
#
# visibility 'private' or 'public' or 'extern'
# typedef_flag boolean
# api boolean
# module_name string or None For import of extern type objects
# class_name string Unqualified name of class
# as_name string or None Name to declare as in this scope
# base_class_module string or None Module containing the base class
# base_class_name string or None Name of the base class
# objstruct_name string or None Specified C name of object struct
# typeobj_name string or None Specified C name of type object
# in_pxd boolean Is in a .pxd file
# decorators [DecoratorNode] list of decorators or None
# doc string or None
# body StatNode or None
# entry Symtab.Entry
# base_type PyExtensionType or None
# buffer_defaults_node DictNode or None Declares defaults for a buffer
# buffer_defaults_pos
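    #
    # Illustrative example of source handled by this node (editorial comment):
    #
    #   cdef class Spam(Base):
    #       cdef int eggs
    #       ...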
child_attrs = ["body"]
buffer_defaults_node = None
buffer_defaults_pos = None
typedef_flag = False
api = False
objstruct_name = None
typeobj_name = None
decorators = None
shadow = False
def buffer_defaults(self, env):
if not hasattr(self, '_buffer_defaults'):
from . import Buffer
if self.buffer_defaults_node:
self._buffer_defaults = Buffer.analyse_buffer_options(
self.buffer_defaults_pos,
env, [], self.buffer_defaults_node,
need_complete=False)
else:
self._buffer_defaults = None
return self._buffer_defaults
def declare(self, env):
if self.module_name and self.visibility != 'extern':
module_path = self.module_name.split(".")
home_scope = env.find_imported_module(module_path, self.pos)
if not home_scope:
return None
else:
home_scope = env
self.entry = home_scope.declare_c_class(
name=self.class_name,
pos=self.pos,
defining=0,
implementing=0,
module_name=self.module_name,
base_type=None,
objstruct_cname=self.objstruct_name,
typeobj_cname=self.typeobj_name,
visibility=self.visibility,
typedef_flag=self.typedef_flag,
api=self.api,
buffer_defaults=self.buffer_defaults(env),
shadow=self.shadow)
def analyse_declarations(self, env):
#print "CClassDefNode.analyse_declarations:", self.class_name
#print "...visibility =", self.visibility
#print "...module_name =", self.module_name
if env.in_cinclude and not self.objstruct_name:
error(self.pos, "Object struct name specification required for C class defined in 'extern from' block")
if self.decorators:
error(self.pos, "Decorators not allowed on cdef classes (used on type '%s')" % self.class_name)
self.base_type = None
# Now that module imports are cached, we need to
# import the modules for extern classes.
if self.module_name:
self.module = None
for module in env.cimported_modules:
if module.name == self.module_name:
self.module = module
if self.module is None:
self.module = ModuleScope(self.module_name, None, env.context)
self.module.has_extern_class = 1
env.add_imported_module(self.module)
if self.base_class_name:
if self.base_class_module:
base_class_scope = env.find_module(self.base_class_module, self.pos)
else:
base_class_scope = env
if self.base_class_name == 'object':
# extension classes are special and don't need to inherit from object
if base_class_scope is None or base_class_scope.lookup('object') is None:
self.base_class_name = None
self.base_class_module = None
base_class_scope = None
if base_class_scope:
base_class_entry = base_class_scope.find(self.base_class_name, self.pos)
if base_class_entry:
if not base_class_entry.is_type:
error(self.pos, "'%s' is not a type name" % self.base_class_name)
elif not base_class_entry.type.is_extension_type and \
not (base_class_entry.type.is_builtin_type and
base_class_entry.type.objstruct_cname):
error(self.pos, "'%s' is not an extension type" % self.base_class_name)
elif not base_class_entry.type.is_complete():
error(self.pos, "Base class '%s' of type '%s' is incomplete" % (
self.base_class_name, self.class_name))
elif base_class_entry.type.scope and base_class_entry.type.scope.directives and \
base_class_entry.type.is_final_type:
error(self.pos, "Base class '%s' of type '%s' is final" % (
self.base_class_name, self.class_name))
elif base_class_entry.type.is_builtin_type and \
base_class_entry.type.name in ('tuple', 'str', 'bytes'):
error(self.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
% base_class_entry.type.name)
else:
self.base_type = base_class_entry.type
if env.directives.get('freelist', 0) > 0:
warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
has_body = self.body is not None
if has_body and self.base_type and not self.base_type.scope:
# To properly initialize inherited attributes, the base type must
# be analysed before this type.
self.base_type.defered_declarations.append(lambda : self.analyse_declarations(env))
return
if self.module_name and self.visibility != 'extern':
module_path = self.module_name.split(".")
home_scope = env.find_imported_module(module_path, self.pos)
if not home_scope:
return
else:
home_scope = env
if self.visibility == 'extern':
if (self.module_name == '__builtin__' and
self.class_name in Builtin.builtin_types and
env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython
warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1)
self.entry = home_scope.declare_c_class(
name=self.class_name,
pos=self.pos,
defining=has_body and self.in_pxd,
implementing=has_body and not self.in_pxd,
module_name=self.module_name,
base_type=self.base_type,
objstruct_cname=self.objstruct_name,
typeobj_cname=self.typeobj_name,
visibility=self.visibility,
typedef_flag=self.typedef_flag,
api=self.api,
buffer_defaults=self.buffer_defaults(env),
shadow=self.shadow)
if self.shadow:
home_scope.lookup(self.class_name).as_variable = self.entry
if home_scope is not env and self.visibility == 'extern':
env.add_imported_entry(self.class_name, self.entry, self.pos)
self.scope = scope = self.entry.type.scope
if scope is not None:
scope.directives = env.directives
if self.doc and Options.docstrings:
scope.doc = embed_position(self.pos, self.doc)
if has_body:
self.body.analyse_declarations(scope)
if self.in_pxd:
scope.defined = 1
else:
scope.implemented = 1
env.allocate_vtable_names(self.entry)
for thunk in self.entry.type.defered_declarations:
thunk()
def analyse_expressions(self, env):
if self.body:
scope = self.entry.type.scope
self.body = self.body.analyse_expressions(scope)
return self
def generate_function_definitions(self, env, code):
if self.body:
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
def generate_execution_code(self, code):
# This is needed to generate evaluation code for
# default values of method arguments.
code.mark_pos(self.pos)
if self.body:
self.body.generate_execution_code(code)
def annotate(self, code):
if self.body:
self.body.annotate(code)
class PropertyNode(StatNode):
# Definition of a property in an extension type.
#
# name string
# doc EncodedString or None Doc string
# entry Symtab.Entry
# body StatListNode
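    #
    # Illustrative example of source handled by this node (editorial comment):
    #
    #   property eggs:
    #       def __get__(self): ...
    #       def __set__(self, value): ...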
child_attrs = ["body"]
def analyse_declarations(self, env):
self.entry = env.declare_property(self.name, self.doc, self.pos)
self.entry.scope.directives = env.directives
self.body.analyse_declarations(self.entry.scope)
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
def generate_execution_code(self, code):
pass
def annotate(self, code):
self.body.annotate(code)
class GlobalNode(StatNode):
# Global variable declaration.
#
# names [string]
child_attrs = []
def analyse_declarations(self, env):
for name in self.names:
env.declare_global(name, self.pos)
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class NonlocalNode(StatNode):
# Nonlocal variable declaration via the 'nonlocal' keyword.
#
# names [string]
child_attrs = []
def analyse_declarations(self, env):
for name in self.names:
env.declare_nonlocal(name, self.pos)
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class ExprStatNode(StatNode):
# Expression used as a statement.
#
# expr ExprNode
child_attrs = ["expr"]
def analyse_declarations(self, env):
from . import ExprNodes
if isinstance(self.expr, ExprNodes.GeneralCallNode):
func = self.expr.function.as_cython_attribute()
if func == u'declare':
args, kwds = self.expr.explicit_args_kwds()
if len(args):
error(self.expr.pos, "Variable names must be specified.")
for var, type_node in kwds.key_value_pairs:
type = type_node.analyse_as_type(env)
if type is None:
error(type_node.pos, "Unknown type")
else:
env.declare_var(var.value, type, var.pos, is_cdef=True)
self.__class__ = PassStatNode
def analyse_expressions(self, env):
self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr = self.expr.analyse_expressions(env)
return self
def nogil_check(self, env):
if self.expr.type.is_pyobject and self.expr.is_temp:
self.gil_error()
gil_message = "Discarding owned Python object"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
self.expr.generate_evaluation_code(code)
if not self.expr.is_temp and self.expr.result():
code.putln("%s;" % self.expr.result())
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.expr.generate_function_definitions(env, code)
def annotate(self, code):
self.expr.annotate(code)
class AssignmentNode(StatNode):
# Abstract base class for assignment nodes.
#
# The analyse_expressions and generate_execution_code
# phases of assignments are split into two sub-phases
# each, to enable all the right hand sides of a
# parallel assignment to be evaluated before assigning
# to any of the left hand sides.
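    #
    # For example, in "a, b = b, a" both right hand sides are evaluated
    # before either target is assigned.  (editorial comment)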
def analyse_expressions(self, env):
node = self.analyse_types(env)
if isinstance(node, AssignmentNode) and not isinstance(node, ParallelAssignmentNode):
if node.rhs.type.is_ptr and node.rhs.is_ephemeral():
error(self.pos, "Storing unsafe C derivative of temporary Python reference")
return node
# def analyse_expressions(self, env):
# self.analyse_expressions_1(env)
# self.analyse_expressions_2(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
self.generate_rhs_evaluation_code(code)
self.generate_assignment_code(code)
class SingleAssignmentNode(AssignmentNode):
# The simplest case:
#
# a = b
#
# lhs ExprNode Left hand side
# rhs ExprNode Right hand side
# first bool Is this guaranteed the first assignment to lhs?
# is_overloaded_assignment bool Is this assignment done via an overloaded operator=
# exception_check
# exception_value
child_attrs = ["lhs", "rhs"]
first = False
is_overloaded_assignment = False
declaration_only = False
def analyse_declarations(self, env):
from . import ExprNodes
# handle declarations of the form x = cython.foo()
if isinstance(self.rhs, ExprNodes.CallNode):
func_name = self.rhs.function.as_cython_attribute()
if func_name:
args, kwds = self.rhs.explicit_args_kwds()
if func_name in ['declare', 'typedef']:
if len(args) > 2:
error(args[2].pos, "Invalid positional argument.")
return
if kwds is not None:
kwdict = kwds.compile_time_value(None)
if func_name == 'typedef' or 'visibility' not in kwdict:
error(kwds.pos, "Invalid keyword argument.")
return
visibility = kwdict['visibility']
else:
visibility = 'private'
type = args[0].analyse_as_type(env)
if type is None:
error(args[0].pos, "Unknown type")
return
lhs = self.lhs
if func_name == 'declare':
if isinstance(lhs, ExprNodes.NameNode):
vars = [(lhs.name, lhs.pos)]
elif isinstance(lhs, ExprNodes.TupleNode):
vars = [(var.name, var.pos) for var in lhs.args]
else:
error(lhs.pos, "Invalid declaration")
return
for var, pos in vars:
env.declare_var(var, type, pos, is_cdef=True, visibility=visibility)
if len(args) == 2:
# we have a value
self.rhs = args[1]
else:
self.declaration_only = True
else:
self.declaration_only = True
if not isinstance(lhs, ExprNodes.NameNode):
error(lhs.pos, "Invalid declaration.")
env.declare_typedef(lhs.name, type, self.pos, visibility='private')
elif func_name in ['struct', 'union']:
self.declaration_only = True
if len(args) > 0 or kwds is None:
error(self.rhs.pos, "Struct or union members must be given by name.")
return
members = []
for member, type_node in kwds.key_value_pairs:
type = type_node.analyse_as_type(env)
if type is None:
error(type_node.pos, "Unknown type")
else:
members.append((member.value, type, member.pos))
if len(members) < len(kwds.key_value_pairs):
return
if not isinstance(self.lhs, ExprNodes.NameNode):
error(self.lhs.pos, "Invalid declaration.")
name = self.lhs.name
scope = StructOrUnionScope(name)
env.declare_struct_or_union(name, func_name, scope, False, self.rhs.pos)
for member, type, pos in members:
scope.declare_var(member, type, pos)
elif func_name == 'fused_type':
# dtype = cython.fused_type(...)
self.declaration_only = True
if kwds:
error(self.rhs.function.pos,
"fused_type does not take keyword arguments")
fusednode = FusedTypeNode(self.rhs.pos,
name=self.lhs.name, types=args)
fusednode.analyse_declarations(env)
if self.declaration_only:
return
else:
self.lhs.analyse_target_declaration(env)
def analyse_types(self, env, use_temp=0):
from . import ExprNodes
self.rhs = self.rhs.analyse_types(env)
unrolled_assignment = self.unroll_rhs(env)
if unrolled_assignment:
return unrolled_assignment
self.lhs = self.lhs.analyse_target_types(env)
self.lhs.gil_assignment_check(env)
unrolled_assignment = self.unroll_lhs(env)
if unrolled_assignment:
return unrolled_assignment
if isinstance(self.lhs, ExprNodes.MemoryViewIndexNode):
self.lhs.analyse_broadcast_operation(self.rhs)
self.lhs = self.lhs.analyse_as_memview_scalar_assignment(self.rhs)
elif self.lhs.type.is_array:
if not isinstance(self.lhs, ExprNodes.SliceIndexNode):
# cannot assign to C array, only to its full slice
self.lhs = ExprNodes.SliceIndexNode(self.lhs.pos, base=self.lhs, start=None, stop=None)
self.lhs = self.lhs.analyse_target_types(env)
if self.lhs.type.is_cpp_class:
op = env.lookup_operator_for_types(self.pos, '=', [self.lhs.type, self.rhs.type])
if op:
rhs = self.rhs
self.is_overloaded_assignment = True
self.exception_check = op.type.exception_check
self.exception_value = op.type.exception_value
if self.exception_check == '+' and self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
else:
rhs = self.rhs.coerce_to(self.lhs.type, env)
else:
rhs = self.rhs.coerce_to(self.lhs.type, env)
if use_temp or rhs.is_attribute or (
not rhs.is_name and not rhs.is_literal and
rhs.type.is_pyobject):
# things like (cdef) attribute access are not safe (traverses pointers)
rhs = rhs.coerce_to_temp(env)
elif rhs.type.is_pyobject:
rhs = rhs.coerce_to_simple(env)
self.rhs = rhs
return self
def unroll(self, node, target_size, env):
from . import ExprNodes, UtilNodes
base = node
start_node = stop_node = step_node = check_node = None
if node.type.is_ctuple:
slice_size = node.type.size
elif node.type.is_ptr or node.type.is_array:
while isinstance(node, ExprNodes.SliceIndexNode) and not (node.start or node.stop):
base = node = node.base
if isinstance(node, ExprNodes.SliceIndexNode):
base = node.base
start_node = node.start
if start_node:
start_node = start_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
stop_node = node.stop
if stop_node:
stop_node = stop_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
else:
if node.type.is_array and node.type.size:
stop_node = ExprNodes.IntNode(
self.pos, value=str(node.type.size),
constant_result=(node.type.size if isinstance(node.type.size, _py_int_types)
else ExprNodes.constant_value_not_set))
else:
error(self.pos, "C array iteration requires known end index")
return
step_node = None #node.step
if step_node:
step_node = step_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
# TODO: Factor out SliceIndexNode.generate_slice_guard_code() for use here.
def get_const(node, none_value):
if node is None:
return none_value
elif node.has_constant_result():
return node.constant_result
else:
raise ValueError("Not a constant.")
try:
slice_size = (get_const(stop_node, None) - get_const(start_node, 0)) / get_const(step_node, 1)
except ValueError:
error(self.pos, "C array assignment currently requires known endpoints")
return
elif node.type.is_array:
slice_size = node.type.size
if not isinstance(slice_size, _py_int_types):
return # might still work when coercing to Python
else:
return
else:
return
if slice_size != target_size:
error(self.pos, "Assignment to/from slice of wrong length, expected %s, got %s" % (
slice_size, target_size))
return
items = []
base = UtilNodes.LetRefNode(base)
refs = [base]
if start_node and not start_node.is_literal:
start_node = UtilNodes.LetRefNode(start_node)
refs.append(start_node)
if stop_node and not stop_node.is_literal:
stop_node = UtilNodes.LetRefNode(stop_node)
refs.append(stop_node)
if step_node and not step_node.is_literal:
step_node = UtilNodes.LetRefNode(step_node)
refs.append(step_node)
for ix in range(target_size):
ix_node = ExprNodes.IntNode(self.pos, value=str(ix), constant_result=ix, type=PyrexTypes.c_py_ssize_t_type)
if step_node is not None:
if step_node.has_constant_result():
step_value = ix_node.constant_result * step_node.constant_result
ix_node = ExprNodes.IntNode(self.pos, value=str(step_value), constant_result=step_value)
else:
ix_node = ExprNodes.MulNode(self.pos, operator='*', operand1=step_node, operand2=ix_node)
if start_node is not None:
if start_node.has_constant_result() and ix_node.has_constant_result():
index_value = ix_node.constant_result + start_node.constant_result
ix_node = ExprNodes.IntNode(self.pos, value=str(index_value), constant_result=index_value)
else:
ix_node = ExprNodes.AddNode(
self.pos, operator='+', operand1=start_node, operand2=ix_node)
items.append(ExprNodes.IndexNode(self.pos, base=base, index=ix_node.analyse_types(env)))
return check_node, refs, items
def unroll_assignments(self, refs, check_node, lhs_list, rhs_list, env):
from . import UtilNodes
assignments = []
for lhs, rhs in zip(lhs_list, rhs_list):
assignments.append(SingleAssignmentNode(self.pos, lhs=lhs, rhs=rhs, first=self.first))
node = ParallelAssignmentNode(pos=self.pos, stats=assignments).analyse_expressions(env)
if check_node:
node = StatListNode(pos=self.pos, stats=[check_node, node])
for ref in refs[::-1]:
node = UtilNodes.LetNode(ref, node)
return node
def unroll_rhs(self, env):
from . import ExprNodes
if not isinstance(self.lhs, ExprNodes.TupleNode):
return
if any(arg.is_starred for arg in self.lhs.args):
return
unrolled = self.unroll(self.rhs, len(self.lhs.args), env)
if not unrolled:
return
check_node, refs, rhs = unrolled
return self.unroll_assignments(refs, check_node, self.lhs.args, rhs, env)
def unroll_lhs(self, env):
if self.lhs.type.is_ctuple:
# Handled directly.
return
from . import ExprNodes
if not isinstance(self.rhs, ExprNodes.TupleNode):
return
unrolled = self.unroll(self.lhs, len(self.rhs.args), env)
if not unrolled:
return
check_node, refs, lhs = unrolled
return self.unroll_assignments(refs, check_node, lhs, self.rhs.args, env)
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
def generate_assignment_code(self, code, overloaded_assignment=False):
if self.is_overloaded_assignment:
self.lhs.generate_assignment_code(
self.rhs,
code,
overloaded_assignment=self.is_overloaded_assignment,
exception_check=self.exception_check,
exception_value=self.exception_value)
else:
self.lhs.generate_assignment_code(self.rhs, code)
def generate_function_definitions(self, env, code):
self.rhs.generate_function_definitions(env, code)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
class CascadedAssignmentNode(AssignmentNode):
# An assignment with multiple left hand sides:
#
# a = b = c
#
# lhs_list [ExprNode] Left hand sides
# rhs ExprNode Right hand sides
#
# Used internally:
#
# coerced_values [ExprNode] RHS coerced to all distinct LHS types
# cloned_values [ExprNode] cloned RHS value for each LHS
# assignment_overloads [Bool] If each assignment uses a C++ operator=
child_attrs = ["lhs_list", "rhs", "coerced_values", "cloned_values"]
cloned_values = None
coerced_values = None
assignment_overloads = None
def analyse_declarations(self, env):
for lhs in self.lhs_list:
lhs.analyse_target_declaration(env)
def analyse_types(self, env, use_temp=0):
from .ExprNodes import CloneNode, ProxyNode
# collect distinct types used on the LHS
lhs_types = set()
for i, lhs in enumerate(self.lhs_list):
lhs = self.lhs_list[i] = lhs.analyse_target_types(env)
lhs.gil_assignment_check(env)
lhs_types.add(lhs.type)
rhs = self.rhs.analyse_types(env)
# common special case: only one type needed on the LHS => coerce only once
if len(lhs_types) == 1:
# Avoid coercion for overloaded assignment operators.
if next(iter(lhs_types)).is_cpp_class:
op = env.lookup_operator('=', [lhs, self.rhs])
if not op:
rhs = rhs.coerce_to(lhs_types.pop(), env)
else:
rhs = rhs.coerce_to(lhs_types.pop(), env)
if not rhs.is_name and not rhs.is_literal and (
use_temp or rhs.is_attribute or rhs.type.is_pyobject):
rhs = rhs.coerce_to_temp(env)
else:
rhs = rhs.coerce_to_simple(env)
self.rhs = ProxyNode(rhs) if rhs.is_temp else rhs
# clone RHS and coerce it to all distinct LHS types
self.coerced_values = []
coerced_values = {}
self.assignment_overloads = []
for lhs in self.lhs_list:
overloaded = lhs.type.is_cpp_class and env.lookup_operator('=', [lhs, self.rhs])
self.assignment_overloads.append(overloaded)
if lhs.type not in coerced_values and lhs.type != rhs.type:
rhs = CloneNode(self.rhs)
if not overloaded:
rhs = rhs.coerce_to(lhs.type, env)
self.coerced_values.append(rhs)
coerced_values[lhs.type] = rhs
# clone coerced values for all LHS assignments
self.cloned_values = []
for lhs in self.lhs_list:
rhs = coerced_values.get(lhs.type, self.rhs)
self.cloned_values.append(CloneNode(rhs))
return self
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
def generate_assignment_code(self, code, overloaded_assignment=False):
# prepare all coercions
for rhs in self.coerced_values:
rhs.generate_evaluation_code(code)
# assign clones to LHS
for lhs, rhs, overload in zip(self.lhs_list, self.cloned_values, self.assignment_overloads):
rhs.generate_evaluation_code(code)
lhs.generate_assignment_code(rhs, code, overloaded_assignment=overload)
# dispose of coerced values and original RHS
for rhs_value in self.coerced_values:
rhs_value.generate_disposal_code(code)
rhs_value.free_temps(code)
self.rhs.generate_disposal_code(code)
self.rhs.free_temps(code)
def generate_function_definitions(self, env, code):
self.rhs.generate_function_definitions(env, code)
def annotate(self, code):
for rhs in self.coerced_values:
rhs.annotate(code)
for lhs, rhs in zip(self.lhs_list, self.cloned_values):
lhs.annotate(code)
rhs.annotate(code)
self.rhs.annotate(code)
class ParallelAssignmentNode(AssignmentNode):
# A combined packing/unpacking assignment:
#
# a, b, c = d, e, f
#
# This has been rearranged by the parser into
#
# a = d ; b = e ; c = f
#
# but we must evaluate all the right hand sides
# before assigning to any of the left hand sides.
#
# stats [AssignmentNode] The constituent assignments
child_attrs = ["stats"]
def analyse_declarations(self, env):
for stat in self.stats:
stat.analyse_declarations(env)
def analyse_expressions(self, env):
self.stats = [stat.analyse_types(env, use_temp=1)
for stat in self.stats]
return self
# def analyse_expressions(self, env):
# for stat in self.stats:
# stat.analyse_expressions_1(env, use_temp=1)
# for stat in self.stats:
# stat.analyse_expressions_2(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
for stat in self.stats:
stat.generate_rhs_evaluation_code(code)
for stat in self.stats:
stat.generate_assignment_code(code)
def generate_function_definitions(self, env, code):
for stat in self.stats:
stat.generate_function_definitions(env, code)
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
class InPlaceAssignmentNode(AssignmentNode):
# An in place arithmetic operand:
#
# a += b
# a -= b
# ...
#
# lhs ExprNode Left hand side
# rhs ExprNode Right hand side
# operator char one of "+-*/%^&|"
#
# This code is a bit tricky because in order to obey Python
# semantics the sub-expressions (e.g. indices) of the lhs must
# not be evaluated twice. So we must re-use the values calculated
# in evaluation phase for the assignment phase as well.
# Fortunately, the type of the lhs node is fairly constrained
# (it must be a NameNode, AttributeNode, or IndexNode).
child_attrs = ["lhs", "rhs"]
def analyse_declarations(self, env):
self.lhs.analyse_target_declaration(env)
def analyse_types(self, env):
self.rhs = self.rhs.analyse_types(env)
self.lhs = self.lhs.analyse_target_types(env)
# When assigning to a fully indexed buffer or memoryview, coerce the rhs
if self.lhs.is_memview_index or self.lhs.is_buffer_access:
self.rhs = self.rhs.coerce_to(self.lhs.type, env)
elif self.lhs.type.is_string and self.operator in '+-':
# use pointer arithmetic for char* LHS instead of string concat
self.rhs = self.rhs.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
lhs, rhs = self.lhs, self.rhs
rhs.generate_evaluation_code(code)
lhs.generate_subexpr_evaluation_code(code)
c_op = self.operator
if c_op == "//":
c_op = "/"
elif c_op == "**":
error(self.pos, "No C inplace power operator")
if lhs.is_buffer_access or lhs.is_memview_index:
if lhs.type.is_pyobject:
error(self.pos, "In-place operators not allowed on object buffers in this release.")
if c_op in ('/', '%') and lhs.type.is_int and not code.globalstate.directives['cdivision']:
error(self.pos, "In-place non-c divide operators not allowed on int buffers.")
lhs.generate_buffer_setitem_code(rhs, code, c_op)
elif lhs.is_memview_slice:
error(self.pos, "Inplace operators not supported on memoryview slices")
else:
# C++
# TODO: make sure overload is declared
code.putln("%s %s= %s;" % (lhs.result(), c_op, rhs.result()))
lhs.generate_subexpr_disposal_code(code)
lhs.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
def create_binop_node(self):
from . import ExprNodes
return ExprNodes.binop_node(self.pos, self.operator, self.lhs, self.rhs)
class PrintStatNode(StatNode):
# print statement
#
# arg_tuple TupleNode
# stream ExprNode or None (stdout)
# append_newline boolean
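    #
    # Illustrative example (Py2 print statement syntax, editorial comment):
    #
    #   print >> stream, arg1, arg2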
child_attrs = ["arg_tuple", "stream"]
def analyse_expressions(self, env):
if self.stream:
stream = self.stream.analyse_expressions(env)
self.stream = stream.coerce_to_pyobject(env)
arg_tuple = self.arg_tuple.analyse_expressions(env)
self.arg_tuple = arg_tuple.coerce_to_pyobject(env)
env.use_utility_code(printing_utility_code)
if len(self.arg_tuple.args) == 1 and self.append_newline:
env.use_utility_code(printing_one_utility_code)
return self
nogil_check = Node.gil_error
gil_message = "Python print statement"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if self.stream:
self.stream.generate_evaluation_code(code)
stream_result = self.stream.py_result()
else:
stream_result = '0'
if len(self.arg_tuple.args) == 1 and self.append_newline:
arg = self.arg_tuple.args[0]
arg.generate_evaluation_code(code)
code.putln(
"if (__Pyx_PrintOne(%s, %s) < 0) %s" % (
stream_result,
arg.py_result(),
code.error_goto(self.pos)))
arg.generate_disposal_code(code)
arg.free_temps(code)
else:
self.arg_tuple.generate_evaluation_code(code)
code.putln(
"if (__Pyx_Print(%s, %s, %d) < 0) %s" % (
stream_result,
self.arg_tuple.py_result(),
self.append_newline,
code.error_goto(self.pos)))
self.arg_tuple.generate_disposal_code(code)
self.arg_tuple.free_temps(code)
if self.stream:
self.stream.generate_disposal_code(code)
self.stream.free_temps(code)
def generate_function_definitions(self, env, code):
if self.stream:
self.stream.generate_function_definitions(env, code)
self.arg_tuple.generate_function_definitions(env, code)
def annotate(self, code):
if self.stream:
self.stream.annotate(code)
self.arg_tuple.annotate(code)
class ExecStatNode(StatNode):
# exec statement
#
# args [ExprNode]
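    #
    # Illustrative example (Py2 exec statement syntax, editorial comment):
    #
    #   exec code in globals_dict, locals_dict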
child_attrs = ["args"]
def analyse_expressions(self, env):
for i, arg in enumerate(self.args):
arg = arg.analyse_expressions(env)
arg = arg.coerce_to_pyobject(env)
self.args[i] = arg
env.use_utility_code(Builtin.pyexec_utility_code)
return self
nogil_check = Node.gil_error
gil_message = "Python exec statement"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
args = []
for arg in self.args:
arg.generate_evaluation_code(code)
args.append(arg.py_result())
args = tuple(args + ['0', '0'][:3-len(args)])
temp_result = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
code.putln("%s = __Pyx_PyExec3(%s, %s, %s);" % ((temp_result,) + args))
for arg in self.args:
arg.generate_disposal_code(code)
arg.free_temps(code)
code.putln(
code.error_goto_if_null(temp_result, self.pos))
code.put_gotref(temp_result)
code.put_decref_clear(temp_result, py_object_type)
code.funcstate.release_temp(temp_result)
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
class DelStatNode(StatNode):
# del statement
#
# args [ExprNode]
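    #
    # Illustrative example (editorial comment):
    #
    #   del a, b[i], c.attr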
child_attrs = ["args"]
ignore_nonexisting = False
def analyse_declarations(self, env):
for arg in self.args:
arg.analyse_target_declaration(env)
def analyse_expressions(self, env):
for i, arg in enumerate(self.args):
arg = self.args[i] = arg.analyse_target_expression(env, None)
if arg.type.is_pyobject or (arg.is_name and arg.type.is_memoryviewslice):
if arg.is_name and arg.entry.is_cglobal:
error(arg.pos, "Deletion of global C variable")
elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
self.cpp_check(env)
elif arg.type.is_cpp_class:
error(arg.pos, "Deletion of non-heap C++ object")
elif arg.is_subscript and arg.base.type is Builtin.bytearray_type:
pass # del ba[i]
else:
error(arg.pos, "Deletion of non-Python, non-C++ object")
#arg.release_target_temp(env)
return self
def nogil_check(self, env):
for arg in self.args:
if arg.type.is_pyobject:
self.gil_error()
gil_message = "Deleting Python object"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
for arg in self.args:
if (arg.type.is_pyobject or
arg.type.is_memoryviewslice or
arg.is_subscript and arg.base.type is Builtin.bytearray_type):
arg.generate_deletion_code(
code, ignore_nonexisting=self.ignore_nonexisting)
elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
arg.generate_result_code(code)
code.putln("delete %s;" % arg.result())
# else error reported earlier
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
class PassStatNode(StatNode):
# pass statement
child_attrs = []
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class IndirectionNode(StatListNode):
"""
This adds an indirection so that the node can be shared and a subtree can
be removed at any time by clearing self.stats.
"""
def __init__(self, stats):
super(IndirectionNode, self).__init__(stats[0].pos, stats=stats)
class BreakStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if not code.break_label:
error(self.pos, "break statement not inside loop")
else:
code.put_goto(code.break_label)
class ContinueStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
if not code.continue_label:
error(self.pos, "continue statement not inside loop")
return
code.mark_pos(self.pos)
code.put_goto(code.continue_label)
class ReturnStatNode(StatNode):
# return statement
#
# value ExprNode or None
# return_type PyrexType
# in_generator return inside of generator => raise StopIteration
child_attrs = ["value"]
is_terminator = True
in_generator = False
# Whether we are in a parallel section
in_parallel = False
def analyse_expressions(self, env):
return_type = env.return_type
self.return_type = return_type
if not return_type:
error(self.pos, "Return not inside a function body")
return self
if self.value:
self.value = self.value.analyse_types(env)
if return_type.is_void or return_type.is_returncode:
error(self.value.pos, "Return with value in void function")
else:
self.value = self.value.coerce_to(env.return_type, env)
else:
if (not return_type.is_void
and not return_type.is_pyobject
and not return_type.is_returncode):
error(self.pos, "Return value required")
return self
def nogil_check(self, env):
if self.return_type.is_pyobject:
self.gil_error()
gil_message = "Returning Python object"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if not self.return_type:
# error reported earlier
return
if self.return_type.is_pyobject:
code.put_xdecref(Naming.retval_cname,
self.return_type)
if self.value:
self.value.generate_evaluation_code(code)
if self.return_type.is_memoryviewslice:
from . import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=Naming.retval_cname,
lhs_type=self.return_type,
lhs_pos=self.value.pos,
rhs=self.value,
code=code,
have_gil=self.in_nogil_context)
elif self.in_generator:
# return value == raise StopIteration(value), but uncatchable
code.globalstate.use_utility_code(
UtilityCode.load_cached("ReturnWithStopIteration", "Coroutine.c"))
code.putln("%s = NULL; __Pyx_ReturnWithStopIteration(%s);" % (
Naming.retval_cname,
self.value.py_result()))
self.value.generate_disposal_code(code)
else:
self.value.make_owned_reference(code)
code.putln("%s = %s;" % (
Naming.retval_cname,
self.value.result_as(self.return_type)))
self.value.generate_post_assignment_code(code)
self.value.free_temps(code)
else:
if self.return_type.is_pyobject:
if self.in_generator:
code.putln("%s = NULL;" % Naming.retval_cname)
else:
code.put_init_to_py_none(Naming.retval_cname, self.return_type)
elif self.return_type.is_returncode:
self.put_return(code, self.return_type.default_value)
for cname, type in code.funcstate.temps_holding_reference():
code.put_decref_clear(cname, type)
code.put_goto(code.return_label)
def put_return(self, code, value):
if self.in_parallel:
code.putln_openmp("#pragma omp critical(__pyx_returning)")
code.putln("%s = %s;" % (Naming.retval_cname, value))
def generate_function_definitions(self, env, code):
if self.value is not None:
self.value.generate_function_definitions(env, code)
def annotate(self, code):
if self.value:
self.value.annotate(code)
class RaiseStatNode(StatNode):
# raise statement
#
# exc_type ExprNode or None
# exc_value ExprNode or None
# exc_tb ExprNode or None
# cause ExprNode or None
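    #
    # Illustrative examples (editorial comment):
    #
    #   raise exc_type, exc_value, exc_tb    # Py2 form
    #   raise exc_value from cause           # Py3 form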
child_attrs = ["exc_type", "exc_value", "exc_tb", "cause"]
is_terminator = True
def analyse_expressions(self, env):
if self.exc_type:
exc_type = self.exc_type.analyse_types(env)
self.exc_type = exc_type.coerce_to_pyobject(env)
if self.exc_value:
exc_value = self.exc_value.analyse_types(env)
self.exc_value = exc_value.coerce_to_pyobject(env)
if self.exc_tb:
exc_tb = self.exc_tb.analyse_types(env)
self.exc_tb = exc_tb.coerce_to_pyobject(env)
if self.cause:
cause = self.cause.analyse_types(env)
self.cause = cause.coerce_to_pyobject(env)
# special cases for builtin exceptions
self.builtin_exc_name = None
if self.exc_type and not self.exc_value and not self.exc_tb:
exc = self.exc_type
from . import ExprNodes
if (isinstance(exc, ExprNodes.SimpleCallNode) and
not (exc.args or (exc.arg_tuple is not None and exc.arg_tuple.args))):
exc = exc.function # extract the exception type
if exc.is_name and exc.entry.is_builtin:
self.builtin_exc_name = exc.name
if self.builtin_exc_name == 'MemoryError':
self.exc_type = None # has a separate implementation
return self
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if self.builtin_exc_name == 'MemoryError':
code.putln('PyErr_NoMemory(); %s' % code.error_goto(self.pos))
return
if self.exc_type:
self.exc_type.generate_evaluation_code(code)
type_code = self.exc_type.py_result()
else:
type_code = "0"
if self.exc_value:
self.exc_value.generate_evaluation_code(code)
value_code = self.exc_value.py_result()
else:
value_code = "0"
if self.exc_tb:
self.exc_tb.generate_evaluation_code(code)
tb_code = self.exc_tb.py_result()
else:
tb_code = "0"
if self.cause:
self.cause.generate_evaluation_code(code)
cause_code = self.cause.py_result()
else:
cause_code = "0"
code.globalstate.use_utility_code(raise_utility_code)
code.putln(
"__Pyx_Raise(%s, %s, %s, %s);" % (
type_code,
value_code,
tb_code,
cause_code))
for obj in (self.exc_type, self.exc_value, self.exc_tb, self.cause):
if obj:
obj.generate_disposal_code(code)
obj.free_temps(code)
code.putln(
code.error_goto(self.pos))
def generate_function_definitions(self, env, code):
if self.exc_type is not None:
self.exc_type.generate_function_definitions(env, code)
if self.exc_value is not None:
self.exc_value.generate_function_definitions(env, code)
if self.exc_tb is not None:
self.exc_tb.generate_function_definitions(env, code)
if self.cause is not None:
self.cause.generate_function_definitions(env, code)
def annotate(self, code):
if self.exc_type:
self.exc_type.annotate(code)
if self.exc_value:
self.exc_value.annotate(code)
if self.exc_tb:
self.exc_tb.annotate(code)
if self.cause:
self.cause.annotate(code)
class ReraiseStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
return self
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
vars = code.funcstate.exc_vars
if vars:
code.globalstate.use_utility_code(restore_exception_utility_code)
code.put_giveref(vars[0])
code.put_giveref(vars[1])
# fresh exceptions may not have a traceback yet (-> finally!)
code.put_xgiveref(vars[2])
code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % tuple(vars))
for varname in vars:
code.put("%s = 0; " % varname)
code.putln()
code.putln(code.error_goto(self.pos))
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ReRaiseException", "Exceptions.c"))
code.putln("__Pyx_ReraiseException(); %s" % code.error_goto(self.pos))
class AssertStatNode(StatNode):
# assert statement
#
# cond ExprNode
# value ExprNode or None
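    #
    # Illustrative example (editorial comment):
    #
    #   assert cond, value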
child_attrs = ["cond", "value"]
def analyse_expressions(self, env):
self.cond = self.cond.analyse_boolean_expression(env)
if self.value:
value = self.value.analyse_types(env)
if value.type is Builtin.tuple_type or not value.type.is_builtin_type:
# prevent tuple values from being interpreted as argument value tuples
from .ExprNodes import TupleNode
value = TupleNode(value.pos, args=[value], slow=True)
self.value = value.analyse_types(env, skip_children=True).coerce_to_pyobject(env)
else:
self.value = value.coerce_to_pyobject(env)
return self
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS")
code.putln("if (unlikely(!Py_OptimizeFlag)) {")
code.mark_pos(self.pos)
self.cond.generate_evaluation_code(code)
code.putln(
"if (unlikely(!%s)) {" % self.cond.result())
if self.value:
self.value.generate_evaluation_code(code)
code.putln(
"PyErr_SetObject(PyExc_AssertionError, %s);" % self.value.py_result())
self.value.generate_disposal_code(code)
self.value.free_temps(code)
else:
code.putln(
"PyErr_SetNone(PyExc_AssertionError);")
code.putln(
code.error_goto(self.pos))
code.putln(
"}")
self.cond.generate_disposal_code(code)
self.cond.free_temps(code)
code.putln(
"}")
code.putln("#endif")
def generate_function_definitions(self, env, code):
self.cond.generate_function_definitions(env, code)
if self.value is not None:
self.value.generate_function_definitions(env, code)
def annotate(self, code):
self.cond.annotate(code)
if self.value:
self.value.annotate(code)
class IfStatNode(StatNode):
# if statement
#
# if_clauses [IfClauseNode]
# else_clause StatNode or None
child_attrs = ["if_clauses", "else_clause"]
def analyse_declarations(self, env):
for if_clause in self.if_clauses:
if_clause.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
self.if_clauses = [if_clause.analyse_expressions(env) for if_clause in self.if_clauses]
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
end_label = code.new_label()
last = len(self.if_clauses)
if not self.else_clause:
last -= 1 # avoid redundant goto at end of last if-clause
for i, if_clause in enumerate(self.if_clauses):
if_clause.generate_execution_code(code, end_label, is_last=i == last)
if self.else_clause:
code.mark_pos(self.else_clause.pos)
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(end_label)
def generate_function_definitions(self, env, code):
for clause in self.if_clauses:
clause.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
for if_clause in self.if_clauses:
if_clause.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class IfClauseNode(Node):
# if or elif clause in an if statement
#
# condition ExprNode
# body StatNode
child_attrs = ["condition", "body"]
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.condition = self.condition.analyse_temp_boolean_expression(env)
self.body = self.body.analyse_expressions(env)
return self
def generate_execution_code(self, code, end_label, is_last):
self.condition.generate_evaluation_code(code)
code.mark_pos(self.pos)
code.putln("if (%s) {" % self.condition.result())
self.condition.generate_disposal_code(code)
self.condition.free_temps(code)
self.body.generate_execution_code(code)
code.mark_pos(self.pos, trace=False)
if not (is_last or self.body.is_terminator):
code.put_goto(end_label)
code.putln("}")
def generate_function_definitions(self, env, code):
self.condition.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def annotate(self, code):
self.condition.annotate(code)
self.body.annotate(code)
class SwitchCaseNode(StatNode):
# Generated in the optimization of an if-elif-else node
#
# conditions [ExprNode]
# body StatNode
child_attrs = ['conditions', 'body']
def generate_execution_code(self, code):
for cond in self.conditions:
code.mark_pos(cond.pos)
cond.generate_evaluation_code(code)
code.putln("case %s:" % cond.result())
self.body.generate_execution_code(code)
code.mark_pos(self.pos, trace=False)
code.putln("break;")
def generate_function_definitions(self, env, code):
for cond in self.conditions:
cond.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def annotate(self, code):
for cond in self.conditions:
cond.annotate(code)
self.body.annotate(code)
class SwitchStatNode(StatNode):
# Generated in the optimization of an if-elif-else node
#
# test ExprNode
# cases [SwitchCaseNode]
# else_clause StatNode or None
child_attrs = ['test', 'cases', 'else_clause']
def generate_execution_code(self, code):
self.test.generate_evaluation_code(code)
code.mark_pos(self.pos)
code.putln("switch (%s) {" % self.test.result())
for case in self.cases:
case.generate_execution_code(code)
if self.else_clause is not None:
code.putln("default:")
self.else_clause.generate_execution_code(code)
code.putln("break;")
else:
# Always generate a default clause to prevent C compiler warnings
# about unmatched enum values (it was not the user who decided to
            # generate the switch statement, so they shouldn't be bothered).
code.putln("default: break;")
code.putln("}")
def generate_function_definitions(self, env, code):
self.test.generate_function_definitions(env, code)
for case in self.cases:
case.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.test.annotate(code)
for case in self.cases:
case.annotate(code)
if self.else_clause is not None:
self.else_clause.annotate(code)
class LoopNode(object):
pass
class WhileStatNode(LoopNode, StatNode):
# while statement
#
# condition ExprNode
# body StatNode
# else_clause StatNode
child_attrs = ["condition", "body", "else_clause"]
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
if self.condition:
self.condition = self.condition.analyse_temp_boolean_expression(env)
self.body = self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
old_loop_labels = code.new_loop_labels()
code.putln(
"while (1) {")
if self.condition:
self.condition.generate_evaluation_code(code)
self.condition.generate_disposal_code(code)
code.putln(
"if (!%s) break;" % self.condition.result())
self.condition.free_temps(code)
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
code.putln("}")
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
code.mark_pos(self.else_clause.pos)
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(break_label)
def generate_function_definitions(self, env, code):
if self.condition:
self.condition.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
if self.condition:
self.condition.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class DictIterationNextNode(Node):
# Helper node for calling PyDict_Next() inside of a WhileStatNode
# and checking the dictionary size for changes. Created in
# Optimize.py.
child_attrs = ['dict_obj', 'expected_size', 'pos_index_var',
'coerced_key_var', 'coerced_value_var', 'coerced_tuple_var',
'key_target', 'value_target', 'tuple_target', 'is_dict_flag']
coerced_key_var = key_ref = None
coerced_value_var = value_ref = None
coerced_tuple_var = tuple_ref = None
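    # For each requested target (key, value, key/value tuple) a PyObject* temp
    # is allocated and its address is passed to __Pyx_dict_iter_next();
    # targets that are not requested pass NULL instead.  The coerced_*_var
    # nodes then convert the fetched values to the target types before the
    # final assignments.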
def __init__(self, dict_obj, expected_size, pos_index_var,
key_target, value_target, tuple_target, is_dict_flag):
Node.__init__(
self, dict_obj.pos,
dict_obj=dict_obj,
expected_size=expected_size,
pos_index_var=pos_index_var,
key_target=key_target,
value_target=value_target,
tuple_target=tuple_target,
is_dict_flag=is_dict_flag,
is_temp=True,
type=PyrexTypes.c_bint_type)
def analyse_expressions(self, env):
from . import ExprNodes
self.dict_obj = self.dict_obj.analyse_types(env)
self.expected_size = self.expected_size.analyse_types(env)
if self.pos_index_var:
self.pos_index_var = self.pos_index_var.analyse_types(env)
if self.key_target:
self.key_target = self.key_target.analyse_target_types(env)
self.key_ref = ExprNodes.TempNode(self.key_target.pos, PyrexTypes.py_object_type)
self.coerced_key_var = self.key_ref.coerce_to(self.key_target.type, env)
if self.value_target:
self.value_target = self.value_target.analyse_target_types(env)
self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
if self.tuple_target:
self.tuple_target = self.tuple_target.analyse_target_types(env)
self.tuple_ref = ExprNodes.TempNode(self.tuple_target.pos, PyrexTypes.py_object_type)
self.coerced_tuple_var = self.tuple_ref.coerce_to(self.tuple_target.type, env)
self.is_dict_flag = self.is_dict_flag.analyse_types(env)
return self
def generate_function_definitions(self, env, code):
self.dict_obj.generate_function_definitions(env, code)
def generate_execution_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("dict_iter", "Optimize.c"))
self.dict_obj.generate_evaluation_code(code)
assignments = []
temp_addresses = []
for var, result, target in [(self.key_ref, self.coerced_key_var, self.key_target),
(self.value_ref, self.coerced_value_var, self.value_target),
(self.tuple_ref, self.coerced_tuple_var, self.tuple_target)]:
if target is None:
addr = 'NULL'
else:
assignments.append((var, result, target))
var.allocate(code)
addr = '&%s' % var.result()
temp_addresses.append(addr)
result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
code.putln("%s = __Pyx_dict_iter_next(%s, %s, &%s, %s, %s, %s, %s);" % (
result_temp,
self.dict_obj.py_result(),
self.expected_size.result(),
self.pos_index_var.result(),
temp_addresses[0],
temp_addresses[1],
temp_addresses[2],
self.is_dict_flag.result()
))
code.putln("if (unlikely(%s == 0)) break;" % result_temp)
code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
code.funcstate.release_temp(result_temp)
# evaluate all coercions before the assignments
for var, result, target in assignments:
code.put_gotref(var.result())
for var, result, target in assignments:
result.generate_evaluation_code(code)
for var, result, target in assignments:
target.generate_assignment_code(result, code)
var.release(code)
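# Factory function: loops that carry an 'iterator' keyword are 'for ... in ...'
# loops and become ForInStatNode (or AsyncForStatNode for 'async for'), while
# the legacy 'for name from lower <= name < upper' form becomes ForFromStatNode.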
def ForStatNode(pos, **kw):
if 'iterator' in kw:
if kw['iterator'].is_async:
return AsyncForStatNode(pos, **kw)
else:
return ForInStatNode(pos, **kw)
else:
return ForFromStatNode(pos, **kw)
class _ForInStatNode(LoopNode, StatNode):
# Base class of 'for-in' statements.
#
# target ExprNode
# iterator IteratorNode | AwaitExprNode(AsyncIteratorNode)
# body StatNode
# else_clause StatNode
# item NextNode | AwaitExprNode(AsyncNextNode)
# is_async boolean true for 'async for' statements
child_attrs = ["target", "item", "iterator", "body", "else_clause"]
item = None
is_async = False
def _create_item_node(self):
raise NotImplementedError("must be implemented by subclasses")
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
self._create_item_node()
def analyse_expressions(self, env):
self.target = self.target.analyse_target_types(env)
self.iterator = self.iterator.analyse_expressions(env)
self._create_item_node() # must rewrap self.item after analysis
self.item = self.item.analyse_expressions(env)
if (not self.is_async and
(self.iterator.type.is_ptr or self.iterator.type.is_array) and
self.target.type.assignable_from(self.iterator.type)):
# C array slice optimization.
pass
else:
self.item = self.item.coerce_to(self.target.type, env)
self.body = self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
old_loop_labels = code.new_loop_labels()
self.iterator.generate_evaluation_code(code)
code.putln("for (;;) {")
self.item.generate_evaluation_code(code)
self.target.generate_assignment_code(self.item, code)
self.body.generate_execution_code(code)
code.mark_pos(self.pos)
code.put_label(code.continue_label)
code.putln("}")
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
# in nested loops, the 'else' block can contain a
# 'continue' statement for the outer loop, but we may need
# to generate cleanup code before taking that path, so we
# intercept it here
orig_continue_label = code.continue_label
code.continue_label = code.new_label('outer_continue')
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
if code.label_used(code.continue_label):
code.put_goto(break_label)
code.mark_pos(self.pos)
code.put_label(code.continue_label)
self.iterator.generate_disposal_code(code)
code.put_goto(orig_continue_label)
code.set_loop_labels(old_loop_labels)
code.mark_pos(self.pos)
if code.label_used(break_label):
code.put_label(break_label)
self.iterator.generate_disposal_code(code)
self.iterator.free_temps(code)
def generate_function_definitions(self, env, code):
self.target.generate_function_definitions(env, code)
self.iterator.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.target.annotate(code)
self.iterator.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
self.item.annotate(code)
class ForInStatNode(_ForInStatNode):
# 'for' statement
is_async = False
def _create_item_node(self):
from .ExprNodes import NextNode
self.item = NextNode(self.iterator)
class AsyncForStatNode(_ForInStatNode):
# 'async for' statement
#
# iterator AwaitExprNode(AsyncIteratorNode)
# item AwaitIterNextExprNode(AsyncIteratorNode)
is_async = True
def __init__(self, pos, iterator, **kw):
assert 'item' not in kw
from . import ExprNodes
# AwaitExprNodes must appear before running MarkClosureVisitor
kw['iterator'] = ExprNodes.AwaitExprNode(iterator.pos, arg=iterator)
kw['item'] = ExprNodes.AwaitIterNextExprNode(iterator.pos, arg=None)
_ForInStatNode.__init__(self, pos, **kw)
def _create_item_node(self):
from . import ExprNodes
self.item.arg = ExprNodes.AsyncNextNode(self.iterator)
class ForFromStatNode(LoopNode, StatNode):
# for name from expr rel name rel expr
#
# target NameNode
# bound1 ExprNode
# relation1 string
# relation2 string
# bound2 ExprNode
# step ExprNode or None
# body StatNode
# else_clause StatNode or None
#
# Used internally:
#
# from_range bool
# is_py_target bool
# loopvar_node ExprNode (usually a NameNode or temp node)
# py_loopvar_node PyTempNode or None
child_attrs = ["target", "bound1", "bound2", "step", "body", "else_clause"]
is_py_target = False
loopvar_node = None
py_loopvar_node = None
from_range = False
gil_message = "For-loop using object bounds or target"
def nogil_check(self, env):
for x in (self.target, self.bound1, self.bound2):
if x.type.is_pyobject:
self.gil_error()
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
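    # analyse_expressions widens the C loop variable type to cover both bounds
    # and the step (when they are not Python objects).  For a Python object
    # target, a separate C loop counter temp is used and a CloneNode coerced
    # to a Python object re-assigns the target on every iteration.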
def analyse_expressions(self, env):
from . import ExprNodes
self.target = self.target.analyse_target_types(env)
self.bound1 = self.bound1.analyse_types(env)
self.bound2 = self.bound2.analyse_types(env)
if self.step is not None:
if isinstance(self.step, ExprNodes.UnaryMinusNode):
warning(self.step.pos, "Probable infinite loop in for-from-by statement. "
"Consider switching the directions of the relations.", 2)
self.step = self.step.analyse_types(env)
if self.target.type.is_numeric:
loop_type = self.target.type
else:
loop_type = PyrexTypes.c_int_type
if not self.bound1.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
if not self.bound2.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound2.type)
if self.step is not None and not self.step.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.step.type)
self.bound1 = self.bound1.coerce_to(loop_type, env)
self.bound2 = self.bound2.coerce_to(loop_type, env)
if not self.bound2.is_literal:
self.bound2 = self.bound2.coerce_to_temp(env)
if self.step is not None:
self.step = self.step.coerce_to(loop_type, env)
if not self.step.is_literal:
self.step = self.step.coerce_to_temp(env)
target_type = self.target.type
if not (target_type.is_pyobject or target_type.is_numeric):
error(self.target.pos, "for-from loop variable must be c numeric type or Python object")
if target_type.is_numeric:
self.is_py_target = False
if isinstance(self.target, ExprNodes.BufferIndexNode):
raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.")
self.loopvar_node = self.target
self.py_loopvar_node = None
else:
self.is_py_target = True
c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
self.loopvar_node = c_loopvar_node
self.py_loopvar_node = \
ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
self.body = self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
old_loop_labels = code.new_loop_labels()
from_range = self.from_range
self.bound1.generate_evaluation_code(code)
self.bound2.generate_evaluation_code(code)
offset, incop = self.relation_table[self.relation1]
if self.step is not None:
self.step.generate_evaluation_code(code)
step = self.step.result()
incop = "%s=%s" % (incop[0], step)
from . import ExprNodes
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.allocate(code)
if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
self.py_loopvar_node.allocate(code)
if from_range:
loopvar_name = code.funcstate.allocate_temp(self.target.type, False)
else:
loopvar_name = self.loopvar_node.result()
if self.target.type.is_int and not self.target.type.signed and self.relation2[0] == '>':
# Handle the case where the endpoint of an unsigned int iteration
# is within step of 0.
if not self.step:
step = 1
code.putln("for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % (
loopvar_name,
self.bound1.result(), offset, step,
loopvar_name, self.relation2, self.bound2.result(), step,
loopvar_name, incop))
else:
code.putln("for (%s = %s%s; %s %s %s; %s%s) {" % (
loopvar_name,
self.bound1.result(), offset,
loopvar_name, self.relation2, self.bound2.result(),
loopvar_name, incop))
if self.py_loopvar_node:
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
elif from_range:
code.putln("%s = %s;" % (
self.target.result(), loopvar_name))
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
if self.py_loopvar_node:
# This mess is to make for..from loops with python targets behave
            # exactly like those with C targets with regard to re-assignment
# of the loop variable.
if self.target.entry.is_pyglobal:
# We know target is a NameNode, this is the only ugly case.
target_node = ExprNodes.PyTempNode(self.target.pos, None)
target_node.allocate(code)
interned_cname = code.intern_identifier(self.target.entry.name)
if self.target.entry.scope.is_module_scope:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
lookup_func = '__Pyx_GetModuleGlobalName(%s)'
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
lookup_func = '__Pyx_GetNameInClass(%s, %%s)' % (
self.target.entry.scope.namespace_cname)
code.putln("%s = %s; %s" % (
target_node.result(),
lookup_func % interned_cname,
code.error_goto_if_null(target_node.result(), self.target.pos)))
code.put_gotref(target_node.result())
else:
target_node = self.target
from_py_node = ExprNodes.CoerceFromPyTypeNode(
self.loopvar_node.type, target_node, self.target.entry.scope)
from_py_node.temp_code = loopvar_name
from_py_node.generate_result_code(code)
if self.target.entry.is_pyglobal:
code.put_decref(target_node.result(), target_node.type)
target_node.release(code)
code.putln("}")
if self.py_loopvar_node:
# This is potentially wasteful, but we don't want the semantics to
            # depend on whether or not the loop target is a Python object.
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
if from_range:
code.funcstate.release_temp(loopvar_name)
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(break_label)
self.bound1.generate_disposal_code(code)
self.bound1.free_temps(code)
self.bound2.generate_disposal_code(code)
self.bound2.free_temps(code)
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.release(code)
if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
self.py_loopvar_node.release(code)
if self.step is not None:
self.step.generate_disposal_code(code)
self.step.free_temps(code)
relation_table = {
# {relop : (initial offset, increment op)}
'<=': ("", "++"),
'<' : ("+1", "++"),
'>=': ("", "--"),
'>' : ("-1", "--"),
}
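    # Example: "for i from 0 <= i < n by s" evaluates bound1=0 and bound2=n and
    # emits roughly "for (i = 0; i < n; i += s) { ... }"; the initial offset
    # ("+1"/"-1") and the increment operator come from relation_table above.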
def generate_function_definitions(self, env, code):
self.target.generate_function_definitions(env, code)
self.bound1.generate_function_definitions(env, code)
self.bound2.generate_function_definitions(env, code)
if self.step is not None:
self.step.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.target.annotate(code)
self.bound1.annotate(code)
self.bound2.annotate(code)
if self.step:
self.step.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class WithStatNode(StatNode):
"""
Represents a Python with statement.
Implemented by the WithTransform as follows:
MGR = EXPR
EXIT = MGR.__exit__
VALUE = MGR.__enter__()
EXC = True
try:
try:
TARGET = VALUE # optional
BODY
except:
EXC = False
if not EXIT(*EXCINFO):
raise
finally:
if EXC:
EXIT(None, None, None)
MGR = EXIT = VALUE = None
"""
# manager The with statement manager object
# target ExprNode the target lhs of the __enter__() call
# body StatNode
# enter_call ExprNode the call to the __enter__() method
# exit_var String the cname of the __exit__() method reference
child_attrs = ["manager", "enter_call", "target", "body"]
enter_call = None
target_temp = None
def analyse_declarations(self, env):
self.manager.analyse_declarations(env)
self.enter_call.analyse_declarations(env)
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.manager = self.manager.analyse_types(env)
self.enter_call = self.enter_call.analyse_types(env)
if self.target:
# set up target_temp before descending into body (which uses it)
from .ExprNodes import TempNode
self.target_temp = TempNode(self.enter_call.pos, self.enter_call.type)
self.body = self.body.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
self.manager.generate_function_definitions(env, code)
self.enter_call.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
code.putln("/*with:*/ {")
self.manager.generate_evaluation_code(code)
self.exit_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
code.putln("%s = __Pyx_PyObject_LookupSpecial(%s, %s); %s" % (
self.exit_var,
self.manager.py_result(),
code.intern_identifier(EncodedString('__aexit__' if self.is_async else '__exit__')),
code.error_goto_if_null(self.exit_var, self.pos),
))
code.put_gotref(self.exit_var)
# need to free exit_var in the face of exceptions during setup
old_error_label = code.new_error_label()
intermediate_error_label = code.error_label
self.enter_call.generate_evaluation_code(code)
if self.target:
# The temp result will be cleaned up by the WithTargetAssignmentStatNode
# after assigning its result to the target of the 'with' statement.
self.target_temp.allocate(code)
self.enter_call.make_owned_reference(code)
code.putln("%s = %s;" % (self.target_temp.result(), self.enter_call.result()))
self.enter_call.generate_post_assignment_code(code)
else:
self.enter_call.generate_disposal_code(code)
self.enter_call.free_temps(code)
self.manager.generate_disposal_code(code)
self.manager.free_temps(code)
code.error_label = old_error_label
self.body.generate_execution_code(code)
if code.label_used(intermediate_error_label):
step_over_label = code.new_label()
code.put_goto(step_over_label)
code.put_label(intermediate_error_label)
code.put_decref_clear(self.exit_var, py_object_type)
code.put_goto(old_error_label)
code.put_label(step_over_label)
code.funcstate.release_temp(self.exit_var)
code.putln('}')
class WithTargetAssignmentStatNode(AssignmentNode):
# The target assignment of the 'with' statement value (return
# value of the __enter__() call).
#
# This is a special cased assignment that properly cleans up the RHS.
#
# lhs ExprNode the assignment target
# rhs ExprNode a (coerced) TempNode for the rhs (from WithStatNode)
# with_node WithStatNode the surrounding with-statement
child_attrs = ["rhs", "lhs"]
with_node = None
rhs = None
def analyse_declarations(self, env):
self.lhs.analyse_target_declaration(env)
def analyse_expressions(self, env):
self.lhs = self.lhs.analyse_target_types(env)
self.lhs.gil_assignment_check(env)
self.rhs = self.with_node.target_temp.coerce_to(self.lhs.type, env)
return self
def generate_execution_code(self, code):
self.rhs.generate_evaluation_code(code)
self.lhs.generate_assignment_code(self.rhs, code)
self.with_node.target_temp.release(code)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
class TryExceptStatNode(StatNode):
# try .. except statement
#
# body StatNode
# except_clauses [ExceptClauseNode]
# else_clause StatNode or None
child_attrs = ["body", "except_clauses", "else_clause"]
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
for except_clause in self.except_clauses:
except_clause.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
default_clause_seen = 0
for i, except_clause in enumerate(self.except_clauses):
except_clause = self.except_clauses[i] = except_clause.analyse_expressions(env)
if default_clause_seen:
error(except_clause.pos, "default 'except:' must be last")
if not except_clause.pattern:
default_clause_seen = 1
self.has_default_clause = default_clause_seen
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
nogil_check = Node.gil_error
gil_message = "Try-except statement"
def generate_execution_code(self, code):
old_return_label = code.return_label
old_break_label = code.break_label
old_continue_label = code.continue_label
old_error_label = code.new_error_label()
our_error_label = code.error_label
except_end_label = code.new_label('exception_handled')
except_error_label = code.new_label('except_error')
except_return_label = code.new_label('except_return')
try_return_label = code.new_label('try_return')
try_break_label = code.new_label('try_break')
try_continue_label = code.new_label('try_continue')
try_end_label = code.new_label('try_end')
exc_save_vars = [code.funcstate.allocate_temp(py_object_type, False)
for _ in range(3)]
code.mark_pos(self.pos)
code.putln("{")
save_exc = code.insertion_point()
code.putln(
"/*try:*/ {")
code.return_label = try_return_label
code.break_label = try_break_label
code.continue_label = try_continue_label
self.body.generate_execution_code(code)
code.mark_pos(self.pos, trace=False)
code.putln(
"}")
temps_to_clean_up = code.funcstate.all_free_managed_temps()
can_raise = code.label_used(our_error_label)
if can_raise:
# inject code before the try block to save away the exception state
code.globalstate.use_utility_code(reset_exception_utility_code)
save_exc.putln("__Pyx_PyThreadState_declare")
save_exc.putln("__Pyx_PyThreadState_assign")
save_exc.putln("__Pyx_ExceptionSave(%s);" % (
', '.join(['&%s' % var for var in exc_save_vars])))
for var in exc_save_vars:
save_exc.put_xgotref(var)
def restore_saved_exception():
for name in exc_save_vars:
code.put_xgiveref(name)
code.putln("__Pyx_ExceptionReset(%s);" %
', '.join(exc_save_vars))
else:
# try block cannot raise exceptions, but we had to allocate the temps above,
# so just keep the C compiler from complaining about them being unused
save_exc.putln("if (%s); else {/*mark used*/}" % '||'.join(exc_save_vars))
def restore_saved_exception():
pass
code.error_label = except_error_label
code.return_label = except_return_label
normal_case_terminates = self.body.is_terminator
if self.else_clause:
code.mark_pos(self.else_clause.pos)
code.putln(
"/*else:*/ {")
self.else_clause.generate_execution_code(code)
code.putln(
"}")
if not normal_case_terminates:
normal_case_terminates = self.else_clause.is_terminator
if can_raise:
if not normal_case_terminates:
for var in exc_save_vars:
code.put_xdecref_clear(var, py_object_type)
code.put_goto(try_end_label)
code.put_label(our_error_label)
code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded
for temp_name, temp_type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, temp_type)
for except_clause in self.except_clauses:
except_clause.generate_handling_code(code, except_end_label)
if not self.has_default_clause:
code.put_goto(except_error_label)
for exit_label, old_label in [(except_error_label, old_error_label),
(try_break_label, old_break_label),
(try_continue_label, old_continue_label),
(try_return_label, old_return_label),
(except_return_label, old_return_label)]:
if code.label_used(exit_label):
if not normal_case_terminates and not code.label_used(try_end_label):
code.put_goto(try_end_label)
code.put_label(exit_label)
code.mark_pos(self.pos, trace=False)
if can_raise:
code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded
restore_saved_exception()
code.put_goto(old_label)
if code.label_used(except_end_label):
if not normal_case_terminates and not code.label_used(try_end_label):
code.put_goto(try_end_label)
code.put_label(except_end_label)
if can_raise:
code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded
restore_saved_exception()
if code.label_used(try_end_label):
code.put_label(try_end_label)
code.putln("}")
for cname in exc_save_vars:
code.funcstate.release_temp(cname)
code.return_label = old_return_label
code.break_label = old_break_label
code.continue_label = old_continue_label
code.error_label = old_error_label
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
for except_clause in self.except_clauses:
except_clause.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.body.annotate(code)
for except_node in self.except_clauses:
except_node.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class ExceptClauseNode(Node):
# Part of try ... except statement.
#
# pattern [ExprNode]
# target ExprNode or None
# body StatNode
# excinfo_target TupleNode(3*ResultRefNode) or None optional target for exception info (not owned here!)
# match_flag string result of exception match
# exc_value ExcValueNode used internally
# function_name string qualified name of enclosing function
# exc_vars (string * 3) local exception variables
# is_except_as bool Py3-style "except ... as xyz"
# excinfo_target is never set by the parser, but can be set by a transform
# in order to extract more extensive information about the exception as a
# sys.exc_info()-style tuple into a target variable
child_attrs = ["pattern", "target", "body", "exc_value"]
exc_value = None
excinfo_target = None
is_except_as = False
def analyse_declarations(self, env):
if self.target:
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.function_name = env.qualified_name
if self.pattern:
# normalise/unpack self.pattern into a list
for i, pattern in enumerate(self.pattern):
pattern = pattern.analyse_expressions(env)
self.pattern[i] = pattern.coerce_to_pyobject(env)
if self.target:
from . import ExprNodes
self.exc_value = ExprNodes.ExcValueNode(self.pos)
self.target = self.target.analyse_target_expression(env, self.exc_value)
self.body = self.body.analyse_expressions(env)
return self
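    # The handling code first matches the raised exception against the clause
    # patterns with __Pyx_PyErr_ExceptionMatches(), then fetches and normalises
    # it into three owned temps via __Pyx_GetException().  Those temps are
    # exposed as funcstate.exc_vars while the clause body runs, so that a bare
    # 'raise' (ReraiseStatNode) can restore them.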
def generate_handling_code(self, code, end_label):
code.mark_pos(self.pos)
if self.pattern:
code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
exc_tests = []
for pattern in self.pattern:
pattern.generate_evaluation_code(code)
exc_tests.append("__Pyx_PyErr_ExceptionMatches(%s)" % pattern.py_result())
match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
code.putln(
"%s = %s;" % (match_flag, ' || '.join(exc_tests)))
for pattern in self.pattern:
pattern.generate_disposal_code(code)
pattern.free_temps(code)
code.putln(
"if (%s) {" %
match_flag)
code.funcstate.release_temp(match_flag)
else:
code.putln("/*except:*/ {")
if (not getattr(self.body, 'stats', True)
and self.excinfo_target is None
and self.target is None):
            # simplest case: no exception variable, empty body (pass)
# => reset the exception state, done
code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
code.putln("__Pyx_ErrRestore(0,0,0);")
code.put_goto(end_label)
code.putln("}")
return
exc_vars = [code.funcstate.allocate_temp(py_object_type,
manage_ref=True)
for _ in range(3)]
code.put_add_traceback(self.function_name)
# We always have to fetch the exception value even if
# there is no target, because this also normalises the
# exception and stores it in the thread state.
code.globalstate.use_utility_code(get_exception_utility_code)
exc_args = "&%s, &%s, &%s" % tuple(exc_vars)
code.putln("if (__Pyx_GetException(%s) < 0) %s" % (
exc_args, code.error_goto(self.pos)))
for x in exc_vars:
code.put_gotref(x)
if self.target:
self.exc_value.set_var(exc_vars[1])
self.exc_value.generate_evaluation_code(code)
self.target.generate_assignment_code(self.exc_value, code)
if self.excinfo_target is not None:
for tempvar, node in zip(exc_vars, self.excinfo_target.args):
node.set_var(tempvar)
old_break_label, old_continue_label = code.break_label, code.continue_label
code.break_label = code.new_label('except_break')
code.continue_label = code.new_label('except_continue')
old_exc_vars = code.funcstate.exc_vars
code.funcstate.exc_vars = exc_vars
self.body.generate_execution_code(code)
code.funcstate.exc_vars = old_exc_vars
if not self.body.is_terminator:
for var in exc_vars:
code.put_decref_clear(var, py_object_type)
code.put_goto(end_label)
for new_label, old_label in [(code.break_label, old_break_label),
(code.continue_label, old_continue_label)]:
if code.label_used(new_label):
code.put_label(new_label)
for var in exc_vars:
code.put_decref_clear(var, py_object_type)
code.put_goto(old_label)
code.break_label = old_break_label
code.continue_label = old_continue_label
for temp in exc_vars:
code.funcstate.release_temp(temp)
code.putln(
"}")
def generate_function_definitions(self, env, code):
if self.target is not None:
self.target.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def annotate(self, code):
if self.pattern:
for pattern in self.pattern:
pattern.annotate(code)
if self.target:
self.target.annotate(code)
self.body.annotate(code)
class TryFinallyStatNode(StatNode):
# try ... finally statement
#
# body StatNode
# finally_clause StatNode
# finally_except_clause deep-copy of finally_clause for exception case
#
# Each of the continue, break, return and error gotos runs
# into its own deep-copy of the finally block code.
# In addition, if we're doing an error, we save the
# exception on entry to the finally block and restore
# it on exit.
child_attrs = ["body", "finally_clause", "finally_except_clause"]
preserve_exception = 1
# handle exception case, in addition to return/break/continue
handle_error_case = True
func_return_type = None
finally_except_clause = None
is_try_finally_in_nogil = False
@staticmethod
def create_analysed(pos, env, body, finally_clause):
node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause)
return node
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.finally_except_clause = copy.deepcopy(self.finally_clause)
self.finally_except_clause.analyse_declarations(env)
self.finally_clause.analyse_declarations(env)
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
self.finally_clause = self.finally_clause.analyse_expressions(env)
self.finally_except_clause = self.finally_except_clause.analyse_expressions(env)
if env.return_type and not env.return_type.is_void:
self.func_return_type = env.return_type
return self
nogil_check = Node.gil_error
gil_message = "Try-finally statement"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
old_error_label = code.error_label
old_labels = code.all_new_labels()
new_labels = code.get_all_labels()
new_error_label = code.error_label
if not self.handle_error_case:
code.error_label = old_error_label
catch_label = code.new_label()
code.putln("/*try:*/ {")
was_in_try_finally = code.funcstate.in_try_finally
code.funcstate.in_try_finally = 1
self.body.generate_execution_code(code)
code.funcstate.in_try_finally = was_in_try_finally
code.putln("}")
code.set_all_labels(old_labels)
temps_to_clean_up = code.funcstate.all_free_managed_temps()
code.mark_pos(self.finally_clause.pos)
code.putln("/*finally:*/ {")
def fresh_finally_clause(_next=[self.finally_clause]):
# generate the original subtree once and always keep a fresh copy
node = _next[0]
node_copy = copy.deepcopy(node)
if node is self.finally_clause:
_next[0] = node_copy
else:
node = node_copy
return node
preserve_error = self.preserve_exception and code.label_used(new_error_label)
needs_success_cleanup = not self.finally_clause.is_terminator
if not self.body.is_terminator:
code.putln('/*normal exit:*/{')
fresh_finally_clause().generate_execution_code(code)
if not self.finally_clause.is_terminator:
code.put_goto(catch_label)
code.putln('}')
if preserve_error:
code.putln('/*exception exit:*/{')
code.putln("__Pyx_PyThreadState_declare")
if self.is_try_finally_in_nogil:
code.declare_gilstate()
if needs_success_cleanup:
exc_lineno_cnames = tuple([
code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
for _ in range(2)])
exc_filename_cname = code.funcstate.allocate_temp(
PyrexTypes.CPtrType(PyrexTypes.c_const_type(PyrexTypes.c_char_type)),
manage_ref=False)
else:
exc_lineno_cnames = exc_filename_cname = None
exc_vars = tuple([
code.funcstate.allocate_temp(py_object_type, manage_ref=False)
for _ in range(6)])
code.put_label(new_error_label)
self.put_error_catcher(
code, temps_to_clean_up, exc_vars, exc_lineno_cnames, exc_filename_cname)
finally_old_labels = code.all_new_labels()
code.putln('{')
old_exc_vars = code.funcstate.exc_vars
code.funcstate.exc_vars = exc_vars[:3]
self.finally_except_clause.generate_execution_code(code)
code.funcstate.exc_vars = old_exc_vars
code.putln('}')
if needs_success_cleanup:
self.put_error_uncatcher(code, exc_vars, exc_lineno_cnames, exc_filename_cname)
if exc_lineno_cnames:
for cname in exc_lineno_cnames:
code.funcstate.release_temp(cname)
if exc_filename_cname:
code.funcstate.release_temp(exc_filename_cname)
code.put_goto(old_error_label)
for new_label, old_label in zip(code.get_all_labels(), finally_old_labels):
if not code.label_used(new_label):
continue
code.put_label(new_label)
self.put_error_cleaner(code, exc_vars)
code.put_goto(old_label)
for cname in exc_vars:
code.funcstate.release_temp(cname)
code.putln('}')
code.set_all_labels(old_labels)
return_label = code.return_label
for i, (new_label, old_label) in enumerate(zip(new_labels, old_labels)):
if not code.label_used(new_label):
continue
if new_label == new_error_label and preserve_error:
continue # handled above
code.put('%s: ' % new_label)
code.putln('{')
ret_temp = None
if old_label == return_label and not self.finally_clause.is_terminator:
# store away return value for later reuse
if (self.func_return_type and
not self.is_try_finally_in_nogil and
not isinstance(self.finally_clause, GILExitNode)):
ret_temp = code.funcstate.allocate_temp(
self.func_return_type, manage_ref=False)
code.putln("%s = %s;" % (ret_temp, Naming.retval_cname))
if self.func_return_type.is_pyobject:
code.putln("%s = 0;" % Naming.retval_cname)
fresh_finally_clause().generate_execution_code(code)
if ret_temp:
code.putln("%s = %s;" % (Naming.retval_cname, ret_temp))
if self.func_return_type.is_pyobject:
code.putln("%s = 0;" % ret_temp)
code.funcstate.release_temp(ret_temp)
ret_temp = None
if not self.finally_clause.is_terminator:
code.put_goto(old_label)
code.putln('}')
# End finally
code.put_label(catch_label)
code.putln(
"}")
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
self.finally_clause.generate_function_definitions(env, code)
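    # The six 'exc_vars' temps used below hold two exception triples: the first
    # three receive the exception that escaped the try body (__Pyx_GetException
    # / __Pyx_ErrFetch), while the last three keep the previously active
    # exc_info state so that it can be reinstated with __Pyx_ExceptionSwap /
    # __Pyx_ExceptionReset on Python 3.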
def put_error_catcher(self, code, temps_to_clean_up, exc_vars,
exc_lineno_cnames, exc_filename_cname):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.globalstate.use_utility_code(get_exception_utility_code)
code.globalstate.use_utility_code(swap_exception_utility_code)
code.putln(' '.join(["%s = 0;"]*len(exc_vars)) % exc_vars)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
code.putln("__Pyx_PyThreadState_assign")
for temp_name, type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, type)
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
code.putln("if (PY_MAJOR_VERSION >= 3)"
" __Pyx_ExceptionSwap(&%s, &%s, &%s);" % exc_vars[3:])
code.putln("if ((PY_MAJOR_VERSION < 3) ||"
# if __Pyx_GetException() fails in Py3,
# store the newly raised exception instead
" unlikely(__Pyx_GetException(&%s, &%s, &%s) < 0)) "
"__Pyx_ErrFetch(&%s, &%s, &%s);" % (exc_vars[:3] * 2))
for var in exc_vars:
code.put_xgotref(var)
if exc_lineno_cnames:
code.putln("%s = %s; %s = %s; %s = %s;" % (
exc_lineno_cnames[0], Naming.lineno_cname,
exc_lineno_cnames[1], Naming.clineno_cname,
exc_filename_cname, Naming.filename_cname))
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames, exc_filename_cname):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.globalstate.use_utility_code(reset_exception_utility_code)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
code.putln("if (PY_MAJOR_VERSION >= 3) {")
for var in exc_vars[3:]:
code.put_xgiveref(var)
code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
code.putln("}")
for var in exc_vars[:3]:
code.put_xgiveref(var)
code.putln("__Pyx_ErrRestore(%s, %s, %s);" % exc_vars[:3])
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
code.putln(' '.join(["%s = 0;"]*len(exc_vars)) % exc_vars)
if exc_lineno_cnames:
code.putln("%s = %s; %s = %s; %s = %s;" % (
Naming.lineno_cname, exc_lineno_cnames[0],
Naming.clineno_cname, exc_lineno_cnames[1],
Naming.filename_cname, exc_filename_cname))
def put_error_cleaner(self, code, exc_vars):
code.globalstate.use_utility_code(reset_exception_utility_code)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
code.putln("if (PY_MAJOR_VERSION >= 3) {")
for var in exc_vars[3:]:
code.put_xgiveref(var)
code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
code.putln("}")
for var in exc_vars[:3]:
code.put_xdecref_clear(var, py_object_type)
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
code.putln(' '.join(["%s = 0;"]*3) % exc_vars[3:])
def annotate(self, code):
self.body.annotate(code)
self.finally_clause.annotate(code)
class NogilTryFinallyStatNode(TryFinallyStatNode):
"""
A try/finally statement that may be used in nogil code sections.
"""
preserve_exception = False
nogil_check = None
class GILStatNode(NogilTryFinallyStatNode):
# 'with gil' or 'with nogil' statement
#
# state string 'gil' or 'nogil'
state_temp = None
def __init__(self, pos, state, body):
self.state = state
self.create_state_temp_if_needed(pos, state, body)
TryFinallyStatNode.__init__(
self, pos,
body=body,
finally_clause=GILExitNode(
pos, state=state, state_temp=self.state_temp))
def create_state_temp_if_needed(self, pos, state, body):
from .ParseTreeTransforms import YieldNodeCollector
collector = YieldNodeCollector()
collector.visitchildren(body)
if not collector.yields and not collector.awaits:
return
if state == 'gil':
temp_type = PyrexTypes.c_gilstate_type
else:
temp_type = PyrexTypes.c_threadstate_ptr_type
from . import ExprNodes
self.state_temp = ExprNodes.TempNode(pos, temp_type)
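    # A state temp is only created when the body contains 'yield' or 'await';
    # the acquired GIL/thread state then lives in a Cython temp rather than a
    # plain C local introduced by put_ensure_gil/put_release_gil, so that it
    # survives suspension of the generator or coroutine frame.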
def analyse_declarations(self, env):
env._in_with_gil_block = (self.state == 'gil')
if self.state == 'gil':
env.has_with_gil_block = True
return super(GILStatNode, self).analyse_declarations(env)
def analyse_expressions(self, env):
env.use_utility_code(
UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
was_nogil = env.nogil
env.nogil = self.state == 'nogil'
node = TryFinallyStatNode.analyse_expressions(self, env)
env.nogil = was_nogil
return node
def generate_execution_code(self, code):
code.mark_pos(self.pos)
code.begin_block()
if self.state_temp:
self.state_temp.allocate(code)
variable = self.state_temp.result()
else:
variable = None
old_gil_config = code.funcstate.gil_owned
if self.state == 'gil':
code.put_ensure_gil(variable=variable)
code.funcstate.gil_owned = True
else:
code.put_release_gil(variable=variable)
code.funcstate.gil_owned = False
TryFinallyStatNode.generate_execution_code(self, code)
if self.state_temp:
self.state_temp.release(code)
code.funcstate.gil_owned = old_gil_config
code.end_block()
class GILExitNode(StatNode):
"""
Used as the 'finally' block in a GILStatNode
state string 'gil' or 'nogil'
"""
child_attrs = []
state_temp = None
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
if self.state_temp:
variable = self.state_temp.result()
else:
variable = None
if self.state == 'gil':
code.put_release_ensured_gil(variable)
else:
code.put_acquire_gil(variable)
class EnsureGILNode(GILExitNode):
"""
Ensure the GIL in nogil functions for cleanup before returning.
"""
def generate_execution_code(self, code):
code.put_ensure_gil(declare_gilstate=False)
utility_code_for_cimports = {
# utility code (or inlining c) in a pxd (or pyx) file.
# TODO: Consider a generic user-level mechanism for importing
'cpython.array' : ("ArrayAPI", "arrayarray.h"),
'cpython.array.array' : ("ArrayAPI", "arrayarray.h"),
}
utility_code_for_imports = {
# utility code used when special modules are imported.
# TODO: Consider a generic user-level mechanism for importing
'asyncio': ("__Pyx_patch_asyncio", "PatchAsyncIO", "Coroutine.c"),
'inspect': ("__Pyx_patch_inspect", "PatchInspect", "Coroutine.c"),
}
class CImportStatNode(StatNode):
# cimport statement
#
# module_name string Qualified name of module being imported
# as_name string or None Name specified in "as" clause, if any
# is_absolute bool True for absolute imports, False otherwise
child_attrs = []
is_absolute = False
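    # For dotted names like "cimport a.b.c", each package level is declared as
    # a submodule of its parent scope so that attribute access on "a.b.c"
    # resolves at compile time; only the top-level package name (or the "as"
    # alias) becomes visible in the importing module's namespace.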
def analyse_declarations(self, env):
if not env.is_module_scope:
error(self.pos, "cimport only allowed at module level")
return
module_scope = env.find_module(
self.module_name, self.pos, relative_level=0 if self.is_absolute else -1)
if "." in self.module_name:
names = [EncodedString(name) for name in self.module_name.split(".")]
top_name = names[0]
top_module_scope = env.context.find_submodule(top_name)
module_scope = top_module_scope
for name in names[1:]:
submodule_scope = module_scope.find_submodule(name)
module_scope.declare_module(name, submodule_scope, self.pos)
module_scope = submodule_scope
if self.as_name:
env.declare_module(self.as_name, module_scope, self.pos)
else:
env.add_imported_module(module_scope)
env.declare_module(top_name, top_module_scope, self.pos)
else:
name = self.as_name or self.module_name
env.declare_module(name, module_scope, self.pos)
if self.module_name in utility_code_for_cimports:
env.use_utility_code(UtilityCode.load_cached(
*utility_code_for_cimports[self.module_name]))
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class FromCImportStatNode(StatNode):
# from ... cimport statement
#
# module_name string Qualified name of module
# relative_level int or None Relative import: number of dots before module_name
# imported_names [(pos, name, as_name, kind)] Names to be imported
child_attrs = []
module_name = None
relative_level = None
imported_names = None
def analyse_declarations(self, env):
if not env.is_module_scope:
error(self.pos, "cimport only allowed at module level")
return
if self.relative_level and self.relative_level > env.qualified_name.count('.'):
error(self.pos, "relative cimport beyond main package is not allowed")
return
module_scope = env.find_module(self.module_name, self.pos, relative_level=self.relative_level)
module_name = module_scope.qualified_name
env.add_imported_module(module_scope)
for pos, name, as_name, kind in self.imported_names:
if name == "*":
for local_name, entry in list(module_scope.entries.items()):
env.add_imported_entry(local_name, entry, pos)
else:
entry = module_scope.lookup(name)
if entry:
if kind and not self.declaration_matches(entry, kind):
entry.redeclared(pos)
entry.used = 1
else:
if kind == 'struct' or kind == 'union':
entry = module_scope.declare_struct_or_union(
name, kind=kind, scope=None, typedef_flag=0, pos=pos)
elif kind == 'class':
entry = module_scope.declare_c_class(name, pos=pos, module_name=module_name)
else:
submodule_scope = env.context.find_module(
name, relative_to=module_scope, pos=self.pos, absolute_fallback=False)
if submodule_scope.parent_module is module_scope:
env.declare_module(as_name or name, submodule_scope, self.pos)
else:
error(pos, "Name '%s' not declared in module '%s'" % (name, module_name))
if entry:
local_name = as_name or name
env.add_imported_entry(local_name, entry, pos)
if module_name.startswith('cpython'): # enough for now
if module_name in utility_code_for_cimports:
env.use_utility_code(UtilityCode.load_cached(
*utility_code_for_cimports[module_name]))
for _, name, _, _ in self.imported_names:
fqname = '%s.%s' % (module_name, name)
if fqname in utility_code_for_cimports:
env.use_utility_code(UtilityCode.load_cached(
*utility_code_for_cimports[fqname]))
def declaration_matches(self, entry, kind):
if not entry.is_type:
return 0
type = entry.type
if kind == 'class':
if not type.is_extension_type:
return 0
else:
if not type.is_struct_or_union:
return 0
if kind != type.kind:
return 0
return 1
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
class FromImportStatNode(StatNode):
# from ... import statement
#
# module ImportNode
# items [(string, NameNode)]
# interned_items [(string, NameNode, ExprNode)]
# item PyTempNode used internally
# import_star boolean used internally
child_attrs = ["module"]
import_star = 0
def analyse_declarations(self, env):
for name, target in self.items:
if name == "*":
if not env.is_module_scope:
error(self.pos, "import * only allowed at module level")
return
env.has_import_star = 1
self.import_star = 1
else:
target.analyse_target_declaration(env)
def analyse_expressions(self, env):
from . import ExprNodes
self.module = self.module.analyse_expressions(env)
self.item = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
self.interned_items = []
for name, target in self.items:
if name == '*':
for _, entry in env.entries.items():
if not entry.is_type and entry.type.is_extension_type:
env.use_utility_code(UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
break
else:
entry = env.lookup(target.name)
# check whether or not entry is already cimported
if (entry.is_type and entry.type.name == name
and hasattr(entry.type, 'module_name')):
if entry.type.module_name == self.module.module_name.value:
# cimported with absolute name
continue
try:
# cimported with relative name
module = env.find_module(self.module.module_name.value, pos=self.pos,
relative_level=self.module.level)
if entry.type.module_name == module.qualified_name:
continue
except AttributeError:
pass
target = target.analyse_target_expression(env, None) # FIXME?
if target.type is py_object_type:
coerced_item = None
else:
coerced_item = self.item.coerce_to(target.type, env)
self.interned_items.append((name, target, coerced_item))
return self
def generate_execution_code(self, code):
code.mark_pos(self.pos)
self.module.generate_evaluation_code(code)
if self.import_star:
code.putln(
'if (%s(%s) < 0) %s;' % (
Naming.import_star,
self.module.py_result(),
code.error_goto(self.pos)))
item_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.item.set_cname(item_temp)
if self.interned_items:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ImportFrom", "ImportExport.c"))
for name, target, coerced_item in self.interned_items:
code.putln(
'%s = __Pyx_ImportFrom(%s, %s); %s' % (
item_temp,
self.module.py_result(),
code.intern_identifier(name),
code.error_goto_if_null(item_temp, self.pos)))
code.put_gotref(item_temp)
if coerced_item is None:
target.generate_assignment_code(self.item, code)
else:
coerced_item.allocate_temp_result(code)
coerced_item.generate_result_code(code)
target.generate_assignment_code(coerced_item, code)
code.put_decref_clear(item_temp, py_object_type)
code.funcstate.release_temp(item_temp)
self.module.generate_disposal_code(code)
self.module.free_temps(code)
class ParallelNode(Node):
"""
Base class for cython.parallel constructs.
"""
nogil_check = None
class ParallelStatNode(StatNode, ParallelNode):
"""
Base class for 'with cython.parallel.parallel():' and 'for i in prange():'.
assignments { Entry(var) : (var.pos, inplace_operator_or_None) }
assignments to variables in this parallel section
parent parent ParallelStatNode or None
is_parallel indicates whether this node is OpenMP parallel
(true for #pragma omp parallel for and
#pragma omp parallel)
    is_parallel is true for "#pragma omp parallel" and "#pragma omp parallel for"
    sections, but NOT for plain "#pragma omp for" sections.  We need this to
    determine the sharing attributes.
privatization_insertion_point a code insertion point used to make temps
private (esp. the "nsteps" temp)
args tuple the arguments passed to the parallel construct
kwargs DictNode the keyword arguments passed to the parallel
construct (replaced by its compile time value)
"""
child_attrs = ['body', 'num_threads']
body = None
is_prange = False
is_nested_prange = False
error_label_used = False
num_threads = None
chunksize = None
parallel_exc = (
Naming.parallel_exc_type,
Naming.parallel_exc_value,
Naming.parallel_exc_tb,
)
parallel_pos_info = (
Naming.parallel_filename,
Naming.parallel_lineno,
Naming.parallel_clineno,
)
pos_info = (
Naming.filename_cname,
Naming.lineno_cname,
Naming.clineno_cname,
)
critical_section_counter = 0
def __init__(self, pos, **kwargs):
super(ParallelStatNode, self).__init__(pos, **kwargs)
# All assignments in this scope
self.assignments = kwargs.get('assignments') or {}
# All seen closure cnames and their temporary cnames
self.seen_closure_vars = set()
# Dict of variables that should be declared (first|last|)private or
# reduction { Entry: (op, lastprivate) }.
# If op is not None, it's a reduction.
self.privates = {}
# [NameNode]
self.assigned_nodes = []
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.num_threads = None
if self.kwargs:
# Try to find num_threads and chunksize keyword arguments
pairs = []
for dictitem in self.kwargs.key_value_pairs:
if dictitem.key.value == 'num_threads':
self.num_threads = dictitem.value
elif self.is_prange and dictitem.key.value == 'chunksize':
self.chunksize = dictitem.value
else:
pairs.append(dictitem)
self.kwargs.key_value_pairs = pairs
try:
self.kwargs = self.kwargs.compile_time_value(env)
except Exception as e:
error(self.kwargs.pos, "Only compile-time values may be "
"supplied as keyword arguments")
else:
self.kwargs = {}
for kw, val in self.kwargs.items():
if kw not in self.valid_keyword_arguments:
error(self.pos, "Invalid keyword argument: %s" % kw)
else:
setattr(self, kw, val)
def analyse_expressions(self, env):
if self.num_threads:
self.num_threads = self.num_threads.analyse_expressions(env)
if self.chunksize:
self.chunksize = self.chunksize.analyse_expressions(env)
self.body = self.body.analyse_expressions(env)
self.analyse_sharing_attributes(env)
if self.num_threads is not None:
if self.parent and self.parent.num_threads is not None and not self.parent.is_prange:
error(self.pos, "num_threads already declared in outer section")
elif self.parent and not self.parent.is_prange:
error(self.pos, "num_threads must be declared in the parent parallel section")
elif (self.num_threads.type.is_int and
self.num_threads.is_literal and
self.num_threads.compile_time_value(env) <= 0):
error(self.pos, "argument to num_threads must be greater than 0")
if not self.num_threads.is_simple():
self.num_threads = self.num_threads.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
return self
def analyse_sharing_attributes(self, env):
"""
Analyse the privates for this block and set them in self.privates.
This should be called in a post-order fashion during the
analyse_expressions phase
"""
for entry, (pos, op) in self.assignments.items():
if self.is_prange and not self.is_parallel:
# closely nested prange in a with parallel block, disallow
# assigning to privates in the with parallel block (we
# consider it too implicit and magicky for users)
if entry in self.parent.assignments:
error(pos, "Cannot assign to private of outer parallel block")
continue
if not self.is_prange and op:
                # Again possible, but considered too magicky
error(pos, "Reductions not allowed for parallel blocks")
continue
# By default all variables should have the same values as if
# executed sequentially
lastprivate = True
self.propagate_var_privatization(entry, pos, op, lastprivate)
def propagate_var_privatization(self, entry, pos, op, lastprivate):
"""
Propagate the sharing attributes of a variable. If the privatization is
        determined by a parent scope, don't propagate further.
If we are a prange, we propagate our sharing attributes outwards to
other pranges. If we are a prange in parallel block and the parallel
block does not determine the variable private, we propagate to the
parent of the parent. Recursion stops at parallel blocks, as they have
no concept of lastprivate or reduction.
So the following cases propagate:
sum is a reduction for all loops:
for i in prange(n):
for j in prange(n):
for k in prange(n):
sum += i * j * k
sum is a reduction for both loops, local_var is private to the
parallel with block:
for i in prange(n):
with parallel:
local_var = ... # private to the parallel
for j in prange(n):
sum += i * j
Nested with parallel blocks are disallowed, because they wouldn't
allow you to propagate lastprivates or reductions:
#pragma omp parallel for lastprivate(i)
for i in prange(n):
sum = 0
#pragma omp parallel private(j, sum)
with parallel:
#pragma omp parallel
with parallel:
#pragma omp for lastprivate(j) reduction(+:sum)
for j in prange(n):
sum += i
# sum and j are well-defined here
# sum and j are undefined here
# sum and j are undefined here
"""
self.privates[entry] = (op, lastprivate)
if entry.type.is_memoryviewslice:
error(pos, "Memoryview slices can only be shared in parallel sections")
return
if self.is_prange:
if not self.is_parallel and entry not in self.parent.assignments:
# Parent is a parallel with block
parent = self.parent.parent
else:
parent = self.parent
# We don't need to propagate privates, only reductions and
# lastprivates
if parent and (op or lastprivate):
parent.propagate_var_privatization(entry, pos, op, lastprivate)
def _allocate_closure_temp(self, code, entry):
"""
        Helper function that allocates a temporary for a closure variable that
is assigned to.
"""
if self.parent:
return self.parent._allocate_closure_temp(code, entry)
if entry.cname in self.seen_closure_vars:
return entry.cname
cname = code.funcstate.allocate_temp(entry.type, True)
# Add both the actual cname and the temp cname, as the actual cname
# will be replaced with the temp cname on the entry
self.seen_closure_vars.add(entry.cname)
self.seen_closure_vars.add(cname)
self.modified_entries.append((entry, entry.cname))
code.putln("%s = %s;" % (cname, entry.cname))
entry.cname = cname
def initialize_privates_to_nan(self, code, exclude=None):
first = True
for entry, (op, lastprivate) in sorted(self.privates.items()):
if not op and (not exclude or entry != exclude):
invalid_value = entry.type.invalid_value()
if invalid_value:
if first:
code.putln("/* Initialize private variables to "
"invalid values */")
first = False
code.putln("%s = %s;" % (entry.cname,
entry.type.cast_code(invalid_value)))
def evaluate_before_block(self, code, expr):
c = self.begin_of_parallel_control_block_point_after_decls
# we need to set the owner to ourselves temporarily, as
# allocate_temp may generate a comment in the middle of our pragma
# otherwise when DebugFlags.debug_temp_code_comments is in effect
owner = c.funcstate.owner
c.funcstate.owner = c
expr.generate_evaluation_code(c)
c.funcstate.owner = owner
return expr.result()
def put_num_threads(self, code):
"""
Write self.num_threads if set as the num_threads OpenMP directive
"""
if self.num_threads is not None:
code.put(" num_threads(%s)" % self.evaluate_before_block(code, self.num_threads))
def declare_closure_privates(self, code):
"""
If a variable is in a scope object, we need to allocate a temp and
assign the value from the temp to the variable in the scope object
after the parallel section. This kind of copying should be done only
in the outermost parallel section.
"""
self.modified_entries = []
for entry in sorted(self.assignments):
if entry.from_closure or entry.in_closure:
self._allocate_closure_temp(code, entry)
def release_closure_privates(self, code):
"""
Release any temps used for variables in scope objects. As this is the
outermost parallel block, we don't need to delete the cnames from
self.seen_closure_vars.
"""
for entry, original_cname in self.modified_entries:
code.putln("%s = %s;" % (original_cname, entry.cname))
code.funcstate.release_temp(entry.cname)
entry.cname = original_cname
def privatize_temps(self, code, exclude_temps=()):
"""
Make any used temporaries private. Before the relevant code block
code.start_collecting_temps() should have been called.
"""
if self.is_parallel:
c = self.privatization_insertion_point
self.temps = temps = code.funcstate.stop_collecting_temps()
privates, firstprivates = [], []
for temp, type in sorted(temps):
if type.is_pyobject or type.is_memoryviewslice:
firstprivates.append(temp)
else:
privates.append(temp)
if privates:
c.put(" private(%s)" % ", ".join(privates))
if firstprivates:
c.put(" firstprivate(%s)" % ", ".join(firstprivates))
if self.breaking_label_used:
shared_vars = [Naming.parallel_why]
if self.error_label_used:
shared_vars.extend(self.parallel_exc)
c.put(" private(%s, %s, %s)" % self.pos_info)
c.put(" shared(%s)" % ', '.join(shared_vars))
def cleanup_temps(self, code):
# Now clean up any memoryview slice and object temporaries
if self.is_parallel and not self.is_nested_prange:
code.putln("/* Clean up any temporaries */")
for temp, type in sorted(self.temps):
if type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(temp, have_gil=False)
elif type.is_pyobject:
code.put_xdecref(temp, type)
code.putln("%s = NULL;" % temp)
def setup_parallel_control_flow_block(self, code):
"""
Sets up a block that surrounds the parallel block to determine
how the parallel section was exited. Any kind of return is
trapped (break, continue, return, exceptions). This is the idea:
{
int why = 0;
#pragma omp parallel
{
return # -> goto new_return_label;
goto end_parallel;
new_return_label:
why = 3;
goto end_parallel;
end_parallel:;
#pragma omp flush(why) # we need to flush for every iteration
}
if (why == 3)
goto old_return_label;
}
"""
self.old_loop_labels = code.new_loop_labels()
self.old_error_label = code.new_error_label()
self.old_return_label = code.return_label
code.return_label = code.new_label(name="return")
code.begin_block() # parallel control flow block
self.begin_of_parallel_control_block_point = code.insertion_point()
self.begin_of_parallel_control_block_point_after_decls = code.insertion_point()
self.undef_builtin_expect_apple_gcc_bug(code)
def begin_parallel_block(self, code):
"""
Each OpenMP thread in a parallel section that contains a with gil block
must have the thread-state initialized. The call to
PyGILState_Release() then deallocates our threadstate. If we wouldn't
do this, each with gil block would allocate and deallocate one, thereby
losing exception information before it can be saved before leaving the
parallel section.
"""
self.begin_of_parallel_block = code.insertion_point()
def end_parallel_block(self, code):
"""
To ensure all OpenMP threads have thread states, we ensure the GIL
in each thread (which creates a thread state if it doesn't exist),
after which we release the GIL.
On exit, reacquire the GIL and release the thread state.
If compiled without OpenMP support (at the C level), then we still have
to acquire the GIL to decref any object temporaries.
"""
if self.error_label_used:
begin_code = self.begin_of_parallel_block
end_code = code
begin_code.putln("#ifdef _OPENMP")
begin_code.put_ensure_gil(declare_gilstate=True)
begin_code.putln("Py_BEGIN_ALLOW_THREADS")
begin_code.putln("#endif /* _OPENMP */")
end_code.putln("#ifdef _OPENMP")
end_code.putln("Py_END_ALLOW_THREADS")
end_code.putln("#else")
end_code.put_safe("{\n")
end_code.put_ensure_gil()
end_code.putln("#endif /* _OPENMP */")
self.cleanup_temps(end_code)
end_code.put_release_ensured_gil()
end_code.putln("#ifndef _OPENMP")
end_code.put_safe("}\n")
end_code.putln("#endif /* _OPENMP */")
def trap_parallel_exit(self, code, should_flush=False):
"""
Trap any kind of return inside a parallel construct. 'should_flush'
indicates whether the variable should be flushed, which is needed by
prange to skip the loop. It also indicates whether we need to register
a continue (we need this for parallel blocks, but not for prange
loops, as it is a direct jump there).
It uses the same mechanism as try/finally:
1 continue
2 break
3 return
4 error
"""
save_lastprivates_label = code.new_label()
dont_return_label = code.new_label()
self.any_label_used = False
self.breaking_label_used = False
self.error_label_used = False
self.parallel_private_temps = []
all_labels = code.get_all_labels()
# Figure this out before starting to generate any code
for label in all_labels:
if code.label_used(label):
self.breaking_label_used = (self.breaking_label_used or
label != code.continue_label)
self.any_label_used = True
if self.any_label_used:
code.put_goto(dont_return_label)
for i, label in enumerate(all_labels):
if not code.label_used(label):
continue
is_continue_label = label == code.continue_label
code.put_label(label)
if not (should_flush and is_continue_label):
if label == code.error_label:
self.error_label_used = True
self.fetch_parallel_exception(code)
code.putln("%s = %d;" % (Naming.parallel_why, i + 1))
if (self.breaking_label_used and self.is_prange and not
is_continue_label):
code.put_goto(save_lastprivates_label)
else:
code.put_goto(dont_return_label)
if self.any_label_used:
if self.is_prange and self.breaking_label_used:
# Don't rely on lastprivate, save our lastprivates
code.put_label(save_lastprivates_label)
self.save_parallel_vars(code)
code.put_label(dont_return_label)
if should_flush and self.breaking_label_used:
code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_why)
def save_parallel_vars(self, code):
"""
The following shenanigans are instated when we break, return or
propagate errors from a prange. In this case we cannot rely on
lastprivate() to do its job, as no iterations may have executed yet
in the last thread, leaving the values undefined. It is most likely
that the breaking thread has well-defined values of the lastprivate
variables, so we keep those values.
"""
section_name = "__pyx_parallel_lastprivates%d" % self.critical_section_counter
code.putln_openmp("#pragma omp critical(%s)" % section_name)
ParallelStatNode.critical_section_counter += 1
code.begin_block() # begin critical section
c = self.begin_of_parallel_control_block_point
temp_count = 0
for entry, (op, lastprivate) in sorted(self.privates.items()):
if not lastprivate or entry.type.is_pyobject:
continue
type_decl = entry.type.empty_declaration_code()
temp_cname = "__pyx_parallel_temp%d" % temp_count
private_cname = entry.cname
temp_count += 1
invalid_value = entry.type.invalid_value()
if invalid_value:
init = ' = ' + invalid_value
else:
init = ''
# Declare the parallel private in the outer block
c.putln("%s %s%s;" % (type_decl, temp_cname, init))
# Initialize before escaping
code.putln("%s = %s;" % (temp_cname, private_cname))
self.parallel_private_temps.append((temp_cname, private_cname))
code.end_block() # end critical section
def fetch_parallel_exception(self, code):
"""
As each OpenMP thread may raise an exception, we need to fetch that
exception from the threadstate and save it for after the parallel
section where it can be re-raised in the master thread.
Although it would seem that __pyx_filename, __pyx_lineno and
__pyx_clineno are only assigned to under exception conditions (i.e.,
when we have the GIL), and thus should be allowed to be shared without
any race condition, they are in fact subject to the same race
conditions that they were previously when they were global variables
and functions were allowed to release the GIL:
thread A thread B
acquire
set lineno
release
acquire
set lineno
release
acquire
fetch exception
release
skip the fetch
deallocate threadstate deallocate threadstate
"""
code.begin_block()
code.put_ensure_gil(declare_gilstate=True)
code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_exc_type)
code.putln(
"if (!%s) {" % Naming.parallel_exc_type)
code.putln("__Pyx_ErrFetchWithState(&%s, &%s, &%s);" % self.parallel_exc)
pos_info = chain(*zip(self.parallel_pos_info, self.pos_info))
code.funcstate.uses_error_indicator = True
code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
code.put_gotref(Naming.parallel_exc_type)
code.putln(
"}")
code.put_release_ensured_gil()
code.end_block()
def restore_parallel_exception(self, code):
"Re-raise a parallel exception"
code.begin_block()
code.put_ensure_gil(declare_gilstate=True)
code.put_giveref(Naming.parallel_exc_type)
code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % self.parallel_exc)
pos_info = chain(*zip(self.pos_info, self.parallel_pos_info))
code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
code.put_release_ensured_gil()
code.end_block()
def restore_labels(self, code):
"""
        Restore all old labels. Call this before the 'else' clause of for
loops and always before ending the parallel control flow block.
"""
code.set_all_labels(self.old_loop_labels + (self.old_return_label,
self.old_error_label))
def end_parallel_control_flow_block(
self, code, break_=False, continue_=False, return_=False):
"""
This ends the parallel control flow block and based on how the parallel
section was exited, takes the corresponding action. The break_ and
continue_ parameters indicate whether these should be propagated
outwards:
for i in prange(...):
with cython.parallel.parallel():
continue
        Here continue should be trapped in the parallel block, and propagated to
the for loop.
"""
c = self.begin_of_parallel_control_block_point
# Firstly, always prefer errors over returning, continue or break
if self.error_label_used:
c.putln("const char *%s = NULL; int %s = 0, %s = 0;" % self.parallel_pos_info)
c.putln("PyObject *%s = NULL, *%s = NULL, *%s = NULL;" % self.parallel_exc)
code.putln(
"if (%s) {" % Naming.parallel_exc_type)
code.putln("/* This may have been overridden by a continue, "
"break or return in another thread. Prefer the error. */")
code.putln("%s = 4;" % Naming.parallel_why)
code.putln(
"}")
if continue_:
any_label_used = self.any_label_used
else:
any_label_used = self.breaking_label_used
if any_label_used:
# __pyx_parallel_why is used, declare and initialize
c.putln("int %s;" % Naming.parallel_why)
c.putln("%s = 0;" % Naming.parallel_why)
code.putln(
"if (%s) {" % Naming.parallel_why)
for temp_cname, private_cname in self.parallel_private_temps:
code.putln("%s = %s;" % (private_cname, temp_cname))
code.putln("switch (%s) {" % Naming.parallel_why)
if continue_:
code.put(" case 1: ")
code.put_goto(code.continue_label)
if break_:
code.put(" case 2: ")
code.put_goto(code.break_label)
if return_:
code.put(" case 3: ")
code.put_goto(code.return_label)
if self.error_label_used:
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln(" case 4:")
self.restore_parallel_exception(code)
code.put_goto(code.error_label)
code.putln("}") # end switch
code.putln(
"}") # end if
code.end_block() # end parallel control flow block
self.redef_builtin_expect_apple_gcc_bug(code)
# FIXME: improve with version number for OS X Lion
buggy_platform_macro_condition = "(defined(__APPLE__) || defined(__OSX__))"
have_expect_condition = "(defined(__GNUC__) && " \
"(__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))"
redef_condition = "(%s && %s)" % (buggy_platform_macro_condition, have_expect_condition)
def undef_builtin_expect_apple_gcc_bug(self, code):
"""
A bug on OS X Lion disallows __builtin_expect macros. This code avoids them
"""
if not self.parent:
code.undef_builtin_expect(self.redef_condition)
def redef_builtin_expect_apple_gcc_bug(self, code):
if not self.parent:
code.redef_builtin_expect(self.redef_condition)
class ParallelWithBlockNode(ParallelStatNode):
"""
This node represents a 'with cython.parallel.parallel():' block
"""
valid_keyword_arguments = ['num_threads']
num_threads = None
def analyse_declarations(self, env):
super(ParallelWithBlockNode, self).analyse_declarations(env)
if self.args:
error(self.pos, "cython.parallel.parallel() does not take "
"positional arguments")
def generate_execution_code(self, code):
self.declare_closure_privates(code)
self.setup_parallel_control_flow_block(code)
code.putln("#ifdef _OPENMP")
code.put("#pragma omp parallel ")
if self.privates:
privates = [e.cname for e in self.privates
if not e.type.is_pyobject]
code.put('private(%s)' % ', '.join(sorted(privates)))
self.privatization_insertion_point = code.insertion_point()
self.put_num_threads(code)
code.putln("")
code.putln("#endif /* _OPENMP */")
code.begin_block() # parallel block
self.begin_parallel_block(code)
self.initialize_privates_to_nan(code)
code.funcstate.start_collecting_temps()
self.body.generate_execution_code(code)
self.trap_parallel_exit(code)
self.privatize_temps(code)
self.end_parallel_block(code)
code.end_block() # end parallel block
continue_ = code.label_used(code.continue_label)
break_ = code.label_used(code.break_label)
return_ = code.label_used(code.return_label)
self.restore_labels(code)
self.end_parallel_control_flow_block(code, break_=break_,
continue_=continue_,
return_=return_)
self.release_closure_privates(code)
class ParallelRangeNode(ParallelStatNode):
"""
This node represents a 'for i in cython.parallel.prange():' construct.
target NameNode the target iteration variable
else_clause Node or None the else clause of this loop
"""
child_attrs = ['body', 'target', 'else_clause', 'args', 'num_threads',
'chunksize']
body = target = else_clause = args = None
start = stop = step = None
is_prange = True
nogil = None
schedule = None
valid_keyword_arguments = ['schedule', 'nogil', 'num_threads', 'chunksize']
def __init__(self, pos, **kwds):
super(ParallelRangeNode, self).__init__(pos, **kwds)
# Pretend to be a ForInStatNode for control flow analysis
self.iterator = PassStatNode(pos)
def analyse_declarations(self, env):
super(ParallelRangeNode, self).analyse_declarations(env)
self.target.analyse_target_declaration(env)
if self.else_clause is not None:
self.else_clause.analyse_declarations(env)
if not self.args or len(self.args) > 3:
error(self.pos, "Invalid number of positional arguments to prange")
return
if len(self.args) == 1:
self.stop, = self.args
elif len(self.args) == 2:
self.start, self.stop = self.args
else:
self.start, self.stop, self.step = self.args
if hasattr(self.schedule, 'decode'):
self.schedule = self.schedule.decode('ascii')
if self.schedule not in (None, 'static', 'dynamic', 'guided', 'runtime'):
error(self.pos, "Invalid schedule argument to prange: %s" % (self.schedule,))
def analyse_expressions(self, env):
was_nogil = env.nogil
if self.nogil:
env.nogil = True
if self.target is None:
error(self.pos, "prange() can only be used as part of a for loop")
return self
self.target = self.target.analyse_target_types(env)
if not self.target.type.is_numeric:
# Not a valid type, assume one for now anyway
if not self.target.type.is_pyobject:
# nogil_check will catch the is_pyobject case
error(self.target.pos,
"Must be of numeric type, not %s" % self.target.type)
self.index_type = PyrexTypes.c_py_ssize_t_type
else:
self.index_type = self.target.type
if not self.index_type.signed:
warning(self.target.pos,
"Unsigned index type not allowed before OpenMP 3.0",
level=2)
# Setup start, stop and step, allocating temps if needed
self.names = 'start', 'stop', 'step'
start_stop_step = self.start, self.stop, self.step
for node, name in zip(start_stop_step, self.names):
if node is not None:
node.analyse_types(env)
if not node.type.is_numeric:
error(node.pos, "%s argument must be numeric" % name)
continue
if not node.is_literal:
node = node.coerce_to_temp(env)
setattr(self, name, node)
# As we range from 0 to nsteps, computing the index along the
# way, we need a fitting type for 'i' and 'nsteps'
self.index_type = PyrexTypes.widest_numeric_type(
self.index_type, node.type)
if self.else_clause is not None:
self.else_clause = self.else_clause.analyse_expressions(env)
# Although not actually an assignment in this scope, it should be
# treated as such to ensure it is unpacked if a closure temp, and to
# ensure lastprivate behaviour and propagation. If the target index is
# not a NameNode, it won't have an entry, and an error was issued by
# ParallelRangeTransform
if hasattr(self.target, 'entry'):
self.assignments[self.target.entry] = self.target.pos, None
node = super(ParallelRangeNode, self).analyse_expressions(env)
if node.chunksize:
if not node.schedule:
error(node.chunksize.pos,
"Must provide schedule with chunksize")
elif node.schedule == 'runtime':
error(node.chunksize.pos,
"Chunksize not valid for the schedule runtime")
elif (node.chunksize.type.is_int and
node.chunksize.is_literal and
node.chunksize.compile_time_value(env) <= 0):
                error(node.chunksize.pos, "Chunksize must be positive")
node.chunksize = node.chunksize.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
if node.nogil:
env.nogil = was_nogil
node.is_nested_prange = node.parent and node.parent.is_prange
if node.is_nested_prange:
parent = node
while parent.parent and parent.parent.is_prange:
parent = parent.parent
parent.assignments.update(node.assignments)
parent.privates.update(node.privates)
parent.assigned_nodes.extend(node.assigned_nodes)
return node
def nogil_check(self, env):
names = 'start', 'stop', 'step', 'target'
nodes = self.start, self.stop, self.step, self.target
for name, node in zip(names, nodes):
if node is not None and node.type.is_pyobject:
error(node.pos, "%s may not be a Python object "
"as we don't have the GIL" % name)
def generate_execution_code(self, code):
"""
Generate code in the following steps
1) copy any closure variables determined thread-private
into temporaries
2) allocate temps for start, stop and step
3) generate a loop that calculates the total number of steps,
which then computes the target iteration variable for every step:
for i in prange(start, stop, step):
...
becomes
nsteps = (stop - start) / step;
i = start;
#pragma omp parallel for lastprivate(i)
for (temp = 0; temp < nsteps; temp++) {
i = start + step * temp;
...
}
Note that accumulation of 'i' would have a data dependency
between iterations.
Also, you can't do this
for (i = start; i < stop; i += step)
...
as the '<' operator should become '>' for descending loops.
'for i from x < i < y:' does not suffer from this problem
as the relational operator is known at compile time!
4) release our temps and write back any private closure variables
"""
self.declare_closure_privates(code)
# This can only be a NameNode
target_index_cname = self.target.entry.cname
# This will be used as the dict to format our code strings, holding
        # the start, stop, step, temps and target cnames
fmt_dict = {
'target': target_index_cname,
'target_type': self.target.type.empty_declaration_code()
}
# Setup start, stop and step, allocating temps if needed
start_stop_step = self.start, self.stop, self.step
defaults = '0', '0', '1'
for node, name, default in zip(start_stop_step, self.names, defaults):
if node is None:
result = default
elif node.is_literal:
result = node.get_constant_c_result_code()
else:
node.generate_evaluation_code(code)
result = node.result()
fmt_dict[name] = result
fmt_dict['i'] = code.funcstate.allocate_temp(self.index_type, False)
fmt_dict['nsteps'] = code.funcstate.allocate_temp(self.index_type, False)
# TODO: check if the step is 0 and if so, raise an exception in a
# 'with gil' block. For now, just abort
code.putln("if (%(step)s == 0) abort();" % fmt_dict)
self.setup_parallel_control_flow_block(code) # parallel control flow block
self.control_flow_var_code_point = code.insertion_point()
# Note: nsteps is private in an outer scope if present
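        # The expression below computes nsteps = ceil((stop - start) / step)
        # for either sign of step: step/abs(step) is +1 or -1, so the numerator
        # is biased by (step - sign(step)) before the truncating division,
        # which rounds the step count up.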
code.putln("%(nsteps)s = (%(stop)s - %(start)s + %(step)s - %(step)s/abs(%(step)s)) / %(step)s;" % fmt_dict)
# The target iteration variable might not be initialized, do it only if
# we are executing at least 1 iteration, otherwise we should leave the
# target unaffected. The target iteration variable is firstprivate to
# shut up compiler warnings caused by lastprivate, as the compiler
# erroneously believes that nsteps may be <= 0, leaving the private
# target index uninitialized
code.putln("if (%(nsteps)s > 0)" % fmt_dict)
code.begin_block() # if block
self.generate_loop(code, fmt_dict)
code.end_block() # end if block
self.restore_labels(code)
if self.else_clause:
if self.breaking_label_used:
code.put("if (%s < 2)" % Naming.parallel_why)
code.begin_block() # else block
code.putln("/* else */")
self.else_clause.generate_execution_code(code)
code.end_block() # end else block
# ------ cleanup ------
self.end_parallel_control_flow_block(code) # end parallel control flow block
# And finally, release our privates and write back any closure
# variables
for temp in start_stop_step + (self.chunksize, self.num_threads):
if temp is not None:
temp.generate_disposal_code(code)
temp.free_temps(code)
code.funcstate.release_temp(fmt_dict['i'])
code.funcstate.release_temp(fmt_dict['nsteps'])
self.release_closure_privates(code)
def generate_loop(self, code, fmt_dict):
if self.is_nested_prange:
code.putln("#if 0")
else:
code.putln("#ifdef _OPENMP")
if not self.is_parallel:
code.put("#pragma omp for")
self.privatization_insertion_point = code.insertion_point()
reduction_codepoint = self.parent.privatization_insertion_point
else:
code.put("#pragma omp parallel")
self.privatization_insertion_point = code.insertion_point()
reduction_codepoint = self.privatization_insertion_point
code.putln("")
code.putln("#endif /* _OPENMP */")
code.begin_block() # pragma omp parallel begin block
# Initialize the GIL if needed for this thread
self.begin_parallel_block(code)
if self.is_nested_prange:
code.putln("#if 0")
else:
code.putln("#ifdef _OPENMP")
code.put("#pragma omp for")
for entry, (op, lastprivate) in sorted(self.privates.items()):
# Don't declare the index variable as a reduction
if op and op in "+*-&^|" and entry != self.target.entry:
if entry.type.is_pyobject:
error(self.pos, "Python objects cannot be reductions")
else:
#code.put(" reduction(%s:%s)" % (op, entry.cname))
# This is the only way reductions + nesting works in gcc4.5
reduction_codepoint.put(
" reduction(%s:%s)" % (op, entry.cname))
else:
if entry == self.target.entry:
code.put(" firstprivate(%s)" % entry.cname)
code.put(" lastprivate(%s)" % entry.cname)
continue
if not entry.type.is_pyobject:
if lastprivate:
private = 'lastprivate'
else:
private = 'private'
code.put(" %s(%s)" % (private, entry.cname))
if self.schedule:
if self.chunksize:
chunksize = ", %s" % self.evaluate_before_block(code, self.chunksize)
else:
chunksize = ""
code.put(" schedule(%s%s)" % (self.schedule, chunksize))
self.put_num_threads(reduction_codepoint)
code.putln("")
code.putln("#endif /* _OPENMP */")
code.put("for (%(i)s = 0; %(i)s < %(nsteps)s; %(i)s++)" % fmt_dict)
code.begin_block() # for loop block
guard_around_body_codepoint = code.insertion_point()
# Start if guard block around the body. This may be unnecessary, but
# at least it doesn't spoil indentation
code.begin_block()
code.putln("%(target)s = (%(target_type)s)(%(start)s + %(step)s * %(i)s);" % fmt_dict)
self.initialize_privates_to_nan(code, exclude=self.target.entry)
if self.is_parallel:
code.funcstate.start_collecting_temps()
self.body.generate_execution_code(code)
self.trap_parallel_exit(code, should_flush=True)
self.privatize_temps(code)
if self.breaking_label_used:
# Put a guard around the loop body in case return, break or
# exceptions might be used
guard_around_body_codepoint.putln("if (%s < 2)" % Naming.parallel_why)
code.end_block() # end guard around loop body
code.end_block() # end for loop block
if self.is_parallel:
# Release the GIL and deallocate the thread state
self.end_parallel_block(code)
code.end_block() # pragma omp parallel end block
class CnameDecoratorNode(StatNode):
"""
This node is for the cname decorator in CythonUtilityCode:
@cname('the_cname')
cdef func(...):
...
In case of a cdef class the cname specifies the objstruct_cname.
node the node to which the cname decorator is applied
cname the cname the node should get
"""
child_attrs = ['node']
def analyse_declarations(self, env):
self.node.analyse_declarations(env)
node = self.node
if isinstance(node, CompilerDirectivesNode):
node = node.body.stats[0]
self.is_function = isinstance(node, FuncDefNode)
is_struct_or_enum = isinstance(node, (CStructOrUnionDefNode, CEnumDefNode))
e = node.entry
if self.is_function:
e.cname = self.cname
e.func_cname = self.cname
e.used = True
if e.pyfunc_cname and '.' in e.pyfunc_cname:
e.pyfunc_cname = self.mangle(e.pyfunc_cname)
elif is_struct_or_enum:
e.cname = e.type.cname = self.cname
else:
scope = node.scope
e.cname = self.cname
e.type.objstruct_cname = self.cname + '_obj'
e.type.typeobj_cname = Naming.typeobj_prefix + self.cname
e.type.typeptr_cname = self.cname + '_type'
e.type.scope.namespace_cname = e.type.typeptr_cname
e.as_variable.cname = e.type.typeptr_cname
scope.scope_prefix = self.cname + "_"
for name, entry in scope.entries.items():
if entry.func_cname:
entry.func_cname = self.mangle(entry.cname)
if entry.pyfunc_cname:
entry.pyfunc_cname = self.mangle(entry.pyfunc_cname)
def mangle(self, cname):
if '.' in cname:
# remove __pyx_base from func_cname
cname = cname.split('.')[-1]
return '%s_%s' % (self.cname, cname)
def analyse_expressions(self, env):
self.node = self.node.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
"Ensure a prototype for every @cname method in the right place"
if self.is_function and env.is_c_class_scope:
# method in cdef class, generate a prototype in the header
h_code = code.globalstate['utility_code_proto']
if isinstance(self.node, DefNode):
self.node.generate_function_header(
h_code, with_pymethdef=False, proto_only=True)
else:
from . import ModuleNode
entry = self.node.entry
cname = entry.cname
entry.cname = entry.func_cname
ModuleNode.generate_cfunction_declaration(
entry,
env.global_scope(),
h_code,
definition=True)
entry.cname = cname
self.node.generate_function_definitions(env, code)
def generate_execution_code(self, code):
self.node.generate_execution_code(code)
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
if Options.gcc_branch_hints:
branch_prediction_macros = """
/* Test for GCC > 2.95 */
#if defined(__GNUC__) \
&& (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
"""
else:
branch_prediction_macros = """
#define likely(x) (x)
#define unlikely(x) (x)
"""
#------------------------------------------------------------------------------------
printing_utility_code = UtilityCode.load_cached("Print", "Printing.c")
printing_one_utility_code = UtilityCode.load_cached("PrintOne", "Printing.c")
#------------------------------------------------------------------------------------
# Exception raising code
#
# Exceptions are raised by __Pyx_Raise() and stored as plain
# type/value/tb in PyThreadState->curexc_*. When being caught by an
# 'except' statement, curexc_* is moved over to exc_* by
# __Pyx_GetException()
restore_exception_utility_code = UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c")
raise_utility_code = UtilityCode.load_cached("RaiseException", "Exceptions.c")
get_exception_utility_code = UtilityCode.load_cached("GetException", "Exceptions.c")
swap_exception_utility_code = UtilityCode.load_cached("SwapException", "Exceptions.c")
reset_exception_utility_code = UtilityCode.load_cached("SaveResetException", "Exceptions.c")
traceback_utility_code = UtilityCode.load_cached("AddTraceback", "Exceptions.c")
#------------------------------------------------------------------------------------
get_exception_tuple_utility_code = UtilityCode(
proto="""
static PyObject *__Pyx_GetExceptionTuple(PyThreadState *__pyx_tstate); /*proto*/
""",
# I doubt that calling __Pyx_GetException() here is correct as it moves
# the exception from tstate->curexc_* to tstate->exc_*, which prevents
# exception handlers later on from receiving it.
# NOTE: "__pyx_tstate" may be used by __Pyx_GetException() macro
impl = """
static PyObject *__Pyx_GetExceptionTuple(CYTHON_UNUSED PyThreadState *__pyx_tstate) {
PyObject *type = NULL, *value = NULL, *tb = NULL;
if (__Pyx_GetException(&type, &value, &tb) == 0) {
PyObject* exc_info = PyTuple_New(3);
if (exc_info) {
Py_INCREF(type);
Py_INCREF(value);
Py_INCREF(tb);
PyTuple_SET_ITEM(exc_info, 0, type);
PyTuple_SET_ITEM(exc_info, 1, value);
PyTuple_SET_ITEM(exc_info, 2, tb);
return exc_info;
}
}
return NULL;
}
""",
requires=[get_exception_utility_code])
|
the-stack_0_5626 | import mxnet as mx
import numpy as np
class SEC_expand_loss(mx.metric.EvalMetric):
def __init__(self):
super(SEC_expand_loss, self).__init__("SEC_expand_loss")
def update(self, labels, preds):
self.num_inst += 1
self.sum_metric += preds[2].asnumpy()[0]
class SEC_seed_loss(mx.metric.EvalMetric):
def __init__(self):
super(SEC_seed_loss, self).__init__("SEC_seed_loss")
def update(self, labels, preds):
self.num_inst += 1
self.sum_metric += preds[0].asnumpy()[0]
class SEC_constrain_loss(mx.metric.EvalMetric):
def __init__(self):
super(SEC_constrain_loss, self).__init__("SEC_constrain_loss")
def update(self, labels, preds):
self.num_inst += 1
self.sum_metric += preds[1].asnumpy()[0]
class L2Loss(mx.metric.EvalMetric):
def __init__(self):
super(L2Loss, self).__init__('L2Loss')
def update(self, labels, preds):
labels = labels[0].asnumpy()
preds = preds[0].asnumpy()
labels = labels.reshape(-1)
preds = preds.reshape(-1)
self.num_inst += labels.shape[0]
res = np.sum((labels - preds) * (labels - preds))
self.sum_metric += res
class MultiLogisticLoss(mx.metric.EvalMetric):
def __init__(self, l_index=0, p_index=0):
self.epsilon = 1e-20
self.l_index = l_index
self.p_index = p_index
super(MultiLogisticLoss, self).__init__('MultiLogisticLoss')
def update(self, labels, preds):
labels = labels[self.l_index].asnumpy()
preds = preds[self.p_index].asnumpy()
labels = labels.reshape(-1)
preds = preds.reshape(-1)
self.num_inst += labels.shape[0]
res = 0
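        # Binary cross-entropy summed over positive and negative labels, with
        # predictions clipped to epsilon to avoid log(0).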
pred_l1 = preds[labels == 1]
pred_l1[pred_l1 <= self.epsilon] = self.epsilon
pred_l2 = 1 - preds[labels == 0]
pred_l2[pred_l2 <= self.epsilon] = self.epsilon
res += -np.log(pred_l1).sum()
res += -np.log(pred_l2).sum()
self.sum_metric += res
class Loss(mx.metric.EvalMetric):
"""Calculate loss"""
def __init__(self):
super(Loss, self).__init__('loss')
def update(self, labels, preds):
label = labels[0].asnumpy()
pred = preds[0].asnumpy()
pred = pred.reshape(pred.shape[0],pred.shape[1], -1)
label = label.astype(np.int32)
valid_index = label != 255
prob = np.swapaxes(pred, 0, 1)
prob = prob[:, valid_index]
label = label[valid_index]
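        # prob[label, arange] picks, for every valid pixel, the predicted
        # probability of its ground-truth class; the loss is the summed
        # negative log-likelihood over those pixels.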
loss = np.sum(-np.log(prob[label, np.arange(len(label))]))
self.sum_metric += loss
self.num_inst += valid_index.sum()
class Accuracy(mx.metric.EvalMetric):
"""Calculate accuracy"""
def __init__(self):
super(Accuracy, self).__init__('accuracy')
def update(self, labels, preds):
label = labels[0].asnumpy()
pred = preds[0].asnumpy()
pred = pred.argmax(1)
pred = pred.astype(np.int32).reshape(pred.shape[0], -1)
label = label.astype(np.int32)
valid_index = label != 255
self.sum_metric += (label[valid_index] == pred[valid_index]).sum()
self.num_inst += valid_index.sum()
class IOU(object):
def __init__(self, class_num, class_names, ignored_label=255):
self.ignored_label = ignored_label
self.class_num = class_num
self.class_names = class_names
assert len(class_names) == class_num
self.conf_mat = None
self.reset()
def reset(self):
self.conf_mat = np.zeros((self.class_num, self.class_num), dtype=np.ulonglong)
def update(self, label, pred_label):
label = label.reshape(1, -1)
pred_label = pred_label.reshape(1, -1)
self.__eval_pair(pred_label, label)
def __eval_pair(self, pred_label, label):
valid_index = label.flat != self.ignored_label
gt = np.extract(valid_index, label.flat)
p = np.extract(valid_index, pred_label.flat)
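        # Encode each (ground truth, prediction) pair as a flat index into the
        # confusion matrix and histogram them, giving a per-update confusion
        # matrix that is accumulated below.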
temp = np.ravel_multi_index(np.array([gt, p]), (self.conf_mat.shape))
temp_mat = np.bincount(temp, minlength=np.prod(self.conf_mat.shape)).reshape(self.conf_mat.shape)
self.conf_mat[:]=self.conf_mat+temp_mat
def get(self):
return "iou", np.mean(self.get_scores())
def get_scores(self):
scores = []
for i in range(self.class_num):
tp = np.longlong(self.conf_mat[i, i])
gti = np.longlong(self.conf_mat[i, :].sum())
resi = np.longlong(self.conf_mat[:, i].sum())
denom = gti+resi-tp
try:
res = float(tp)/denom
except ZeroDivisionError:
res = 0
scores.append(res)
return scores
def get_class_values(self):
return zip(self.class_names, self.get_scores())
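# Minimal usage sketch of the IOU metric (illustrative only; the label and
# prediction arrays below are hypothetical integer class-id maps of equal shape):
#
#   iou = IOU(class_num=21, class_names=[str(c) for c in range(21)])
#   iou.update(label_map, pred_label_map)
#   name, mean_iou = iou.get()              # mean IoU over all classes
#   per_class = list(iou.get_class_values())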
|
the-stack_0_5627 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from matplotlib import pyplot as plt
data = np.load("blur_experiment.npz")
# Make each column a set of data
blurs = data['blurs']
detections = data['detections'].T
weighted = data['weighted'].T
# Which blur configuration had the best results
best_detections = np.argmax(detections, axis=1)
best_weighted = np.argmax(weighted, axis=1)
best_weighted_blurs = blurs[best_weighted]
cumsum = np.cumsum(best_weighted_blurs)
average = cumsum / np.arange(1, len(cumsum) + 1)
print(best_weighted_blurs)
print(len(best_weighted_blurs))
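# Running mode: for each frame prefix, the blur value that has been the best
# performer most often so far (bincount + argmax over the prefix).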
max_counts = np.array([np.argmax(np.bincount(best_weighted_blurs[:i+1]))
for i in range(len(best_weighted_blurs))])
# plt.plot(best_detections.T)
plt.plot(best_weighted_blurs)
plt.plot(average)
plt.plot(max_counts)
plt.ylabel("Blur Amount")
plt.xlabel("Frame Number")
plt.title("Weighted detection best performance")
plt.legend(["Highest Weighted Confidence Blur", "Best Average Blur",
"Best Overall Blur"])
# plt.plot(data['weighted'].T)
plt.show()
|
the-stack_0_5628 | # Copyright 2021 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe solution drawing styles."""
from typing import Mapping, Tuple
from mediapipe.python.solutions.drawing_utils import DrawingSpec
from mediapipe.python.solutions.hands import HandLandmark
_RADIUS = 5
_RED = (54, 67, 244)
_GREEN = (118, 230, 0)
_BLUE = (192, 101, 21)
_YELLOW = (0, 204, 255)
_GRAY = (174, 164, 144)
_PURPLE = (128, 64, 128)
_PEACH = (180, 229, 255)
# Hands
_THICKNESS_WRIST_MCP = 3
_THICKNESS_FINGER = 2
_THICKNESS_DOT = -1
# Hand landmarks
_PALM_LANDMARKS = (HandLandmark.WRIST, HandLandmark.THUMB_CMC,
HandLandmark.INDEX_FINGER_MCP, HandLandmark.MIDDLE_FINGER_MCP,
HandLandmark.RING_FINGER_MCP, HandLandmark.PINKY_MCP)
_THUMB_LANDMARKS = (HandLandmark.THUMB_MCP, HandLandmark.THUMB_IP,
HandLandmark.THUMB_TIP)
_INDEX_FINGER_LANDMARKS = (HandLandmark.INDEX_FINGER_PIP,
HandLandmark.INDEX_FINGER_DIP,
HandLandmark.INDEX_FINGER_TIP)
_MIDDLE_FINGER_LANDMARKS = (HandLandmark.MIDDLE_FINGER_PIP,
HandLandmark.MIDDLE_FINGER_DIP,
HandLandmark.MIDDLE_FINGER_TIP)
_RING_FINGER_LANDMARKS = (HandLandmark.RING_FINGER_PIP,
HandLandmark.RING_FINGER_DIP,
HandLandmark.RING_FINGER_TIP)
_PINKY_FINGER_LANDMARKS = (HandLandmark.PINKY_PIP, HandLandmark.PINKY_DIP,
HandLandmark.PINKY_TIP)
_HAND_LANDMARK_STYLE = {
    _PALM_LANDMARKS:
DrawingSpec(
color=_RED, thickness=_THICKNESS_DOT, circle_radius=_RADIUS),
    _THUMB_LANDMARKS:
DrawingSpec(
color=_PEACH, thickness=_THICKNESS_DOT, circle_radius=_RADIUS),
_INDEX_FINGER_LANDMARKS:
DrawingSpec(
color=_PURPLE, thickness=_THICKNESS_DOT, circle_radius=_RADIUS),
_MIDDLE_FINGER_LANDMARKS:
DrawingSpec(
color=_YELLOW, thickness=_THICKNESS_DOT, circle_radius=_RADIUS),
_RING_FINGER_LANDMARKS:
DrawingSpec(
color=_GREEN, thickness=_THICKNESS_DOT, circle_radius=_RADIUS),
_PINKY_FINGER_LANDMARKS:
DrawingSpec(
color=_BLUE, thickness=_THICKNESS_DOT, circle_radius=_RADIUS),
}
# Hand connections
_PALM_CONNECTIONS = ((HandLandmark.WRIST, HandLandmark.THUMB_CMC),
(HandLandmark.WRIST, HandLandmark.INDEX_FINGER_MCP),
(HandLandmark.MIDDLE_FINGER_MCP,
HandLandmark.RING_FINGER_MCP),
(HandLandmark.RING_FINGER_MCP, HandLandmark.PINKY_MCP),
(HandLandmark.INDEX_FINGER_MCP,
HandLandmark.MIDDLE_FINGER_MCP), (HandLandmark.WRIST,
HandLandmark.PINKY_MCP))
_THUMB_CONNECTIONS = ((HandLandmark.THUMB_CMC, HandLandmark.THUMB_MCP),
(HandLandmark.THUMB_MCP, HandLandmark.THUMB_IP),
(HandLandmark.THUMB_IP, HandLandmark.THUMB_TIP))
_INDEX_FINGER_CONNECTIONS = ((HandLandmark.INDEX_FINGER_MCP,
HandLandmark.INDEX_FINGER_PIP),
(HandLandmark.INDEX_FINGER_PIP,
HandLandmark.INDEX_FINGER_DIP),
(HandLandmark.INDEX_FINGER_DIP,
HandLandmark.INDEX_FINGER_TIP))
_MIDDLE_FINGER_CONNECTIONS = ((HandLandmark.MIDDLE_FINGER_MCP,
HandLandmark.MIDDLE_FINGER_PIP),
(HandLandmark.MIDDLE_FINGER_PIP,
HandLandmark.MIDDLE_FINGER_DIP),
(HandLandmark.MIDDLE_FINGER_DIP,
HandLandmark.MIDDLE_FINGER_TIP))
_RING_FINGER_CONNECTIONS = ((HandLandmark.RING_FINGER_MCP,
HandLandmark.RING_FINGER_PIP),
(HandLandmark.RING_FINGER_PIP,
HandLandmark.RING_FINGER_DIP),
(HandLandmark.RING_FINGER_DIP,
HandLandmark.RING_FINGER_TIP))
_PINKY_FINGER_CONNECTIONS = ((HandLandmark.PINKY_MCP, HandLandmark.PINKY_PIP),
(HandLandmark.PINKY_PIP, HandLandmark.PINKY_DIP),
(HandLandmark.PINKY_DIP, HandLandmark.PINKY_TIP))
_HAND_CONNECTION_STYLE = {
_PALM_CONNECTIONS:
DrawingSpec(color=_GRAY, thickness=_THICKNESS_WRIST_MCP),
_THUMB_CONNECTIONS:
DrawingSpec(color=_PEACH, thickness=_THICKNESS_FINGER),
_INDEX_FINGER_CONNECTIONS:
DrawingSpec(color=_PURPLE, thickness=_THICKNESS_FINGER),
_MIDDLE_FINGER_CONNECTIONS:
DrawingSpec(color=_YELLOW, thickness=_THICKNESS_FINGER),
_RING_FINGER_CONNECTIONS:
DrawingSpec(color=_GREEN, thickness=_THICKNESS_FINGER),
_PINKY_FINGER_CONNECTIONS:
DrawingSpec(color=_BLUE, thickness=_THICKNESS_FINGER)
}
def get_default_hand_landmark_style() -> Mapping[int, DrawingSpec]:
"""Returns the default hand landmark drawing style.
Returns:
A mapping from each hand landmark to the default drawing spec.
"""
hand_landmark_style = {}
for k, v in _HAND_LANDMARK_STYLE.items():
for landmark in k:
hand_landmark_style[landmark] = v
return hand_landmark_style
def get_default_hand_connection_style(
) -> Mapping[Tuple[int, int], DrawingSpec]:
"""Returns the default hand connection drawing style.
Returns:
A mapping from each hand connection to the default drawing spec.
"""
hand_connection_style = {}
for k, v in _HAND_CONNECTION_STYLE.items():
for connection in k:
hand_connection_style[connection] = v
return hand_connection_style
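# Minimal usage sketch (assumes the standard mediapipe drawing_utils/hands API;
# `image` and `hand_landmarks` are hypothetical values taken from a Hands result):
#
#   mp_drawing.draw_landmarks(
#       image, hand_landmarks, mp_hands.HAND_CONNECTIONS,
#       landmark_drawing_spec=get_default_hand_landmark_style(),
#       connection_drawing_spec=get_default_hand_connection_style())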
|
the-stack_0_5630 | class Articles:
"""
class to define Article objects
"""
def __init__(self, source: dict, author: str, title: str, description: str,
url: str, url_to_image: str, published_at: str):
"""
method to define Article object properties
:param source:
:param author:
:param title:
:param description:
:param url:
        :param url_to_image:
        :param published_at:
        """
self.source = source
self.author = author
self.title = title
self.description = description
self.url = url
self.url_to_image = url_to_image
self.published_at = published_at
class NewsSources():
"""
class to model News Sources objects
"""
def __init__(self, id: str, name: str, description: str, url: str, category: str, language: str, country: str):
"""
method to define News Sources properties
:param id:
:param name:
:param description:
:param url:
:param category:
:param language:
:param country:
"""
self.id = id
self.name = name
self.description = description
self.url = url
self.category = category
self.language = language
self.country = country
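# Example instantiation (illustrative values only):
#
#   article = Articles(source={'id': 'bbc-news', 'name': 'BBC News'},
#                      author='Jane Doe', title='Headline',
#                      description='Short summary', url='https://example.com',
#                      url_to_image='https://example.com/img.jpg',
#                      published_at='2021-01-01T00:00:00Z')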
|
the-stack_0_5631 | """Setup script for gristmill."""
from setuptools import setup, find_packages
with open('README.rst', 'r') as readme:
DESCRIPTION = readme.read()
CLASSIFIERS = [
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Mathematics'
]
setup(
name='gristmill',
version='0.5.0dev',
description=DESCRIPTION.splitlines()[0],
long_description=DESCRIPTION,
url='https://github.com/tschijnmo/gristmill',
author='Jinmo Zhao and Gustavo E Scuseria',
author_email='[email protected]',
license='MIT',
classifiers=CLASSIFIERS,
packages=find_packages(),
package_data={'gristmill': ['templates/*']},
install_requires=['drudge', 'Jinja2', 'sympy']
)
|
the-stack_0_5632 | from ops import *
from utils import *
from glob import glob
import time
from tensorflow.contrib.data import prefetch_to_device, shuffle_and_repeat, map_and_batch
class DRIT(object) :
def __init__(self, sess, args):
self.model_name = 'DRIT'
self.sess = sess
self.checkpoint_dir = args.checkpoint_dir
self.result_dir = args.result_dir
self.log_dir = args.log_dir
self.sample_dir = args.sample_dir
self.dataset_name = args.dataset
self.augment_flag = args.augment_flag
self.epoch = args.epoch
self.iteration = args.iteration
self.decay_flag = args.decay_flag
self.decay_epoch = args.decay_epoch
self.gan_type = args.gan_type
self.batch_size = args.batch_size
self.print_freq = args.print_freq
self.save_freq = args.save_freq
self.num_attribute = args.num_attribute # for test
self.guide_img = args.guide_img
self.direction = args.direction
self.img_size = args.img_size
self.img_ch = args.img_ch
self.init_lr = args.lr
self.d_content_init_lr = args.lr / 2.5
self.ch = args.ch
""" Weight """
self.content_adv_w = args.content_adv_w
self.domain_adv_w = args.domain_adv_w
self.cycle_w = args.cycle_w
self.recon_w = args.recon_w
self.latent_w = args.latent_w
self.kl_w = args.kl_w
""" Generator """
self.n_layer = args.n_layer
self.n_z = args.n_z
self.concat = args.concat
""" Discriminator """
self.n_dis = args.n_dis
self.n_scale = args.n_scale
self.n_d_con = args.n_d_con
self.multi = True if args.n_scale > 1 else False
self.sn = args.sn
self.sample_dir = os.path.join(args.sample_dir, self.model_dir)
check_folder(self.sample_dir)
self.trainA_dataset = glob('./dataset/{}/*.*'.format(self.dataset_name + '/trainA'))
self.trainB_dataset = glob('./dataset/{}/*.*'.format(self.dataset_name + '/trainB'))
self.dataset_num = max(len(self.trainA_dataset), len(self.trainB_dataset))
print("##### Information #####")
print("# gan type : ", self.gan_type)
print("# dataset : ", self.dataset_name)
print("# max dataset number : ", self.dataset_num)
print("# batch_size : ", self.batch_size)
print("# decay_flag : ", self.decay_flag)
print("# epoch : ", self.epoch)
print("# decay_epoch : ", self.decay_epoch)
print("# iteration per epoch : ", self.iteration)
print("# attribute in test phase : ", self.num_attribute)
print()
print("##### Generator #####")
print("# layer : ", self.n_layer)
print("# z dimension : ", self.n_z)
print("# concat : ", self.concat)
print()
print("##### Discriminator #####")
print("# discriminator layer : ", self.n_dis)
print("# multi-scale Dis : ", self.n_scale)
print("# updating iteration of con_dis : ", self.n_d_con)
print("# spectral_norm : ", self.sn)
print()
print("##### Weight #####")
print("# domain_adv_weight : ", self.domain_adv_w)
print("# content_adv_weight : ", self.content_adv_w)
print("# cycle_weight : ", self.cycle_w)
print("# recon_weight : ", self.recon_w)
print("# latent_weight : ", self.latent_w)
print("# kl_weight : ", self.kl_w)
##################################################################################
# Encoder and Decoders
##################################################################################
def content_encoder(self, x, is_training=True, reuse=False, scope='content_encoder'):
channel = self.ch
with tf.variable_scope(scope, reuse=reuse) :
x = conv(x, channel, kernel=7, stride=1, pad=3, pad_type='reflect', scope='conv')
x = lrelu(x, 0.01)
for i in range(2) :
x = conv(x, channel * 2, kernel=3, stride=2, pad=1, pad_type='reflect', scope='conv_' + str(i))
x = instance_norm(x, scope='ins_norm_' + str(i))
x = relu(x)
channel = channel * 2
for i in range(1, self.n_layer) :
x = resblock(x, channel, scope='resblock_'+str(i))
with tf.variable_scope('content_encoder_share', reuse=tf.AUTO_REUSE) :
x = resblock(x, channel, scope='resblock_share')
x = gaussian_noise_layer(x, is_training)
return x
    def attribute_encoder(self, x, reuse=False, scope='attribute_encoder'):
channel = self.ch
with tf.variable_scope(scope, reuse=reuse) :
x = conv(x, channel, kernel=7, stride=1, pad=3, pad_type='reflect', scope='conv')
x = relu(x)
channel = channel * 2
x = conv(x, channel, kernel=4, stride=2, pad=1, pad_type='reflect', scope='conv_0')
x = relu(x)
channel = channel * 2
for i in range(1, self.n_layer) :
x = conv(x, channel, kernel=4, stride=2, pad=1, pad_type='reflect', scope='conv_' + str(i))
x = relu(x)
x = global_avg_pooling(x)
x = conv(x, channels=self.n_z, kernel=1, stride=1, scope='attribute_logit')
return x
def attribute_encoder_concat(self, x, reuse=False, scope='attribute_encoder_concat'):
channel = self.ch
with tf.variable_scope(scope, reuse=reuse) :
x = conv(x, channel, kernel=4, stride=2, pad=1, pad_type='reflect', scope='conv')
for i in range(1, self.n_layer) :
channel = channel * (i+1)
x = basic_block(x, channel, scope='basic_block_' + str(i))
x = lrelu(x, 0.2)
x = global_avg_pooling(x)
mean = fully_conneted(x, channels=self.n_z, scope='z_mean')
logvar = fully_conneted(x, channels=self.n_z, scope='z_logvar')
return mean, logvar
def MLP(self, z, reuse=False, scope='MLP'):
channel = self.ch * self.n_layer
with tf.variable_scope(scope, reuse=reuse):
for i in range(2):
z = fully_conneted(z, channel, scope='fully_' + str(i))
z = relu(z)
z = fully_conneted(z, channel * self.n_layer, scope='fully_logit')
return z
def generator(self, x, z, reuse=False, scope="generator"):
channel = self.ch * self.n_layer
with tf.variable_scope(scope, reuse=reuse) :
z = self.MLP(z, reuse=reuse)
z = tf.split(z, num_or_size_splits=self.n_layer, axis=-1)
for i in range(self.n_layer) :
x = mis_resblock(x, z[i], channel, scope='mis_resblock_' + str(i))
for i in range(2) :
x = deconv(x, channel // 2, kernel=3, stride=2, scope='deconv_' + str(i))
x = layer_norm(x, scope='layer_norm_' + str(i))
x = relu(x)
channel = channel // 2
x = deconv(x, channels=self.img_ch, kernel=1, stride=1, scope='G_logit')
x = tanh(x)
return x
def generator_concat(self, x, z, reuse=False, scope='generator_concat'):
channel = self.ch * self.n_layer
with tf.variable_scope('generator_concat_share', reuse=tf.AUTO_REUSE) :
x = resblock(x, channel, scope='resblock')
with tf.variable_scope(scope, reuse=reuse) :
channel = channel + self.n_z
x = expand_concat(x, z)
for i in range(1, self.n_layer) :
x = resblock(x, channel, scope='resblock_' + str(i))
for i in range(2) :
channel = channel + self.n_z
x = expand_concat(x, z)
x = deconv(x, channel // 2, kernel=3, stride=2, scope='deconv_' + str(i))
x = layer_norm(x, scope='layer_norm_' + str(i))
x = relu(x)
channel = channel // 2
x = expand_concat(x, z)
x = deconv(x, channels=self.img_ch, kernel=1, stride=1, scope='G_logit')
x = tanh(x)
return x
##################################################################################
# Discriminator
##################################################################################
def content_discriminator(self, x, reuse=False, scope='content_discriminator'):
D_logit = []
with tf.variable_scope(scope, reuse=reuse) :
channel = self.ch * self.n_layer
for i in range(3) :
x = conv(x, channel, kernel=7, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_' + str(i))
x = instance_norm(x, scope='ins_norm_' + str(i))
x = lrelu(x, 0.01)
x = conv(x, channel, kernel=4, stride=1, scope='conv_3')
x = lrelu(x, 0.01)
x = conv(x, channels=1, kernel=1, stride=1, scope='D_content_logit')
D_logit.append(x)
return D_logit
def multi_discriminator(self, x_init, reuse=False, scope="multi_discriminator"):
D_logit = []
with tf.variable_scope(scope, reuse=reuse) :
for scale in range(self.n_scale) :
channel = self.ch
x = conv(x_init, channel, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='ms_' + str(scale) + 'conv_0')
x = lrelu(x, 0.01)
for i in range(1, self.n_dis):
x = conv(x, channel * 2, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='ms_' + str(scale) +'conv_' + str(i))
x = lrelu(x, 0.01)
channel = channel * 2
x = conv(x, channels=1, kernel=1, stride=1, sn=self.sn, scope='ms_' + str(scale) + 'D_logit')
D_logit.append(x)
x_init = down_sample(x_init)
return D_logit
def discriminator(self, x, reuse=False, scope="discriminator"):
D_logit = []
with tf.variable_scope(scope, reuse=reuse) :
channel = self.ch
x = conv(x, channel, kernel=3, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv')
x = lrelu(x, 0.01)
for i in range(1, self.n_dis) :
x = conv(x, channel * 2, kernel=3, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_' + str(i))
x = lrelu(x, 0.01)
channel = channel * 2
x = conv(x, channels=1, kernel=1, stride=1, sn=self.sn, scope='D_logit')
D_logit.append(x)
return D_logit
##################################################################################
# Model
##################################################################################
def Encoder_A(self, x_A, is_training=True, reuse=False):
mean = None
logvar = None
content_A = self.content_encoder(x_A, is_training=is_training, reuse=reuse, scope='content_encoder_A')
if self.concat :
mean, logvar = self.attribute_encoder_concat(x_A, reuse=reuse, scope='attribute_encoder_concat_A')
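            # z_sample is assumed (as in the reference DRIT code) to apply the
            # reparameterization trick: mean + exp(logvar / 2) * unit Gaussian noise.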
attribute_A = z_sample(mean, logvar)
else :
attribute_A = self.attribute_encoder(x_A, reuse=reuse, scope='attribute_encoder_A')
return content_A, attribute_A, mean, logvar
def Encoder_B(self, x_B, is_training=True, reuse=False):
mean = None
logvar = None
content_B = self.content_encoder(x_B, is_training=is_training, reuse=reuse, scope='content_encoder_B')
if self.concat:
mean, logvar = self.attribute_encoder_concat(x_B, reuse=reuse, scope='attribute_encoder_concat_B')
attribute_B = z_sample(mean, logvar)
else:
attribute_B = self.attribute_encoder(x_B, reuse=reuse, scope='attribute_encoder_B')
return content_B, attribute_B, mean, logvar
def Decoder_A(self, content_B, attribute_A, reuse=False):
# x = fake_A, identity_A, random_fake_A
# x = (B, A), (A, A), (B, z)
if self.concat :
x = self.generator_concat(x=content_B, z=attribute_A, reuse=reuse, scope='generator_concat_A')
else :
x = self.generator(x=content_B, z=attribute_A, reuse=reuse, scope='generator_A')
return x
def Decoder_B(self, content_A, attribute_B, reuse=False):
# x = fake_B, identity_B, random_fake_B
# x = (A, B), (B, B), (A, z)
if self.concat :
x = self.generator_concat(x=content_A, z=attribute_B, reuse=reuse, scope='generator_concat_B')
else :
x = self.generator(x=content_A, z=attribute_B, reuse=reuse, scope='generator_B')
return x
def discriminate_real(self, x_A, x_B):
if self.multi :
real_A_logit = self.multi_discriminator(x_A, scope='multi_discriminator_A')
real_B_logit = self.multi_discriminator(x_B, scope='multi_discriminator_B')
else :
real_A_logit = self.discriminator(x_A, scope="discriminator_A")
real_B_logit = self.discriminator(x_B, scope="discriminator_B")
return real_A_logit, real_B_logit
def discriminate_fake(self, x_ba, x_ab):
if self.multi :
fake_A_logit = self.multi_discriminator(x_ba, reuse=True, scope='multi_discriminator_A')
fake_B_logit = self.multi_discriminator(x_ab, reuse=True, scope='multi_discriminator_B')
else :
fake_A_logit = self.discriminator(x_ba, reuse=True, scope="discriminator_A")
fake_B_logit = self.discriminator(x_ab, reuse=True, scope="discriminator_B")
return fake_A_logit, fake_B_logit
def discriminate_content(self, content_A, content_B, reuse=False):
content_A_logit = self.content_discriminator(content_A, reuse=reuse, scope='content_discriminator')
content_B_logit = self.content_discriminator(content_B, reuse=True, scope='content_discriminator')
return content_A_logit, content_B_logit
def build_model(self):
self.lr = tf.placeholder(tf.float32, name='lr')
""" Input Image"""
Image_Data_Class = ImageData(self.img_size, self.img_ch, self.augment_flag)
trainA = tf.data.Dataset.from_tensor_slices(self.trainA_dataset)
trainB = tf.data.Dataset.from_tensor_slices(self.trainB_dataset)
gpu_device = '/gpu:0'
trainA = trainA.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, self.batch_size))
trainB = trainB.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, self.batch_size))
trainA_iterator = trainA.make_one_shot_iterator()
trainB_iterator = trainB.make_one_shot_iterator()
self.domain_A = trainA_iterator.get_next()
self.domain_B = trainB_iterator.get_next()
""" Define Encoder, Generator, Discriminator """
random_z = tf.random_normal(shape=[self.batch_size, self.n_z], mean=0.0, stddev=1.0, dtype=tf.float32)
# encode
content_a, attribute_a, mean_a, logvar_a = self.Encoder_A(self.domain_A)
content_b, attribute_b, mean_b, logvar_b = self.Encoder_B(self.domain_B)
# decode (fake, identity, random)
fake_a = self.Decoder_A(content_B=content_b, attribute_A=attribute_a)
fake_b = self.Decoder_B(content_A=content_a, attribute_B=attribute_b)
recon_a = self.Decoder_A(content_B=content_a, attribute_A=attribute_a, reuse=True)
recon_b = self.Decoder_B(content_A=content_b, attribute_B=attribute_b, reuse=True)
random_fake_a = self.Decoder_A(content_B=content_b, attribute_A=random_z, reuse=True)
random_fake_b = self.Decoder_B(content_A=content_a, attribute_B=random_z, reuse=True)
# encode & decode again for cycle-consistency
content_fake_a, attribute_fake_a, _, _ = self.Encoder_A(fake_a, reuse=True)
content_fake_b, attribute_fake_b, _, _ = self.Encoder_B(fake_b, reuse=True)
cycle_a = self.Decoder_A(content_B=content_fake_b, attribute_A=attribute_fake_a, reuse=True)
cycle_b = self.Decoder_B(content_A=content_fake_a, attribute_B=attribute_fake_b, reuse=True)
# for latent regression
_, attribute_fake_random_a, _, _ = self.Encoder_A(random_fake_a, reuse=True)
_, attribute_fake_random_b, _, _ = self.Encoder_B(random_fake_b, reuse=True)
# discriminate
real_A_logit, real_B_logit = self.discriminate_real(self.domain_A, self.domain_B)
fake_A_logit, fake_B_logit = self.discriminate_fake(fake_a, fake_b)
random_fake_A_logit, random_fake_B_logit = self.discriminate_fake(random_fake_a, random_fake_b)
content_A_logit, content_B_logit = self.discriminate_content(content_a, content_b)
""" Define Loss """
g_adv_loss_a = generator_loss(self.gan_type, fake_A_logit) + generator_loss(self.gan_type, random_fake_A_logit)
g_adv_loss_b = generator_loss(self.gan_type, fake_B_logit) + generator_loss(self.gan_type, random_fake_B_logit)
g_con_loss_a = generator_loss(self.gan_type, content_A_logit, content=True)
g_con_loss_b = generator_loss(self.gan_type, content_B_logit, content=True)
g_cyc_loss_a = L1_loss(cycle_a, self.domain_A)
g_cyc_loss_b = L1_loss(cycle_b, self.domain_B)
g_rec_loss_a = L1_loss(recon_a, self.domain_A)
g_rec_loss_b = L1_loss(recon_b, self.domain_B)
g_latent_loss_a = L1_loss(attribute_fake_random_a, random_z)
g_latent_loss_b = L1_loss(attribute_fake_random_b, random_z)
if self.concat :
g_kl_loss_a = kl_loss_concat(mean_a, logvar_a)
g_kl_loss_b = kl_loss_concat(mean_b, logvar_b)
else :
g_kl_loss_a = kl_loss(attribute_a)
g_kl_loss_b = kl_loss(attribute_b)
d_adv_loss_a = discriminator_loss(self.gan_type, real_A_logit, fake_A_logit)
d_adv_loss_b = discriminator_loss(self.gan_type, real_B_logit, fake_B_logit)
d_con_loss = discriminator_loss(self.gan_type, content_A_logit, content_B_logit)
Generator_A_domain_loss = self.domain_adv_w * g_adv_loss_a
Generator_A_content_loss = self.content_adv_w * g_con_loss_a
Generator_A_cycle_loss = self.cycle_w * g_cyc_loss_b
Generator_A_recon_loss = self.recon_w * g_rec_loss_a
Generator_A_latent_loss = self.latent_w * g_latent_loss_a
Generator_A_kl_loss = self.kl_w * g_kl_loss_a
Generator_A_loss = Generator_A_domain_loss + \
Generator_A_content_loss + \
Generator_A_cycle_loss + \
Generator_A_recon_loss + \
Generator_A_latent_loss + \
Generator_A_kl_loss
Generator_B_domain_loss = self.domain_adv_w * g_adv_loss_b
Generator_B_content_loss = self.content_adv_w * g_con_loss_b
Generator_B_cycle_loss = self.cycle_w * g_cyc_loss_a
Generator_B_recon_loss = self.recon_w * g_rec_loss_b
Generator_B_latent_loss = self.latent_w * g_latent_loss_b
Generator_B_kl_loss = self.kl_w * g_kl_loss_b
Generator_B_loss = Generator_B_domain_loss + \
Generator_B_content_loss + \
Generator_B_cycle_loss + \
Generator_B_recon_loss + \
Generator_B_latent_loss + \
Generator_B_kl_loss
Discriminator_A_loss = self.domain_adv_w * d_adv_loss_a
Discriminator_B_loss = self.domain_adv_w * d_adv_loss_b
Discriminator_content_loss = self.content_adv_w * d_con_loss
self.Generator_loss = Generator_A_loss + Generator_B_loss
self.Discriminator_loss = Discriminator_A_loss + Discriminator_B_loss
self.Discriminator_content_loss = Discriminator_content_loss
""" Training """
t_vars = tf.trainable_variables()
        G_vars = [var for var in t_vars if 'encoder' in var.name or 'generator' in var.name]
D_vars = [var for var in t_vars if 'discriminator' in var.name and 'content' not in var.name]
D_content_vars = [var for var in t_vars if 'content_discriminator' in var.name]
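        # Gradients of the content discriminator are clipped by global norm
        # (clip_norm=5) before being applied by its optimizer below.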
grads, _ = tf.clip_by_global_norm(tf.gradients(self.Discriminator_content_loss, D_content_vars), clip_norm=5)
self.G_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Generator_loss, var_list=G_vars)
self.D_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Discriminator_loss, var_list=D_vars)
self.D_content_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).apply_gradients(zip(grads, D_content_vars))
"""" Summary """
self.lr_write = tf.summary.scalar("learning_rate", self.lr)
self.all_G_loss = tf.summary.scalar("Generator_loss", self.Generator_loss)
self.all_D_loss = tf.summary.scalar("Discriminator_loss", self.Discriminator_loss)
self.G_A_loss = tf.summary.scalar("G_A_loss", Generator_A_loss)
self.G_A_domain_loss = tf.summary.scalar("G_A_domain_loss", Generator_A_domain_loss)
self.G_A_content_loss = tf.summary.scalar("G_A_content_loss", Generator_A_content_loss)
self.G_A_cycle_loss = tf.summary.scalar("G_A_cycle_loss", Generator_A_cycle_loss)
self.G_A_recon_loss = tf.summary.scalar("G_A_recon_loss", Generator_A_recon_loss)
self.G_A_latent_loss = tf.summary.scalar("G_A_latent_loss", Generator_A_latent_loss)
self.G_A_kl_loss = tf.summary.scalar("G_A_kl_loss", Generator_A_kl_loss)
self.G_B_loss = tf.summary.scalar("G_B_loss", Generator_B_loss)
self.G_B_domain_loss = tf.summary.scalar("G_B_domain_loss", Generator_B_domain_loss)
self.G_B_content_loss = tf.summary.scalar("G_B_content_loss", Generator_B_content_loss)
self.G_B_cycle_loss = tf.summary.scalar("G_B_cycle_loss", Generator_B_cycle_loss)
self.G_B_recon_loss = tf.summary.scalar("G_B_recon_loss", Generator_B_recon_loss)
self.G_B_latent_loss = tf.summary.scalar("G_B_latent_loss", Generator_B_latent_loss)
self.G_B_kl_loss = tf.summary.scalar("G_B_kl_loss", Generator_B_kl_loss)
self.D_A_loss = tf.summary.scalar("D_A_loss", Discriminator_A_loss)
self.D_B_loss = tf.summary.scalar("D_B_loss", Discriminator_B_loss)
self.G_loss = tf.summary.merge([self.G_A_loss,
self.G_A_domain_loss, self.G_A_content_loss,
self.G_A_cycle_loss, self.G_A_recon_loss,
self.G_A_latent_loss, self.G_A_kl_loss,
self.G_B_loss,
self.G_B_domain_loss, self.G_B_content_loss,
self.G_B_cycle_loss, self.G_B_recon_loss,
self.G_B_latent_loss, self.G_B_kl_loss,
self.all_G_loss])
self.D_loss = tf.summary.merge([self.D_A_loss,
self.D_B_loss,
self.all_D_loss])
self.D_content_loss = tf.summary.scalar("Discriminator_content_loss", self.Discriminator_content_loss)
""" Image """
self.fake_A = fake_a
self.fake_B = fake_b
self.real_A = self.domain_A
self.real_B = self.domain_B
""" Test """
self.test_image = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='test_image')
self.test_random_z = tf.random_normal(shape=[1, self.n_z], mean=0.0, stddev=1.0, dtype=tf.float32)
test_content_a, test_attribute_a, _, _ = self.Encoder_A(self.test_image, is_training=False, reuse=True)
test_content_b, test_attribute_b, _, _ = self.Encoder_B(self.test_image, is_training=False, reuse=True)
self.test_fake_A = self.Decoder_A(content_B=test_content_b, attribute_A=self.test_random_z, reuse=True)
self.test_fake_B = self.Decoder_B(content_A=test_content_a, attribute_B=self.test_random_z, reuse=True)
""" Guided Image Translation """
self.content_image = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='content_image')
self.attribute_image = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='guide_attribute_image')
guide_content_A, guide_attribute_A, _, _ = self.Encoder_A(self.content_image, is_training=False, reuse=True)
guide_content_B, guide_attribute_B, _, _ = self.Encoder_B(self.attribute_image, is_training=False, reuse=True)
self.guide_fake_A = self.Decoder_A(content_B=guide_content_B, attribute_A=guide_attribute_A, reuse=True)
self.guide_fake_B = self.Decoder_B(content_A=guide_content_A, attribute_B=guide_attribute_B, reuse=True)
def train(self):
# initialize all variables
tf.global_variables_initializer().run()
# saver to save model
self.saver = tf.train.Saver()
# summary writer
self.writer = tf.summary.FileWriter(self.log_dir + '/' + self.model_dir, self.sess.graph)
# restore check-point if it exits
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
start_epoch = (int)(checkpoint_counter / self.iteration)
start_batch_id = checkpoint_counter - start_epoch * self.iteration
counter = checkpoint_counter
print(" [*] Load SUCCESS")
else:
start_epoch = 0
start_batch_id = 0
counter = 1
print(" [!] Load failed...")
# loop for epoch
start_time = time.time()
lr = self.init_lr
for epoch in range(start_epoch, self.epoch):
if self.decay_flag:
lr = self.init_lr if epoch < self.decay_epoch else self.init_lr * (self.epoch - epoch) / (self.epoch - self.decay_epoch) # linear decay
for idx in range(start_batch_id, self.iteration):
train_feed_dict = {
self.lr : lr
}
summary_str = self.sess.run(self.lr_write, feed_dict=train_feed_dict)
self.writer.add_summary(summary_str, counter)
# Update content D
_, d_con_loss, summary_str = self.sess.run([self.D_content_optim, self.Discriminator_content_loss, self.D_content_loss],
feed_dict=train_feed_dict)
self.writer.add_summary(summary_str, counter)
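                # The content discriminator is updated every iteration; the domain
                # discriminators and the generators are updated once every
                # self.n_d_con iterations.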
if (counter - 1) % self.n_d_con == 0:
# Update D
_, d_loss, summary_str = self.sess.run([self.D_optim, self.Discriminator_loss, self.D_loss],
feed_dict=train_feed_dict)
self.writer.add_summary(summary_str, counter)
# Update G
batch_A_images, batch_B_images, fake_A, fake_B, _, g_loss, summary_str = self.sess.run([self.real_A, self.real_B, self.fake_A, self.fake_B, self.G_optim, self.Generator_loss, self.G_loss],
feed_dict=train_feed_dict)
self.writer.add_summary(summary_str, counter)
print("Epoch: [%2d] [%6d/%6d] time: %4.4f d_con_loss: %.8f, d_loss: %.8f, g_loss: %.8f" \
% (epoch, idx, self.iteration, time.time() - start_time, d_con_loss, d_loss, g_loss))
else:
print("Epoch: [%2d] [%6d/%6d] time: %4.4f d_con_loss: %.8f" % (
epoch, idx, self.iteration, time.time() - start_time, d_con_loss))
if np.mod(idx + 1, self.print_freq) == 0:
save_images(batch_A_images, [self.batch_size, 1],
'./{}/real_A_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx + 1))
# save_images(batch_B_images, [self.batch_size, 1],
# './{}/real_B_{}_{:03d}_{:05d}.png'.format(self.sample_dir, gpu_id, epoch, idx+1))
# save_images(fake_A, [self.batch_size, 1],
# './{}/fake_A_{}_{:03d}_{:05d}.png'.format(self.sample_dir, gpu_id, epoch, idx+1))
save_images(fake_B, [self.batch_size, 1],
'./{}/fake_B_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx + 1))
# display training status
counter += 1
if np.mod(idx+1, self.save_freq) == 0 :
self.save(self.checkpoint_dir, counter)
# After an epoch, start_batch_id is set to zero
# non-zero value is only for the first epoch after loading pre-trained model
start_batch_id = 0
# save model for final step
self.save(self.checkpoint_dir, counter)
@property
def model_dir(self):
if self.concat :
concat = "_concat"
else :
concat = ""
if self.sn :
sn = "_sn"
else :
sn = ""
return "{}{}_{}_{}_{}layer_{}dis_{}scale_{}con{}".format(self.model_name, concat, self.dataset_name, self.gan_type,
self.n_layer, self.n_dis, self.n_scale, self.n_d_con, sn)
def save(self, checkpoint_dir, step):
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess, os.path.join(checkpoint_dir, self.model_name + '.model'), global_step=step)
def load(self, checkpoint_dir):
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
counter = int(ckpt_name.split('-')[-1])
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
def test(self):
tf.global_variables_initializer().run()
test_A_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testA'))
test_B_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testB'))
self.saver = tf.train.Saver()
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
self.result_dir = os.path.join(self.result_dir, self.model_dir)
check_folder(self.result_dir)
if could_load :
print(" [*] Load SUCCESS")
else :
print(" [!] Load failed...")
# write html for visual comparison
index_path = os.path.join(self.result_dir, 'index.html')
index = open(index_path, 'w')
index.write("<html><body><table><tr>")
index.write("<th>name</th><th>input</th><th>output</th></tr>")
for sample_file in test_A_files : # A -> B
print('Processing A image: ' + sample_file)
sample_image = np.asarray(load_test_data(sample_file, size=self.img_size))
file_name = os.path.basename(sample_file).split(".")[0]
file_extension = os.path.basename(sample_file).split(".")[1]
for i in range(self.num_attribute) :
image_path = os.path.join(self.result_dir, '{}_attribute{}.{}'.format(file_name, i, file_extension))
fake_img = self.sess.run(self.test_fake_B, feed_dict = {self.test_image : sample_image})
save_images(fake_img, [1, 1], image_path)
index.write("<td>%s</td>" % os.path.basename(image_path))
index.write("<td><img src='%s' width='%d' height='%d'></td>" % (sample_file if os.path.isabs(sample_file) else (
'../..' + os.path.sep + sample_file), self.img_size, self.img_size))
index.write("<td><img src='%s' width='%d' height='%d'></td>" % (image_path if os.path.isabs(image_path) else (
'../..' + os.path.sep + image_path), self.img_size, self.img_size))
index.write("</tr>")
for sample_file in test_B_files : # B -> A
print('Processing B image: ' + sample_file)
sample_image = np.asarray(load_test_data(sample_file, size=self.img_size))
file_name = os.path.basename(sample_file).split(".")[0]
file_extension = os.path.basename(sample_file).split(".")[1]
for i in range(self.num_attribute):
image_path = os.path.join(self.result_dir, '{}_attribute{}.{}'.format(file_name, i, file_extension))
fake_img = self.sess.run(self.test_fake_A, feed_dict={self.test_image: sample_image})
save_images(fake_img, [1, 1], image_path)
index.write("<td>%s</td>" % os.path.basename(image_path))
index.write("<td><img src='%s' width='%d' height='%d'></td>" % (sample_file if os.path.isabs(sample_file) else (
'../..' + os.path.sep + sample_file), self.img_size, self.img_size))
index.write("<td><img src='%s' width='%d' height='%d'></td>" % (image_path if os.path.isabs(image_path) else (
'../..' + os.path.sep + image_path), self.img_size, self.img_size))
index.write("</tr>")
index.close()
def guide_test(self):
tf.global_variables_initializer().run()
test_A_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testA'))
test_B_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testB'))
attribute_file = np.asarray(load_test_data(self.guide_img, size=self.img_size))
self.saver = tf.train.Saver()
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
self.result_dir = os.path.join(self.result_dir, self.model_dir, 'guide')
check_folder(self.result_dir)
if could_load:
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
# write html for visual comparison
index_path = os.path.join(self.result_dir, 'index.html')
index = open(index_path, 'w')
index.write("<html><body><table><tr>")
index.write("<th>name</th><th>input</th><th>output</th></tr>")
if self.direction == 'a2b' :
for sample_file in test_A_files: # A -> B
print('Processing A image: ' + sample_file)
sample_image = np.asarray(load_test_data(sample_file, size=self.img_size))
image_path = os.path.join(self.result_dir, '{}'.format(os.path.basename(sample_file)))
fake_img = self.sess.run(self.guide_fake_B, feed_dict={self.content_image: sample_image, self.attribute_image : attribute_file})
save_images(fake_img, [1, 1], image_path)
index.write("<td>%s</td>" % os.path.basename(image_path))
index.write("<td><img src='%s' width='%d' height='%d'></td>" % (sample_file if os.path.isabs(sample_file) else (
'../../..' + os.path.sep + sample_file), self.img_size, self.img_size))
index.write("<td><img src='%s' width='%d' height='%d'></td>" % (image_path if os.path.isabs(image_path) else (
'../../..' + os.path.sep + image_path), self.img_size, self.img_size))
index.write("</tr>")
else :
for sample_file in test_B_files: # B -> A
print('Processing B image: ' + sample_file)
sample_image = np.asarray(load_test_data(sample_file, size=self.img_size))
image_path = os.path.join(self.result_dir, '{}'.format(os.path.basename(sample_file)))
fake_img = self.sess.run(self.guide_fake_A, feed_dict={self.content_image: sample_image, self.attribute_image : attribute_file})
save_images(fake_img, [1, 1], image_path)
index.write("<td>%s</td>" % os.path.basename(image_path))
index.write("<td><img src='%s' width='%d' height='%d'></td>" % (sample_file if os.path.isabs(sample_file) else (
'../../..' + os.path.sep + sample_file), self.img_size, self.img_size))
index.write("<td><img src='%s' width='%d' height='%d'></td>" % (image_path if os.path.isabs(image_path) else (
'../../..' + os.path.sep + image_path), self.img_size, self.img_size))
index.write("</tr>")
index.close() |
the-stack_0_5633 | import discord
from discord.ext import commands
class Example(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print('Bot is online')
@commands.command()
async def loadtest(self, ctx):
await ctx.send('yes non1')
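# Standard extension entry point: discord.py calls this module-level setup()
# when the extension is loaded (e.g. via client.load_extension), which
# registers the cog on the bot.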
def setup(client):
client.add_cog(Example(client)) |
the-stack_0_5637 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import click
import dataloader as torcharrow_dataloader
import torch
import torch.distributed as dist
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torch.distributed.elastic.multiprocessing.errors import record
from torchrec import EmbeddingBagCollection
from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, INT_FEATURE_COUNT
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.model_parallel import DistributedModelParallel
from torchrec.models.dlrm import DLRM
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.optim.keyed import KeyedOptimizerWrapper
@record
@click.command()
@click.option("--batch_size", default=256)
@click.option("--num_embeddings", default=2048)
@click.option("--sigrid_hash_salt", default=0)
@click.option("--parquet_directory", default="/data/criteo_preproc")
def main(
batch_size,
num_embeddings,
sigrid_hash_salt,
parquet_directory,
) -> None:
rank = int(os.environ["LOCAL_RANK"])
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
backend = "nccl"
torch.cuda.set_device(device)
else:
device = torch.device("cpu")
backend = "gloo"
print(
"\033[92m"
+ f"WARNING: Running in CPU mode. cuda availablility {torch.cuda.is_available()}."
)
dist.init_process_group(backend=backend)
world_size = dist.get_world_size()
dataloader = torcharrow_dataloader.get_dataloader(
parquet_directory,
world_size,
rank,
batch_size=batch_size,
num_embeddings=num_embeddings,
salt=sigrid_hash_salt,
)
it = iter(dataloader)
model = DLRM(
embedding_bag_collection=EmbeddingBagCollection(
tables=[
EmbeddingBagConfig(
name=f"table_{cat_name}",
embedding_dim=64,
num_embeddings=num_embeddings,
feature_names=[cat_name],
)
for cat_name in DEFAULT_CAT_NAMES + ["bucketize_int_0"]
],
device=torch.device("meta"),
),
dense_in_features=INT_FEATURE_COUNT,
dense_arch_layer_sizes=[64],
over_arch_layer_sizes=[32, 1],
dense_device=device,
)
fused_params = {
"learning_rate": 0.02,
"optimizer": EmbOptimType.EXACT_ROWWISE_ADAGRAD,
}
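    # These fused parameters are forwarded to the embedding sharder below, so the
    # sharded embedding tables are updated by FBGEMM's fused row-wise Adagrad
    # kernels; the dense layers are optimized separately by the SGD wrapper
    # defined afterwards.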
sharded_model = DistributedModelParallel(
module=model,
device=device,
sharders=[
EmbeddingBagCollectionSharder(fused_params=fused_params),
],
)
optimizer = KeyedOptimizerWrapper(
dict(model.named_parameters()),
lambda params: torch.optim.SGD(params, lr=0.01),
)
loss_fn = torch.nn.BCEWithLogitsLoss()
print_example = dist.get_rank() == 0
for (dense_features, kjt, labels) in it:
if print_example:
print("Example dense_features", dense_features)
print("Example KJT input", kjt)
print_example = False
dense_features = dense_features.to(device)
kjt = kjt.to(device)
labels = labels.to(device)
optimizer.zero_grad()
preds = sharded_model(dense_features, kjt)
loss = loss_fn(preds.squeeze(), labels.squeeze())
loss.sum().backward()
optimizer.step()
print("\033[92m" + "DLRM run with torcharrow last-mile preprocessing finished!")
if __name__ == "__main__":
main()
|
the-stack_0_5638 | # Copyright 2019 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=wildcard-import
import os
import pytest
from tools.test_data_yum import bash_yum_info, \
conntrack_tools_yum_info, \
pacemaker_yum_info
from tools.yum import YumInfoParser
from tools.yum_test_data import bash_expected, pacemaker_expected, \
yum_info_installed_header, \
yum_info_available_header, \
yum_info_available_header2
from tools.yum_test_data import conntrack_tools_expected
@pytest.mark.parametrize('yum_info, expected_output', [
(bash_yum_info, bash_expected),
(conntrack_tools_yum_info, conntrack_tools_expected),
(pacemaker_yum_info, pacemaker_expected)
])
def test_parse_package(yum_info, expected_output):
parsed = YumInfoParser().parse_package(yum_info)
expected = expected_output
assert parsed == expected
def test_parse_installed():
fake_out = '\n'.join([yum_info_installed_header,
bash_yum_info,
conntrack_tools_yum_info])
parsed = YumInfoParser().parse_installed(fake_out)
expected = [bash_expected, conntrack_tools_expected]
assert parsed == expected
@pytest.mark.parametrize('available_header', [
yum_info_available_header,
yum_info_available_header2
])
def test_parse_available(available_header):
fake_out = '\n'.join([available_header,
bash_yum_info,
conntrack_tools_yum_info])
parsed = YumInfoParser().parse_available(fake_out)
expected = [bash_expected, conntrack_tools_expected]
assert parsed == expected
def test_parse_file():
test_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'yum_info_installed.sample')
parsed = YumInfoParser().parse_file(test_file)
assert len(parsed) == 14
|
the-stack_0_5639 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Entry point for training AttGAN network"""
import argparse
import datetime
import json
import math
import os
from os.path import join
import numpy as np
import mindspore.common.dtype as mstype
from mindspore import Tensor, context
from mindspore import nn
from mindspore.common import set_seed
from mindspore.communication.management import init, get_rank
from mindspore.context import ParallelMode
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, _InternalCallbackParam, RunContext
from mindspore.train.serialization import load_param_into_net
from src.attgan import Gen, Dis
from src.cell import TrainOneStepCellGen, TrainOneStepCellDis, init_weights
from src.data import data_loader
from src.helpers import Progressbar
from src.loss import GenLoss, DisLoss
from src.utils import resume_generator, resume_discriminator
attrs_default = [
'Bald', 'Bangs', 'Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Bushy_Eyebrows',
'Eyeglasses', 'Male', 'Mouth_Slightly_Open', 'Mustache', 'No_Beard', 'Pale_Skin', 'Young'
]
def parse(arg=None):
"""Define configuration of Model"""
parser = argparse.ArgumentParser()
parser.add_argument('--attrs', dest='attrs', default=attrs_default, nargs='+', help='attributes to learn')
parser.add_argument('--data', dest='data', type=str, choices=['CelebA'], default='CelebA')
parser.add_argument('--data_path', dest='data_path', type=str, default='./data/img_align_celeba')
parser.add_argument('--attr_path', dest='attr_path', type=str, default='./data/list_attr_celeba.txt')
parser.add_argument('--img_size', dest='img_size', type=int, default=128)
parser.add_argument('--shortcut_layers', dest='shortcut_layers', type=int, default=1)
parser.add_argument('--inject_layers', dest='inject_layers', type=int, default=1)
parser.add_argument('--enc_dim', dest='enc_dim', type=int, default=64)
parser.add_argument('--dec_dim', dest='dec_dim', type=int, default=64)
parser.add_argument('--dis_dim', dest='dis_dim', type=int, default=64)
parser.add_argument('--dis_fc_dim', dest='dis_fc_dim', type=int, default=1024)
parser.add_argument('--enc_layers', dest='enc_layers', type=int, default=5)
parser.add_argument('--dec_layers', dest='dec_layers', type=int, default=5)
parser.add_argument('--dis_layers', dest='dis_layers', type=int, default=5)
parser.add_argument('--enc_norm', dest='enc_norm', type=str, default='batchnorm')
parser.add_argument('--dec_norm', dest='dec_norm', type=str, default='batchnorm')
parser.add_argument('--dis_norm', dest='dis_norm', type=str, default='instancenorm')
parser.add_argument('--dis_fc_norm', dest='dis_fc_norm', type=str, default='none')
parser.add_argument('--enc_acti', dest='enc_acti', type=str, default='lrelu')
parser.add_argument('--dec_acti', dest='dec_acti', type=str, default='relu')
parser.add_argument('--dis_acti', dest='dis_acti', type=str, default='lrelu')
parser.add_argument('--dis_fc_acti', dest='dis_fc_acti', type=str, default='relu')
parser.add_argument('--lambda_1', dest='lambda_1', type=float, default=100.0)
parser.add_argument('--lambda_2', dest='lambda_2', type=float, default=10.0)
parser.add_argument('--lambda_3', dest='lambda_3', type=float, default=1.0)
parser.add_argument('--lambda_gp', dest='lambda_gp', type=float, default=10.0)
parser.add_argument('--epochs', dest='epochs', type=int, default=200, help='# of epochs')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=32)
parser.add_argument('--num_workers', dest='num_workers', type=int, default=16)
parser.add_argument('--lr', dest='lr', type=float, default=0.0002, help='learning rate')
parser.add_argument('--beta1', dest='beta1', type=float, default=0.5)
parser.add_argument('--beta2', dest='beta2', type=float, default=0.999)
parser.add_argument('--n_d', dest='n_d', type=int, default=5, help='# of d updates per g update')
parser.add_argument('--split_point', dest='split_point', type=int, default=182000, help='# of dataset split point')
parser.add_argument('--thres_int', dest='thres_int', type=float, default=0.5)
parser.add_argument('--test_int', dest='test_int', type=float, default=1.0)
parser.add_argument('--save_interval', dest='save_interval', type=int, default=500)
parser.add_argument('--experiment_name', dest='experiment_name',
default=datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y"))
parser.add_argument("--run_distribute", type=int, default=0, help="Run distribute, default: false.")
parser.add_argument('--resume_model', action='store_true')
parser.add_argument('--gen_ckpt_name', type=str, default='')
parser.add_argument('--dis_ckpt_name', type=str, default='')
return parser.parse_args(arg)
args = parse()
print(args)
args.lr_base = args.lr
args.n_attrs = len(args.attrs)
# initialize environment
set_seed(1)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
if args.run_distribute:
if os.getenv("DEVICE_ID", "not_set").isdigit():
context.set_context(device_id=int(os.getenv("DEVICE_ID")))
device_num = int(os.getenv('RANK_SIZE'))
print(device_num)
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
device_num=device_num)
init()
rank = get_rank()
else:
if os.getenv("DEVICE_ID", "not_set").isdigit():
context.set_context(device_id=int(os.getenv("DEVICE_ID")))
device_num = int(os.getenv('RANK_SIZE'))
rank = 0
print("Initialize successful!")
os.makedirs(join('output', args.experiment_name), exist_ok=True)
os.makedirs(join('output', args.experiment_name, 'checkpoint'), exist_ok=True)
with open(join('output', args.experiment_name, 'setting.txt'), 'w') as f:
f.write(json.dumps(vars(args), indent=4, separators=(',', ':')))
if __name__ == '__main__':
# Define dataloader
train_dataset, train_length = data_loader(img_path=args.data_path,
attr_path=args.attr_path,
selected_attrs=args.attrs,
mode="train",
batch_size=args.batch_size,
device_num=device_num,
shuffle=True,
split_point=args.split_point)
train_loader = train_dataset.create_dict_iterator()
print('Training images:', train_length)
# Define network
gen = Gen(args.enc_dim, args.enc_layers, args.enc_norm, args.enc_acti, args.dec_dim, args.dec_layers, args.dec_norm,
args.dec_acti, args.n_attrs, args.shortcut_layers, args.inject_layers, args.img_size, mode='train')
dis = Dis(args.dis_dim, args.dis_norm, args.dis_acti, args.dis_fc_dim, args.dis_fc_norm, args.dis_fc_acti,
args.dis_layers, args.img_size, mode='train')
# Initialize network
init_weights(gen, 'KaimingUniform', math.sqrt(5))
init_weights(dis, 'KaimingUniform', math.sqrt(5))
# Resume from checkpoint
if args.resume_model:
para_gen = resume_generator(args, gen, args.gen_ckpt_name)
para_dis = resume_discriminator(args, dis, args.dis_ckpt_name)
load_param_into_net(gen, para_gen)
load_param_into_net(dis, para_dis)
# Define network with loss
G_loss_cell = GenLoss(args, gen, dis)
D_loss_cell = DisLoss(args, gen, dis)
# Define Optimizer
optimizer_G = nn.Adam(params=gen.trainable_params(), learning_rate=args.lr, beta1=args.beta1, beta2=args.beta2)
optimizer_D = nn.Adam(params=dis.trainable_params(), learning_rate=args.lr, beta1=args.beta1, beta2=args.beta2)
# Define One Step Train
G_trainOneStep = TrainOneStepCellGen(G_loss_cell, optimizer_G)
D_trainOneStep = TrainOneStepCellDis(D_loss_cell, optimizer_D)
# Train
G_trainOneStep.set_train(True)
D_trainOneStep.set_train(True)
print("Start Training")
train_iter = train_length // args.batch_size
ckpt_config = CheckpointConfig(save_checkpoint_steps=args.save_interval)
if rank == 0:
local_train_url = os.path.join('output', args.experiment_name, 'checkpoint/rank{}'.format(rank))
ckpt_cb_gen = ModelCheckpoint(config=ckpt_config, directory=local_train_url, prefix='generator')
ckpt_cb_dis = ModelCheckpoint(config=ckpt_config, directory=local_train_url, prefix='discriminator')
cb_params_gen = _InternalCallbackParam()
cb_params_gen.train_network = gen
cb_params_gen.cur_epoch_num = 0
gen_run_context = RunContext(cb_params_gen)
ckpt_cb_gen.begin(gen_run_context)
cb_params_dis = _InternalCallbackParam()
cb_params_dis.train_network = dis
cb_params_dis.cur_epoch_num = 0
dis_run_context = RunContext(cb_params_dis)
ckpt_cb_dis.begin(dis_run_context)
# Initialize Progressbar
progressbar = Progressbar()
it = 0
for epoch in range(args.epochs):
for data in progressbar(train_loader, train_iter):
img_a = data["image"]
att_a = data["attr"]
att_a = att_a.asnumpy()
att_b = np.random.permutation(att_a)
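            # Target attributes att_b are a random permutation of the real batch
            # attributes; both are then rescaled from {0, 1} to
            # {-thres_int, +thres_int} before being fed to the networks.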
att_a_ = (att_a * 2 - 1) * args.thres_int
att_b_ = (att_b * 2 - 1) * args.thres_int
att_a = Tensor(att_a, mstype.float32)
att_a_ = Tensor(att_a_, mstype.float32)
att_b = Tensor(att_b, mstype.float32)
att_b_ = Tensor(att_b_, mstype.float32)
if (it + 1) % (args.n_d + 1) != 0:
d_out, d_real_loss, d_fake_loss, dc_loss, df_gp = D_trainOneStep(img_a, att_a, att_a_, att_b, att_b_)
else:
g_out, gf_loss, gc_loss, gr_loss = G_trainOneStep(img_a, att_a, att_a_, att_b, att_b_)
progressbar.say(epoch=epoch, iter=it + 1, d_loss=d_out, g_loss=g_out, gf_loss=gf_loss, gc_loss=gc_loss,
gr_loss=gr_loss, dc_loss=dc_loss, df_gp=df_gp)
if (epoch + 1) % 5 == 0 and (it + 1) % args.save_interval == 0 and rank == 0:
cb_params_gen.cur_epoch_num = epoch + 1
cb_params_dis.cur_epoch_num = epoch + 1
cb_params_gen.cur_step_num = it + 1
cb_params_dis.cur_step_num = it + 1
cb_params_gen.batch_num = it + 2
cb_params_dis.batch_num = it + 2
ckpt_cb_gen.step_end(gen_run_context)
ckpt_cb_dis.step_end(dis_run_context)
it += 1
|
the-stack_0_5641 | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function, unicode_literals
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# digibyted and digibyte-qt
b'libgcc_s.so.1', # GCC base support
b'libc.so.6', # C library
b'libpthread.so.0', # threading
b'libanl.so.1', # DNS resolve
b'libm.so.6', # math library
b'librt.so.1', # real-time (clock)
b'ld-linux-x86-64.so.2', # 64-bit dynamic linker
b'ld-linux.so.2', # 32-bit dynamic linker
# digibyte-qt only
b'libX11-xcb.so.1', # part of X11
b'libX11.so.6', # part of X11
b'libxcb.so.1', # part of X11
b'libfontconfig.so.1', # font support
b'libfreetype.so.6', # font parsing
b'libdl.so.2' # programming interface to dynamic linker
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def __call__(self, mangled):
self.proc.stdin.write(mangled + b'\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>7 and re.match(b'[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition(b'@')
is_import = line[6] == b'UND'
if version.startswith(b'@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
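# Split a versioned symbol such as GLIBC_2.11 into (library, version tuple) and
# check the version against the allowed maximum for that library.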
def check_version(max_versions, version):
if b'_' in version:
(lib, _, ver) = version.rpartition(b'_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split(b'.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
def read_libraries(filename):
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>2 and tokens[1] == b'(NEEDED)':
match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8')))
retval = 1
# Check exported symbols
for sym,version in read_symbols(filename, False):
if sym in IGNORE_EXPORTS:
continue
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8')))
retval = 1
# Check dependency libraries
for library_name in read_libraries(filename):
if library_name not in ALLOWED_LIBRARIES:
print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8')))
retval = 1
sys.exit(retval)
|
the-stack_0_5643 | # sqlalchemy/pool.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Base constructs for connection pools.
"""
from collections import deque
import time
import weakref
from .. import event
from .. import exc
from .. import log
from .. import util
reset_rollback = util.symbol("reset_rollback")
reset_commit = util.symbol("reset_commit")
reset_none = util.symbol("reset_none")
class _ConnDialect:
"""partial implementation of :class:`.Dialect`
which provides DBAPI connection methods.
When a :class:`_pool.Pool` is combined with an :class:`_engine.Engine`,
the :class:`_engine.Engine` replaces this with its own
:class:`.Dialect`.
"""
is_async = False
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
def do_ping(self, dbapi_connection):
raise NotImplementedError(
"The ping feature requires that a dialect is "
"passed to the connection pool."
)
def get_driver_connection(self, connection):
return connection
class _AsyncConnDialect(_ConnDialect):
is_async = True
class Pool(log.Identified):
"""Abstract base class for connection pools."""
_dialect = _ConnDialect()
def __init__(
self,
creator,
recycle=-1,
echo=None,
logging_name=None,
reset_on_return=True,
events=None,
dialect=None,
pre_ping=False,
_dispatch=None,
):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
parameters.
:param recycle: If set to a value other than -1, number of
seconds between connection recycling, which means upon
checkout, if this timeout is surpassed the connection will be
closed and replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: if True, the connection pool will log
informational output such as when connections are invalidated
as well as when connections are recycled to the default log handler,
          which defaults to ``sys.stdout`` for output. If set to the string
``"debug"``, the logging will include pool checkouts and checkins.
The :paramref:`_pool.Pool.echo` parameter can also be set from the
:func:`_sa.create_engine` call by using the
:paramref:`_sa.create_engine.echo_pool` parameter.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param reset_on_return: Determine steps to take on
connections as they are returned to the pool, which were
not otherwise handled by a :class:`_engine.Connection`.
reset_on_return can have any of these values:
* ``"rollback"`` - call rollback() on the connection,
to release locks and transaction resources.
This is the default value. The vast majority
of use cases should leave this value set.
* ``True`` - same as 'rollback', this is here for
backwards compatibility.
* ``"commit"`` - call commit() on the connection,
to release locks and transaction resources.
A commit here may be desirable for databases that
cache query plans if a commit is emitted,
such as Microsoft SQL Server. However, this
value is more dangerous than 'rollback' because
any data changes present on the transaction
are committed unconditionally.
* ``None`` - don't do anything on the connection.
This setting may be appropriate if the database / DBAPI
works in pure "autocommit" mode at all times, or if the
application uses the :class:`_engine.Engine` with consistent
connectivity patterns. See the section
:ref:`pool_reset_on_return` for more details.
* ``False`` - same as None, this is here for
backwards compatibility.
.. seealso::
:ref:`pool_reset_on_return`
:param events: a list of 2-tuples, each of the form
``(callable, target)`` which will be passed to :func:`.event.listen`
upon construction. Provided here so that event listeners
can be assigned via :func:`_sa.create_engine` before dialect-level
listeners are applied.
:param dialect: a :class:`.Dialect` that will handle the job
of calling rollback(), close(), or commit() on DBAPI connections.
If omitted, a built-in "stub" dialect is used. Applications that
make use of :func:`_sa.create_engine` should not use this parameter
as it is handled by the engine creation strategy.
.. versionadded:: 1.1 - ``dialect`` is now a public parameter
to the :class:`_pool.Pool`.
:param pre_ping: if True, the pool will emit a "ping" (typically
"SELECT 1", but is dialect-specific) on the connection
upon checkout, to test if the connection is alive or not. If not,
the connection is transparently re-connected and upon success, all
other pooled connections established prior to that timestamp are
invalidated. Requires that a dialect is passed as well to
interpret the disconnection error.
.. versionadded:: 1.2
"""
if logging_name:
self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
log.instance_logger(self, echoflag=echo)
self._creator = creator
self._recycle = recycle
self._invalidate_time = 0
self._pre_ping = pre_ping
self._reset_on_return = util.symbol.parse_user_argument(
reset_on_return,
{
reset_rollback: ["rollback", True],
reset_none: ["none", None, False],
reset_commit: ["commit"],
},
"reset_on_return",
resolve_symbol_names=False,
)
self.echo = echo
if _dispatch:
self.dispatch._update(_dispatch, only_propagate=False)
if dialect:
self._dialect = dialect
if events:
for fn, target in events:
event.listen(self, target, fn)
@util.hybridproperty
def _is_asyncio(self):
return self._dialect.is_async
@property
def _creator(self):
return self.__dict__["_creator"]
@_creator.setter
def _creator(self, creator):
self.__dict__["_creator"] = creator
self._invoke_creator = self._should_wrap_creator(creator)
def _should_wrap_creator(self, creator):
"""Detect if creator accepts a single argument, or is sent
as a legacy style no-arg function.
"""
try:
argspec = util.get_callable_argspec(self._creator, no_self=True)
except TypeError:
return lambda crec: creator()
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
# look for the exact arg signature that DefaultStrategy
# sends us
if (argspec[0], argspec[3]) == (["connection_record"], (None,)):
return creator
# or just a single positional
elif positionals == 1:
return creator
# all other cases, just wrap and assume legacy "creator" callable
# thing
else:
return lambda crec: creator()
def _close_connection(self, connection):
self.logger.debug("Closing connection %r", connection)
try:
self._dialect.do_close(connection)
except Exception:
self.logger.error(
"Exception closing connection %r", connection, exc_info=True
)
def _create_connection(self):
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def _invalidate(self, connection, exception=None, _checkin=True):
"""Mark all connections established within the generation
of the given connection as invalidated.
If this pool's last invalidate time is before when the given
connection was created, update the timestamp til now. Otherwise,
no action is performed.
Connections with a start time prior to this pool's invalidation
time will be recycled upon next checkout.
"""
rec = getattr(connection, "_connection_record", None)
if not rec or self._invalidate_time < rec.starttime:
self._invalidate_time = time.time()
if _checkin and getattr(connection, "is_valid", False):
connection.invalidate(exception)
def recreate(self):
"""Return a new :class:`_pool.Pool`, of the same class as this one
and configured with identical creation arguments.
This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`_pool.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self):
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
remaining open, as it only affects connections that are
idle in the pool.
.. seealso::
:meth:`Pool.recreate`
"""
raise NotImplementedError()
def connect(self):
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
return _ConnectionFairy._checkout(self)
def _return_conn(self, record):
"""Given a _ConnectionRecord, return it to the :class:`_pool.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
self._do_return_conn(record)
def _do_get(self):
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def _do_return_conn(self, conn):
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self):
raise NotImplementedError()
class _ConnectionRecord:
"""Internal object which maintains an individual DBAPI connection
referenced by a :class:`_pool.Pool`.
The :class:`._ConnectionRecord` object always exists for any particular
DBAPI connection whether or not that DBAPI connection has been
"checked out". This is in contrast to the :class:`._ConnectionFairy`
which is only a public facade to the DBAPI connection while it is checked
out.
A :class:`._ConnectionRecord` may exist for a span longer than that
of a single DBAPI connection. For example, if the
:meth:`._ConnectionRecord.invalidate`
method is called, the DBAPI connection associated with this
:class:`._ConnectionRecord`
will be discarded, but the :class:`._ConnectionRecord` may be used again,
in which case a new DBAPI connection is produced when the
:class:`_pool.Pool`
next uses this record.
The :class:`._ConnectionRecord` is delivered along with connection
pool events, including :meth:`_events.PoolEvents.connect` and
:meth:`_events.PoolEvents.checkout`, however :class:`._ConnectionRecord`
still
remains an internal object whose API and internals may change.
.. seealso::
:class:`._ConnectionFairy`
"""
def __init__(self, pool, connect=True):
self.__pool = pool
if connect:
self.__connect()
self.finalize_callback = deque()
fresh = False
fairy_ref = None
starttime = None
dbapi_connection = None
"""A reference to the actual DBAPI connection being tracked.
May be ``None`` if this :class:`._ConnectionRecord` has been marked
as invalidated; a new DBAPI connection may replace it if the owning
pool calls upon this :class:`._ConnectionRecord` to reconnect.
For adapted drivers, like the Asyncio implementations, this is a
:class:`.AdaptedConnection` that adapts the driver connection
to the DBAPI protocol.
Use :attr:`._ConnectionRecord.driver_connection` to obtain the
connection objected returned by the driver.
.. versionadded:: 1.4.24
"""
@property
def driver_connection(self):
"""The connection object as returned by the driver after a connect.
For normal sync drivers that support the DBAPI protocol, this object
is the same as the one referenced by
:attr:`._ConnectionRecord.dbapi_connection`.
For adapted drivers, like the Asyncio ones, this is the actual object
that was returned by the driver ``connect`` call.
As :attr:`._ConnectionRecord.dbapi_connection` it may be ``None``
if this :class:`._ConnectionRecord` has been marked as invalidated.
.. versionadded:: 1.4.24
"""
if self.dbapi_connection is None:
return None
else:
return self.__pool._dialect.get_driver_connection(
self.dbapi_connection
)
@property
def connection(self):
"""An alias to :attr:`._ConnectionRecord.dbapi_connection`.
This alias is deprecated, please use the new name.
.. deprecated:: 1.4.24
"""
return self.dbapi_connection
@connection.setter
def connection(self, value):
self.dbapi_connection = value
_soft_invalidate_time = 0
@util.memoized_property
def info(self):
"""The ``.info`` dictionary associated with the DBAPI connection.
This dictionary is shared among the :attr:`._ConnectionFairy.info`
and :attr:`_engine.Connection.info` accessors.
.. note::
The lifespan of this dictionary is linked to the
DBAPI connection itself, meaning that it is **discarded** each time
the DBAPI connection is closed and/or invalidated. The
:attr:`._ConnectionRecord.record_info` dictionary remains
persistent throughout the lifespan of the
:class:`._ConnectionRecord` container.
"""
return {}
@util.memoized_property
def record_info(self):
"""An "info' dictionary associated with the connection record
itself.
Unlike the :attr:`._ConnectionRecord.info` dictionary, which is linked
to the lifespan of the DBAPI connection, this dictionary is linked
to the lifespan of the :class:`._ConnectionRecord` container itself
and will remain persistent throughout the life of the
:class:`._ConnectionRecord`.
.. versionadded:: 1.1
"""
return {}
@classmethod
def checkout(cls, pool):
rec = pool._do_get()
try:
dbapi_connection = rec.get_connection()
except Exception as err:
with util.safe_reraise():
rec._checkin_failed(err, _fairy_was_created=False)
echo = pool._should_log_debug()
fairy = _ConnectionFairy(dbapi_connection, rec, echo)
rec.fairy_ref = ref = weakref.ref(
fairy,
lambda ref: _finalize_fairy
and _finalize_fairy(None, rec, pool, ref, echo, True),
)
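        # If the fairy is garbage collected without being returned to the pool,
        # this weakref callback runs _finalize_fairy so the underlying DBAPI
        # connection is still checked back in (or discarded).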
_strong_ref_connection_records[ref] = rec
if echo:
pool.logger.debug(
"Connection %r checked out from pool", dbapi_connection
)
return fairy
def _checkin_failed(self, err, _fairy_was_created=True):
self.invalidate(e=err)
self.checkin(
_fairy_was_created=_fairy_was_created,
)
def checkin(self, _fairy_was_created=True):
if self.fairy_ref is None and _fairy_was_created:
# _fairy_was_created is False for the initial get connection phase;
# meaning there was no _ConnectionFairy and we must unconditionally
# do a checkin.
#
# otherwise, if fairy_was_created==True, if fairy_ref is None here
# that means we were checked in already, so this looks like
# a double checkin.
util.warn("Double checkin attempted on %s" % self)
return
self.fairy_ref = None
connection = self.dbapi_connection
pool = self.__pool
while self.finalize_callback:
finalizer = self.finalize_callback.pop()
finalizer(connection)
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, self)
pool._return_conn(self)
@property
def in_use(self):
return self.fairy_ref is not None
@property
def last_connect_time(self):
return self.starttime
def close(self):
if self.dbapi_connection is not None:
self.__close()
def invalidate(self, e=None, soft=False):
"""Invalidate the DBAPI connection held by this
:class:`._ConnectionRecord`.
This method is called for all connection invalidations, including
when the :meth:`._ConnectionFairy.invalidate` or
:meth:`_engine.Connection.invalidate` methods are called,
as well as when any
so-called "automatic invalidation" condition occurs.
:param e: an exception object indicating a reason for the
invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
# already invalidated
if self.dbapi_connection is None:
return
if soft:
self.__pool.dispatch.soft_invalidate(
self.dbapi_connection, self, e
)
else:
self.__pool.dispatch.invalidate(self.dbapi_connection, self, e)
if e is not None:
self.__pool.logger.info(
"%sInvalidate connection %r (reason: %s:%s)",
"Soft " if soft else "",
self.dbapi_connection,
e.__class__.__name__,
e,
)
else:
self.__pool.logger.info(
"%sInvalidate connection %r",
"Soft " if soft else "",
self.dbapi_connection,
)
if soft:
self._soft_invalidate_time = time.time()
else:
self.__close()
self.dbapi_connection = None
def get_connection(self):
recycle = False
# NOTE: the various comparisons here are assuming that measurable time
# passes between these state changes. however, time.time() is not
# guaranteed to have sub-second precision. comparisons of
# "invalidation time" to "starttime" should perhaps use >= so that the
# state change can take place assuming no measurable time has passed,
# however this does not guarantee correct behavior here as if time
# continues to not pass, it will try to reconnect repeatedly until
# these timestamps diverge, so in that sense using > is safer. Per
# https://stackoverflow.com/a/1938096/34549, Windows time.time() may be
# within 16 milliseconds accuracy, so unit tests for connection
# invalidation need a sleep of at least this long between initial start
# time and invalidation for the logic below to work reliably.
if self.dbapi_connection is None:
self.info.clear()
self.__connect()
elif (
self.__pool._recycle > -1
and time.time() - self.starttime > self.__pool._recycle
):
self.__pool.logger.info(
"Connection %r exceeded timeout; recycling",
self.dbapi_connection,
)
recycle = True
elif self.__pool._invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to pool invalidation; "
+ "recycling",
self.dbapi_connection,
)
recycle = True
elif self._soft_invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to local soft invalidation; "
+ "recycling",
self.dbapi_connection,
)
recycle = True
if recycle:
self.__close()
self.info.clear()
self.__connect()
return self.dbapi_connection
def _is_hard_or_soft_invalidated(self):
return (
self.dbapi_connection is None
or self.__pool._invalidate_time > self.starttime
or (self._soft_invalidate_time > self.starttime)
)
def __close(self):
self.finalize_callback.clear()
if self.__pool.dispatch.close:
self.__pool.dispatch.close(self.dbapi_connection, self)
self.__pool._close_connection(self.dbapi_connection)
self.dbapi_connection = None
def __connect(self):
pool = self.__pool
# ensure any existing connection is removed, so that if
# creator fails, this attribute stays None
self.dbapi_connection = None
try:
self.starttime = time.time()
self.dbapi_connection = connection = pool._invoke_creator(self)
pool.logger.debug("Created new connection %r", connection)
self.fresh = True
except Exception as e:
with util.safe_reraise():
pool.logger.debug("Error on connect(): %s", e)
else:
# in SQLAlchemy 1.4 the first_connect event is not used by
# the engine, so this will usually not be set
if pool.dispatch.first_connect:
pool.dispatch.first_connect.for_modify(
pool.dispatch
).exec_once_unless_exception(self.dbapi_connection, self)
# init of the dialect now takes place within the connect
# event, so ensure a mutex is used on the first run
pool.dispatch.connect.for_modify(
pool.dispatch
)._exec_w_sync_on_first_run(self.dbapi_connection, self)
def _finalize_fairy(
dbapi_connection,
connection_record,
pool,
ref, # this is None when called directly, not by the gc
echo,
reset=True,
fairy=None,
):
"""Cleanup for a :class:`._ConnectionFairy` whether or not it's already
been garbage collected.
When using an async dialect no IO can happen here (without using
a dedicated thread), since this is called outside the greenlet
context and with an already running loop. In this case function
will only log a message and raise a warning.
"""
if ref:
_strong_ref_connection_records.pop(ref, None)
elif fairy:
_strong_ref_connection_records.pop(weakref.ref(fairy), None)
if ref is not None:
if connection_record.fairy_ref is not ref:
return
assert dbapi_connection is None
dbapi_connection = connection_record.dbapi_connection
# null pool is not _is_asyncio but can be used also with async dialects
dont_restore_gced = pool._dialect.is_async
if dont_restore_gced:
detach = not connection_record or ref
can_manipulate_connection = not ref
else:
detach = not connection_record
can_manipulate_connection = True
if dbapi_connection is not None:
if connection_record and echo:
pool.logger.debug(
"Connection %r being returned to pool%s",
dbapi_connection,
", transaction state was already reset by caller"
if not reset
else "",
)
try:
fairy = fairy or _ConnectionFairy(
dbapi_connection,
connection_record,
echo,
)
assert fairy.dbapi_connection is dbapi_connection
if reset and can_manipulate_connection:
fairy._reset(pool)
if detach:
if connection_record:
fairy._pool = pool
fairy.detach()
if can_manipulate_connection:
if pool.dispatch.close_detached:
pool.dispatch.close_detached(dbapi_connection)
pool._close_connection(dbapi_connection)
else:
message = (
"The garbage collector is trying to clean up "
"connection %r. This feature is unsupported on async "
"dbapi, since no IO can be performed at this stage to "
"reset the connection. Please close out all "
"connections when they are no longer used, calling "
"``close()`` or using a context manager to "
"manage their lifetime."
) % dbapi_connection
pool.logger.error(message)
util.warn(message)
except BaseException as e:
pool.logger.error(
"Exception during reset or similar", exc_info=True
)
if connection_record:
connection_record.invalidate(e=e)
if not isinstance(e, Exception):
raise
if connection_record and connection_record.fairy_ref is not None:
connection_record.checkin()
# a dictionary of the _ConnectionFairy weakrefs to _ConnectionRecord, so that
# GC under pypy will call ConnectionFairy finalizers. linked directly to the
# weakref that will empty itself when collected so that it should not create
# any unmanaged memory references.
_strong_ref_connection_records = {}
class _ConnectionFairy:
"""Proxies a DBAPI connection and provides return-on-dereference
support.
This is an internal object used by the :class:`_pool.Pool` implementation
to provide context management to a DBAPI connection delivered by
that :class:`_pool.Pool`.
The name "fairy" is inspired by the fact that the
:class:`._ConnectionFairy` object's lifespan is transitory, as it lasts
only for the length of a specific DBAPI connection being checked out from
the pool, and additionally that as a transparent proxy, it is mostly
invisible.
.. seealso::
:class:`._ConnectionRecord`
"""
def __init__(self, dbapi_connection, connection_record, echo):
self.dbapi_connection = dbapi_connection
self._connection_record = connection_record
self._echo = echo
dbapi_connection = None
"""A reference to the actual DBAPI connection being tracked.
.. versionadded:: 1.4.24
.. seealso::
:attr:`._ConnectionFairy.driver_connection`
:attr:`._ConnectionRecord.dbapi_connection`
:ref:`faq_dbapi_connection`
"""
_connection_record = None
"""A reference to the :class:`._ConnectionRecord` object associated
with the DBAPI connection.
This is currently an internal accessor which is subject to change.
"""
@property
def driver_connection(self):
"""The connection object as returned by the driver after a connect.
.. versionadded:: 1.4.24
.. seealso::
:attr:`._ConnectionFairy.dbapi_connection`
:attr:`._ConnectionRecord.driver_connection`
:ref:`faq_dbapi_connection`
"""
return self._connection_record.driver_connection
@property
def connection(self):
"""An alias to :attr:`._ConnectionFairy.dbapi_connection`.
This alias is deprecated, please use the new name.
.. deprecated:: 1.4.24
"""
return self.dbapi_connection
@connection.setter
def connection(self, value):
self.dbapi_connection = value
@classmethod
def _checkout(cls, pool, threadconns=None, fairy=None):
if not fairy:
fairy = _ConnectionRecord.checkout(pool)
fairy._pool = pool
fairy._counter = 0
if threadconns is not None:
threadconns.current = weakref.ref(fairy)
if fairy.dbapi_connection is None:
raise exc.InvalidRequestError("This connection is closed")
fairy._counter += 1
if (
not pool.dispatch.checkout and not pool._pre_ping
) or fairy._counter != 1:
return fairy
# Pool listeners can trigger a reconnection on checkout, as well
# as the pre-pinger.
# there are three attempts made here, but note that if the database
# is not accessible from a connection standpoint, those won't proceed
# here.
attempts = 2
while attempts > 0:
connection_is_fresh = fairy._connection_record.fresh
fairy._connection_record.fresh = False
try:
if pool._pre_ping:
if not connection_is_fresh:
if fairy._echo:
pool.logger.debug(
"Pool pre-ping on connection %s",
fairy.dbapi_connection,
)
result = pool._dialect.do_ping(fairy.dbapi_connection)
if not result:
if fairy._echo:
pool.logger.debug(
"Pool pre-ping on connection %s failed, "
"will invalidate pool",
fairy.dbapi_connection,
)
raise exc.InvalidatePoolError()
elif fairy._echo:
pool.logger.debug(
"Connection %s is fresh, skipping pre-ping",
fairy.dbapi_connection,
)
pool.dispatch.checkout(
fairy.dbapi_connection, fairy._connection_record, fairy
)
return fairy
except exc.DisconnectionError as e:
if e.invalidate_pool:
pool.logger.info(
"Disconnection detected on checkout, "
"invalidating all pooled connections prior to "
"current timestamp (reason: %r)",
e,
)
fairy._connection_record.invalidate(e)
pool._invalidate(fairy, e, _checkin=False)
else:
pool.logger.info(
"Disconnection detected on checkout, "
"invalidating individual connection %s (reason: %r)",
fairy.dbapi_connection,
e,
)
fairy._connection_record.invalidate(e)
try:
fairy.dbapi_connection = (
fairy._connection_record.get_connection()
)
except Exception as err:
with util.safe_reraise():
fairy._connection_record._checkin_failed(
err,
_fairy_was_created=True,
)
# prevent _ConnectionFairy from being carried
# in the stack trace. Do this after the
# connection record has been checked in, so that
# if the del triggers a finalize fairy, it won't
# try to checkin a second time.
del fairy
attempts -= 1
pool.logger.info("Reconnection attempts exhausted on checkout")
fairy.invalidate()
raise exc.InvalidRequestError("This connection is closed")
def _checkout_existing(self):
return _ConnectionFairy._checkout(self._pool, fairy=self)
def _checkin(self, reset=True):
_finalize_fairy(
self.dbapi_connection,
self._connection_record,
self._pool,
None,
self._echo,
reset=reset,
fairy=self,
)
self.dbapi_connection = None
self._connection_record = None
_close = _checkin
def _reset(self, pool):
if pool.dispatch.reset:
pool.dispatch.reset(self, self._connection_record)
if pool._reset_on_return is reset_rollback:
if self._echo:
pool.logger.debug(
"Connection %s rollback-on-return", self.dbapi_connection
)
pool._dialect.do_rollback(self)
elif pool._reset_on_return is reset_commit:
if self._echo:
pool.logger.debug(
"Connection %s commit-on-return",
self.dbapi_connection,
)
pool._dialect.do_commit(self)
@property
def _logger(self):
return self._pool.logger
@property
def is_valid(self):
"""Return True if this :class:`._ConnectionFairy` still refers
to an active DBAPI connection."""
return self.dbapi_connection is not None
@util.memoized_property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.ConnectionFairy`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`._ConnectionFairy`. It is shared
with the :attr:`._ConnectionRecord.info` and
:attr:`_engine.Connection.info`
accessors.
The dictionary associated with a particular DBAPI connection is
discarded when the connection itself is discarded.
"""
return self._connection_record.info
@property
def record_info(self):
"""Info dictionary associated with the :class:`._ConnectionRecord
container referred to by this :class:`.ConnectionFairy`.
Unlike the :attr:`._ConnectionFairy.info` dictionary, the lifespan
of this dictionary is persistent across connections that are
disconnected and/or invalidated within the lifespan of a
:class:`._ConnectionRecord`.
.. versionadded:: 1.1
"""
if self._connection_record:
return self._connection_record.record_info
else:
return None
def invalidate(self, e=None, soft=False):
"""Mark this connection as invalidated.
This method can be called directly, and is also called as a result
of the :meth:`_engine.Connection.invalidate` method. When invoked,
the DBAPI connection is immediately closed and discarded from
further use by the pool. The invalidation mechanism proceeds
via the :meth:`._ConnectionRecord.invalidate` internal method.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.dbapi_connection is None:
util.warn("Can't invalidate an already-closed connection.")
return
if self._connection_record:
self._connection_record.invalidate(e=e, soft=soft)
if not soft:
self.dbapi_connection = None
self._checkin()
def cursor(self, *args, **kwargs):
"""Return a new DBAPI cursor for the underlying connection.
This method is a proxy for the ``connection.cursor()`` DBAPI
method.
"""
return self.dbapi_connection.cursor(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.dbapi_connection, key)
def detach(self):
"""Separate this connection from its Pool.
This means that the connection will no longer be returned to the
pool when closed, and will instead be literally closed. The
containing ConnectionRecord is separated from the DB-API connection,
and will create a new connection when next used.
Note that any overall connection limiting constraints imposed by a
Pool implementation may be violated after a detach, as the detached
connection is removed from the pool's knowledge and control.
"""
if self._connection_record is not None:
rec = self._connection_record
rec.fairy_ref = None
rec.dbapi_connection = None
# TODO: should this be _return_conn?
self._pool._do_return_conn(self._connection_record)
self.info = self.info.copy()
self._connection_record = None
if self._pool.dispatch.detach:
self._pool.dispatch.detach(self.dbapi_connection, rec)
def close(self):
self._counter -= 1
if self._counter == 0:
self._checkin()
def _close_no_reset(self):
self._counter -= 1
if self._counter == 0:
self._checkin(reset=False)
|
the-stack_0_5644 | # Copyright (c) 2012-2016 Seafile Ltd.
# encoding: utf-8
from django.core.management.base import BaseCommand
from seaserv import seafile_api
from seahub.wiki.models import GroupWiki, Wiki, DuplicateWikiNameError
class Command(BaseCommand):
help = 'Migrate records in wiki_group_wiki table to wiki_wiki table.'
label = "wiki_migrate_group_wiki"
def handle(self, *args, **options):
print('Start to migrate...')
for r in GroupWiki.objects.all():
repo = seafile_api.get_repo(r.repo_id)
if not repo:
print(('Repo %s not found. Skip.' % r.repo_id))
continue
owner = seafile_api.get_repo_owner(r.repo_id)
if not owner:
print(('Owner of repo %s not found. Skip.' % r.repo_id))
continue
wiki_name = 'Group%s-%s' % (r.group_id, repo.name)
try:
Wiki.objects.add(wiki_name=wiki_name,
username=owner, repo_id=r.repo_id)
print(('Successfully migrated GroupWiki(%s-%s) to Wiki(%s-%s-%s)' % (r.group_id, r.repo_id, owner, wiki_name, r.repo_id)))
except DuplicateWikiNameError:
print('Multiple group wiki records found, group: %s, repo_id: %s. Skip.' % (r.group_id, r.repo_id))
continue
except Exception as e:
print(e)
continue
print('Done.')
|
the-stack_0_5646 | import compas_rrc as rrc
if __name__ == '__main__':
# Create Ros Client
ros = rrc.RosClient()
ros.run()
# Create ABB Client
abb = rrc.AbbClient(ros, '/rob1')
print('Connected.')
# No operation
done = abb.send_and_wait(rrc.Noop())
# Print feedback
print('Feedback = ', done)
# End of Code
print('Finished')
# Close client
ros.close()
ros.terminate()
|
the-stack_0_5650 |
import traceback
import json
from pathlib import Path
import time
try:
print(str(Path().resolve()))
commands_dict = {
"commands": {
"!rng": "You have boosted RNG NAME",
"!test": "Test response"
}
}
with open(str(Path().resolve()) + r'\core\commands.json', 'w+') as file:
json.dump(commands_dict, file, indent=4)
except:
    traceback.print_exc()
time.sleep(10000) |
the-stack_0_5655 | #!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
from buildbot_lib import (
BuildContext, BuildStatus, Command, ParseStandardCommandLine,
RemoveSconsBuildDirectories, RunBuild, SetupLinuxEnvironment,
SetupWindowsEnvironment, SCons, Step )
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.platform
def RunSconsTests(status, context):
# Clean out build directories, unless we have built elsewhere.
if not context['skip_build']:
with Step('clobber scons', status):
RemoveSconsBuildDirectories()
# Run checkdeps script to vet #includes.
with Step('checkdeps', status):
Command(context, cmd=[sys.executable, 'tools/checkdeps/checkdeps.py'])
arch = context['default_scons_platform']
flags_subzero = ['use_sz=1']
flags_build = ['do_not_run_tests=1']
flags_run = []
# This file is run 3 different ways for ARM builds. The qemu-only trybot does
# a normal build-and-run with the emulator just like the x86 bots. The panda
  # build side runs on x86 machines with skip_run, and then packs up the
  # result and triggers an ARM hardware tester that runs with skip_build
if arch != 'arm':
# Unlike their arm counterparts we do not run trusted tests on x86 bots.
# Trusted tests get plenty of coverage by other bots, e.g. nacl-gcc bots.
# We make the assumption here that there are no "exotic tests" which
    # are trusted in nature but are somehow dependent on the untrusted TC.
flags_build.append('skip_trusted_tests=1')
flags_run.append('skip_trusted_tests=1')
if context['skip_run']:
flags_run.append('do_not_run_tests=1')
if arch == 'arm':
# For ARM hardware bots, force_emulator= disables use of QEMU, which
# enables building tests which don't work under QEMU.
flags_build.append('force_emulator=')
flags_run.append('force_emulator=')
if context['skip_build']:
flags_run.extend(['naclsdk_validate=0', 'built_elsewhere=1'])
if not context['skip_build']:
# For ARM builders which will trigger hardware testers, run the hello world
# test with the emulator as a basic sanity check before doing anything else.
if arch == 'arm' and context['skip_run']:
with Step('hello_world ' + arch, status):
SCons(context, parallel=True, args=['run_hello_world_test'])
with Step('build_all ' + arch, status):
SCons(context, parallel=True, args=flags_build)
if arch in ('arm', 'x86-32', 'x86-64'):
with Step('build_all subzero ' + arch, status):
SCons(context, parallel=True, args=flags_build + flags_subzero)
smoke_tests = ['small_tests', 'medium_tests']
# Normal pexe-mode tests
with Step('smoke_tests ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True, args=flags_run + smoke_tests)
# Large tests cannot be run in parallel
with Step('large_tests ' + arch, status, halt_on_fail=False):
SCons(context, parallel=False, args=flags_run + ['large_tests'])
# Run small_tests, medium_tests, and large_tests with Subzero.
# TODO(stichnot): Move this to the sandboxed translator section
# along with the translate_fast flag once pnacl-sz.nexe is ready.
if arch in ('arm', 'x86-32', 'x86-64'):
# Normal pexe-mode tests
with Step('smoke_tests subzero ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True,
args=flags_run + flags_subzero + smoke_tests)
# Large tests cannot be run in parallel
with Step('large_tests subzero ' + arch, status, halt_on_fail=False):
SCons(context, parallel=False,
args=flags_run + flags_subzero + ['large_tests'])
with Step('nonpexe_tests ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True,
args=flags_run + ['pnacl_generate_pexe=0', 'nonpexe_tests'])
irt_mode = context['default_scons_mode'] + ['nacl_irt_test']
# Build all the tests with the IRT
if not context['skip_build']:
with Step('build_all_irt ' + arch, status):
SCons(context, parallel=True, mode=irt_mode, args=flags_build)
smoke_tests_irt = ['small_tests_irt', 'medium_tests_irt']
# Run tests with the IRT.
with Step('smoke_tests_irt ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + smoke_tests_irt)
with Step('large_tests_irt ' + arch, status, halt_on_fail=False):
SCons(context, parallel=False, mode=irt_mode,
args=flags_run + ['large_tests_irt'])
# Run some nacl_clang tests. Eventually we will have bots that just run
# buildbot_standard with nacl_clang and this can be split out.
context['pnacl'] = False
context['nacl_clang'] = True
if not context['skip_build']:
with Step('build_nacl_clang ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True, args=flags_build)
with Step('smoke_tests_nacl_clang ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True,
args=flags_run + ['small_tests', 'medium_tests'])
with Step('large_tests_nacl_clang ' + arch, status, halt_on_fail=False):
SCons(context, parallel=False,
args=flags_run + ['large_tests'])
context['pnacl'] = True
context['nacl_clang'] = False
# Test sandboxed translation
# TODO(dschuff): The standalone sandboxed translator driver does not have
# the batch script wrappers, so it can't run on Windows. Either add them to
# the translator package or make SCons use the pnacl_newlib drivers except
# on the ARM bots where we don't have the pnacl_newlib drivers.
# TODO(sbc): Enable these tests for mips once we build the version of the
# translator nexe
if not context.Windows() and arch != 'mips32':
flags_run_sbtc = ['use_sandboxed_translator=1']
sbtc_tests = ['toolchain_tests_irt']
if arch == 'arm':
# When splitting the build from the run, translate_in_build_step forces
# the translation to run on the run side (it usually runs on the build
# side because that runs with more parallelism)
if context['skip_build'] or context['skip_run']:
flags_run_sbtc.append('translate_in_build_step=0')
else:
# The ARM sandboxed translator is flaky under qemu, so run a very small
# set of tests on the qemu-only trybot.
sbtc_tests = ['run_hello_world_test_irt']
else:
sbtc_tests.append('large_code')
with Step('sandboxed_translator_tests ' + arch, status,
halt_on_fail=False):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + flags_run_sbtc + sbtc_tests)
with Step('sandboxed_translator_fast_tests ' + arch, status,
halt_on_fail=False):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + flags_run_sbtc + ['translate_fast=1'] + sbtc_tests)
# Test Non-SFI Mode.
# The only architectures that the PNaCl toolchain supports Non-SFI
# versions of are currently x86-32 and ARM.
# The x86-64 toolchain bot currently also runs these tests from
# buildbot_pnacl.sh
if context.Linux() and (arch == 'x86-32' or arch == 'arm'):
with Step('nonsfi_tests ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run +
['nonsfi_nacl=1',
'nonsfi_tests',
'nonsfi_tests_irt'])
# Build with pnacl_generate_pexe=0 to allow using pnacl-clang with
# direct-to-native mode. This allows assembly to be used in tests.
with Step('nonsfi_tests_nopnacl_generate_pexe ' + arch,
status, halt_on_fail=False):
extra_args = ['nonsfi_nacl=1',
'pnacl_generate_pexe=0',
'nonsfi_tests',
'nonsfi_tests_irt']
# nonsfi_tests_irt with pnacl_generate_pexe=0 does not pass on x86-32.
# https://code.google.com/p/nativeclient/issues/detail?id=4093
if arch == 'x86-32':
extra_args.remove('nonsfi_tests_irt')
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + extra_args)
# Test nonsfi_loader linked against host's libc.
with Step('nonsfi_tests_host_libc ' + arch, status, halt_on_fail=False):
# Using skip_nonstable_bitcode=1 here disables the tests for
# zero-cost C++ exception handling, which don't pass for Non-SFI
# mode yet because we don't build libgcc_eh for Non-SFI mode.
SCons(context, parallel=True, mode=irt_mode,
args=flags_run +
['nonsfi_nacl=1', 'use_newlib_nonsfi_loader=0',
'nonsfi_tests', 'nonsfi_tests_irt',
'toolchain_tests_irt', 'skip_nonstable_bitcode=1'])
# Test unsandboxed mode.
if (context.Linux() or context.Mac()) and arch == 'x86-32':
if context.Linux():
tests = ['run_' + test + '_test_irt' for test in
['hello_world', 'irt_futex', 'thread', 'float',
'malloc_realloc_calloc_free', 'dup', 'cond_timedwait',
'getpid']]
else:
# TODO(mseaborn): Use the same test list as on Linux when the threading
# tests pass for Mac.
tests = ['run_hello_world_test_irt']
with Step('unsandboxed_tests ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + ['pnacl_unsandboxed=1'] + tests)
def Main():
context = BuildContext()
status = BuildStatus(context)
ParseStandardCommandLine(context)
if context.Linux():
SetupLinuxEnvironment(context)
elif context.Windows():
SetupWindowsEnvironment(context)
elif context.Mac():
# No setup to do for Mac.
pass
else:
raise Exception('Unsupported platform')
# Panda bots only have 2 cores.
if pynacl.platform.GetArch() == 'arm':
context['max_jobs'] = 2
RunBuild(RunSconsTests, status)
if __name__ == '__main__':
Main()
|
the-stack_0_5657 | class SelectionSequentialTransform(object):
def __init__(self, tokenizer, max_len):
self.tokenizer = tokenizer
self.max_len = max_len
def __call__(self, texts):
        input_ids_list, input_masks_list = [], []
for text in texts:
tokenized_dict = self.tokenizer.encode_plus(text, max_length=self.max_len, pad_to_max_length=True)
input_ids, input_masks = tokenized_dict['input_ids'], tokenized_dict['attention_mask']
assert len(input_ids) == self.max_len
assert len(input_masks) == self.max_len
input_ids_list.append(input_ids)
input_masks_list.append(input_masks)
return input_ids_list, input_masks_list
def __str__(self) -> str:
return 'maxlen{}'.format(self.max_len)
class SelectionJoinTransform(object):
def __init__(self, tokenizer, max_len):
self.tokenizer = tokenizer
self.max_len = max_len
self.cls_id = self.tokenizer.convert_tokens_to_ids('[CLS]')
self.sep_id = self.tokenizer.convert_tokens_to_ids('[SEP]')
self.tokenizer.add_tokens(['\n'], special_tokens=True)
self.pad_id = 0
def __call__(self, texts):
# another option is to use [SEP], but here we follow the discussion at:
# https://github.com/facebookresearch/ParlAI/issues/2306#issuecomment-599180186
context = '\n'.join(texts)
tokenized_dict = self.tokenizer.encode_plus(context)
input_ids, input_masks = tokenized_dict['input_ids'], tokenized_dict['attention_mask']
input_ids = input_ids[-self.max_len:]
input_ids[0] = self.cls_id
input_masks = input_masks[-self.max_len:]
input_ids += [self.pad_id] * (self.max_len - len(input_ids))
input_masks += [0] * (self.max_len - len(input_masks))
assert len(input_ids) == self.max_len
assert len(input_masks) == self.max_len
return input_ids, input_masks
def __str__(self) -> str:
return '[join_str]maxlen{}'.format(self.max_len)
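# A minimal usage sketch (not part of the original module); the tokenizer
# checkpoint, sequence lengths and example texts below are illustrative
# assumptions, not values mandated by this code.
if __name__ == '__main__':
    from transformers import BertTokenizer
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    # Context turns are joined into a single, left-truncated sequence ...
    context_transform = SelectionJoinTransform(tokenizer, max_len=128)
    # ... while each candidate response is encoded separately and padded.
    response_transform = SelectionSequentialTransform(tokenizer, max_len=64)
    context_ids, context_masks = context_transform(['hello there', 'how are you doing?'])
    response_ids, response_masks = response_transform(['fine, thanks', 'not too bad'])
    print(len(context_ids), len(response_ids))  # 128 2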
|
the-stack_0_5658 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import sys, os
sys.path.append("../..")
# Import Matplotlib:
import matplotlib
matplotlib.use('Agg')
import matplotlib.cm as cm
# For Python2 backward comability:
from builtins import range
# import facerec modules
from facerec.feature import Fisherfaces, SpatialHistogram, Identity
from facerec.distance import EuclideanDistance, ChiSquareDistance
from facerec.classifier import NearestNeighbor, SVM
from facerec.model import PredictableModel
from facerec.validation import KFoldCrossValidation
from facerec.visual import subplot
from facerec.util import minmax_normalize
from facerec.serialization import save_model, load_model
from facerec.svm import grid_search
import numpy as np
# try to import the PIL Image module
try:
from PIL import Image
except ImportError:
import Image
import logging
import matplotlib.pyplot as plt
from facerec.lbp import LPQ, ExtendedLBP
def read_images(path, sz=None):
"""Reads the images in a given folder, resizes images on the fly if size is given.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
        sz: A tuple with the target size; if given, images are resized to it.
Returns:
A list [X,y]
X: The images, which is a Python list of numpy arrays.
y: The corresponding labels (the unique number of the subject, person) in a Python list.
"""
c = 0
X,y = [], []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
subject_path = os.path.join(dirname, subdirname)
for filename in os.listdir(subject_path):
try:
im = Image.open(os.path.join(subject_path, filename))
im = im.convert("L")
# resize to given size (if given)
if (sz is not None):
im = im.resize(sz, Image.ANTIALIAS)
X.append(np.asarray(im, dtype=np.uint8))
y.append(c)
except IOError as e:
print("I/O error: {0}".format(e))
raise e
except:
print("Unexpected error: {0}".format(sys.exc_info()[0]))
raise
c = c+1
return [X,y]
if __name__ == "__main__":
# This is where we write the images, if an output_dir is given
# in command line:
out_dir = None
# You'll need at least a path to your image data, please see
# the tutorial coming with this source code on how to prepare
# your image data:
if len(sys.argv) < 2:
print("USAGE: facerec_demo.py </path/to/images>")
sys.exit()
# Now read in the image data. This must be a valid path!
[X,y] = read_images(sys.argv[1])
# Then set up a handler for logging:
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Add handler to facerec modules, so we see what's going on inside:
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# Define the Fisherfaces as Feature Extraction method:
feature = Fisherfaces()
    # Define an SVM classifier:
classifier = SVM()
# Define the model as the combination
model = PredictableModel(feature=feature, classifier=classifier)
# Compute a model:
model.compute(X, y)
# Save the Model using joblib:
save_model('model.pkl', model)
# Perform a Grid Search for the Set of Parameters:
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
# Find a good set of parameters:
grid_search(model, X, y, tuned_parameters)
# Perform a 10-fold cross validation
cv = KFoldCrossValidation(model, k=10)
cv.validate(X, y)
# And print the result:
cv.print_results()
|
the-stack_0_5659 | # -*- coding: utf-8 -*-
# @Time : 09/07/2021 02:56
# @Author : Rodolfo Londero
# @Email : [email protected]
# @File : test_text.py
# @Software : VSCode
import pytest
class TestText13Bus:
@pytest.fixture(scope='function')
def dss(self, solve_snap_13bus):
dss = solve_snap_13bus
dss.solution_solve()
return dss
# ===================================================================
# String methods
# ===================================================================
def test_text(self, dss):
expected = "2000"
actual = dss.text('? Line.650632.Length')
assert actual == expected
|
the-stack_0_5660 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import cv2
import numpy as np
import time
import math
from air_drone_vertical.edge_detection_canny_multi_rects import *
if __name__ == '__main__':
cnt = 0
while cnt < 600:
file_name = 'C:\\Users\\18056\\PycharmProjects\\untitled\\air_drone_vertical\\pic1\\' + str(cnt) + '.jpg'
print(file_name)
img = cv2.imread(file_name)
if (img is None):
cnt += 1
continue
#cross_detect time compute
img1 = np.copy(img)
mark = process_pictue2(img)
        print("Result code: " + str(mark[0]))
if (mark[0] == 12):
            print("Center point not found")
cnt += 1
continue
else:
            print("Successfully found a reliable center point")
image = mark[1]
percent = mark[4]
print("percent: " + str(mark[4]))
print("centerX: " + str(mark[2]))
print("centerY: " + str(mark[3]))
#compute time
cv2.imshow('origin',img1)
cv2.imshow('process',image)
if cv2.waitKey(0) == 27:
break
cv2.destroyWindow('origin')
cv2.destroyWindow('process')
cnt += 1
cv2.destroyAllWindows()
|
the-stack_0_5662 | from typing import Optional, Tuple
from flask import url_for
from app.questionnaire.location import Location
from app.questionnaire.path_finder import PathFinder
from app.questionnaire.rules import evaluate_when_rules
class Router:
def __init__(self, schema, answer_store, list_store, progress_store, metadata):
self._schema = schema
self._answer_store = answer_store
self._list_store = list_store
self._progress_store = progress_store
self._metadata = metadata
self._path_finder = PathFinder(
self._schema,
self._answer_store,
self._list_store,
self._progress_store,
self._metadata,
)
@property
def enabled_section_ids(self):
return [
section["id"]
for section in self._schema.get_sections()
if self._is_section_enabled(section=section)
]
@property
def is_questionnaire_complete(self) -> bool:
first_incomplete_section_key = self._get_first_incomplete_section_key()
return not first_incomplete_section_key
def get_first_incomplete_location_in_questionnaire_url(self) -> str:
first_incomplete_section_key = self._get_first_incomplete_section_key()
if first_incomplete_section_key:
section_id, list_item_id = first_incomplete_section_key
section_routing_path = self._path_finder.routing_path(
section_id=section_id, list_item_id=list_item_id
)
return self.get_section_resume_url(section_routing_path)
return self.get_next_location_url_for_end_of_section()
def get_last_location_in_questionnaire_url(self) -> str:
routing_path = self.routing_path(*self._get_last_complete_section_key())
return self.get_last_location_in_section(routing_path).url()
def is_list_item_in_list_store(self, list_item_id, list_name):
return list_item_id in self._list_store[list_name]
def can_access_location(self, location: Location, routing_path):
"""
Checks whether the location is valid and accessible.
:return: boolean
"""
if location.section_id not in self.enabled_section_ids:
return False
if location.list_item_id and not self.is_list_item_in_list_store(
location.list_item_id, location.list_name
):
return False
return location.block_id in self._get_allowable_path(routing_path)
def can_access_hub(self):
return self._schema.is_flow_hub and all(
self._progress_store.is_section_complete(section_id)
for section_id in self._schema.get_section_ids_required_for_hub()
if section_id in self.enabled_section_ids
)
def can_display_section_summary(self, section_id, list_item_id=None):
return self._schema.get_summary_for_section(
section_id
) and self._progress_store.is_section_complete(section_id, list_item_id)
def routing_path(self, section_id, list_item_id=None):
return self._path_finder.routing_path(section_id, list_item_id)
def get_next_location_url(self, location, routing_path, return_to=None):
"""
Get the next location in the section. If the section is complete, determine where to go next,
whether it be a summary, the hub or the next incomplete location.
"""
is_last_block_in_section = routing_path[-1] == location.block_id
if self._progress_store.is_section_complete(
location.section_id, location.list_item_id
):
if return_to == "section-summary":
return self._get_section_url(location)
if return_to == "final-summary" and self.is_questionnaire_complete:
return url_for("questionnaire.submit_questionnaire")
if is_last_block_in_section:
return self._get_next_location_url_for_last_block_in_section(location)
# Due to backwards routing, you can be on the last block without the section being complete
if is_last_block_in_section:
return self._get_first_incomplete_location_in_section(routing_path).url()
return self.get_next_block_url(location, routing_path)
def _get_next_location_url_for_last_block_in_section(self, location):
if self._schema.show_summary_on_completion_for_section(location.section_id):
return self._get_section_url(location)
return self.get_next_location_url_for_end_of_section()
def get_previous_location_url(self, location, routing_path):
"""
Returns the previous 'location' to visit given a set of user answers
"""
block_id_index = routing_path.index(location.block_id)
if block_id_index != 0:
previous_block_id = routing_path[block_id_index - 1]
previous_block = self._schema.get_block(previous_block_id)
if previous_block["type"] == "RelationshipCollector":
return url_for(
"questionnaire.relationships",
last=True,
)
return url_for(
"questionnaire.block",
block_id=previous_block_id,
list_name=routing_path.list_name,
list_item_id=routing_path.list_item_id,
)
if self.can_access_hub():
return url_for("questionnaire.get_questionnaire")
return None
def get_next_location_url_for_end_of_section(self) -> str:
if self._schema.is_flow_hub and self.can_access_hub():
return url_for("questionnaire.get_questionnaire")
if self._schema.is_flow_linear and self.is_questionnaire_complete:
return url_for("questionnaire.submit_questionnaire")
return self.get_first_incomplete_location_in_questionnaire_url()
def get_section_resume_url(self, routing_path):
section_key = (routing_path.section_id, routing_path.list_item_id)
if section_key in self._progress_store:
location = self._get_first_incomplete_location_in_section(routing_path)
if location:
return location.url(resume=True)
return self.get_first_location_in_section(routing_path).url()
def is_path_complete(self, routing_path):
return not bool(self._get_first_incomplete_location_in_section(routing_path))
@staticmethod
def get_first_location_in_section(routing_path) -> Location:
return Location(
block_id=routing_path[0],
section_id=routing_path.section_id,
list_name=routing_path.list_name,
list_item_id=routing_path.list_item_id,
)
@staticmethod
def get_last_location_in_section(routing_path) -> Location:
return Location(
block_id=routing_path[-1],
section_id=routing_path.section_id,
list_name=routing_path.list_name,
list_item_id=routing_path.list_item_id,
)
def full_routing_path(self):
full_routing_path = []
for section_id in self.enabled_section_ids:
repeating_list = self._schema.get_repeating_list_for_section(section_id)
if repeating_list:
for list_item_id in self._list_store[repeating_list]:
full_routing_path.append(
self._path_finder.routing_path(
section_id=section_id, list_item_id=list_item_id
)
)
else:
full_routing_path.append(
self._path_finder.routing_path(section_id=section_id)
)
return full_routing_path
def _is_block_complete(self, block_id, section_id, list_item_id):
return block_id in self._progress_store.get_completed_block_ids(
section_id, list_item_id
)
def _get_first_incomplete_location_in_section(self, routing_path):
for block_id in routing_path:
if not self._is_block_complete(
block_id, routing_path.section_id, routing_path.list_item_id
):
return Location(
block_id=block_id,
section_id=routing_path.section_id,
list_item_id=routing_path.list_item_id,
list_name=routing_path.list_name,
)
def _get_allowable_path(self, routing_path):
"""
The allowable path is the completed path plus the next location
"""
allowable_path = []
if routing_path:
for block_id in routing_path:
allowable_path.append(block_id)
if not self._is_block_complete(
block_id, routing_path.section_id, routing_path.list_item_id
):
return allowable_path
return allowable_path
def get_enabled_section_keys(self):
for section_id in self.enabled_section_ids:
repeating_list = self._schema.get_repeating_list_for_section(section_id)
if repeating_list:
for list_item_id in self._list_store[repeating_list]:
section_key = (section_id, list_item_id)
yield section_key
else:
section_key = (section_id, None)
yield section_key
def _get_first_incomplete_section_key(self):
for section_id, list_item_id in self.get_enabled_section_keys():
if not self._progress_store.is_section_complete(section_id, list_item_id):
return section_id, list_item_id
def _get_last_complete_section_key(self) -> Tuple[str, Optional[str]]:
for section_id, list_item_id in list(self.get_enabled_section_keys())[::-1]:
if self._progress_store.is_section_complete(section_id, list_item_id):
return section_id, list_item_id
def _is_section_enabled(self, section):
if "enabled" not in section:
return True
for condition in section["enabled"]:
if evaluate_when_rules(
condition["when"],
self._schema,
self._metadata,
self._answer_store,
self._list_store,
):
return True
return False
@staticmethod
def get_next_block_url(location, routing_path):
next_block_id = routing_path[routing_path.index(location.block_id) + 1]
return url_for(
"questionnaire.block",
block_id=next_block_id,
list_name=routing_path.list_name,
list_item_id=routing_path.list_item_id,
)
@staticmethod
def _get_section_url(location):
return url_for(
"questionnaire.get_section",
section_id=location.section_id,
list_item_id=location.list_item_id,
)
|
the-stack_0_5666 | from __future__ import division
import keras
import six
from keras.models import Model
from keras.layers import (
Input,
Activation,
Dense,
Flatten
)
from keras.layers.convolutional import (
Conv2D,
MaxPooling2D,
AveragePooling2D
)
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
def _bn_relu(input):
"""Helper to build a BN -> relu block
"""
norm = BatchNormalization(axis=CHANNEL_AXIS)(input)
return Activation("relu")(norm)
def _conv_bn_relu(**conv_params):
"""Helper to build a conv -> BN -> relu block
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
conv = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(input)
return _bn_relu(conv)
return f
def _bn_relu_conv(**conv_params):
"""Helper to build a BN -> relu -> conv block.
This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
activation = _bn_relu(input)
return Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(activation)
return f
def _shortcut(input, residual):
"""Adds a shortcut between input and residual block and merges them with "sum"
"""
# Expand channels of shortcut to match residual.
# Stride appropriately to match residual (width, height)
# Should be int if network architecture is correctly configured.
input_shape = K.int_shape(input)
residual_shape = K.int_shape(residual)
stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]
shortcut = input
# 1 X 1 conv if shape is different. Else identity.
if stride_width > 1 or stride_height > 1 or not equal_channels:
shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
kernel_size=(1, 1),
strides=(stride_width, stride_height),
padding="valid",
kernel_initializer="he_normal",
kernel_regularizer=l2(0.0001))(input)
return add([shortcut, residual])
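# Illustrative example (not from the original source): with the TensorFlow
# dim ordering, an input of shape (batch, 32, 32, 64) and a residual of shape
# (batch, 16, 16, 128) give stride_width == stride_height == 2 and unequal
# channel counts, so the shortcut above becomes a strided 1x1 convolution
# before the element-wise add; when the two shapes already match, the input
# is passed through unchanged.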
def _residual_block(block_function, filters, repetitions, is_first_layer=False):
"""Builds a residual block with repeating bottleneck blocks.
"""
def f(input):
for i in range(repetitions):
init_strides = (1, 1)
if i == 0 and not is_first_layer:
init_strides = (2, 2)
input = block_function(filters=filters, init_strides=init_strides,
is_first_block_of_first_layer=(is_first_layer and i == 0))(input)
return input
return f
def basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
"""Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
"""
def f(input):
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv1 = Conv2D(filters=filters, kernel_size=(3, 3),
strides=init_strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(input)
else:
conv1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),
strides=init_strides)(input)
residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)
return _shortcut(input, residual)
return f
def bottleneck(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
"""Bottleneck architecture for > 34 layer resnet.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
Returns:
A final conv layer of filters * 4
"""
def f(input):
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv_1_1 = Conv2D(filters=filters, kernel_size=(1, 1),
strides=init_strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(5e-4))(input)
else:
conv_1_1 = _bn_relu_conv(filters=filters, kernel_size=(1, 1),
strides=init_strides)(input)
conv_3_3 = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv_1_1)
residual = _bn_relu_conv(filters=filters * 4, kernel_size=(1, 1))(conv_3_3)
return _shortcut(input, residual)
return f
def _handle_dim_ordering():
global ROW_AXIS
global COL_AXIS
global CHANNEL_AXIS
if K.image_dim_ordering() == 'tf':
ROW_AXIS = 1
COL_AXIS = 2
CHANNEL_AXIS = 3
else:
CHANNEL_AXIS = 1
ROW_AXIS = 2
COL_AXIS = 3
def _get_block(identifier):
if isinstance(identifier, six.string_types):
res = globals().get(identifier)
if not res:
raise ValueError('Invalid {}'.format(identifier))
return res
return identifier
class ResnetBuilder(object):
@staticmethod
def build(input_shape, num_outputs, block_fn, repetitions):
"""Builds a custom ResNet like architecture.
Args:
input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
num_outputs: The number of outputs at final softmax layer
block_fn: The block function to use. This is either `basic_block` or `bottleneck`.
The original paper used basic_block for layers < 50
repetitions: Number of repetitions of various block units.
At each block unit, the number of filters are doubled and the input size is halved
Returns:
The keras `Model`.
"""
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")
# Permute dimension order if necessary
if K.image_dim_ordering() == 'tf':
input_shape = (input_shape[1], input_shape[2], input_shape[0])
# Load function from str if needed.
block_fn = _get_block(block_fn)
input = Input(shape=input_shape)
conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input)
pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(conv1)
block = pool1
filters = 64
for i, r in enumerate(repetitions):
block = _residual_block(block_fn, filters=filters, repetitions=r, is_first_layer=(i == 0))(block)
filters *= 2
# Last activation
block = _bn_relu(block)
# Classifier block
block_shape = K.int_shape(block)
pool2 = AveragePooling2D(pool_size=(block_shape[ROW_AXIS], block_shape[COL_AXIS]),
strides=(1, 1))(block)
flatten1 = Flatten()(pool2)
dense = Dense(units=num_outputs,
kernel_initializer=keras.initializers.RandomNormal(stddev=0.001),
bias_initializer=keras.initializers.Constant(0.),
activation="softmax")(flatten1)
model = Model(inputs=input, outputs=dense)
return model
@staticmethod
def build_resnet_18(input_shape, num_outputs):
return ResnetBuilder.build(input_shape, num_outputs, basic_block, [2, 2, 2, 2])
@staticmethod
def build_resnet_34(input_shape, num_outputs):
return ResnetBuilder.build(input_shape, num_outputs, basic_block, [3, 4, 6, 3])
@staticmethod
def build_resnet_50(input_shape, num_outputs):
return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 6, 3])
@staticmethod
def build_resnet_101(input_shape, num_outputs):
return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 23, 3])
@staticmethod
def build_resnet_152(input_shape, num_outputs):
return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 8, 36, 3])
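# A minimal usage sketch (not part of the original module): the input shape,
# class count and training settings below are illustrative assumptions.
if __name__ == '__main__':
    # Input shape is given channels-first as (nb_channels, nb_rows, nb_cols);
    # ResnetBuilder.build() permutes it when the TensorFlow backend is used.
    model = ResnetBuilder.build_resnet_18((3, 32, 32), 10)
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()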
|
the-stack_0_5667 | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 OpenAI GPT-2 model. """
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import math
import os
import sys
from io import open
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import (TFPreTrainedModel, TFConv1D, TFSharedEmbeddings,
TFSequenceSummary, shape_list, get_initializer)
from .configuration_gpt2 import GPT2Config
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-tf_model.h5",
"gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-tf_model.h5",
"gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-tf_model.h5",
"distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-tf_model.h5",}
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
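# For reference: the exact GELU is x * Phi(x), where Phi is the standard normal
# CDF; the expression above is the tanh approximation from the paper, so for
# example gelu(0.0) == 0.0 and gelu(x) approaches x for large positive x.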
class TFAttention(tf.keras.layers.Layer):
def __init__(self, nx, n_ctx, config, scale=False, **kwargs):
super(TFAttention, self).__init__(**kwargs)
self.output_attentions = config.output_attentions
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.n_ctx = n_ctx
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name='c_attn')
self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_proj')
self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
pass
@staticmethod
def causal_attention_mask(nd, ns, dtype):
"""1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:,None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
def _attn(self, inputs, training=False):
q, k, v, attention_mask, head_mask = inputs
# q, k, v have shape [batch, heads, sequence, features]
print("MatMul")
w = tf.matmul(q, k, transpose_b=True)
if self.scale:
dk = tf.cast(tf.shape(k)[-1], tf.float32) # scale attention_scores
w = w / tf.math.sqrt(dk)
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
b = self.causal_attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = tf.nn.softmax(w, axis=-1)
w = self.attn_dropout(w, training=training)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [tf.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
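    # Shape walk-through (illustrative): with batch B, H heads, query length nd,
    # key length ns and per-head size D, q is (B, H, nd, D) and k, v are
    # (B, H, ns, D); w = q @ k^T / sqrt(D) is (B, H, nd, ns), the causal mask and
    # the optional attention_mask are applied, softmax is taken over the last
    # axis, and the returned context w @ v is (B, H, nd, D).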
def merge_heads(self, x):
x = tf.transpose(x, [0, 2, 1, 3])
x_shape = shape_list(x)
new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
return tf.reshape(x, new_x_shape)
def split_heads(self, x):
x_shape = shape_list(x)
new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
x = tf.reshape(x, new_x_shape)
return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
def call(self, inputs, training=False):
x, layer_past, attention_mask, head_mask = inputs
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query)
key = self.split_heads(key)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = tf.unstack(layer_past, axis=1)
key = tf.concat([past_key, key], axis=-2)
value = tf.concat([past_value, value], axis=-2)
present = tf.stack([key, value], axis=1)
attn_outputs = self._attn([query, key, value, attention_mask, head_mask], training=training)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a, training=training)
outputs = [a, present] + attn_outputs[1:]
return outputs # a, present, (attentions)
class TFMLP(tf.keras.layers.Layer):
def __init__(self, n_state, config, **kwargs):
super(TFMLP, self).__init__(**kwargs)
nx = config.n_embd
self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_fc')
self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name='c_proj')
self.act = gelu
self.dropout = tf.keras.layers.Dropout(config.resid_pdrop)
def call(self, x, training=False):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
h2 = self.dropout(h2, training=training)
return h2
class TFBlock(tf.keras.layers.Layer):
def __init__(self, n_ctx, config, scale=False, **kwargs):
super(TFBlock, self).__init__(**kwargs)
nx = config.n_embd
self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_1')
self.attn = TFAttention(nx, n_ctx, config, scale, name='attn')
self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_2')
self.mlp = TFMLP(4 * nx, config, name='mlp')
def call(self, inputs, training=False):
x, layer_past, attention_mask, head_mask = inputs
a = self.ln_1(x)
output_attn = self.attn([a, layer_past, attention_mask, head_mask], training=training)
a = output_attn[0] # output_attn: a, present, (attentions)
x = x + a
m = self.ln_2(x)
m = self.mlp(m, training=training)
x = x + m
outputs = [x] + output_attn[1:]
return outputs # x, present, (attentions)
class TFGPT2MainLayer(tf.keras.layers.Layer):
def __init__(self, config, *inputs, **kwargs):
super(TFGPT2MainLayer, self).__init__(config, *inputs, **kwargs)
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.num_hidden_layers = config.n_layer
self.vocab_size = config.vocab_size
self.n_embd = config.n_embd
self.wte = TFSharedEmbeddings(config.vocab_size,
config.hidden_size,
initializer_range=config.initializer_range,
name='wte')
self.wpe = tf.keras.layers.Embedding(config.n_positions,
config.n_embd,
embeddings_initializer=get_initializer(config.initializer_range),
name='wpe')
self.drop = tf.keras.layers.Dropout(config.embd_pdrop)
self.h = [TFBlock(config.n_ctx,
config,
scale=True,
name='h_._{}'.format(i)) for i in range(config.n_layer)]
self.ln_f = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_f')
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
raise NotImplementedError
def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, training=False):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
past = inputs[1] if len(inputs) > 1 else past
attention_mask = inputs[2] if len(inputs) > 2 else attention_mask
token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids
position_ids = inputs[4] if len(inputs) > 4 else position_ids
head_mask = inputs[5] if len(inputs) > 5 else head_mask
assert len(inputs) <= 6, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get('input_ids')
past = inputs.get('past', past)
attention_mask = inputs.get('attention_mask', attention_mask)
token_type_ids = inputs.get('token_type_ids', token_type_ids)
position_ids = inputs.get('position_ids', position_ids)
head_mask = inputs.get('head_mask', head_mask)
assert len(inputs) <= 6, "Too many inputs."
else:
input_ids = inputs
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = shape_list(past[0][0])[-2]
if position_ids is None:
position_ids = tf.range(past_length, shape_list(input_ids)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :]
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
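            # Worked example of the step below (illustrative values only, not taken
            # from this file): a 2D padding mask [[1., 1., 0.]] of shape [1, 3] has,
            # at this point, already been reshaped to [1, 1, 1, 3]; the cast plus
            # (1.0 - mask) * -10000.0 then yields [[[[0., 0., -10000.]]]].
            # Added to the raw attention scores, this leaves attended positions
            # unchanged and drives masked positions to ~zero weight after softmax.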
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
else:
attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.num_hidden_layers
# head_mask = tf.constant([0] * self.num_hidden_layers)
input_shape = shape_list(input_ids)
input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
inputs_embeds = self.wte(input_ids, mode='embedding')
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.wte(token_type_ids, mode='embedding')
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, layer_past, attention_mask, head_mask[i]], training=training)
hidden_states, present = outputs[:2]
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states, presents)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
return outputs # last hidden state, presents, (all hidden_states), (attentions)
class TFGPT2PreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = GPT2Config
pretrained_model_archive_map = TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
GPT2_START_DOCSTRING = r""" OpenAI GPT-2 model was proposed in
`Language Models are Unsupervised Multitask Learners`_
by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
corpus of ~40 GB of text data.
    This model is a `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and
    refer to the TF 2.0 documentation for all matters related to general usage and behavior.
.. _`Language Models are Unsupervised Multitask Learners`:
https://openai.com/blog/better-language-models/
.. _`tf.keras.Model`:
https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model
Note on the model inputs:
        TF 2.0 models accept two formats as inputs:
            - having all inputs as keyword arguments (like PyTorch models), or
            - having all inputs as a list, tuple or dict in the first positional arguments.
        This second option is useful when using the `tf.keras.Model.fit()` method, which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`.
        If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:
        - a single Tensor with input_ids only and nothing else: `model(input_ids)`
        - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
            `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
        - a dictionary with one or several input Tensors associated to the input names given in the docstring:
            `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
GPT2_INPUTS_DOCSTRING = r""" Inputs:
**input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
            Indices can be obtained using :class:`transformers.GPT2Tokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**past**:
list of ``Numpy array`` or ``tf.Tensor`` (one for each layer):
that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding.
**attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **token_type_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
        **position_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare GPT2 Model transformer outputing raw hidden-states without any specific head on top.",
GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class TFGPT2Model(TFGPT2PreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the last layer of the model.
**past**:
list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2Model
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = TFGPT2Model.from_pretrained('gpt2')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config, *inputs, **kwargs):
super(TFGPT2Model, self).__init__(config, *inputs, **kwargs)
self.transformer = TFGPT2MainLayer(config, name='transformer')
def call(self, inputs, **kwargs):
outputs = self.transformer(inputs, **kwargs)
return outputs
@add_start_docstrings("""The GPT2 Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class TFGPT2LMHeadModel(TFGPT2PreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **prediction_scores**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**past**:
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = TFGPT2LMHeadModel.from_pretrained('gpt2')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
logits = outputs[0]
"""
def __init__(self, config, *inputs, **kwargs):
super(TFGPT2LMHeadModel, self).__init__(config, *inputs, **kwargs)
self.transformer = TFGPT2MainLayer(config, name='transformer')
def call(self, inputs, **kwargs):
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.wte(hidden_states, mode="linear")
outputs = (lm_logits,) + transformer_outputs[1:]
return outputs # lm_logits, presents, (all hidden_states), (attentions)
@add_start_docstrings("""The GPT2 Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the hidden state at a specified classification token index in the input sequence.
""", GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel):
r"""
        **mc_token_ids**: (`optional`, defaults to the index of the last token of the input) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, num_choices)``:
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1[``.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **lm_prediction_scores**: ``tf.Tensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mc_prediction_scores**: ``tf.Tensor`` of shape ``(batch_size, num_choices)``
            Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
**past**:
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2DoubleHeadsModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = TFGPT2DoubleHeadsModel.from_pretrained('gpt2')
# Add a [CLS] to the vocabulary (we should train it also!)
# This option is currently not implemented in TF 2.0
raise NotImplementedError
tokenizer.add_special_tokens({'cls_token': '[CLS]'})
model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The newly added token is the last token of the vocabulary
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
encoded_choices = [tokenizer.encode(s) for s in choices]
cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
input_ids = tf.constant(encoded_choices)[None, :] # Batch size: 1, number of choices: 2
mc_token_ids = tf.constant([cls_token_location]) # Batch size: 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
def __init__(self, config, *inputs, **kwargs):
super(TFGPT2DoubleHeadsModel, self).__init__(config, *inputs, **kwargs)
self.transformer = TFGPT2MainLayer(config, name='transformer')
self.multiple_choice_head = TFSequenceSummary(config, initializer_range=config.initializer_range, name='multiple_choice_head')
def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, mc_token_ids=None, training=False):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
past = inputs[1] if len(inputs) > 1 else past
attention_mask = inputs[2] if len(inputs) > 2 else attention_mask
token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids
position_ids = inputs[4] if len(inputs) > 4 else position_ids
head_mask = inputs[5] if len(inputs) > 5 else head_mask
mc_token_ids = inputs[6] if len(inputs) > 6 else mc_token_ids
assert len(inputs) <= 7, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get('input_ids')
past = inputs.get('past', past)
attention_mask = inputs.get('attention_mask', attention_mask)
token_type_ids = inputs.get('token_type_ids', token_type_ids)
position_ids = inputs.get('position_ids', position_ids)
head_mask = inputs.get('head_mask', head_mask)
mc_token_ids = inputs.get('mc_token_ids', mc_token_ids)
assert len(inputs) <= 7, "Too many inputs."
else:
input_ids = inputs
input_shapes = shape_list(input_ids)
seq_length = input_shapes[-1]
flat_input_ids = tf.reshape(input_ids, (-1, seq_length))
flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
flat_inputs = [flat_input_ids, past, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask]
transformer_outputs = self.transformer(flat_inputs, training=training)
hidden_states = transformer_outputs[0]
hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
lm_logits = self.transformer.wte(hidden_states, mode="linear")
mc_logits = self.multiple_choice_head([hidden_states, mc_token_ids], training=training)
mc_logits = tf.squeeze(mc_logits, axis=-1)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
return outputs # lm logits, mc logits, presents, (all hidden_states), (attentions)
|
the-stack_0_5669 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
import functools
import paddle
import paddle.fluid as fluid
import models
import reader
from utility import add_arguments, print_arguments, check_cuda
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('model', str, "MobileNetV3_large_x1_25", "Set the network to use.")
add_arg('embedding_size', int, 128, "Embedding size.")
add_arg('batch_size', int, 1, "Minibatch size.")
add_arg('image_shape', str, "3,128,128", "Input image size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
# yapf: enable
model_list = [m for m in dir(models) if "__" not in m]
def infer(args):
# parameters from arguments
model_name = args.model
pretrained_model = args.pretrained_model
image_shape = [int(m) for m in args.image_shape.split(",")]
    assert model_name in model_list, "{} is not in the model list: {}".format(args.model,
model_list)
image = fluid.data(name='image', shape=[None] + image_shape, dtype='float32')
infer_loader = fluid.io.DataLoader.from_generator(
feed_list=[image],
capacity=64,
use_double_buffer=True,
iterable=True)
# model definition
model = models.__dict__[model_name]()
out = model.net(input=image, embedding_size=args.embedding_size)
test_program = fluid.default_main_program().clone(for_test=True)
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
if pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(pretrained_model, var.name))
fluid.load(model_path=pretrained_model, program=test_program, executor=exe)
infer_loader.set_sample_generator(
reader.test(args),
batch_size=args.batch_size,
drop_last=False,
places=place)
fetch_list = [out.name]
for batch_id, data in enumerate(infer_loader()):
result = exe.run(test_program, fetch_list=fetch_list, feed=data)
result = result[0][0].reshape(-1)
print("Test-{0}-feature: {1}".format(batch_id, result[:5]))
sys.stdout.flush()
def main():
paddle.enable_static()
args = parser.parse_args()
print_arguments(args)
check_cuda(args.use_gpu)
infer(args)
if __name__ == '__main__':
main()
|
the-stack_0_5670 | """
Instruction for the candidate.
1) Given a string compres the repeating letters following each letter
with number of repetition in the output string
2) Example:
'a' -> 'a1'
'aaa' -> 'a3'
'aabb' -> 'a2b2'
'' -> ''
"""
def rle(test_string):
result = ''
if not test_string:
return result
current = test_string[0]
count = 1
for c in test_string[1:]:
if c == current:
count += 1
else:
result += current
result += str(count)
current = c
count = 1
result += current
result += str(count)
return result
def eq(exp, res):
assert exp == res, f'expected: {exp} - result: {res}'
def main():
input = ['', 'a', 'aaabbc', 'aaabbbccc', 'abcdefg']
expect= ['', 'a1', 'a3b2c1', 'a3b3c3', 'a1b1c1d1e1f1g1']
for i, o in zip(input, expect):
eq(o, rle(i))
print('success')
if __name__ == '__main__':
main() |
the-stack_0_5672 | import gym
import os
from floatenv import FloatEnv
def get_user_action(env):
env.render(show_position_numbers=True)
print("What action would you like to take? Enter a location and an increment value:")
str_action = input().strip(" ")
locations = str_action.split(" ")
if len(locations) != 2:
return None
return (int(locations[0]), float(locations[1]))
if __name__ == '__main__':
path_to_config = os.getcwd() + "/config.yaml"
env = FloatEnv(path_to_config)
observation = env.reset()
total_reward = 0
actions_taken = 0
print("Current board score: ", env._current_score())
while True:
action = get_user_action(env)
        if action is None:
print("That is not a valid action. Please retry:")
continue
print("Action taken: ", action)
state, reward, done, other = env.step(action)
total_reward += reward
actions_taken += 1
print("Reward recieved: ", reward)
print("Average reward: ", total_reward/actions_taken)
print("Total Reward: ", total_reward)
print("Number of actions: ", actions_taken)
if done:
break
print("You reached the maximum number of actions, the game has ended.\n")
# print final board state
print("Final ", end="")
env.render()
|
the-stack_0_5674 | # This file was automatically created by FeynRules 2.3.32
# Mathematica version: 11.3.0 for Mac OS X x86 (64-bit) (March 7, 2018)
# Date: Sat 21 Apr 2018 20:43:27
from object_library import all_parameters, Parameter
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
# This is a default parameter object representing 0.
ZERO = Parameter(name = 'ZERO',
nature = 'internal',
type = 'real',
value = '0.0',
texname = '0')
# User-defined parameters.
cabi = Parameter(name = 'cabi',
nature = 'external',
type = 'real',
value = 0.227736,
texname = '\\theta _c',
lhablock = 'CKMBLOCK',
lhacode = [ 1 ])
aEWM1 = Parameter(name = 'aEWM1',
nature = 'external',
type = 'real',
value = 127.9,
texname = '\\text{aEWM1}',
lhablock = 'SMINPUTS',
lhacode = [ 1 ])
Gf = Parameter(name = 'Gf',
nature = 'external',
type = 'real',
value = 0.0000116637,
texname = 'G_f',
lhablock = 'SMINPUTS',
lhacode = [ 2 ])
aS = Parameter(name = 'aS',
nature = 'external',
type = 'real',
value = 0.1184,
texname = '\\alpha _s',
lhablock = 'SMINPUTS',
lhacode = [ 3 ])
ymdo = Parameter(name = 'ymdo',
nature = 'external',
type = 'real',
value = 0.00504,
texname = '\\text{ymdo}',
lhablock = 'YUKAWA',
lhacode = [ 1 ])
ymup = Parameter(name = 'ymup',
nature = 'external',
type = 'real',
value = 0.00255,
texname = '\\text{ymup}',
lhablock = 'YUKAWA',
lhacode = [ 2 ])
yms = Parameter(name = 'yms',
nature = 'external',
type = 'real',
value = 0.101,
texname = '\\text{yms}',
lhablock = 'YUKAWA',
lhacode = [ 3 ])
ymc = Parameter(name = 'ymc',
nature = 'external',
type = 'real',
value = 1.27,
texname = '\\text{ymc}',
lhablock = 'YUKAWA',
lhacode = [ 4 ])
ymb = Parameter(name = 'ymb',
nature = 'external',
type = 'real',
value = 4.7,
texname = '\\text{ymb}',
lhablock = 'YUKAWA',
lhacode = [ 5 ])
ymt = Parameter(name = 'ymt',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{ymt}',
lhablock = 'YUKAWA',
lhacode = [ 6 ])
yme = Parameter(name = 'yme',
nature = 'external',
type = 'real',
value = 0.000511,
texname = '\\text{yme}',
lhablock = 'YUKAWA',
lhacode = [ 11 ])
ymm = Parameter(name = 'ymm',
nature = 'external',
type = 'real',
value = 0.10566,
texname = '\\text{ymm}',
lhablock = 'YUKAWA',
lhacode = [ 13 ])
ymtau = Parameter(name = 'ymtau',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{ymtau}',
lhablock = 'YUKAWA',
lhacode = [ 15 ])
kq = Parameter(name = 'kq',
nature = 'external',
type = 'real',
value = 0.001,
texname = 'k_q',
lhablock = 'FRBlock',
lhacode = [ 1 ])
lamf = Parameter(name = 'lamf',
nature = 'external',
type = 'real',
value = 0.1,
texname = 'l_{\\text{fi}}',
lhablock = 'FRBlock',
lhacode = [ 2 ])
yf1x1 = Parameter(name = 'yf1x1',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf1x1}',
lhablock = 'FRBlock6',
lhacode = [ 1, 1 ])
yf1x2 = Parameter(name = 'yf1x2',
nature = 'external',
type = 'complex',
value = 1.e-6,
texname = '\\text{yf1x2}',
lhablock = 'FRBlock6',
lhacode = [ 1, 2 ])
yf1x3 = Parameter(name = 'yf1x3',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf1x3}',
lhablock = 'FRBlock6',
lhacode = [ 1, 3 ])
yf2x1 = Parameter(name = 'yf2x1',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf2x1}',
lhablock = 'FRBlock6',
lhacode = [ 2, 1 ])
yf2x2 = Parameter(name = 'yf2x2',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf2x2}',
lhablock = 'FRBlock6',
lhacode = [ 2, 2 ])
yf2x3 = Parameter(name = 'yf2x3',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf2x3}',
lhablock = 'FRBlock6',
lhacode = [ 2, 3 ])
yf3x1 = Parameter(name = 'yf3x1',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf3x1}',
lhablock = 'FRBlock6',
lhacode = [ 3, 1 ])
yf3x2 = Parameter(name = 'yf3x2',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf3x2}',
lhablock = 'FRBlock6',
lhacode = [ 3, 2 ])
yf3x3 = Parameter(name = 'yf3x3',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf3x3}',
lhablock = 'FRBlock6',
lhacode = [ 3, 3 ])
MZ = Parameter(name = 'MZ',
nature = 'external',
type = 'real',
value = 91.1876,
texname = '\\text{MZ}',
lhablock = 'MASS',
lhacode = [ 23 ])
Me = Parameter(name = 'Me',
nature = 'external',
type = 'real',
value = 0.000511,
texname = '\\text{Me}',
lhablock = 'MASS',
lhacode = [ 11 ])
MMU = Parameter(name = 'MMU',
nature = 'external',
type = 'real',
value = 0.10566,
texname = '\\text{MMU}',
lhablock = 'MASS',
lhacode = [ 13 ])
MTA = Parameter(name = 'MTA',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{MTA}',
lhablock = 'MASS',
lhacode = [ 15 ])
MU = Parameter(name = 'MU',
nature = 'external',
type = 'real',
value = 0.00255,
texname = 'M',
lhablock = 'MASS',
lhacode = [ 2 ])
MC = Parameter(name = 'MC',
nature = 'external',
type = 'real',
value = 1.27,
texname = '\\text{MC}',
lhablock = 'MASS',
lhacode = [ 4 ])
MT = Parameter(name = 'MT',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{MT}',
lhablock = 'MASS',
lhacode = [ 6 ])
MD = Parameter(name = 'MD',
nature = 'external',
type = 'real',
value = 0.00504,
texname = '\\text{MD}',
lhablock = 'MASS',
lhacode = [ 1 ])
MS = Parameter(name = 'MS',
nature = 'external',
type = 'real',
value = 0.101,
texname = '\\text{MS}',
lhablock = 'MASS',
lhacode = [ 3 ])
MB = Parameter(name = 'MB',
nature = 'external',
type = 'real',
value = 4.7,
texname = '\\text{MB}',
lhablock = 'MASS',
lhacode = [ 5 ])
MH = Parameter(name = 'MH',
nature = 'external',
type = 'real',
value = 125,
texname = '\\text{MH}',
lhablock = 'MASS',
lhacode = [ 25 ])
MP = Parameter(name = 'MP',
nature = 'external',
type = 'real',
value = 120,
texname = '\\text{MP}',
lhablock = 'MASS',
lhacode = [ 9000005 ])
Mfi = Parameter(name = 'Mfi',
nature = 'external',
type = 'real',
value = 10,
texname = '\\text{Mfi}',
lhablock = 'MASS',
lhacode = [ 9000006 ])
WZ = Parameter(name = 'WZ',
nature = 'external',
type = 'real',
value = 2.4952,
texname = '\\text{WZ}',
lhablock = 'DECAY',
lhacode = [ 23 ])
WW = Parameter(name = 'WW',
nature = 'external',
type = 'real',
value = 2.085,
texname = '\\text{WW}',
lhablock = 'DECAY',
lhacode = [ 24 ])
WT = Parameter(name = 'WT',
nature = 'external',
type = 'real',
value = 1.50833649,
texname = '\\text{WT}',
lhablock = 'DECAY',
lhacode = [ 6 ])
WH = Parameter(name = 'WH',
nature = 'external',
type = 'real',
value = 0.00589569,
texname = '\\text{WH}',
lhablock = 'DECAY',
lhacode = [ 25 ])
WH1 = Parameter(name = 'WH1',
nature = 'external',
type = 'real',
value = 0.00575308848,
texname = '\\text{WH1}',
lhablock = 'DECAY',
lhacode = [ 9000005 ])
Wfi = Parameter(name = 'Wfi',
nature = 'external',
type = 'real',
value = 6.03044e-9,
texname = '\\text{Wfi}',
lhablock = 'DECAY',
lhacode = [ 9000006 ])
aEW = Parameter(name = 'aEW',
nature = 'internal',
type = 'real',
value = '1/aEWM1',
texname = '\\alpha _{\\text{EW}}')
G = Parameter(name = 'G',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)',
texname = 'G')
CKM1x1 = Parameter(name = 'CKM1x1',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM1x1}')
CKM1x2 = Parameter(name = 'CKM1x2',
nature = 'internal',
type = 'complex',
value = 'cmath.sin(cabi)',
texname = '\\text{CKM1x2}')
CKM1x3 = Parameter(name = 'CKM1x3',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM1x3}')
CKM2x1 = Parameter(name = 'CKM2x1',
nature = 'internal',
type = 'complex',
value = '-cmath.sin(cabi)',
texname = '\\text{CKM2x1}')
CKM2x2 = Parameter(name = 'CKM2x2',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM2x2}')
CKM2x3 = Parameter(name = 'CKM2x3',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM2x3}')
CKM3x1 = Parameter(name = 'CKM3x1',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM3x1}')
CKM3x2 = Parameter(name = 'CKM3x2',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM3x2}')
CKM3x3 = Parameter(name = 'CKM3x3',
nature = 'internal',
type = 'complex',
value = '1',
texname = '\\text{CKM3x3}')
MW = Parameter(name = 'MW',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. - (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))',
texname = 'M_W')
ee = Parameter(name = 'ee',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)',
texname = 'e')
sw2 = Parameter(name = 'sw2',
nature = 'internal',
type = 'real',
value = '1 - MW**2/MZ**2',
texname = '\\text{sw2}')
cw = Parameter(name = 'cw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(1 - sw2)',
texname = 'c_w')
sw = Parameter(name = 'sw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(sw2)',
texname = 's_w')
g1 = Parameter(name = 'g1',
nature = 'internal',
type = 'real',
value = 'ee/cw',
texname = 'g_1')
gw = Parameter(name = 'gw',
nature = 'internal',
type = 'real',
value = 'ee/sw',
texname = 'g_w')
vev = Parameter(name = 'vev',
nature = 'internal',
type = 'real',
value = '(2*MW*sw)/ee',
texname = '\\text{vev}')
mfi = Parameter(name = 'mfi',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(100 - (kq*vev**2)/2.)',
texname = 'M_{\\text{fi}}')
AH = Parameter(name = 'AH',
nature = 'internal',
type = 'real',
value = '(47*ee**2*(1 - (2*MH**4)/(987.*MT**4) - (14*MH**2)/(705.*MT**2) + (213*MH**12)/(2.634632e7*MW**12) + (5*MH**10)/(119756.*MW**10) + (41*MH**8)/(180950.*MW**8) + (87*MH**6)/(65800.*MW**6) + (57*MH**4)/(6580.*MW**4) + (33*MH**2)/(470.*MW**2)))/(72.*cmath.pi**2*vev)',
texname = 'A_H')
GH = Parameter(name = 'GH',
nature = 'internal',
type = 'real',
value = '-(G**2*(1 + (13*MH**6)/(16800.*MT**6) + MH**4/(168.*MT**4) + (7*MH**2)/(120.*MT**2)))/(12.*cmath.pi**2*vev)',
texname = 'G_H')
Gphi = Parameter(name = 'Gphi',
nature = 'internal',
type = 'real',
value = '-(G**2*(1 + MH**6/(560.*MT**6) + MH**4/(90.*MT**4) + MH**2/(12.*MT**2)))/(8.*cmath.pi**2*vev)',
texname = 'G_h')
lam = Parameter(name = 'lam',
nature = 'internal',
type = 'real',
value = 'MH**2/(2.*vev**2)',
texname = '\\text{lam}')
yb = Parameter(name = 'yb',
nature = 'internal',
type = 'real',
value = '(ymb*cmath.sqrt(2))/vev',
texname = '\\text{yb}')
yc = Parameter(name = 'yc',
nature = 'internal',
type = 'real',
value = '(ymc*cmath.sqrt(2))/vev',
texname = '\\text{yc}')
ydo = Parameter(name = 'ydo',
nature = 'internal',
type = 'real',
value = '(ymdo*cmath.sqrt(2))/vev',
texname = '\\text{ydo}')
ye = Parameter(name = 'ye',
nature = 'internal',
type = 'real',
value = '(yme*cmath.sqrt(2))/vev',
texname = '\\text{ye}')
ym = Parameter(name = 'ym',
nature = 'internal',
type = 'real',
value = '(ymm*cmath.sqrt(2))/vev',
texname = '\\text{ym}')
ys = Parameter(name = 'ys',
nature = 'internal',
type = 'real',
value = '(yms*cmath.sqrt(2))/vev',
texname = '\\text{ys}')
yt = Parameter(name = 'yt',
nature = 'internal',
type = 'real',
value = '(ymt*cmath.sqrt(2))/vev',
texname = '\\text{yt}')
ytau = Parameter(name = 'ytau',
nature = 'internal',
type = 'real',
value = '(ymtau*cmath.sqrt(2))/vev',
texname = '\\text{ytau}')
yup = Parameter(name = 'yup',
nature = 'internal',
type = 'real',
value = '(ymup*cmath.sqrt(2))/vev',
texname = '\\text{yup}')
muH = Parameter(name = 'muH',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(lam*vev**2)',
texname = '\\mu')
|
the-stack_0_5675 | import rich.repr
@rich.repr.auto
class Bird:
def __init__(self, name, eats=None, fly=True, extinct=False):
self.name = name
self.eats = list(eats) if eats else []
self.fly = fly
self.extinct = extinct
# Note that the repr is still generated without Rich
# Try commenting out the following line
from rich import print
BIRDS = {
"gull": Bird("gull", eats=["fish", "chips", "ice cream", "sausage rolls"]),
"penguin": Bird("penguin", eats=["fish"], fly=False),
"dodo": Bird("dodo", eats=["fruit"], fly=False, extinct=True),
}
print(BIRDS)
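# With the auto-generated rich repr, the print above renders something roughly
# like the following (a sketch only -- exact layout depends on Rich and the
# terminal, and arguments equal to their defaults are omitted):
#   {'gull': Bird('gull', eats=['fish', 'chips', 'ice cream', 'sausage rolls']),
#    'penguin': Bird('penguin', eats=['fish'], fly=False),
#    'dodo': Bird('dodo', eats=['fruit'], fly=False, extinct=True)}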
|
the-stack_0_5676 | from .base import *
from .mgr import CoreManager as Mgr
class CreationPhaseManager:
_id_generator = id_generator()
def __init__(self, obj_type, has_color=False, add_to_hist=False):
self._obj = None
self._obj_type = obj_type
self._has_color = has_color
self._add_to_hist = add_to_hist
self._custom_obj_name = ""
self._origin_pos = Point3()
self._creation_start_func = None
self._creation_handlers = []
self._current_creation_phase = 0
if has_color:
self.set_next_object_color()
else:
GD[f"next_{obj_type}_color"] = None
Mgr.expose(f"custom_{obj_type}_name", lambda: self._custom_obj_name)
Mgr.accept(f"set_custom_{obj_type}_name", self.__set_custom_object_name)
def setup(self, creation_phases, status_text):
creation_status = {}
mode_text = f"Create {status_text['obj_type']}"
info_text = "LMB-drag to start creation"
creation_status["idle"] = {"mode": mode_text, "info": info_text}
info_text = "[SNAP] LMB-drag to start creation"
creation_status["snap_idle"] = {"mode": mode_text, "info": info_text}
add_state = Mgr.add_state
bind = Mgr.bind_state
state_persistence = -12
phase_finishers = []
for i, phase_data in enumerate(creation_phases):
if len(phase_data) == 3:
main_starter, main_handler, finisher = phase_data
else:
main_starter, main_handler = phase_data
finisher = lambda: None
phase_finishers.append(finisher)
def complete_creation(index):
for finisher in phase_finishers[index:]:
finisher()
self.__end_creation(cancel=False)
if i == 0:
self._creation_start_func = main_starter
Mgr.accept(f"start_{self._obj_type}_creation", self.__start_creation)
on_enter_state = self.__enter_creation_start_phase
else:
on_enter_state = lambda p, a, m=main_starter: self.__start_creation_phase(m, p, a)
on_exit_state = self.__exit_creation_phase
state_id = f"{self._obj_type}_creation_phase_{i + 1}"
add_state(state_id, state_persistence, on_enter_state, on_exit_state)
self._creation_handlers.append(self.__get_creation_phase_handler(main_handler))
binding_id = f"{self._obj_type} creation -> navigate"
bind(state_id, binding_id, "space", lambda: Mgr.enter_state("navigation_mode"))
binding_id = f"quit {self._obj_type} creation"
bind(state_id, binding_id, "escape", self.__end_creation)
binding_id = f"abort {self._obj_type} creation"
bind(state_id, binding_id, "focus_loss", self.__end_creation)
binding_id = f"cancel {self._obj_type} creation"
bind(state_id, binding_id, "mouse3", self.__end_creation)
binding_id = f"complete {self._obj_type} creation {i}"
bind(state_id, binding_id, "enter", lambda index=i: complete_creation(index))
def finish_phase(finisher, state_id=None):
finisher()
Mgr.enter_state(state_id) if state_id else self.__end_creation(cancel=False)
info_text = f"move mouse to {status_text[f'phase{i + 1}']};" \
+ " <Tab> to skip phase; <Enter> to complete;"
if i == len(creation_phases) - 1:
binding_id = f"skip {self._obj_type} creation phase {i}"
finish_command = lambda f=finisher: finish_phase(f)
bind(state_id, binding_id, "tab", finish_command)
binding_id = f"finalize {self._obj_type} creation"
bind(state_id, binding_id, "mouse1-up",
lambda: self.__end_creation(cancel=False))
info_text += " release LMB to finalize;"
else:
next_state_id = f"{self._obj_type}_creation_phase_{i + 2}"
binding_id = f"skip {self._obj_type} creation phase {i}"
finish_command = lambda f=finisher, i=next_state_id: finish_phase(f, i)
bind(state_id, binding_id, "tab", finish_command)
binding_id = f"start {self._obj_type} creation phase {i + 2}"
command = lambda state_id=next_state_id: Mgr.enter_state(state_id)
bind(state_id, binding_id, "mouse1-up", command)
info_text += " release LMB to set;"
info_text += " RMB to cancel; <Space> to navigate"
creation_status[f"phase{i + 1}"] = {"mode": mode_text, "info": info_text}
status_data = GD["status"]["create"]
status_data[self._obj_type] = creation_status
return True
def __enter_creation_start_phase(self, prev_state_id, active):
if active:
Mgr.do("enable_view_gizmo", False)
Mgr.do("set_view_gizmo_mouse_region_sort", 0)
Mgr.update_remotely("interactive_creation", "resumed")
snap_settings = GD["snap"]
if snap_settings["on"]["creation"]:
snap_type = "creation_phase_1"
snap_on = snap_settings["on"][snap_type]
snap_tgt_type = snap_settings["tgt_type"][snap_type]
if snap_on and snap_tgt_type != "increment":
snap_settings["type"] = snap_type
Mgr.do("init_snap_target_checking", "create")
self._creation_start_func()
Mgr.add_task(self._creation_handlers[0], "draw_object", sort=3)
Mgr.update_app("status", ["create", self._obj_type, "phase1"])
def __exit_creation_phase(self, next_state_id, active):
if active:
Mgr.remove_task("draw_object")
Mgr.do("enable_view_gizmo", True)
Mgr.do("set_view_gizmo_mouse_region_sort", 210)
self.__disable_snap()
def __start_creation(self, origin_pos):
self._origin_pos = origin_pos
self._current_creation_phase = 1
Mgr.enter_state(f"{self._obj_type}_creation_phase_1")
def __start_creation_phase(self, main_start_func, prev_state_id, active):
phase_id = self._current_creation_phase
if active:
Mgr.do("enable_view_gizmo", False)
Mgr.do("set_view_gizmo_mouse_region_sort", 0)
Mgr.update_remotely("interactive_creation", "resumed")
else:
phase_id += 1
self._current_creation_phase = phase_id
snap_settings = GD["snap"]
if snap_settings["on"]["creation"]:
snap_type = f"creation_phase_{phase_id - 1}"
snap_on = snap_settings["on"][snap_type]
snap_tgt_type = snap_settings["tgt_type"][snap_type]
if snap_on:
if snap_tgt_type != "increment":
Mgr.do("end_snap_target_checking")
Mgr.set_cursor("create")
if snap_tgt_type == "grid_point" and not active:
Mgr.update_app("active_grid_plane", GD["active_grid_plane"])
snap_type = f"creation_phase_{phase_id}"
snap_on = snap_settings["on"][snap_type]
snap_tgt_type = snap_settings["tgt_type"][snap_type]
if snap_on and snap_tgt_type != "increment":
snap_settings["type"] = snap_type
Mgr.do("init_snap_target_checking", "create")
Mgr.remove_task("draw_object")
main_start_func()
creation_handler = self._creation_handlers[phase_id - 1]
Mgr.add_task(creation_handler, "draw_object", sort=3)
Mgr.update_app("status", ["create", self._obj_type, f"phase{phase_id}"])
def __get_creation_phase_handler(self, main_handler_func):
def handle_creation_phase(task):
main_handler_func()
return task.cont
return handle_creation_phase
def __set_custom_object_name(self, custom_name):
self._custom_obj_name = custom_name
def init_object(self, obj):
self._obj = obj
def get_object(self):
return self._obj
def get_object_type(self):
return self._obj_type
def generate_object_id(self):
obj_id = (self._obj_type,) + next(self._id_generator)
return obj_id
def set_next_object_color(self):
color_values = tuple(random.random() * .4 + .5 for i in range(3))
GD[f"next_{self._obj_type}_color"] = color_values
def get_next_object_color(self):
r, g, b = GD[f"next_{self._obj_type}_color"]
color = VBase4(r, g, b, 1.)
return color
def get_origin_pos(self):
return self._origin_pos
def add_history(self, toplevel_obj):
Mgr.do("update_history_time")
event_descr = f'Create "{toplevel_obj.name}"'
obj_id = toplevel_obj.id
obj_data = {obj_id: toplevel_obj.get_data_to_store("creation")}
event_data = {"objects": obj_data}
event_data["object_ids"] = set(Mgr.get("object_ids"))
Mgr.do("add_history", event_descr, event_data, update_time_id=False)
def __disable_snap(self, reset_grid=False):
snap_settings = GD["snap"]
if snap_settings["on"]["creation"]:
snap_type = f"creation_phase_{self._current_creation_phase}"
snap_on = snap_settings["on"][snap_type]
snap_tgt_type = snap_settings["tgt_type"][snap_type]
if snap_on:
if snap_tgt_type != "increment":
Mgr.do("end_snap_target_checking")
Mgr.set_cursor("create")
if snap_tgt_type == "grid_point" and reset_grid:
Mgr.update_app("active_grid_plane", GD["active_grid_plane"])
snap_settings["type"] = "creation"
def __end_creation(self, cancel=True):
self.__disable_snap(reset_grid=True)
Mgr.remove_task("draw_object")
if cancel or not self._obj.is_valid():
self._obj.destroy()
else:
self._obj.finalize()
name = Mgr.get("next_obj_name", self._obj_type)
Mgr.update_remotely("next_obj_name", name)
if self._has_color:
self.set_next_object_color()
if self._add_to_hist:
self.add_history(self._obj.toplevel_obj)
Mgr.notify("creation_ended")
Mgr.enter_state("creation_mode")
self._obj = None
self._current_creation_phase = 0
|
the-stack_0_5678 | # Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import onnx
from onnx import helper, TensorProto
IN = helper.make_tensor_value_info('in', TensorProto.FLOAT, [7])
OUT = helper.make_tensor_value_info('out', TensorProto.INT8, [7])
nodes = [
helper.make_node(
'Cast',
['in'],
['out'],
to=getattr(TensorProto, 'INT8'),
),
]
graph_def = helper.make_graph(
nodes,
'float_to_int8',
[IN],
[OUT],
)
model_def = helper.make_model(graph_def, producer_name='float_to_int8.py', opset_imports=[onnx.OperatorSetIdProto(version=13)])
onnx.save(model_def, 'float_to_int8.onnx')
|
the-stack_0_5680 | from setuptools import setup
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django",
"Framework :: Django :: 1.11",
"Framework :: Django :: 2.0",
"Intended Audience :: Developers",
# "License :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries :: Application Frameworks",
]
setup(
name="bitrix",
version='0.0.3',
author="Noors Ergesh",
author_email="[email protected]",
description="Bitrix24 python library",
license='MIT',
# long_description=LONG_DESCRIPTION,
url="https://github.com/NursErgesh/bitrix.git",
packages=("bitrix",),
include_package_data=True,
install_requires=open('requirements/requirements.txt').read().splitlines(),
tests_require=open('requirements/test.txt').read().splitlines(),
classifiers=CLASSIFIERS,
zip_safe=False,
)
|
the-stack_0_5681 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateClusterRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'cluster_id': 'str',
'body': 'ClusterInformation'
}
attribute_map = {
'cluster_id': 'cluster_id',
'body': 'body'
}
def __init__(self, cluster_id=None, body=None):
"""UpdateClusterRequest - a model defined in huaweicloud sdk"""
self._cluster_id = None
self._body = None
self.discriminator = None
self.cluster_id = cluster_id
if body is not None:
self.body = body
@property
def cluster_id(self):
"""Gets the cluster_id of this UpdateClusterRequest.
        Cluster ID. For how to obtain it, see [[How to obtain parameters in the API URI](https://support.huaweicloud.com/api-cce/cce_02_0271.html)](tag:hws)[[How to obtain parameters in the API URI](https://support.huaweicloud.com/intl/zh-cn/api-cce/cce_02_0271.html)](tag:hws_hk)
:return: The cluster_id of this UpdateClusterRequest.
:rtype: str
"""
return self._cluster_id
@cluster_id.setter
def cluster_id(self, cluster_id):
"""Sets the cluster_id of this UpdateClusterRequest.
        Cluster ID. For how to obtain it, see [[How to obtain parameters in the API URI](https://support.huaweicloud.com/api-cce/cce_02_0271.html)](tag:hws)[[How to obtain parameters in the API URI](https://support.huaweicloud.com/intl/zh-cn/api-cce/cce_02_0271.html)](tag:hws_hk)
:param cluster_id: The cluster_id of this UpdateClusterRequest.
:type: str
"""
self._cluster_id = cluster_id
@property
def body(self):
"""Gets the body of this UpdateClusterRequest.
:return: The body of this UpdateClusterRequest.
:rtype: ClusterInformation
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this UpdateClusterRequest.
:param body: The body of this UpdateClusterRequest.
:type: ClusterInformation
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateClusterRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_5682 | from flask import Blueprint, request, abort, send_file, Response, make_response
from app.helpers.google_maps import get_static_map
from app.helpers.slack import verify_slack_request
from flask_jwt_extended import jwt_required
from app.model import db,UserResponse
from flask import current_app as app
from datetime import datetime
import json
# Add blueprints
api = Blueprint('api', __name__, url_prefix="/askjeeves", template_folder='templates')
@api.route('/GoogleMaps', methods=['GET'])
def get_google_map():
"""
Input: Takes in a location via the query string in the URL
Output: Returns a Google Map static image (PNG) to client
"""
# Get query string
query_string = request.args
image = get_static_map(query_string['location'])
return send_file(image, mimetype='image/png')
@api.route('/', methods=['POST'])
@api.route('/UserResponse', methods=['POST'])
def user_response():
if request.headers.get("X-Slack-Signature") and request.headers.get("X-Slack-Request-Timestamp") and request.headers["Content-Type"] == "application/x-www-form-urlencoded":
request_body = request.get_data()
slack_signature = request.headers.get('X-Slack-Signature', None)
slack_request_timestamp = request.headers.get('X-Slack-Request-Timestamp', None)
if verify_slack_request(slack_signature, slack_request_timestamp, request_body):
# Get URL encoded form data
payload = json.loads(request.form['payload'])
# Unpack values from fields
temp_dict = dict()
for field in payload['message']['blocks'][3]['fields']:
temp_dict[field['text'].split("*\n")[0][1:]] = field['text'].split("*\n")[1]
temp_dict['Username'] = payload['user']['username']
temp_dict['user_selection'] = payload['actions'][0]['value']
# Create DB entry
userResponse = UserResponse(
EventID=temp_dict['EventID'],
Username=temp_dict['Username'],
Timestamp=temp_dict['Timestamp'],
Location=temp_dict['Location'],
IPaddress=temp_dict['IPaddress'],
VPNHash=temp_dict['VPNhash'],
Device=temp_dict['Device'],
Hostname=temp_dict['Hostname'],
Selection=temp_dict['user_selection']
)
# Commit DB entry
db.session.add(userResponse)
db.session.commit()
# remove blocks
del payload['message']['blocks']
selection = payload['actions'][0]['value']
msg_text = str()
if selection == "legitimate_login":
msg_text = ":partyparrot:"
else:
msg_text = ":rotating-light-red: :rotating-light-red: :rotating-light-red: Alerting security team :rotating-light-red: :rotating-light-red: :rotating-light-red: "
response = app.slack_client.chat_update(
channel=payload["channel"]["id"],
ts=payload['container']["message_ts"],
text=msg_text,
blocks=[],
attachments=[]
)
return make_response("", 200)
return abort(404)
@api.route('/GetUserResponse', methods=['GET'])
@jwt_required
def get_user_responses():
"""
Input: Request to get all the user responses in MySQL database
Output: Return JSON list of all user responses
"""
# Request all user responses from DB
userResponses = db.session.query(UserResponse).all()
# Delete all entries
for userResponse in userResponses:
db.session.delete(userResponse)
db.session.commit()
# Create list of dicts of each DB entry
userResponseLst = list()
for userResponse in userResponses:
temp = userResponse.__dict__
del temp['_sa_instance_state']
userResponseLst.append(temp)
# return user responses as JSON
return json.dumps(userResponseLst)
|
the-stack_0_5683 | #!/usr/bin/env python
from __future__ import with_statement
# ==============================================================================
# MetaPhlAn v2.x: METAgenomic PHyLogenetic ANalysis for taxonomic classification
# of metagenomic data
#
# Authors: Nicola Segata ([email protected]),
# Duy Tin Truong,
# Francesco Asnicar ([email protected])
#
# Please type "./metaphlan2.py -h" for usage help
#
# ==============================================================================
__author__ = ('Nicola Segata ([email protected]), '
'Duy Tin Truong, '
'Francesco Asnicar ([email protected])')
__version__ = '2.8'
__date__ = '31 May 2018'
import sys
import os
import stat
import re
import time
import tarfile
# from binascii import b2a_uu
try:
import numpy as np
except ImportError:
sys.stderr.write("Error! numpy python library not detected!!\n")
sys.exit(1)
import tempfile as tf
import argparse as ap
import subprocess as subp
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
# import multiprocessing as mp
from collections import defaultdict as defdict
import bz2
import itertools
from distutils.version import LooseVersion
try:
import cPickle as pickle
except ImportError:
import pickle
# try to import urllib.request.urlretrieve for python3
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
from glob import glob
import hashlib
# set the location of the database download url
DATABASE_DOWNLOAD = "https://www.dropbox.com/sh/7qze7m7g9fe2xjg/AADHWzATSQcI0CNFD0sk7MAga"
FILE_LIST= "https://www.dropbox.com/sh/7qze7m7g9fe2xjg/AAA4XDP85WHon_eHvztxkamTa/file_list.txt?dl=1"
# get the directory that contains this script
metaphlan2_script_install_folder = os.path.dirname(os.path.abspath(__file__))
# get the default database folder
DEFAULT_DB_FOLDER = os.path.join(metaphlan2_script_install_folder, "metaphlan_databases")
#**********************************************************************************************
# Modification of Code : *
# Modified the code so instead of using the current clade IDs, which are numbers, we will *
# use the clade_names *
# Users reported the biom output is invalid and also the IDs were changing from run to *
# run. *
# George Weingart 05/22/2017 [email protected] *
#**********************************************************************************************
#*************************************************************
#* Imports related to biom file generation *
#*************************************************************
try:
import biom
import biom.table
# import numpy as np # numpy already imported above
except ImportError:
sys.stderr.write("Warning! Biom python library not detected!"
"\n Exporting to biom format will not work!\n")
try:
import json
except ImportError:
sys.stderr.write("Warning! json python library not detected!"
"\n Exporting to biom format will not work!\n")
# This set contains the markers that, after careful validation, were found to have low precision or recall
# We exclude these markers here to avoid generating a new marker DB when changing just a few markers
markers_to_exclude = set(['NC_001782.1', 'GeneID:17099689', 'gi|419819595|ref|NZ_AJRE01000517.1|:1-118',
'GeneID:10498696', 'GeneID:10498710', 'GeneID:10498726', 'GeneID:10498735',
'GeneID:10498757', 'GeneID:10498760', 'GeneID:10498761', 'GeneID:10498763',
'GeneID:11294465', 'GeneID:14181982', 'GeneID:14182132', 'GeneID:14182146',
'GeneID:14182148', 'GeneID:14182328', 'GeneID:14182639', 'GeneID:14182647',
'GeneID:14182650', 'GeneID:14182663', 'GeneID:14182683', 'GeneID:14182684',
'GeneID:14182691', 'GeneID:14182803', 'GeneID:14296322', 'GeneID:1489077',
'GeneID:1489080', 'GeneID:1489081', 'GeneID:1489084', 'GeneID:1489085',
'GeneID:1489088', 'GeneID:1489089', 'GeneID:1489090', 'GeneID:1489528',
'GeneID:1489530', 'GeneID:1489531', 'GeneID:1489735', 'GeneID:1491873',
'GeneID:1491889', 'GeneID:1491962', 'GeneID:1491963', 'GeneID:1491964',
'GeneID:1491965', 'GeneID:17099689', 'GeneID:1724732', 'GeneID:17494231',
'GeneID:2546403', 'GeneID:2703374', 'GeneID:2703375', 'GeneID:2703498',
'GeneID:2703531', 'GeneID:2772983', 'GeneID:2772989', 'GeneID:2772991',
'GeneID:2772993', 'GeneID:2772995', 'GeneID:2773037', 'GeneID:2777387',
'GeneID:2777399', 'GeneID:2777400', 'GeneID:2777439', 'GeneID:2777493',
'GeneID:2777494', 'GeneID:3077424', 'GeneID:3160801', 'GeneID:3197323',
'GeneID:3197355', 'GeneID:3197400', 'GeneID:3197428', 'GeneID:3783722',
'GeneID:3783750', 'GeneID:3953004', 'GeneID:3959334', 'GeneID:3964368',
'GeneID:3964370', 'GeneID:4961452', 'GeneID:5075645', 'GeneID:5075646',
'GeneID:5075647', 'GeneID:5075648', 'GeneID:5075649', 'GeneID:5075650',
'GeneID:5075651', 'GeneID:5075652', 'GeneID:5075653', 'GeneID:5075654',
'GeneID:5075655', 'GeneID:5075656', 'GeneID:5075657', 'GeneID:5075658',
'GeneID:5075659', 'GeneID:5075660', 'GeneID:5075661', 'GeneID:5075662',
'GeneID:5075663', 'GeneID:5075664', 'GeneID:5075665', 'GeneID:5075667',
'GeneID:5075668', 'GeneID:5075669', 'GeneID:5075670', 'GeneID:5075671',
'GeneID:5075672', 'GeneID:5075673', 'GeneID:5075674', 'GeneID:5075675',
'GeneID:5075676', 'GeneID:5075677', 'GeneID:5075678', 'GeneID:5075679',
'GeneID:5075680', 'GeneID:5075681', 'GeneID:5075682', 'GeneID:5075683',
'GeneID:5075684', 'GeneID:5075685', 'GeneID:5075686', 'GeneID:5075687',
'GeneID:5075688', 'GeneID:5075689', 'GeneID:5075690', 'GeneID:5075691',
'GeneID:5075692', 'GeneID:5075693', 'GeneID:5075694', 'GeneID:5075695',
'GeneID:5075696', 'GeneID:5075697', 'GeneID:5075698', 'GeneID:5075700',
'GeneID:5075701', 'GeneID:5075702', 'GeneID:5075703', 'GeneID:5075704',
'GeneID:5075705', 'GeneID:5075707', 'GeneID:5075708', 'GeneID:5075709',
'GeneID:5075710', 'GeneID:5075711', 'GeneID:5075712', 'GeneID:5075713',
'GeneID:5075714', 'GeneID:5075715', 'GeneID:5075716', 'GeneID:5176189',
'GeneID:6803896', 'GeneID:6803915', 'GeneID:7944151', 'GeneID:927334',
'GeneID:927335', 'GeneID:927337', 'GeneID:940263', 'GeneID:9538324',
'NC_003977.1', 'gi|103485498|ref|NC_008048.1|:1941166-1942314',
'gi|108802856|ref|NC_008148.1|:1230231-1230875',
'gi|124806686|ref|XM_001350760.1|',
'gi|126661648|ref|NZ_AAXW01000149.1|:c1513-1341',
'gi|149172845|ref|NZ_ABBW01000029.1|:970-1270',
'gi|153883242|ref|NZ_ABDQ01000074.1|:79-541',
'gi|167031021|ref|NC_010322.1|:1834668-1835168',
'gi|171344510|ref|NZ_ABJO01001391.1|:1-116',
'gi|171346813|ref|NZ_ABJO01001728.1|:c109-1',
'gi|190640924|ref|NZ_ABRC01000948.1|:c226-44',
'gi|223045343|ref|NZ_ACEN01000042.1|:1-336',
'gi|224580998|ref|NZ_GG657387.1|:c114607-114002',
'gi|224993759|ref|NZ_ACFY01000068.1|:c357-1',
'gi|237784637|ref|NC_012704.1|:141000-142970',
'gi|237784637|ref|NC_012704.1|:c2048315-2047083',
'gi|240136783|ref|NC_012808.1|:1928224-1928961',
'gi|255319020|ref|NZ_ACVR01000025.1|:28698-29132',
'gi|260590341|ref|NZ_ACEO02000062.1|:c387-151',
'gi|262368201|ref|NZ_GG704964.1|:733100-733978',
'gi|262369811|ref|NZ_GG704966.1|:c264858-264520',
'gi|288559258|ref|NC_013790.1|:448046-451354',
'gi|288559258|ref|NC_013790.1|:532047-533942',
'gi|294794157|ref|NZ_GG770200.1|:245344-245619',
'gi|304372805|ref|NC_014448.1|:444677-445120',
'gi|304372805|ref|NC_014448.1|:707516-708268',
'gi|304372805|ref|NC_014448.1|:790263-792257',
'gi|304372805|ref|NC_014448.1|:c367313-364470',
'gi|304372805|ref|NC_014448.1|:c659144-658272',
'gi|304372805|ref|NC_014448.1|:c772578-770410',
'gi|304372805|ref|NC_014448.1|:c777901-777470',
'gi|306477407|ref|NZ_GG770409.1|:c1643877-1643338',
'gi|317120849|ref|NC_014831.1|:c891121-890144',
'gi|323356441|ref|NZ_GL698442.1|:560-682',
'gi|324996766|ref|NZ_BABV01000451.1|:10656-11579',
'gi|326579405|ref|NZ_AEGQ01000006.1|:2997-3791',
'gi|326579407|ref|NZ_AEGQ01000008.1|:c45210-44497',
'gi|326579433|ref|NZ_AEGQ01000034.1|:346-3699',
'gi|329889017|ref|NZ_GL883086.1|:586124-586804',
'gi|330822653|ref|NC_015422.1|:2024431-2025018',
'gi|335053104|ref|NZ_AFIL01000010.1|:c33862-32210',
'gi|339304121|ref|NZ_AEOR01000258.1|:c294-1',
'gi|339304277|ref|NZ_AEOR01000414.1|:1-812',
'gi|342211239|ref|NZ_AFUK01000001.1|:790086-790835',
'gi|342211239|ref|NZ_AFUK01000001.1|:c1579497-1578787',
'gi|342213707|ref|NZ_AFUJ01000005.1|:48315-48908',
'gi|355707189|ref|NZ_JH376566.1|:326756-326986',
'gi|355707384|ref|NZ_JH376567.1|:90374-91453',
'gi|355707384|ref|NZ_JH376567.1|:c388018-387605',
'gi|355708440|ref|NZ_JH376569.1|:c80380-79448',
'gi|358051729|ref|NZ_AEUN01000100.1|:c120-1',
'gi|365983217|ref|XM_003668394.1|',
'gi|377571722|ref|NZ_BAFD01000110.1|:c1267-29',
'gi|377684864|ref|NZ_CM001194.1|:c1159954-1159619',
'gi|377684864|ref|NZ_CM001194.1|:c4966-4196',
'gi|378759497|ref|NZ_AFXE01000152.1|:1628-2215',
'gi|378835506|ref|NC_016829.1|:112560-113342',
'gi|378835506|ref|NC_016829.1|:114945-115193',
'gi|378835506|ref|NC_016829.1|:126414-127151',
'gi|378835506|ref|NC_016829.1|:272056-272403',
'gi|378835506|ref|NC_016829.1|:272493-272786',
'gi|378835506|ref|NC_016829.1|:358647-360863',
'gi|378835506|ref|NC_016829.1|:37637-38185',
'gi|378835506|ref|NC_016829.1|:60012-60497',
'gi|378835506|ref|NC_016829.1|:606819-607427',
'gi|378835506|ref|NC_016829.1|:607458-607760',
'gi|378835506|ref|NC_016829.1|:826192-826821',
'gi|378835506|ref|NC_016829.1|:c451932-451336',
'gi|378835506|ref|NC_016829.1|:c460520-459951',
'gi|378835506|ref|NC_016829.1|:c483843-482842',
'gi|378835506|ref|NC_016829.1|:c544660-543638',
'gi|378835506|ref|NC_016829.1|:c556383-555496',
'gi|378835506|ref|NC_016829.1|:c632166-631228',
'gi|378835506|ref|NC_016829.1|:c805066-802691',
'gi|384124469|ref|NC_017160.1|:c2157447-2156863',
'gi|385263288|ref|NZ_AJST01000001.1|:594143-594940',
'gi|385858114|ref|NC_017519.1|:10252-10746',
'gi|385858114|ref|NC_017519.1|:104630-104902',
'gi|385858114|ref|NC_017519.1|:154292-156016',
'gi|385858114|ref|NC_017519.1|:205158-206462',
'gi|385858114|ref|NC_017519.1|:507239-507703',
'gi|385858114|ref|NC_017519.1|:518924-519772',
'gi|385858114|ref|NC_017519.1|:524712-525545',
'gi|385858114|ref|NC_017519.1|:528387-528785',
'gi|385858114|ref|NC_017519.1|:532275-533429',
'gi|385858114|ref|NC_017519.1|:586402-586824',
'gi|385858114|ref|NC_017519.1|:621696-622226',
'gi|385858114|ref|NC_017519.1|:673673-676105',
'gi|385858114|ref|NC_017519.1|:706602-708218',
'gi|385858114|ref|NC_017519.1|:710627-711997',
'gi|385858114|ref|NC_017519.1|:744974-745456',
'gi|385858114|ref|NC_017519.1|:791055-791801',
'gi|385858114|ref|NC_017519.1|:805643-807430',
'gi|385858114|ref|NC_017519.1|:c172050-170809',
'gi|385858114|ref|NC_017519.1|:c334545-333268',
'gi|385858114|ref|NC_017519.1|:c383474-383202',
'gi|385858114|ref|NC_017519.1|:c450880-450389',
'gi|385858114|ref|NC_017519.1|:c451975-451001',
'gi|385858114|ref|NC_017519.1|:c470488-470036',
'gi|385858114|ref|NC_017519.1|:c485596-484598',
'gi|385858114|ref|NC_017519.1|:c58658-58065',
'gi|385858114|ref|NC_017519.1|:c592754-591081',
'gi|385858114|ref|NC_017519.1|:c59590-58820',
'gi|385858114|ref|NC_017519.1|:c601339-600575',
'gi|385858114|ref|NC_017519.1|:c76080-75160',
'gi|385858114|ref|NC_017519.1|:c97777-96302',
'gi|391227518|ref|NZ_CM001514.1|:c1442504-1440237',
'gi|391227518|ref|NZ_CM001514.1|:c3053472-3053023',
'gi|394749766|ref|NZ_AHHC01000069.1|:3978-6176',
'gi|398899615|ref|NZ_AKJK01000021.1|:28532-29209',
'gi|406580057|ref|NZ_AJRD01000017.1|:c17130-15766',
'gi|406584668|ref|NZ_AJQZ01000017.1|:c1397-771',
'gi|408543458|ref|NZ_AJLO01000024.1|:67702-68304',
'gi|410936685|ref|NZ_AJRF02000012.1|:21785-22696',
'gi|41406098|ref|NC_002944.2|:c4468304-4467864',
'gi|416998679|ref|NZ_AEXI01000003.1|:c562937-562176',
'gi|417017738|ref|NZ_AEYL01000489.1|:c111-1',
'gi|417018375|ref|NZ_AEYL01000508.1|:100-238',
'gi|418576506|ref|NZ_AHKB01000025.1|:c7989-7669',
'gi|419819595|ref|NZ_AJRE01000517.1|:1-118',
'gi|421806549|ref|NZ_AMTB01000006.1|:c181247-180489',
'gi|422320815|ref|NZ_GL636045.1|:28704-29048',
'gi|422320874|ref|NZ_GL636046.1|:4984-5742',
'gi|422323244|ref|NZ_GL636061.1|:479975-480520',
'gi|422443048|ref|NZ_GL383112.1|:663738-664823',
'gi|422552858|ref|NZ_GL383469.1|:c216727-215501',
'gi|422859491|ref|NZ_GL878548.1|:c271832-271695',
'gi|423012810|ref|NZ_GL982453.1|:3888672-3888935',
'gi|423012810|ref|NZ_GL982453.1|:4541873-4542328',
'gi|423012810|ref|NZ_GL982453.1|:c2189976-2188582',
'gi|423012810|ref|NZ_GL982453.1|:c5471232-5470300',
'gi|423262555|ref|NC_019552.1|:24703-25212',
'gi|423262555|ref|NC_019552.1|:28306-30696',
'gi|423262555|ref|NC_019552.1|:284252-284581',
'gi|423262555|ref|NC_019552.1|:311161-311373',
'gi|423262555|ref|NC_019552.1|:32707-34497',
'gi|423262555|ref|NC_019552.1|:34497-35237',
'gi|423262555|ref|NC_019552.1|:53691-56813',
'gi|423262555|ref|NC_019552.1|:c388986-386611',
'gi|423262555|ref|NC_019552.1|:c523106-522528',
'gi|423689090|ref|NZ_CM001513.1|:c1700632-1699448',
'gi|423689090|ref|NZ_CM001513.1|:c1701670-1700651',
'gi|423689090|ref|NZ_CM001513.1|:c5739118-5738390',
'gi|427395956|ref|NZ_JH992914.1|:c592682-591900',
'gi|427407324|ref|NZ_JH992904.1|:c2681223-2679463',
'gi|451952303|ref|NZ_AJRB03000021.1|:1041-1574',
'gi|452231579|ref|NZ_AEKA01000123.1|:c18076-16676',
'gi|459791914|ref|NZ_CM001824.1|:c899379-899239',
'gi|471265562|ref|NC_020815.1|:3155799-3156695',
'gi|472279780|ref|NZ_ALPV02000001.1|:33911-36751',
'gi|482733945|ref|NZ_AHGZ01000071.1|:10408-11154',
'gi|483051300|ref|NZ_ALYK02000034.1|:c37582-36650',
'gi|483051300|ref|NZ_ALYK02000034.1|:c38037-37582',
'gi|483993347|ref|NZ_AMXG01000045.1|:251724-253082',
'gi|484100856|ref|NZ_JH670250.1|:600643-602949',
'gi|484115941|ref|NZ_AJXG01000093.1|:567-947',
'gi|484228609|ref|NZ_JH730929.1|:c103784-99021',
'gi|484228797|ref|NZ_JH730960.1|:c16193-12429',
'gi|484228814|ref|NZ_JH730962.1|:c29706-29260',
'gi|484228929|ref|NZ_JH730981.1|:18645-22060',
'gi|484228939|ref|NZ_JH730983.1|:42943-43860',
'gi|484266598|ref|NZ_AKGC01000024.1|:118869-119636',
'gi|484327375|ref|NZ_AKVP01000093.1|:1-1281',
'gi|484328234|ref|NZ_AKVP01000127.1|:c325-110',
'gi|487376144|ref|NZ_KB911257.1|:600445-601482',
'gi|487376194|ref|NZ_KB911260.1|:146228-146533',
'gi|487381776|ref|NZ_KB911485.1|:101242-103083',
'gi|487381776|ref|NZ_KB911485.1|:c32472-31627',
'gi|487381800|ref|NZ_KB911486.1|:39414-39872',
'gi|487381828|ref|NZ_KB911487.1|:15689-17026',
'gi|487381846|ref|NZ_KB911488.1|:13678-13821',
'gi|487382089|ref|NZ_KB911497.1|:23810-26641',
'gi|487382176|ref|NZ_KB911501.1|:c497-381',
'gi|487382213|ref|NZ_KB911502.1|:12706-13119',
'gi|487382247|ref|NZ_KB911505.1|:c7595-6663',
'gi|490551798|ref|NZ_AORG01000011.1|:40110-41390',
'gi|491099398|ref|NZ_KB849654.1|:c720460-719912',
'gi|491124812|ref|NZ_KB849705.1|:1946500-1946937',
'gi|491155563|ref|NZ_KB849732.1|:46469-46843',
'gi|491155563|ref|NZ_KB849732.1|:46840-47181',
'gi|491155563|ref|NZ_KB849732.1|:47165-48616',
'gi|491155563|ref|NZ_KB849732.1|:55055-56662',
'gi|491155563|ref|NZ_KB849732.1|:56662-57351',
'gi|491155563|ref|NZ_KB849732.1|:6101-7588',
'gi|491155563|ref|NZ_KB849732.1|:7657-8073',
'gi|491349766|ref|NZ_KB850082.1|:441-941',
'gi|491395079|ref|NZ_KB850142.1|:1461751-1462554',
'gi|512608407|ref|NZ_KE150401.1|:c156891-156016',
'gi|518653462|ref|NZ_ATLM01000004.1|:c89669-89247',
'gi|520818261|ref|NZ_ATLQ01000015.1|:480744-481463',
'gi|520822538|ref|NZ_ATLQ01000063.1|:103173-103283',
'gi|520826510|ref|NZ_ATLQ01000092.1|:c13892-13563',
'gi|544644736|ref|NZ_KE747865.1|:68388-69722',
'gi|545347918|ref|NZ_KE952096.1|:c83873-81831',
'gi|550735774|gb|AXMM01000002.1|:c743886-743575',
'gi|552875787|ref|NZ_KI515684.1|:c584270-583890',
'gi|552876418|ref|NZ_KI515685.1|:36713-37258',
'gi|552876418|ref|NZ_KI515685.1|:432422-433465',
'gi|552876418|ref|NZ_KI515685.1|:c1014617-1014117',
'gi|552876418|ref|NZ_KI515685.1|:c931935-931327',
'gi|552876815|ref|NZ_KI515686.1|:613740-614315',
'gi|552879811|ref|NZ_AXME01000001.1|:1146402-1146932',
'gi|552879811|ref|NZ_AXME01000001.1|:40840-41742',
'gi|552879811|ref|NZ_AXME01000001.1|:49241-49654',
'gi|552891898|ref|NZ_AXMG01000001.1|:99114-99290',
'gi|552891898|ref|NZ_AXMG01000001.1|:c1460921-1460529',
'gi|552895565|ref|NZ_AXMI01000001.1|:619555-620031',
'gi|552895565|ref|NZ_AXMI01000001.1|:c14352-13837',
'gi|552896371|ref|NZ_AXMI01000002.1|:c148595-146280',
'gi|552897201|ref|NZ_AXMI01000004.1|:c231437-230883',
'gi|552902020|ref|NZ_AXMK01000001.1|:c1625038-1624022',
'gi|556346902|ref|NZ_KI535485.1|:c828278-827901',
'gi|556478613|ref|NZ_KI535633.1|:3529392-3530162',
'gi|560534311|ref|NZ_AYSF01000111.1|:26758-29049',
'gi|564165687|gb|AYLX01000355.1|:10906-11166',
'gi|564169776|gb|AYLX01000156.1|:1-185',
'gi|564938696|gb|AWYH01000018.1|:c75674-75039', 'gi|67993724|ref|XM_664440.1|',
'gi|68059117|ref|XM_666447.1|', 'gi|68062389|ref|XM_668109.1|',
'gi|71730848|gb|AAAM03000019.1|:c14289-12877', 'gi|82753723|ref|XM_722699.1|',
'gi|82775382|ref|NC_007606.1|:2249487-2250014', 'gi|82793634|ref|XM_723027.1|',
'GeneID:1489527'])
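# Taxonomic level prefixes used throughout: k=kingdom, p=phylum, c=class, o=order, f=family,
# g=genus, s=species, t=strain-level marker set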
tax_units = "kpcofgst"
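# Python 2/3 compatibility helpers: under Python 3 the (possibly bz2-compressed) mapping streams
# yield bytes and need explicit decoding/encoding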
if float(sys.version_info[0]) < 3.0:
def read_and_split(ofn):
return (l.strip().split('\t') for l in ofn)
def read_and_split_line(line):
return line.strip().split('\t')
else:
def read_and_split(ofn):
return (l.decode('utf-8').strip().split('\t') for l in ofn)
def read_and_split_line(line):
return line.decode('utf-8').strip().split('\t')
def plain_read_and_split(ofn):
return (l.strip().split('\t') for l in ofn)
def plain_read_and_split_line(l):
return l.strip().split('\t')
if float(sys.version_info[0]) < 3.0:
def mybytes(val):
return val
else:
def mybytes(val):
return bytes(val, encoding='utf-8')
def read_params(args):
    p = ap.ArgumentParser( description=
            "DESCRIPTION\n"
            " MetaPhlAn version "+__version__+" ("+__date__+"): \n"
            " METAgenomic PHyLogenetic ANalysis for metagenomic taxonomic profiling.\n\n"
            "AUTHORS: "+__author__+"\n\n"
            "COMMON COMMANDS\n\n"
            " We assume here that metaphlan2.py is in the system path and that the mpa_dir bash variable contains the\n"
            " main MetaPhlAn folder. BowTie2 should also be in the system path with execution and read\n"
            " permissions, and Perl should be installed.\n\n"
            "\n========== MetaPhlAn 2 clade-abundance estimation ================= \n\n"
            "The basic usage of MetaPhlAn 2 consists in the identification of the clades (from phyla to species and \n"
            "strains in particular cases) present in the metagenome obtained from a microbiome sample and their \n"
            "relative abundance. This corresponds to the default analysis type (-t rel_ab).\n\n"
            "* Profiling a metagenome from raw reads:\n"
            "$ metaphlan2.py metagenome.fastq --input_type fastq\n\n"
            "* You can take advantage of multiple CPUs and save the intermediate BowTie2 output for re-running\n"
            " MetaPhlAn extremely quickly:\n"
            "$ metaphlan2.py metagenome.fastq --bowtie2out metagenome.bowtie2.bz2 --nproc 5 --input_type fastq\n\n"
            "* If you already mapped your metagenome against the marker DB (using a previous MetaPhlAn run), you\n"
            " can obtain the results in a few seconds by using the previously saved --bowtie2out file and \n"
            " specifying the input (--input_type bowtie2out):\n"
            "$ metaphlan2.py metagenome.bowtie2.bz2 --nproc 5 --input_type bowtie2out\n\n"
            "* You can also provide an externally BowTie2-mapped SAM file if you specify this format with \n"
            " --input_type. Two steps: first apply BowTie2 and then feed MetaPhlAn2 with the obtained sam:\n"
            "$ bowtie2 --sam-no-hd --sam-no-sq --no-unal --very-sensitive -S metagenome.sam -x ${mpa_dir}/db_v20/mpa_v20_m200 -U metagenome.fastq\n"
            "$ metaphlan2.py metagenome.sam --input_type sam > profiled_metagenome.txt\n\n"
            # "* Multiple alternative ways to pass the input are also available:\n"
            # "$ cat metagenome.fastq | metaphlan2.py --input_type fastq \n"
            # "$ tar xjf metagenome.tar.bz2 --to-stdout | metaphlan2.py --input_type fastq \n"
            # "$ metaphlan2.py --input_type fastq < metagenome.fastq\n"
            # "$ metaphlan2.py --input_type fastq <(bzcat metagenome.fastq.bz2)\n"
            # "$ metaphlan2.py --input_type fastq <(zcat metagenome_1.fastq.gz metagenome_2.fastq.gz)\n\n"
            "* We can also natively handle paired-end metagenomes, and, more generally, metagenomes stored in \n"
            " multiple files (but you need to specify the --bowtie2out parameter):\n"
            "$ metaphlan2.py metagenome_1.fastq,metagenome_2.fastq --bowtie2out metagenome.bowtie2.bz2 --nproc 5 --input_type fastq\n\n"
            "\n------------------------------------------------------------------- \n \n\n"
            "\n========== Marker level analysis ============================ \n\n"
            "MetaPhlAn 2 introduces the capability of characterizing organisms at the strain level using non-\n"
            "aggregated marker information. Such capability comes in several slightly different flavours and\n"
            "provides a way to perform strain tracking and comparison across multiple samples.\n"
            "Usually, MetaPhlAn 2 is first run with the default analysis type ('-t rel_ab') to profile the species present in\n"
            "the community, and then a strain-level profiling can be performed to zoom in on specific species\n"
            "of interest. This operation can be performed quickly as it exploits the --bowtie2out intermediate \n"
            "file saved during the execution of the default analysis type.\n\n"
            "* The following command will output the abundance of each marker with a RPK (reads per kilo-base) \n"
            " higher than 0.0. (we are assuming that metagenome_outfmt.bz2 has been generated before as \n"
            " shown above).\n"
            "$ metaphlan2.py -t marker_ab_table metagenome_outfmt.bz2 --input_type bowtie2out > marker_abundance_table.txt\n"
            " The obtained RPK can be optionally normalized by the total number of reads in the metagenome \n"
            " to guarantee fair comparisons of abundances across samples. The number of reads in the metagenome\n"
            " needs to be passed with the '--nreads' argument\n\n"
            "* The list of markers present in the sample can be obtained with '-t marker_pres_table'\n"
            "$ metaphlan2.py -t marker_pres_table metagenome_outfmt.bz2 --input_type bowtie2out > marker_abundance_table.txt\n"
            " The --pres_th argument (default 1.0) sets the minimum RPK value to consider a marker present\n\n"
            "* The '-t clade_profiles' analysis type reports the same information as '-t marker_ab_table'\n"
            " but the markers are reported on a clade-by-clade basis.\n"
            "$ metaphlan2.py -t clade_profiles metagenome_outfmt.bz2 --input_type bowtie2out > marker_abundance_table.txt\n\n"
            "* Finally, to obtain all markers present for a specific clade and all its subclades, the \n"
            " '-t clade_specific_strain_tracker' should be used. For example, the following command\n"
            " reports the presence/absence of the markers for the B. fragilis species and its strains;\n"
            " the optional argument --min_ab specifies the minimum clade abundance for reporting the markers.\n\n"
            "$ metaphlan2.py -t clade_specific_strain_tracker --clade s__Bacteroides_fragilis metagenome_outfmt.bz2 --input_type bowtie2out > marker_abundance_table.txt\n"
            "\n------------------------------------------------------------------- \n\n"
"",
formatter_class=ap.RawTextHelpFormatter,
add_help=False )
arg = p.add_argument
arg( 'inp', metavar='INPUT_FILE', type=str, nargs='?', default=None, help=
"the input file can be:\n"
"* a fastq file containing metagenomic reads\n"
"OR\n"
"* a BowTie2 produced SAM file. \n"
"OR\n"
"* an intermediary mapping file of the metagenome generated by a previous MetaPhlAn run \n"
"If the input file is missing, the script assumes that the input is provided using the standard \n"
"input, or named pipes.\n"
"IMPORTANT: the type of input needs to be specified with --input_type" )
arg( 'output', metavar='OUTPUT_FILE', type=str, nargs='?', default=None,
help= "the tab-separated output file of the predicted taxon relative abundances \n"
"[stdout if not present]")
g = p.add_argument_group('Required arguments')
arg = g.add_argument
input_type_choices = ['fastq','fasta','multifasta','multifastq','bowtie2out','sam']
    arg( '--input_type', choices=input_type_choices, required = '--install' not in args, help =
         "set whether the input is the multifasta file of metagenomic reads or \n"
         "the SAM file of the mapping of the reads against the MetaPhlAn db.\n"
         "[required unless --install is specified]\n" )
g = p.add_argument_group('Mapping arguments')
arg = g.add_argument
arg('--mpa_pkl', type=str, default=None,
help="The metadata pickled MetaPhlAn file [deprecated]")
arg('--bowtie2db', metavar="METAPHLAN_BOWTIE2_DB", type=str, default=DEFAULT_DB_FOLDER,
        help=("The folder containing the BowTie2 database files of the MetaPhlAn database. Used if "
              "--input_type is fastq, fasta, multifasta, or multifastq [default "+DEFAULT_DB_FOLDER+"]\n"))
INDEX = 'v20_m200'
    arg('-x', '--index', type=str, default=INDEX,
help=("Specify the id of the database version to use. If the database\n"
"files are not found on the local MetaPhlAn2 installation they\n"
"will be automatically downloaded [default "+INDEX+"]\n"))
bt2ps = ['sensitive', 'very-sensitive', 'sensitive-local', 'very-sensitive-local']
arg('--bt2_ps', metavar="BowTie2 presets", default='very-sensitive',
choices=bt2ps, help="Presets options for BowTie2 (applied only when a "
"multifasta file is provided)\n"
"The choices enabled in MetaPhlAn are:\n"
" * sensitive\n"
" * very-sensitive\n"
" * sensitive-local\n"
" * very-sensitive-local\n"
"[default very-sensitive]\n")
arg('--bowtie2_exe', type=str, default=None,
        help='Full path and name of the BowTie2 executable. This option allows '
             'MetaPhlAn to reach the executable even when it is not in the '
             'system PATH or the system PATH is unreachable')
arg('--bowtie2_build', type=str, default='bowtie2-build',
        help="Full path to the bowtie2-build command to use, default assumes "
             "that 'bowtie2-build' is present in the system path")
arg('--bowtie2out', metavar="FILE_NAME", type=str, default=None,
help="The file for saving the output of BowTie2")
arg('--no_map', action='store_true',
help="Avoid storing the --bowtie2out map file")
arg('--tmp_dir', metavar="", default=None, type=str,
help="The folder used to store temporary files [default is the OS "
"dependent tmp dir]")
g = p.add_argument_group('Post-mapping arguments')
arg = g.add_argument
stat_choices = ['avg_g','avg_l','tavg_g','tavg_l','wavg_g','wavg_l','med']
arg( '--tax_lev', metavar='TAXONOMIC_LEVEL', type=str,
choices='a'+tax_units, default='a', help =
"The taxonomic level for the relative abundance output:\n"
"'a' : all taxonomic levels\n"
"'k' : kingdoms\n"
"'p' : phyla only\n"
"'c' : classes only\n"
"'o' : orders only\n"
"'f' : families only\n"
"'g' : genera only\n"
"'s' : species only\n"
"[default 'a']" )
arg( '--min_cu_len', metavar="", default="2000", type=int, help =
"minimum total nucleotide length for the markers in a clade for\n"
"estimating the abundance without considering sub-clade abundances\n"
"[default 2000]\n" )
arg( '--min_alignment_len', metavar="", default=None, type=int, help =
"The sam records for aligned reads with the longest subalignment\n"
"length smaller than this threshold will be discarded.\n"
"[default None]\n" )
arg( '--ignore_viruses', action='store_true', help=
"Do not profile viral organisms" )
arg( '--ignore_eukaryotes', action='store_true', help=
"Do not profile eukaryotic organisms" )
arg( '--ignore_bacteria', action='store_true', help=
"Do not profile bacterial organisms" )
arg( '--ignore_archaea', action='store_true', help=
"Do not profile archeal organisms" )
arg( '--stat_q', metavar="", type = float, default=0.1, help =
"Quantile value for the robust average\n"
"[default 0.1]" )
arg( '--ignore_markers', type=str, default = None, help =
"File containing a list of markers to ignore. \n")
    arg( '--avoid_disqm', action="store_true", help =
         "Deactivate the procedure of disambiguating the quasi-markers based on the \n"
         "marker abundance pattern found in the sample. It is generally recommended \n"
         "to keep the disambiguation procedure in order to minimize false positives\n")
    arg( '--stat', metavar="", choices=stat_choices, default="tavg_g", type=str, help =
         "EXPERIMENTAL! Statistical approach for converting marker abundances into clade abundances\n"
         "'avg_g' : clade global (i.e. normalizing all markers together) average\n"
         "'avg_l' : average of length-normalized marker counts\n"
         "'tavg_g' : truncated clade global average at --stat_q quantile\n"
         "'tavg_l' : truncated average of length-normalized marker counts (at --stat_q)\n"
"'wavg_g' : winsorized clade global average (at --stat_q)\n"
"'wavg_l' : winsorized average of length-normalized marker counts (at --stat_q)\n"
"'med' : median of length-normalized marker counts\n"
"[default tavg_g]" )
arg = p.add_argument
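    # Note on the --stat choices above (an illustrative sketch, not executed code): with --stat_q 0.1
    # and 20 markers, 'tavg_g' drops the 2 markers with the lowest and the 2 with the highest
    # length-normalized counts and divides the remaining read counts by the remaining marker lengths,
    # while the 'wavg_*' options clamp the extremes to the quantile boundaries instead of dropping
    # them (see TaxClade.compute_abundance below).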
g = p.add_argument_group('Additional analysis types and arguments')
arg = g.add_argument
analysis_types = ['rel_ab', 'rel_ab_w_read_stats', 'reads_map', 'clade_profiles', 'marker_ab_table', 'marker_counts', 'marker_pres_table', 'clade_specific_strain_tracker']
    arg( '-t', metavar='ANALYSIS TYPE', type=str, choices = analysis_types,
         default='rel_ab', help =
         "Type of analysis to perform: \n"
         " * rel_ab: profiling a metagenome in terms of relative abundances\n"
         " * rel_ab_w_read_stats: profiling a metagenome in terms of relative abundances and estimating the number of reads coming from each clade.\n"
         " * reads_map: mapping from reads to clades (only reads hitting a marker)\n"
         " * clade_profiles: normalized marker counts for clades with at least a non-null marker\n"
         " * marker_ab_table: normalized marker counts (only when > 0.0 and normalized by metagenome size if --nreads is specified)\n"
         " * marker_counts: non-normalized marker counts [use with extreme caution]\n"
         " * marker_pres_table: list of markers present in the sample (threshold at 1.0 if not differently specified with --pres_th)\n"
         " * clade_specific_strain_tracker: presence/absence of the markers of a specific clade (see --clade and --min_ab)\n"
         "[default 'rel_ab']" )
    arg( '--nreads', metavar="NUMBER_OF_READS", type=int, default = None, help =
         "The total number of reads in the original metagenome. It is used only when \n"
         "-t marker_ab_table is specified for normalizing the length-normalized counts \n"
         "with the metagenome size as well. No normalization applied if --nreads is not \n"
         "specified" )
arg( '--pres_th', metavar="PRESENCE_THRESHOLD", type=int, default = 1.0, help =
'Threshold for calling a marker present by the -t marker_pres_table option' )
arg( '--clade', metavar="", default=None, type=str, help =
"The clade for clade_specific_strain_tracker analysis\n" )
    arg( '--min_ab', metavar="", default=0.1, type=float, help =
         "The minimum percentage abundance for the clade in the clade_specific_strain_tracker analysis\n" )
g = p.add_argument_group('Output arguments')
arg = g.add_argument
arg( '-o', '--output_file', metavar="output file", type=str, default=None, help =
"The output file (if not specified as positional argument)\n")
arg('--sample_id_key', metavar="name", type=str, default="#SampleID",
help =("Specify the sample ID key for this analysis."
" Defaults to '#SampleID'."))
arg('--sample_id', metavar="value", type=str,
default="Metaphlan2_Analysis",
help =("Specify the sample ID for this analysis."
" Defaults to 'Metaphlan2_Analysis'."))
arg( '-s', '--samout', metavar="sam_output_file",
type=str, default=None, help="The sam output file\n")
#*************************************************************
#* Parameters related to biom file generation *
#*************************************************************
arg( '--biom', '--biom_output_file', metavar="biom_output", type=str, default=None, help =
"If requesting biom file output: The name of the output file in biom format \n")
arg( '--mdelim', '--metadata_delimiter_char', metavar="mdelim", type=str, default="|", help =
"Delimiter for bug metadata: - defaults to pipe. e.g. the pipe in k__Bacteria|p__Proteobacteria \n")
#*************************************************************
#* End parameters related to biom file generation *
#*************************************************************
g = p.add_argument_group('Other arguments')
arg = g.add_argument
arg('--nproc', metavar="N", type=int, default=4,
help="The number of CPUs to use for parallelizing the mapping [default 4]")
arg('--install', action='store_true',
help="Only checks if the MetaPhlAn2 DB is installed and installs it if not. All other parameters are ignored.")
arg('--read_min_len', type=int, default=70,
help="Specify the minimum length of the reads to be considered when parsing the input file with "
"'read_fastx.py' script, default value is 70")
arg('-v', '--version', action='version',
version="MetaPhlAn version {} ({})".format(__version__, __date__),
help="Prints the current MetaPhlAn version and exit")
arg("-h", "--help", action="help", help="show this help message and exit")
return vars(p.parse_args())
def byte_to_megabyte(byte):
"""
Convert byte value to megabyte
"""
return byte / (1024.0**2)
class ReportHook():
def __init__(self):
self.start_time = time.time()
def report(self, blocknum, block_size, total_size):
"""
Print download progress message
"""
if blocknum == 0:
self.start_time = time.time()
if total_size > 0:
sys.stderr.write("Downloading file of size: {:.2f} MB\n"
.format(byte_to_megabyte(total_size)))
else:
total_downloaded = blocknum * block_size
status = "{:3.2f} MB ".format(byte_to_megabyte(total_downloaded))
if total_size > 0:
percent_downloaded = total_downloaded * 100.0 / total_size
# use carriage return plus sys.stderr to overwrite stderr
download_rate = total_downloaded / (time.time() - self.start_time)
estimated_time = (total_size - total_downloaded) / download_rate
estimated_minutes = int(estimated_time / 60.0)
estimated_seconds = estimated_time - estimated_minutes * 60.0
status += ("{:3.2f} % {:5.2f} MB/sec {:2.0f} min {:2.0f} sec "
.format(percent_downloaded,
byte_to_megabyte(download_rate),
estimated_minutes, estimated_seconds))
status += " \r"
sys.stderr.write(status)
def download(url, download_file):
"""
Download a file from a url
"""
if not os.path.isfile(download_file):
try:
sys.stderr.write("\nDownloading " + url + "\n")
file, headers = urlretrieve(url, download_file,
reporthook=ReportHook().report)
except EnvironmentError:
sys.stderr.write("\nWarning: Unable to download " + url + "\n")
else:
sys.stderr.write("\nFile {} already present!\n".format(download_file))
def download_unpack_tar(FILE_LIST, download_file_name, folder, bowtie2_build, nproc):
"""
Download the url to the file and decompress into the folder
"""
# Create the folder if it does not already exist
if not os.path.isdir(folder):
try:
os.makedirs(folder)
except EnvironmentError:
sys.exit("ERROR: Unable to create folder for database install: " + folder)
# Check the directory permissions
if not os.access(folder, os.W_OK):
sys.exit("ERROR: The directory is not writeable: " + folder + ". "
"Please modify the permissions.")
tar_file = os.path.join(folder, "mpa_" + download_file_name + ".tar")
md5_file = os.path.join(folder, "mpa_" + download_file_name + ".md5")
if not os.path.isfile(md5_file) or not os.path.isfile(tar_file):
#Download the list of all the files in the Dropbox folder
list_file_path = os.path.join(folder, "file_list.txt")
download(FILE_LIST, list_file_path)
if os.path.isfile(list_file_path):
with open(list_file_path) as f:
ls_f = dict( [row.strip().split() for row in f])
url_tar_file = ls_f["mpa_" + download_file_name + ".tar"]
download(url_tar_file, tar_file)
# download MD5 checksum
url_md5_file = ls_f["mpa_" + download_file_name + ".md5"]
download(url_md5_file, md5_file)
md5_md5 = None
md5_tar = None
if os.path.isfile(md5_file):
with open(md5_file) as f:
for row in f:
md5_md5 = row.strip().split(' ')[0]
else:
sys.stderr.write('File "{}" not found!\n'.format(md5_file))
# compute MD5 of .tar.bz2
if os.path.isfile(tar_file):
hash_md5 = hashlib.md5()
with open(tar_file, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
md5_tar = hash_md5.hexdigest()[:32]
else:
sys.stderr.write('File "{}" not found!\n'.format(tar_file))
if (md5_tar is None) or (md5_md5 is None):
sys.exit("MD5 checksums not found, something went wrong!")
# compare checksums
if md5_tar != md5_md5:
sys.exit("MD5 checksums do not correspond! If this happens again, you should remove the database files and "
"rerun MetaPhlAn2 so they are re-downloaded")
# untar
try:
tarfile_handle = tarfile.open(tar_file)
tarfile_handle.extractall(path=folder)
tarfile_handle.close()
except EnvironmentError:
sys.stderr.write("Warning: Unable to extract {}.\n".format(tar_file))
# uncompress sequences
bz2_file = os.path.join(folder, "mpa_" + download_file_name + ".fna.bz2")
fna_file = os.path.join(folder, "mpa_" + download_file_name + ".fna")
if not os.path.isfile(fna_file):
sys.stderr.write('\n\nDecompressing {} into {}\n'.format(bz2_file, fna_file))
with open(fna_file, 'wb') as fna_h, bz2.BZ2File(bz2_file, 'rb') as bz2_h:
for data in iter(lambda: bz2_h.read(100 * 1024), b''):
fna_h.write(data)
# build bowtie2 indexes
if not glob(os.path.join(folder, "mpa_" + download_file_name + "*.bt2")):
bt2_base = os.path.join(folder, "mpa_" + download_file_name)
bt2_cmd = [bowtie2_build, '--quiet']
if nproc > 1:
bt2_build_output = subp.check_output([bowtie2_build, '--usage'], stderr=subp.STDOUT)
if 'threads' in str(bt2_build_output):
bt2_cmd += ['--threads', str(nproc)]
bt2_cmd += ['-f', fna_file, bt2_base]
sys.stderr.write('\nBuilding Bowtie2 indexes\n')
try:
subp.check_call(bt2_cmd)
except Exception as e:
sys.stderr.write("Fatal error running '{}'\nError message: '{}'\n\n".format(' '.join(bt2_cmd), e))
sys.exit(1)
    sys.stderr.write('Removing uncompressed database {}\n'.format(fna_file))
os.remove(fna_file)
def check_and_install_database(index, bowtie2_db, bowtie2_build, nproc):
""" Check if the database is installed, if not download and install """
if len(glob(os.path.join(bowtie2_db, "mpa_{}*".format(index)))) >= 7:
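        # at least 7 files matching mpa_<index>* (presumably the pickled taxonomy plus the six
        # Bowtie2 .bt2 index files) are taken as evidence that the database is already installed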
return
# download the tar archive and decompress
    sys.stderr.write("\nDownloading MetaPhlAn2 database\nPlease note that due to "
                     "its size this might take a few minutes\n")
download_unpack_tar(FILE_LIST, index, bowtie2_db, bowtie2_build, nproc)
sys.stderr.write("\nDownload complete\n")
def set_mapping_arguments(index, bowtie2_db):
mpa_pkl = 'mpa_pkl'
bowtie2db = 'bowtie2db'
if os.path.isfile(os.path.join(bowtie2_db, "mpa_{}.pkl".format(index))):
mpa_pkl = os.path.join(bowtie2_db, "mpa_{}.pkl".format(index))
if glob(os.path.join(bowtie2_db, "mpa_{}*.bt2".format(index))):
bowtie2db = os.path.join(bowtie2_db, "mpa_{}".format(index))
return (mpa_pkl, bowtie2db)
def run_bowtie2(fna_in, outfmt6_out, bowtie2_db, preset, nproc, file_format="multifasta",
exe=None, samout=None, min_alignment_len=None, read_min_len=0):
# checking read_fastx.py
read_fastx = "read_fastx.py"
try:
subp.check_call([read_fastx, "-h"], stdout=DEVNULL)
except Exception as e:
try:
read_fastx = os.path.join(os.path.join(os.path.dirname(__file__), "utils"), read_fastx)
subp.check_call([read_fastx, "-h"], stdout=DEVNULL)
except Exception as e:
sys.stderr.write("OSError: fatal error running '{}'. Is it in the system path?\n".format(read_fastx))
sys.exit(1)
# checking bowtie2
try:
subp.check_call([exe if exe else 'bowtie2', "-h"], stdout=DEVNULL)
except Exception as e:
sys.stderr.write('OSError: "{}"\nFatal error running BowTie2. Is BowTie2 in the system path?\n'.format(e))
sys.exit(1)
try:
if fna_in:
readin = subp.Popen([read_fastx, '-l', str(read_min_len), fna_in], stdout=subp.PIPE)
else:
readin = subp.Popen([read_fastx, '-l', str(read_min_len)], stdin=sys.stdin, stdout=subp.PIPE)
bowtie2_cmd = [exe if exe else 'bowtie2', "--quiet", "--no-unal", "--{}".format(preset),
"-S", "-", "-x", bowtie2_db]
if int(nproc) > 1:
bowtie2_cmd += ["-p", str(nproc)]
bowtie2_cmd += ["-U", "-"] # if not stat.S_ISFIFO(os.stat(fna_in).st_mode) else []
if file_format == "multifasta":
bowtie2_cmd += ["-f"]
p = subp.Popen(bowtie2_cmd, stdout=subp.PIPE, stdin=readin.stdout)
readin.stdout.close()
lmybytes, outf = (mybytes, bz2.BZ2File(outfmt6_out, "w")) if outfmt6_out.endswith(".bz2") else (str, open(outfmt6_out, "w"))
try:
if samout:
if samout[-4:] == '.bz2':
sam_file = bz2.BZ2File(samout, 'w')
else:
sam_file = open(samout, 'wb')
except IOError as e:
sys.stderr.write('IOError: "{}"\nUnable to open sam output file.\n'.format(e))
sys.exit(1)
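        # Stream Bowtie2's SAM output: skip header lines ('@') and unmapped records ('*'),
        # optionally enforce --min_alignment_len on the longest M (match) block of the CIGAR,
        # and store each hit as "read_id<TAB>marker_id" in the bowtie2out file.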
for line in p.stdout:
if samout:
sam_file.write(line)
o = read_and_split_line(line)
if not o[0].startswith('@'):
if not o[2].endswith('*'):
if ((min_alignment_len is None) or
(max([int(x.strip('M')) for x in re.findall(r'(\d*M)', o[5]) if x]) >= min_alignment_len)):
outf.write(lmybytes("\t".join([o[0], o[2]]) + "\n"))
outf.close()
if samout:
sam_file.close()
p.communicate()
except OSError as e:
sys.stderr.write('OSError: "{}"\nFatal error running BowTie2.\n'.format(e))
sys.exit(1)
except ValueError as e:
sys.stderr.write('ValueError: "{}"\nFatal error running BowTie2.\n'.format(e))
sys.exit(1)
except IOError as e:
sys.stderr.write('IOError: "{}"\nFatal error running BowTie2.\n'.format(e))
sys.exit(1)
if p.returncode == 13:
        sys.stderr.write("Permission Denied Error: fatal error running BowTie2. "
                         "Is the BowTie2 file in the path with execution and read permissions?\n")
sys.exit(1)
elif p.returncode != 0:
sys.stderr.write("Error while running bowtie2.\n")
sys.exit(1)
#def guess_input_format( inp_file ):
# if "," in inp_file:
# sys.stderr.write( "Sorry, I cannot guess the format of the input, when "
# "more than one file is specified. Please set the --input_type parameter \n" )
# sys.exit(1)
#
# with open( inp_file ) as inpf:
# for i,l in enumerate(inpf):
# line = l.strip()
# if line[0] == '#': continue
# if line[0] == '>': return 'multifasta'
# if line[0] == '@': return 'multifastq'
# if len(l.split('\t')) == 2: return 'bowtie2out'
# if i > 20: break
# return None
class TaxClade:
min_cu_len = -1
markers2lens = None
stat = None
quantile = None
avoid_disqm = False
def __init__( self, name, uncl = False, id_int = 0 ):
self.children, self.markers2nreads = {}, {}
self.name, self.father = name, None
self.uncl, self.subcl_uncl = uncl, False
self.abundance, self.uncl_abundance = None, 0
self.id = id_int
def add_child( self, name, id_int ):
new_clade = TaxClade( name, id_int=id_int )
self.children[name] = new_clade
new_clade.father = self
return new_clade
def get_terminals( self ):
terms = []
if not self.children:
return [self]
for c in self.children.values():
terms += c.get_terminals()
return terms
def get_full_name( self ):
fullname = [self.name]
cl = self.father
while cl:
fullname = [cl.name] + fullname
cl = cl.father
return "|".join(fullname[1:])
def get_normalized_counts( self ):
return [(m,float(n)*1000.0/self.markers2lens[m])
for m,n in self.markers2nreads.items()]
def compute_abundance( self ):
if self.abundance is not None: return self.abundance
sum_ab = sum([c.compute_abundance() for c in self.children.values()])
rat_nreads = sorted([(self.markers2lens[m],n)
for m,n in self.markers2nreads.items()],
key = lambda x: x[1])
rat_nreads, removed = [], []
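        # Quasi-marker disambiguation (disabled with --avoid_disqm): a marker is flagged as
        # misidentified and set aside when, for any external clade it also hits, more than a third
        # of that clade's markers are detected in the sample; up to 10 of the removed markers are
        # added back when too few reliable markers remain, except for viral clades and clades with
        # a single terminal.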
for m,n in sorted(self.markers2nreads.items(),key=lambda pars:pars[0]):
misidentified = False
if not self.avoid_disqm:
for e in self.markers2exts[m]:
toclade = self.taxa2clades[e]
m2nr = toclade.markers2nreads
tocladetmp = toclade
while len(tocladetmp.children) == 1:
tocladetmp = list(tocladetmp.children.values())[0]
m2nr = tocladetmp.markers2nreads
nonzeros = sum([v>0 for v in m2nr.values()])
if len(m2nr):
if float(nonzeros) / len(m2nr) > 0.33:
misidentified = True
removed.append( (self.markers2lens[m],n) )
break
if not misidentified:
rat_nreads.append( (self.markers2lens[m],n) )
if not self.avoid_disqm and len(removed):
n_rat_nreads = float(len(rat_nreads))
n_removed = float(len(removed))
n_tot = n_rat_nreads + n_removed
n_ripr = 10
if len(self.get_terminals()) < 2:
n_ripr = 0
if "k__Viruses" in self.get_full_name():
n_ripr = 0
if n_rat_nreads < n_ripr and n_tot > n_rat_nreads:
rat_nreads += removed[:n_ripr-int(n_rat_nreads)]
rat_nreads = sorted(rat_nreads, key = lambda x: x[1])
rat_v,nreads_v = zip(*rat_nreads) if rat_nreads else ([],[])
rat, nrawreads, loc_ab = float(sum(rat_v)) or -1.0, sum(nreads_v), 0.0
quant = int(self.quantile*len(rat_nreads))
ql,qr,qn = (quant,-quant,quant) if quant else (None,None,0)
if self.name[0] == 't' and (len(self.father.children) > 1 or "_sp" in self.father.name or "k__Viruses" in self.get_full_name()):
non_zeros = float(len([n for r,n in rat_nreads if n > 0]))
nreads = float(len(rat_nreads))
if nreads == 0.0 or non_zeros / nreads < 0.7:
self.abundance = 0.0
return 0.0
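        # Convert the per-marker read counts into a clade abundance using the statistic selected
        # with --stat: robust (truncated/winsorized) averages of reads over marker lengths, or the
        # median of the length-normalized counts, with --stat_q controlling the trimmed quantile.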
if rat < 0.0:
pass
elif self.stat == 'avg_g' or (not qn and self.stat in ['wavg_g','tavg_g']):
loc_ab = nrawreads / rat if rat >= 0 else 0.0
elif self.stat == 'avg_l' or (not qn and self.stat in ['wavg_l','tavg_l']):
loc_ab = np.mean([float(n)/r for r,n in rat_nreads])
elif self.stat == 'tavg_g':
wnreads = sorted([(float(n)/r,r,n) for r,n in rat_nreads], key=lambda x:x[0])
den,num = zip(*[v[1:] for v in wnreads[ql:qr]])
loc_ab = float(sum(num))/float(sum(den)) if any(den) else 0.0
elif self.stat == 'tavg_l':
loc_ab = np.mean(sorted([float(n)/r for r,n in rat_nreads])[ql:qr])
elif self.stat == 'wavg_g':
vmin, vmax = nreads_v[ql], nreads_v[qr]
wnreads = [vmin]*qn+list(nreads_v[ql:qr])+[vmax]*qn
loc_ab = float(sum(wnreads)) / rat
elif self.stat == 'wavg_l':
wnreads = sorted([float(n)/r for r,n in rat_nreads])
vmin, vmax = wnreads[ql], wnreads[qr]
wnreads = [vmin]*qn+list(wnreads[ql:qr])+[vmax]*qn
loc_ab = np.mean(wnreads)
elif self.stat == 'med':
loc_ab = np.median(sorted([float(n)/r for r,n in rat_nreads])[ql:qr])
self.abundance = loc_ab
if rat < self.min_cu_len and self.children:
self.abundance = sum_ab
elif loc_ab < sum_ab:
self.abundance = sum_ab
if self.abundance > sum_ab and self.children: # *1.1??
self.uncl_abundance = self.abundance - sum_ab
self.subcl_uncl = not self.children and self.name[0] not in tax_units[-2:]
return self.abundance
def get_all_abundances( self ):
ret = [(self.name,self.abundance)]
if self.uncl_abundance > 0.0:
lchild = list(self.children.values())[0].name[:3]
ret += [(lchild+self.name[3:]+"_unclassified",self.uncl_abundance)]
if self.subcl_uncl and self.name[0] != tax_units[-2]:
cind = tax_units.index( self.name[0] )
ret += [( tax_units[cind+1]+self.name[1:]+"_unclassified",
self.abundance)]
for c in self.children.values():
ret += c.get_all_abundances()
return ret
class TaxTree:
def __init__( self, mpa, markers_to_ignore = None ): #, min_cu_len ):
self.root = TaxClade( "root" )
self.all_clades, self.markers2lens, self.markers2clades, self.taxa2clades, self.markers2exts = {}, {}, {}, {}, {}
TaxClade.markers2lens = self.markers2lens
TaxClade.markers2exts = self.markers2exts
TaxClade.taxa2clades = self.taxa2clades
self.id_gen = itertools.count(1)
# clades_txt = ((l.strip().split("|"),n) for l,n in mpa_pkl['taxonomy'].items())
clades_txt = ((l.strip().split("|"), n) for l, n in mpa['taxonomy'].items())
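        # Build the taxonomy tree: each "|"-separated lineage becomes a chain of TaxClade nodes;
        # terminal ('t__') levels store the genome length, which add_lens() then averages up the tree.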
for clade,lenc in clades_txt:
father = self.root
for clade_lev in clade: # !!!!! [:-1]:
if not clade_lev in father.children:
father.add_child( clade_lev, id_int=next(self.id_gen) )
self.all_clades[clade_lev] = father.children[clade_lev]
if clade_lev[0] == "t":
self.taxa2clades[clade_lev[3:]] = father
father = father.children[clade_lev]
if clade_lev[0] == "t":
father.glen = lenc
def add_lens( node ):
if not node.children:
return node.glen
lens = []
for c in node.children.values():
lens.append( add_lens( c ) )
node.glen = sum(lens) / len(lens)
return node.glen
add_lens( self.root )
# for k,p in mpa_pkl['markers'].items():
for k, p in mpa['markers'].items():
if k in markers_to_exclude:
continue
if k in markers_to_ignore:
continue
self.markers2lens[k] = p['len']
self.markers2clades[k] = p['clade']
self.add_reads(k, 0)
self.markers2exts[k] = p['ext']
def set_min_cu_len( self, min_cu_len ):
TaxClade.min_cu_len = min_cu_len
def set_stat( self, stat, quantile, avoid_disqm = False ):
TaxClade.stat = stat
TaxClade.quantile = quantile
TaxClade.avoid_disqm = avoid_disqm
def add_reads( self, marker, n,
ignore_viruses = False, ignore_eukaryotes = False,
ignore_bacteria = False, ignore_archaea = False ):
clade = self.markers2clades[marker]
cl = self.all_clades[clade]
if ignore_viruses or ignore_eukaryotes or ignore_bacteria or ignore_archaea:
cn = cl.get_full_name()
if ignore_viruses and cn.startswith("k__Viruses"):
return ""
if ignore_eukaryotes and cn.startswith("k__Eukaryota"):
return ""
if ignore_archaea and cn.startswith("k__Archaea"):
return ""
if ignore_bacteria and cn.startswith("k__Bacteria"):
return ""
while len(cl.children) == 1:
cl = list(cl.children.values())[0]
cl.markers2nreads[marker] = n
return cl.get_full_name()
def markers2counts( self ):
m2c = {}
for k,v in self.all_clades.items():
for m,c in v.markers2nreads.items():
m2c[m] = c
return m2c
def clade_profiles( self, tax_lev, get_all = False ):
cl2pr = {}
for k,v in self.all_clades.items():
if tax_lev and not k.startswith(tax_lev):
continue
prof = v.get_normalized_counts()
if not get_all and ( len(prof) < 1 or not sum([p[1] for p in prof]) > 0.0 ):
continue
cl2pr[v.get_full_name()] = prof
return cl2pr
def relative_abundances( self, tax_lev ):
cl2ab_n = dict([(k,v) for k,v in self.all_clades.items()
if k.startswith("k__") and not v.uncl])
cl2ab, cl2glen, tot_ab = {}, {}, 0.0
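        # Total abundance is the sum over kingdom-level clades; each clade is then reported as a
        # fraction of this total (ret_d) alongside the raw coverage, average genome length and
        # estimated read counts used by rel_ab_w_read_stats (ret_r).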
for k,v in cl2ab_n.items():
tot_ab += v.compute_abundance()
for k,v in cl2ab_n.items():
for cl,ab in sorted(v.get_all_abundances(),key=lambda pars:pars[0]):
if not tax_lev:
if cl not in self.all_clades:
to = tax_units.index(cl[0])
t = tax_units[to-1]
cl = t + cl.split("_unclassified")[0][1:]
cl = self.all_clades[cl].get_full_name()
spl = cl.split("|")
cl = "|".join(spl+[tax_units[to]+spl[-1][1:]+"_unclassified"])
glen = self.all_clades[spl[-1]].glen
else:
glen = self.all_clades[cl].glen
cl = self.all_clades[cl].get_full_name()
elif not cl.startswith(tax_lev):
if cl in self.all_clades:
glen = self.all_clades[cl].glen
else:
glen = 1.0
continue
cl2ab[cl] = ab
cl2glen[cl] = glen
ret_d = dict([( k, float(v) / tot_ab if tot_ab else 0.0) for k,v in cl2ab.items()])
ret_r = dict([( k, (v,cl2glen[k],float(v)*cl2glen[k])) for k,v in cl2ab.items()])
#ret_r = dict([( k, float(v) / tot_ab if tot_ab else 0.0) for k,v in cl2ab.items()])
if tax_lev:
ret_d[tax_lev+"unclassified"] = 1.0 - sum(ret_d.values())
return ret_d, ret_r
def map2bbh(mapping_f, input_type='bowtie2out', min_alignment_len=None):
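    # Parse the mapping file (bowtie2out "read<TAB>marker" pairs or a SAM file) and return a dict
    # from each marker to the set of read IDs assigned to it (one marker kept per read).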
if not mapping_f:
ras, ras_line, inpf = plain_read_and_split, plain_read_and_split_line, sys.stdin
else:
if mapping_f.endswith(".bz2"):
ras, ras_line, inpf = read_and_split, read_and_split_line, bz2.BZ2File(mapping_f, "r")
else:
ras, ras_line, inpf = plain_read_and_split, plain_read_and_split_line, open(mapping_f)
reads2markers = {}
if input_type == 'bowtie2out':
for r, c in ras(inpf):
reads2markers[r] = c
elif input_type == 'sam':
for line in inpf:
o = ras_line(line)
if ((o[0][0] != '@') and
(o[2][-1] != '*') and
((min_alignment_len is None) or
(max([int(x.strip('M')) for x in re.findall(r'(\d*M)', o[5]) if x]) >= min_alignment_len))):
reads2markers[o[0]] = o[2]
inpf.close()
markers2reads = defdict(set)
for r, m in reads2markers.items():
markers2reads[m].add(r)
return markers2reads
def maybe_generate_biom_file(tree, pars, abundance_predictions):
json_key = "MetaPhlAn2"
if not pars['biom']:
return None
if not abundance_predictions:
biom_table = biom.Table([], [], []) # create empty BIOM table
with open(pars['biom'], 'w') as outfile:
biom_table.to_json(json_key, direct_io=outfile)
return True
delimiter = "|" if len(pars['mdelim']) > 1 else pars['mdelim']
def istip(clade_name):
end_name = clade_name.split(delimiter)[-1]
return end_name.startswith("t__") or end_name.endswith("_unclassified")
def findclade(clade_name):
if clade_name.endswith('_unclassified'):
name = clade_name.split(delimiter)[-2]
else:
name = clade_name.split(delimiter)[-1]
return tree.all_clades[name]
def to_biomformat(clade_name):
return {'taxonomy': clade_name.split(delimiter)}
clades = iter((abundance, findclade(name))
for (name, abundance) in abundance_predictions if istip(name))
packed = iter(([abundance], clade.get_full_name(), clade.id)
for (abundance, clade) in clades)
# unpack that tuple here to stay under 80 chars on a line
data, clade_names, clade_ids = zip(*packed)
# biom likes column vectors, so we give it an array like this:
# np.array([a],[b],[c])
data = np.array(data)
sample_ids = [pars['sample_id']]
table_id = 'MetaPhlAn2_Analysis'
#**********************************************************************************************
# Modification of Code : *
# Modified the code so instead of using the current clade IDs, which are numbers, we will *
# use the clade_names *
# Users reported the biom output is invalid and also the IDs were changing from run to *
# run. *
# George Weingart 05/22/2017 [email protected] *
#**********************************************************************************************
if LooseVersion(biom.__version__) < LooseVersion("2.0.0"):
biom_table = biom.table.table_factory(
data,
sample_ids,
######## clade_ids, #Modified by George Weingart 5/22/2017 - We will use instead the clade_names
clade_names, #Modified by George Weingart 5/22/2017 - We will use instead the clade_names
sample_metadata = None,
observation_metadata = map(to_biomformat, clade_names),
table_id = table_id,
constructor = biom.table.DenseOTUTable
)
with open(pars['biom'], 'w') as outfile:
json.dump( biom_table.getBiomFormatObject(json_key),
outfile )
else: # Below is the biom2 compatible code
biom_table = biom.table.Table(
data,
#clade_ids, #Modified by George Weingart 5/22/2017 - We will use instead the clade_names
clade_names, #Modified by George Weingart 5/22/2017 - We will use instead the clade_names
sample_ids,
sample_metadata = None,
observation_metadata = map(to_biomformat, clade_names),
table_id = table_id,
input_is_dense = True
)
with open(pars['biom'], 'w') as outfile:
biom_table.to_json( json_key,
direct_io = outfile )
return True
def metaphlan2():
pars = read_params(sys.argv)
# check if the database is installed, if not then install
check_and_install_database(pars['index'], pars['bowtie2db'], pars['bowtie2_build'], pars['nproc'])
if pars['install']:
sys.stderr.write('The database is installed\n')
return
# set correct map_pkl and bowtie2db variables
pars['mpa_pkl'], pars['bowtie2db'] = set_mapping_arguments(pars['index'], pars['bowtie2db'])
#if pars['inp'] is None and ( pars['input_type'] is None or pars['input_type'] == 'automatic'):
# sys.stderr.write( "The --input_type parameter need top be specified when the "
# "input is provided from the standard input.\n"
# "Type metaphlan.py -h for more info\n")
# sys.exit(0)
if (pars['bt2_ps'] in ["sensitive-local", "very-sensitive-local"]) and (pars['min_alignment_len'] is None):
pars['min_alignment_len'] = 100
        sys.stderr.write('Warning! bt2_ps is set to local mode and min_alignment_len is None: automatically '
                         'setting min_alignment_len to 100! If you do not want this behaviour, rerun the command and set '
                         'min_alignment_len to a specific value.\n')
if pars['input_type'] == 'fastq':
pars['input_type'] = 'multifastq'
if pars['input_type'] == 'fasta':
pars['input_type'] = 'multifasta'
#if pars['input_type'] == 'automatic':
# pars['input_type'] = guess_input_format( pars['inp'] )
# if not pars['input_type']:
# sys.stderr.write( "Sorry, I cannot guess the format of the input file, please "
# "specify the --input_type parameter \n" )
# sys.exit(1)
# check for the mpa_pkl file
if not os.path.isfile(pars['mpa_pkl']):
        sys.stderr.write("Error: Unable to find the mpa_pkl file at: " + pars['mpa_pkl'] +
                         "\nExpecting location ${mpa_dir}/db_v20/mpa_v20_m200.pkl "
"\nSelect the file location with the option --mpa_pkl.\n"
"Exiting...\n\n")
sys.exit(1)
if pars['ignore_markers']:
with open(pars['ignore_markers']) as ignv:
ignore_markers = set([l.strip() for l in ignv])
else:
ignore_markers = set()
no_map = False
if pars['input_type'] == 'multifasta' or pars['input_type'] == 'multifastq':
bow = pars['bowtie2db'] is not None
if not bow:
sys.stderr.write( "No MetaPhlAn BowTie2 database provided\n "
"[--bowtie2db options]!\n"
"Exiting...\n\n" )
sys.exit(1)
if pars['no_map']:
pars['bowtie2out'] = tf.NamedTemporaryFile(dir=pars['tmp_dir']).name
no_map = True
else:
if bow and not pars['bowtie2out']:
if pars['inp'] and "," in pars['inp']:
sys.stderr.write("Error! --bowtie2out needs to be specified when multiple "
"fastq or fasta files (comma separated) are provided\n")
sys.exit(1)
fname = pars['inp']
if fname is None:
fname = "stdin_map"
elif stat.S_ISFIFO(os.stat(fname).st_mode):
fname = "fifo_map"
pars['bowtie2out'] = fname + ".bowtie2out.txt"
if os.path.exists( pars['bowtie2out'] ):
sys.stderr.write(
"BowTie2 output file detected: " + pars['bowtie2out'] + "\n"
"Please use it as input or remove it if you want to "
"re-perform the BowTie2 run.\n"
"Exiting...\n\n" )
sys.exit(1)
if bow and not all([os.path.exists(".".join([str(pars['bowtie2db']), p]))
for p in ["1.bt2", "2.bt2", "3.bt2", "4.bt2", "rev.1.bt2", "rev.2.bt2"]]):
sys.stderr.write("No MetaPhlAn BowTie2 database found (--index "
"option)!\nExpecting location {}\nExiting..."
.format(pars['bowtie2db']))
sys.exit(1)
if bow:
run_bowtie2(pars['inp'], pars['bowtie2out'], pars['bowtie2db'],
pars['bt2_ps'], pars['nproc'], file_format=pars['input_type'],
exe=pars['bowtie2_exe'], samout=pars['samout'],
min_alignment_len=pars['min_alignment_len'], read_min_len=pars['read_min_len'])
pars['input_type'] = 'bowtie2out'
pars['inp'] = pars['bowtie2out'] # !!!
with open( pars['mpa_pkl'], 'rb' ) as a:
mpa_pkl = pickle.loads( bz2.decompress( a.read() ) )
tree = TaxTree( mpa_pkl, ignore_markers )
tree.set_min_cu_len( pars['min_cu_len'] )
tree.set_stat( pars['stat'], pars['stat_q'], pars['avoid_disqm'] )
markers2reads = map2bbh(pars['inp'], pars['input_type'],
pars['min_alignment_len'])
if no_map:
os.remove( pars['inp'] )
map_out = []
for marker,reads in sorted(markers2reads.items(), key=lambda pars: pars[0]):
if marker not in tree.markers2lens:
continue
tax_seq = tree.add_reads( marker, len(reads),
ignore_viruses = pars['ignore_viruses'],
ignore_eukaryotes = pars['ignore_eukaryotes'],
ignore_bacteria = pars['ignore_bacteria'],
ignore_archaea = pars['ignore_archaea'],
)
if tax_seq:
map_out +=["\t".join([r,tax_seq]) for r in sorted(reads)]
if pars['output'] is None and pars['output_file'] is not None:
pars['output'] = pars['output_file']
with (open(pars['output'],"w") if pars['output'] else sys.stdout) as outf:
outf.write('\t'.join((pars["sample_id_key"], pars["sample_id"])) + '\n')
if pars['t'] == 'reads_map':
outf.write( "\n".join( map_out ) + "\n" )
elif pars['t'] == 'rel_ab':
cl2ab, _ = tree.relative_abundances(
pars['tax_lev']+"__" if pars['tax_lev'] != 'a' else None )
outpred = [(k,round(v*100.0,5)) for k,v in cl2ab.items() if v > 0.0]
if outpred:
for k,v in sorted( outpred, reverse=True,
key=lambda x:x[1]+(100.0*(8-x[0].count("|"))) ):
outf.write( "\t".join( [k,str(v)] ) + "\n" )
else:
outf.write( "unclassified\t100.0\n" )
maybe_generate_biom_file(tree, pars, outpred)
elif pars['t'] == 'rel_ab_w_read_stats':
cl2ab, rr = tree.relative_abundances(
pars['tax_lev']+"__" if pars['tax_lev'] != 'a' else None )
outpred = [(k,round(v*100.0,5)) for k,v in cl2ab.items() if v > 0.0]
totl = 0
if outpred:
outf.write( "\t".join( [ "#clade_name",
"relative_abundance",
"coverage",
"average_genome_length_in_the_clade",
"estimated_number_of_reads_from_the_clade" ]) +"\n" )
for k,v in sorted( outpred, reverse=True,
key=lambda x:x[1]+(100.0*(8-x[0].count("|"))) ):
outf.write( "\t".join( [ k,
str(v),
str(rr[k][0]) if k in rr else "-",
str(rr[k][1]) if k in rr else "-",
str(int(round(rr[k][2],0)) if k in rr else "-")
] ) + "\n" )
if "|" not in k:
totl += (int(round(rr[k][2],0)) if k in rr else 0)
outf.write( "#estimated total number of reads from known clades: " + str(totl)+"\n")
else:
outf.write( "unclassified\t100.0\n" )
maybe_generate_biom_file(tree, pars, outpred)
elif pars['t'] == 'clade_profiles':
cl2pr = tree.clade_profiles( pars['tax_lev']+"__" if pars['tax_lev'] != 'a' else None )
for c,p in cl2pr.items():
mn,n = zip(*p)
outf.write( "\t".join( [""]+[str(s) for s in mn] ) + "\n" )
outf.write( "\t".join( [c]+[str(s) for s in n] ) + "\n" )
elif pars['t'] == 'marker_ab_table':
cl2pr = tree.clade_profiles( pars['tax_lev']+"__" if pars['tax_lev'] != 'a' else None )
for v in cl2pr.values():
outf.write( "\n".join(["\t".join([str(a),str(b/float(pars['nreads'])) if pars['nreads'] else str(b)])
for a,b in v if b > 0.0]) + "\n" )
elif pars['t'] == 'marker_pres_table':
cl2pr = tree.clade_profiles( pars['tax_lev']+"__" if pars['tax_lev'] != 'a' else None )
for v in cl2pr.values():
strout = ["\t".join([str(a),"1"]) for a,b in v if b > pars['pres_th']]
if strout:
outf.write( "\n".join(strout) + "\n" )
elif pars['t'] == 'marker_counts':
outf.write( "\n".join( ["\t".join([m,str(c)]) for m,c in tree.markers2counts().items() ]) +"\n" )
elif pars['t'] == 'clade_specific_strain_tracker':
cl2pr = tree.clade_profiles( None, get_all = True )
cl2ab, _ = tree.relative_abundances( None )
strout = []
for cl,v in cl2pr.items():
if cl.endswith(pars['clade']) and cl2ab[cl]*100.0 < pars['min_ab']:
strout = []
break
if pars['clade'] in cl:
strout += ["\t".join([str(a),str(int(b > pars['pres_th']))]) for a,b in v]
if strout:
strout = sorted(strout,key=lambda x:x[0])
outf.write( "\n".join(strout) + "\n" )
else:
sys.stderr.write("Clade "+pars['clade']+" not present at an abundance >"+str(round(pars['min_ab'],2))+"%, "
"so no clade specific markers are reported\n")
if __name__ == '__main__':
metaphlan2()
|
the-stack_0_5688 | # import modules
import numpy as np
from numpy.linalg import norm
import astropy.units as u
from astropy.constants import G
from pathlib import Path
# import plotting modules
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# my modules
from galaxy.galaxy import Galaxy
from galaxy.centerofmass import CenterOfMass
from galaxy.massprofile import MassProfile
from galaxy.timecourse import TimeCourse
from galaxy.plots import Plots
def make_plot(gname, snap, lim, fname):
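    # Load the snapshot (preferring the SQL-backed galaxy database and falling back to the local
    # HighRes data directory if that lookup fails), then rotate the particles into the disk frame
    # around the centre of mass and save a density image.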
try:
gal = Galaxy(gname, snap, usesql=True, ptype=2)
t = gal.time.value / 1000
except TypeError:
gal = Galaxy(gname, snap, datadir=datadir, ptype=2)
t = gal.time.value / 1000
com = CenterOfMass(gal)
tc = TimeCourse(usesql=True)
com_xyz, com_vxyz = tc.get_one_com(gname, snap)
# gal_xyzD, gal_vxyzD = com.center_com(com_xyz, com_vxyz)
    # rotate particle positions into the disk frame about the center of mass
rn, _ = com.rotate_frame(com_p=com_xyz, com_v=com_vxyz)
p.plot_density(rn, gname, snap, t, pngout=True, lim=lim, fname=fname)
plt.close('all')
p = Plots()
limits = {'MW': (50, 80),
'M31': (50, 80),
'M33': (30, 100)}
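# Render a density image for every snapshot of each galaxy in two groups (early: snaps 0-299 at the
# tighter plot limit, late: snaps 290-801 at the wider limit) and accumulate the ffmpeg commands
# that stitch the PNG frames into movies; the commands are written to make_densities.sh as we go.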
cmd = ''
datadir = Path.home() / 'HighRes'
cmdfile = 'make_densities.sh'
with open(cmdfile, 'w') as fp:
fp.write(cmd)
for gname in ('MW', 'M31', 'M33'):
print(gname)
group = 'early'
for snap in np.arange(0, 300):
print(snap, end=' ')
lim = limits[gname][0]
fname = f'png_files/{gname}_density_{group}_{snap:03}.png'
make_plot(gname, snap, lim, fname=fname)
cmd += f'ffmpeg -r 10 -start_number 0 -s 1920x1080'
cmd += f' -i png_files/{gname}_density_early_%03d.png'
cmd += f' -vcodec libx264 -vf fps=25 -crf 25 -pix_fmt yuv420p {gname}_early.mp4\n'
with open(cmdfile, 'w') as fp:
fp.write(cmd)
for snap in np.arange(290, 802):
print(snap, end=' ')
group = 'late'
lim = limits[gname][1]
fname = f'png_files/{gname}_density_{group}_{snap:03}.png'
make_plot(gname, snap, lim, fname=fname)
cmd += f'ffmpeg -r 10 -start_number 290 -s 1920x1080'
cmd += f' -i png_files/{gname}_density_late_%03d.png'
cmd += f' -vcodec libx264 -vf fps=25 -crf 25 -pix_fmt yuv420p {gname}_late.mp4\n'
with open(cmdfile, 'w') as fp:
fp.write(cmd)
|
the-stack_0_5689 | '''
This code is due to Yutong Deng (@yutongD), Yingtong Dou (@Yingtong Dou) and UIC BDSC Lab
DGFraud (A Deep Graph-based Toolbox for Fraud Detection)
https://github.com/safe-graph/DGFraud
'''
import tensorflow as tf
import argparse
from algorithms.Player2Vec.Player2Vec import Player2Vec
import time
from utils.data_loader import *
from utils.utils import *
# os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
# init the common args, expect the model specific args
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=123, help='Random seed.')
parser.add_argument('--dataset_str', type=str, default='dblp', help="['dblp','example']")
parser.add_argument('--epoch_num', type=int, default=30, help='Number of epochs to train.')
parser.add_argument('--batch_size', type=int, default=1000)
parser.add_argument('--momentum', type=int, default=0.9)
    parser.add_argument('--learning_rate', default=0.001, help='the learning rate.')
# GCN args
parser.add_argument('--hidden1', default=16, help='Number of units in GCN hidden layer 1.')
parser.add_argument('--hidden2', default=16, help='Number of units in GCN hidden layer 2.')
parser.add_argument('--gcn_output', default=4, help='gcn output size.')
args = parser.parse_args()
return args
def set_env(args):
tf.reset_default_graph()
np.random.seed(args.seed)
tf.set_random_seed(args.seed)
# get batch data
def get_data(ix, int_batch, train_size):
if ix + int_batch >= train_size:
ix = train_size - int_batch
end = train_size
else:
end = ix + int_batch
return train_data[ix:end], train_label[ix:end]
def load_data(args):
if args.dataset_str == 'dblp':
adj_list, features, train_data, train_label, test_data, test_label = load_data_dblp()
node_size = features.shape[0]
node_embedding = features.shape[1]
class_size = train_label.shape[1]
train_size = len(train_data)
paras = [node_size, node_embedding, class_size, train_size]
return adj_list, features, train_data, train_label, test_data, test_label, paras
def train(args, adj_list, features, train_data, train_label, test_data, test_label, paras):
with tf.Session() as sess:
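        # normalize each adjacency matrix (one per graph view / meta-path) before feeding the model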
adj_data = [normalize_adj(adj) for adj in adj_list]
meta_size = len(adj_list)
net = Player2Vec(session=sess, class_size=paras[2], gcn_output1=args.hidden1,
meta=meta_size, nodes=paras[0], embedding=paras[1], encoding=args.gcn_output)
sess.run(tf.global_variables_initializer())
# net.load(sess)
t_start = time.clock()
for epoch in range(args.epoch_num):
train_loss = 0
train_acc = 0
count = 0
for index in range(0, paras[3], args.batch_size):
batch_data, batch_label = get_data(index, args.batch_size, paras[3])
loss, acc, pred, prob = net.train(features, adj_data, batch_label,
batch_data, args.learning_rate,
args.momentum)
print("batch loss: {:.4f}, batch acc: {:.4f}".format(loss, acc))
# print(prob, pred)
train_loss += loss
train_acc += acc
count += 1
train_loss = train_loss / count
train_acc = train_acc / count
print("epoch{:d} : train_loss: {:.4f}, train_acc: {:.4f}".format(epoch, train_loss, train_acc))
# net.save(sess)
t_end = time.clock()
print("train time=", "{:.5f}".format(t_end - t_start))
print("Train end!")
test_acc, test_pred, test_probabilities, test_tags = net.test(features, adj_data, test_label,
test_data)
print("test acc:", test_acc)
if __name__ == "__main__":
args = arg_parser()
set_env(args)
adj_list, features, train_data, train_label, test_data, test_label, paras = load_data(args)
train(args, adj_list, features, train_data, train_label, test_data, test_label, paras)
|
the-stack_0_5690 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Python for AHDA.
Part 5, Example 10.
"""
# Named Entity Recognition
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
print()
sentence = "President Trump visited the United Nations headquarters in New York."
tokens = nltk.word_tokenize(sentence)
pos_tags = nltk.pos_tag(tokens)
print(nltk.ne_chunk(pos_tags))
print()
# as a one liner if you prefer
# print(nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(sentence))))
|
the-stack_0_5692 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import wx
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import time
sys.path.append(os.path.abspath(".."))
from mem import RTxxx_memcore
from ui import RTxxx_uidef
from ui import uidef
from ui import uivar
from ui import uilang
kRetryPingTimes = 5
class secBootRTxxxMain(RTxxx_memcore.secBootRTxxxMem):
def __init__(self, parent):
RTxxx_memcore.secBootRTxxxMem.__init__(self, parent)
self.RTxxx_isAllInOneActionTaskPending = False
if self.mcuSeries == uidef.kMcuSeries_iMXRTxxx:
self._RTxxx_initMain()
def _RTxxx_initMain( self ):
self.connectStage = uidef.kConnectStage_Rom
self.isBootableAppAllowedToView = False
self.lastTime = None
self.isAccessMemTaskPending = False
self.accessMemType = ''
self.isThereBoardConnection = False
def _RTxxx_startGaugeTimer( self ):
if not self.RTxxx_isAllInOneActionTaskPending:
self.lastTime = time.time()
self.initGauge()
def _RTxxx_stopGaugeTimer( self ):
if not self.RTxxx_isAllInOneActionTaskPending:
self.deinitGauge()
self.updateCostTime()
def RTxxx_callbackSetMcuSeries( self ):
self.RTxxx_initUi()
self.RTxxx_initGen()
self.RTxxx_initRun()
self._RTxxx_initMain()
self.RTxxx_setTargetSetupValue()
def RTxxx_callbackSetMcuDevice( self ):
self.RTxxx_setTargetSetupValue()
needToPlaySound = False
self.RTxxx_setSecureBootSeqColor(needToPlaySound)
def RTxxx_callbackSetBootDevice( self ):
self.RTxxx_setTargetSetupValue()
needToPlaySound = False
self.RTxxx_setSecureBootSeqColor(needToPlaySound)
def _RTxxx_retryToPingBootloader( self ):
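        # retry pinging the ROM bootloader up to kRetryPingTimes times (with a short delay for USB HID connections)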
pingStatus = False
pingCnt = kRetryPingTimes
while (not pingStatus) and pingCnt > 0:
pingStatus = self.RTxxx_pingRom()
if pingStatus:
break
pingCnt = pingCnt - 1
if self.isUsbhidPortSelected:
time.sleep(2)
return pingStatus
def _RTxxx_connectFailureHandler( self ):
self.connectStage = uidef.kConnectStage_Rom
self.updateConnectStatus('red')
usbIdList = self.RTxxx_getUsbid()
self.setPortSetupValue(self.connectStage, usbIdList, False, False)
self.isBootableAppAllowedToView = False
def _RTxxx_connectStateMachine( self, showError=True ):
connectSteps = RTxxx_uidef.kConnectStep_Normal
self.getOneStepConnectMode()
retryToDetectUsb = False
if self.isOneStepConnectMode:
if self.connectStage == uidef.kConnectStage_Reset or self.connectStage == uidef.kConnectStage_ExternalMemory:
connectSteps = RTxxx_uidef.kConnectStep_Fast - 1
elif self.connectStage == uidef.kConnectStage_Rom:
connectSteps = RTxxx_uidef.kConnectStep_Fast
retryToDetectUsb = True
else:
pass
while connectSteps:
if not self.updatePortSetupValue(retryToDetectUsb, showError):
if self.connectStage == uidef.kConnectStage_Rom:
if showError:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_doubleCheckIsp'][self.languageIndex])
self._RTxxx_connectFailureHandler()
return
if self.connectStage == uidef.kConnectStage_Rom:
self.RTxxx_connectToDevice(self.connectStage)
if self._RTxxx_retryToPingBootloader():
self.RTxxx_getMcuDeviceInfoViaRom()
self.updateConnectStatus('green')
self.connectStage = uidef.kConnectStage_ExternalMemory
else:
self.updateConnectStatus('red')
if showError:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_doubleCheckIsp'][self.languageIndex])
return
elif self.connectStage == uidef.kConnectStage_ExternalMemory:
if self.RTxxx_configureBootDevice():
self.RTxxx_getBootDeviceInfoViaRom()
self.connectStage = uidef.kConnectStage_Reset
self.updateConnectStatus('blue')
else:
if showError:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_failToCfgBootDevice'][self.languageIndex])
self._RTxxx_connectFailureHandler()
return
elif self.connectStage == uidef.kConnectStage_Reset:
self.RTxxx_resetMcuDevice()
self.isBootableAppAllowedToView = False
self.connectStage = uidef.kConnectStage_Rom
self.updateConnectStatus('black')
usbIdList = self.RTxxx_getUsbid()
self.setPortSetupValue(self.connectStage, usbIdList, True, True)
self.RTxxx_connectToDevice(self.connectStage)
else:
pass
connectSteps -= 1
def RTxxx_callbackConnectToDevice( self ):
self._RTxxx_startGaugeTimer()
self.printLog("'Connect to xxx' button is clicked")
if not self.isSbFileEnabledToGen:
self._RTxxx_connectStateMachine(True)
else:
if not self.isThereBoardConnection:
if self.connectStage == uidef.kConnectStage_Rom:
self.initSbAppBdfilesContent()
else:
# It means there is board connection
self.isThereBoardConnection = True
self._RTxxx_connectStateMachine(False)
if not self.isThereBoardConnection:
if self.connectStage == uidef.kConnectStage_Rom:
# It means there is no board connection, but we need to set it as True for SB generation
self.isThereBoardConnection = True
self.RTxxx_isDeviceEnabledToOperate = False
self.RTxxx_configureBootDevice()
self.connectStage = uidef.kConnectStage_Reset
self.updateConnectStatus('blue')
else:
self.isThereBoardConnection = False
else:
self.isThereBoardConnection = False
self.RTxxx_isDeviceEnabledToOperate = True
self.connectStage = uidef.kConnectStage_Rom
self.updateConnectStatus('black')
self._RTxxx_stopGaugeTimer()
def RTxxx_callbackSetSecureBootType( self ):
self.setCostTime(0)
self.RTxxx_setSecureBootSeqColor()
def RTxxx_task_doAllInOneAction( self ):
while True:
if self.RTxxx_isAllInOneActionTaskPending:
self._RTxxx_doAllInOneAction()
self.RTxxx_isAllInOneActionTaskPending = False
self._RTxxx_stopGaugeTimer()
time.sleep(1)
def _RTxxx_doAllInOneAction( self ):
allInOneSeqCnt = 1
status = False
while allInOneSeqCnt:
status = self._RTxxx_doGenImage()
if not status:
break
status = self._RTxxx_doFlashImage()
if not status:
break
allInOneSeqCnt -= 1
if status and self.isAutomaticImageReadback:
self.showPageInMainBootSeqWin(uidef.kPageIndex_BootDeviceMemory)
self._RTxxx_doViewMem()
self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_AllInOne, status)
def RTxxx_callbackAllInOneAction( self ):
self._RTxxx_startGaugeTimer()
self.RTxxx_isAllInOneActionTaskPending = True
def _RTxxx_doGenImage( self ):
status = False
self._RTxxx_startGaugeTimer()
self.printLog("'Generate Bootable Image' button is clicked")
if self.createMatchedAppJsonfile():
if self.RTxxx_genBootableImage():
status = True
self._RTxxx_stopGaugeTimer()
self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_GenImage, status)
return status
def RTxxx_callbackGenImage( self ):
if not self.isToolRunAsEntryMode:
self._RTxxx_doGenImage()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['separActnError_notAvailUnderEntry'][self.languageIndex])
def _RTxxx_doFlashImage( self ):
status = False
if self.connectStage == uidef.kConnectStage_Reset:
self._RTxxx_startGaugeTimer()
self.printLog("'Load Bootable Image' button is clicked")
if not self.RTxxx_flashBootableImage():
self.popupMsgBox(uilang.kMsgLanguageContentDict['operImgError_failToFlashImage'][self.languageIndex])
else:
self.isBootableAppAllowedToView = True
if self.RTxxx_burnBootDeviceOtps():
status = True
self._RTxxx_stopGaugeTimer()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_hasnotCfgBootDevice'][self.languageIndex])
self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_FlashImage, status)
return status
def RTxxx_callbackFlashImage( self ):
if not self.isToolRunAsEntryMode:
self._RTxxx_doFlashImage()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['separActnError_notAvailUnderEntry'][self.languageIndex])
def _RTxxx_doViewMem( self ):
if self.connectStage == uidef.kConnectStage_Reset:
if self.isBootableAppAllowedToView:
self._RTxxx_startGaugeTimer()
self.RTxxx_readProgrammedMemoryAndShow()
self._RTxxx_stopGaugeTimer()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operImgError_hasnotFlashImage'][self.languageIndex])
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_hasnotCfgBootDevice'][self.languageIndex])
def RTxxx_callbackViewMem( self ):
self._RTxxx_doViewMem()
def RTxxx_switchToolRunMode( self ):
self.applyOtpOperToRunMode()
|
the-stack_0_5694 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import OPConvertAutoScanTest, BaseNet
from hypothesis import reproduce_failure
import hypothesis.strategies as st
import numpy as np
import unittest
import paddle
import random
class Net(BaseNet):
"""
simple Net
"""
def forward(self, x):
"""
forward
"""
scale = self.config["scale"]
if self.config['isTensor']:
scale = paddle.to_tensor(scale)
x = paddle.scale(
x,
scale=scale,
bias=self.config["bias"],
bias_after_scale=self.config["bias_after_scale"])
return x
class TestScaleConvert(OPConvertAutoScanTest):
"""
api: paddle.scale
OPset version: 7, 9, 15
"""
def sample_convert_config(self, draw):
input_shape = draw(
st.lists(
st.integers(
min_value=2, max_value=20), min_size=2, max_size=5))
        # int32 and int64 have a bug, so only float dtypes are tested here
dtype = draw(st.sampled_from(["float32", "float64"]))
scale = draw(st.floats(min_value=-20, max_value=20))
isTensor = draw(st.booleans())
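        # exercise both the Python scalar and the paddle.Tensor form of the scale argument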
bias = draw(st.floats(min_value=-20, max_value=20))
bias_after_scale = draw(st.booleans())
config = {
"op_names": ["scale"],
"test_data_shapes": [input_shape],
"test_data_types": [[dtype]],
"opset_version": [7, 9, 15],
"input_spec_shape": [],
"scale": scale,
"bias": bias,
"bias_after_scale": bias_after_scale,
"isTensor": isTensor,
}
models = Net(config)
return (config, models)
def test(self):
self.run_and_statis(max_examples=30)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_5696 | #Author-Chun-Yu Ke
#Description-Creates a VGmesh component.
import adsk.core, adsk.fusion, adsk.cam, traceback
import math
import time
# Globals
_app = adsk.core.Application.cast(None)
_ui = adsk.core.UserInterface.cast(None)
_units = 'mm'
# Command inputs
_deltaAngle = adsk.core.DropDownCommandInput.cast(None)
_outerRadius = adsk.core.ValueCommandInput.cast(None)
_innerRadius = adsk.core.ValueCommandInput.cast(None)
_numLayer = adsk.core.StringValueCommandInput.cast(None)
_memberRadius = adsk.core.ValueCommandInput.cast(None)
_meshSize = adsk.core.ValueCommandInput.cast(None)
_vesselDiameter = adsk.core.ValueCommandInput.cast(None)
_vesselHeight = adsk.core.TextBoxCommandInput.cast(None)
_errMessage = adsk.core.TextBoxCommandInput.cast(None)
_handlers = []
def run(context):
try:
global _app, _ui
_app = adsk.core.Application.get()
_ui = _app.userInterface
cmdDef = _ui.commandDefinitions.itemById('VGmeshPythonScript')
if not cmdDef:
# Create a command definition.
cmdDef = _ui.commandDefinitions.addButtonDefinition('VGmeshPythonScript', 'VGmesh', 'Creates a VGmesh component', 'Resources/VGmesh')
# Connect to the command created event.
onCommandCreated = VGmeshCommandCreatedHandler()
cmdDef.commandCreated.add(onCommandCreated)
_handlers.append(onCommandCreated)
# Execute the command.
cmdDef.execute()
        # prevent this module from being terminated when the script returns, because we are waiting for event handlers to fire
adsk.autoTerminate(False)
except:
if _ui:
_ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
class VGmeshCommandDestroyHandler(adsk.core.CommandEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
eventArgs = adsk.core.CommandEventArgs.cast(args)
# when the command is done, terminate the script
# this will release all globals which will remove all event handlers
adsk.terminate()
except:
if _ui:
_ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Verifies that a value command input has a valid expression and returns the
# value if it does. Otherwise it returns False. This works around a
# problem where when you get the value from a ValueCommandInput it causes the
# current expression to be evaluated and updates the display. Some new functionality
# is being added in the future to the ValueCommandInput object that will make
# this easier and should make this function obsolete.
def getCommandInputValue(commandInput, unitType):
try:
valCommandInput = adsk.core.ValueCommandInput.cast(commandInput)
if not valCommandInput:
return (False, 0)
# Verify that the expression is valid.
des = adsk.fusion.Design.cast(_app.activeProduct)
unitsMgr = des.unitsManager
if unitsMgr.isValidExpression(valCommandInput.expression, unitType):
value = unitsMgr.evaluateExpression(valCommandInput.expression, unitType)
return (True, value)
else:
return (False, 0)
except:
if _ui:
_ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Event handler for the commandCreated event.
class VGmeshCommandCreatedHandler(adsk.core.CommandCreatedEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
eventArgs = adsk.core.CommandCreatedEventArgs.cast(args)
# Verify that a Fusion design is active.
des = adsk.fusion.Design.cast(_app.activeProduct)
if not des:
_ui.messageBox('A Fusion design must be active when invoking this command.')
return()
defaultUnits = des.unitsManager.defaultLengthUnits
            # Determine whether to use inches or millimeters as the initial default.
global _units
_units = 'mm'
deltaAngle = '30 deg'
deltaAngleAttrib = des.attributes.itemByName('VGmesh', 'deltaAngle')
if deltaAngleAttrib:
deltaAngle = deltaAngleAttrib.value
outerRadius = '0.0745'
outerRadiusAttrib = des.attributes.itemByName('VGmesh', 'outerRadius')
if outerRadiusAttrib:
outerRadius = outerRadiusAttrib.value
innerRadius = '0.0395'
innerRadiusAttrib = des.attributes.itemByName('VGmesh', 'innerRadius')
if innerRadiusAttrib:
innerRadius = innerRadiusAttrib.value
numLayer = '2'
numLayerAttrib = des.attributes.itemByName('VGmesh', 'numLayer')
if numLayerAttrib:
numLayer = numLayerAttrib.value
memberRadius = '0.0025'
memberRadiusAttrib = des.attributes.itemByName('VGmesh', 'memberRadius')
if memberRadiusAttrib:
memberRadius = memberRadiusAttrib.value
meshSize = '0.0100'
meshSizeAttrib = des.attributes.itemByName('VGmesh', 'meshSize')
if meshSizeAttrib:
meshSize = meshSizeAttrib.value
cmd = eventArgs.command
cmd.isExecutedWhenPreEmpted = False
inputs = cmd.commandInputs
global _deltaAngle, _outerRadius, _innerRadius, _numLayer, _memberRadius, _meshSize, _vesselDiameter, _vesselHeight, _errMessage #, _imgInputEnglish, _imgInputMetric
# Define the command dialog.
# _imgInputEnglish = inputs.addImageCommandInput('VGmeshImageEnglish', '', 'Resources/VGmeshEnglish.png')
# _imgInputEnglish.isFullWidth = True
# _imgInputMetric = inputs.addImageCommandInput('VGmeshImageMetric', '', 'Resources/VGmeshMetric.png')
# _imgInputMetric.isFullWidth = True
_outerRadius = inputs.addValueInput('outerRadius', 'Outer Radius', _units, adsk.core.ValueInput.createByReal(float(outerRadius)))
_innerRadius = inputs.addValueInput('innerRadius', 'Inner Radius', _units, adsk.core.ValueInput.createByReal(float(innerRadius)))
_memberRadius = inputs.addValueInput('memberRadius', 'Member Radius', _units, adsk.core.ValueInput.createByReal(float(memberRadius)))
_meshSize = inputs.addValueInput('meshSize', 'Mesh Size', _units, adsk.core.ValueInput.createByReal(float(meshSize)))
_deltaAngle = inputs.addDropDownCommandInput('deltaAngle', 'Delta Angle', adsk.core.DropDownStyles.TextListDropDownStyle)
if deltaAngle == '15 deg':
_deltaAngle.listItems.add('15 deg', True)
else:
_deltaAngle.listItems.add('15 deg', False)
if deltaAngle == '30 deg':
_deltaAngle.listItems.add('30 deg', True)
else:
_deltaAngle.listItems.add('30 deg', False)
if deltaAngle == '45 deg':
_deltaAngle.listItems.add('45 deg', True)
else:
_deltaAngle.listItems.add('45 deg', False)
_numLayer = inputs.addStringValueInput('numLayer', 'Number of Layers', numLayer)
_vesselDiameter = inputs.addTextBoxCommandInput('vesselDiameter', 'Vessel Diameter', '', 1, True)
_vesselHeight = inputs.addTextBoxCommandInput('vesselHeight', 'Vessel Height', '', 1, True)
_errMessage = inputs.addTextBoxCommandInput('errMessage', '', '', 2, True)
_errMessage.isFullWidth = True
# Connect to the command related events.
onExecute = VGmeshCommandExecuteHandler()
cmd.execute.add(onExecute)
_handlers.append(onExecute)
onInputChanged = VGmeshCommandInputChangedHandler()
cmd.inputChanged.add(onInputChanged)
_handlers.append(onInputChanged)
onValidateInputs = VGmeshCommandValidateInputsHandler()
cmd.validateInputs.add(onValidateInputs)
_handlers.append(onValidateInputs)
onDestroy = VGmeshCommandDestroyHandler()
cmd.destroy.add(onDestroy)
_handlers.append(onDestroy)
except:
if _ui:
_ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Event handler for the execute event.
class VGmeshCommandExecuteHandler(adsk.core.CommandEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
eventArgs = adsk.core.CommandEventArgs.cast(args)
# Save the current values as attributes.
des = adsk.fusion.Design.cast(_app.activeProduct)
attribs = des.attributes
attribs.add('VGmesh', 'outerRadius', str(_outerRadius.value))
attribs.add('VGmesh', 'innerRadius', str(_innerRadius.value))
attribs.add('VGmesh', 'memberRadius', str(_memberRadius.value))
attribs.add('VGmesh', 'meshSize', str(_meshSize.value))
attribs.add('VGmesh', 'deltaAngle', _deltaAngle.selectedItem.name)
attribs.add('VGmesh', 'numLayer', str(_numLayer.value))
# Get the current values.
if _deltaAngle.selectedItem.name == '15 deg':
deltaAngle = 15.0 * (math.pi/180)
elif _deltaAngle.selectedItem.name == '30 deg':
deltaAngle = 30.0 * (math.pi/180)
elif _deltaAngle.selectedItem.name == '45 deg':
deltaAngle = 45.0 * (math.pi/180)
numLayer = int(_numLayer.value)
memberRadius = _memberRadius.value
meshSize = _meshSize.value
outerRadius = _outerRadius.value
innerRadius = _innerRadius.value
            # Create the mesh.
VGmeshComp = drawVGmesh(des, outerRadius, innerRadius, numLayer, meshSize, memberRadius, deltaAngle)
if VGmeshComp:
desc = 'VGmesh; Outer Radius: ' + des.unitsManager.formatInternalValue(outerRadius, _units, True) + '; '
desc += 'Inner Radius: ' + des.unitsManager.formatInternalValue(innerRadius, _units, True) + '; '
desc += 'Member Radius: ' + des.unitsManager.formatInternalValue(memberRadius, _units, True) + '; '
desc += 'Mesh Size: ' + des.unitsManager.formatInternalValue(meshSize, _units, True) + '; '
desc += 'Delta Angle: ' + str(deltaAngle * (180/math.pi)) + '; '
desc += 'Number Layers: ' + str(numLayer)
VGmeshComp.description = desc
except:
if _ui:
_ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Event handler for the inputChanged event.
class VGmeshCommandInputChangedHandler(adsk.core.InputChangedEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
eventArgs = adsk.core.InputChangedEventArgs.cast(args)
changedInput = eventArgs.input
global _units
            # Update the vessel height and diameter values.
meshSize = None
result = getCommandInputValue(_meshSize, '')
if result[0]:
meshSize = result[1]
if not meshSize == None:
if _numLayer.value.isdigit():
numLayer = int(_numLayer.value)
vesselHeight = numLayer * meshSize
                    # The vessel height is an internal value (cm); format it for display in the document units.
des = adsk.fusion.Design.cast(_app.activeProduct)
vesselHeightText = des.unitsManager.formatInternalValue(vesselHeight, _units, True)
_vesselHeight.text = vesselHeightText
else:
_vesselHeight.text = ''
else:
_vesselHeight.text = ''
outerRadius = None
result = getCommandInputValue(_outerRadius, '')
if result[0]:
outerRadius = result[1]
if not outerRadius == None:
vesselDiameter = outerRadius * 2
vesselDiameterText = des.unitsManager.formatInternalValue(vesselDiameter, _units, True)
_vesselDiameter.text = vesselDiameterText
else:
_vesselDiameter.text = ''
except:
if _ui:
_ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Event handler for the validateInputs event.
class VGmeshCommandValidateInputsHandler(adsk.core.ValidateInputsEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
eventArgs = adsk.core.ValidateInputsEventArgs.cast(args)
_errMessage.text = ''
            # Verify that the number of layers is a whole number.
if not _numLayer.value.isdigit():
_errMessage.text = 'The number of layers must be a whole number.'
eventArgs.areInputsValid = False
return
else:
numLayer = int(_numLayer.value)
if _outerRadius.value <= _innerRadius.value:
_errMessage.text = 'Outer Radius must be greater than Inner Radius.'
eventArgs.areInputsValid = False
return
else:
outerRadius = float(_outerRadius.value)
            if _deltaAngle.selectedItem.name == '15 deg':
                deltaAngle = 15.0 * (math.pi/180)
            elif _deltaAngle.selectedItem.name == '30 deg':
                deltaAngle = 30.0 * (math.pi/180)
            elif _deltaAngle.selectedItem.name == '45 deg':
                deltaAngle = 45.0 * (math.pi/180)
des = adsk.fusion.Design.cast(_app.activeProduct)
except:
if _ui:
_ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Calculate points along an involute curve.
def involutePoint(baseCircleRadius, distFromCenterToInvolutePoint):
try:
# Calculate the other side of the right-angle triangle defined by the base circle and the current distance radius.
# This is also the length of the involute chord as it comes off of the base circle.
triangleSide = math.sqrt(math.pow(distFromCenterToInvolutePoint,2) - math.pow(baseCircleRadius,2))
# Calculate the angle of the involute.
alpha = triangleSide / baseCircleRadius
# Calculate the angle where the current involute point is.
theta = alpha - math.acos(baseCircleRadius / distFromCenterToInvolutePoint)
# Calculate the coordinates of the involute point.
x = distFromCenterToInvolutePoint * math.cos(theta)
y = distFromCenterToInvolutePoint * math.sin(theta)
# Create a point to return.
return adsk.core.Point3D.create(x, y, 0)
except:
if _ui:
_ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Builds a VGmesh.
def drawVGmesh(design, outerRadius, innerRadius, numLayer, meshSize, memberRadius, deltaAngle):
try:
t_begin = time.time()
# Create a new component by creating an occurrence.
occs = design.rootComponent.occurrences
mat = adsk.core.Matrix3D.create()
newOcc = occs.addNewComponent(mat)
newComp = adsk.fusion.Component.cast(newOcc.component)
rootComp = design.rootComponent
# Create a new sketch.
sketches = newComp.sketches
xzPlane = newComp.xZConstructionPlane
baseSketch = sketches.add(xzPlane)
origin = adsk.core.Point3D.create(0,0,0)
# Create one unit component
nr = math.floor((outerRadius - innerRadius) / meshSize) + 1
nt = round(2 * math.pi / deltaAngle)
nz = numLayer
global t
t = deltaAngle / 2
z = 0
plate_unit_angle = adsk.core.ObjectCollection.create()
support_unit_angle = adsk.core.ObjectCollection.create()
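        # build all members of one angular wedge: "plate" bodies lie in the current z plane,
        # "support" bodies rise from z to z + meshSize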
for ir in range(0, nr):
r1 = innerRadius + meshSize * ir
if (ir % 2 == 0):
y1 = r1 * math.tan(t)
p1 = adsk.core.Point3D.create(r1, y1, z)
p2 = adsk.core.Point3D.create(r1, -y1, z)
create_bond(newComp, baseSketch, plate_unit_angle, p1, p2, memberRadius)
if ir > 0:
p0 = adsk.core.Point3D.create(r1 - meshSize, 0, z)
create_bond(newComp, baseSketch, plate_unit_angle, p0, p1, memberRadius)
create_bond(newComp, baseSketch, plate_unit_angle, p0, p2, memberRadius)
p3 = adsk.core.Point3D.create(r1 / math.cos(t), 0, z + meshSize)
create_bond(newComp, baseSketch, support_unit_angle, p1, p3, memberRadius)
create_bond(newComp, baseSketch, support_unit_angle, p2, p3, memberRadius)
if ir < nr - 1:
r2 = r1 + meshSize
p4 = adsk.core.Point3D.create(r2 * math.cos(t), r2 * math.sin(t), z + meshSize)
create_bond(newComp, baseSketch, support_unit_angle, p1, p4, memberRadius)
if ir > 0:
r0 = r1 - meshSize
p4 = adsk.core.Point3D.create(r0 * math.cos(t), r0 * math.sin(t), z + meshSize)
create_bond(newComp, baseSketch, support_unit_angle, p1, p4, memberRadius)
else:
p1 = adsk.core.Point3D.create(r1, 0, z)
r0 = r1 - meshSize
y0 = r0 * math.tan(t)
p2 = adsk.core.Point3D.create(r0, y0, z)
p3 = adsk.core.Point3D.create(r0, -y0, z)
p4 = adsk.core.Point3D.create(r1 * math.cos(2*t), r1 * math.sin(2*t), z)
create_bond(newComp, baseSketch, plate_unit_angle, p1, p2, memberRadius)
create_bond(newComp, baseSketch, plate_unit_angle, p1, p3, memberRadius)
create_bond(newComp, baseSketch, plate_unit_angle, p1, p4, memberRadius)
x2 = r1 * math.cos(t)
y2 = r1 * math.sin(t)
p5 = adsk.core.Point3D.create(x2, y2, z + meshSize)
p6 = adsk.core.Point3D.create(x2, -y2, z + meshSize)
create_bond(newComp, baseSketch, support_unit_angle, p1, p5, memberRadius)
create_bond(newComp, baseSketch, support_unit_angle, p1, p6, memberRadius)
if ir < nr - 1:
r2 = r1 + meshSize
p7 = adsk.core.Point3D.create(r2 / math.cos(t), 0, z + meshSize)
create_bond(newComp, baseSketch, support_unit_angle, p1, p7, memberRadius)
if ir > 0:
p7 = adsk.core.Point3D.create(r0 / math.cos(t), 0, z + meshSize)
create_bond(newComp, baseSketch, support_unit_angle, p1, p7, memberRadius)
plate_b = plate_unit_angle.item(0)
plate_unit_angle.removeByIndex(0)
support_b = support_unit_angle.item(0)
support_unit_angle.removeByIndex(0)
combineFeats = newComp.features.combineFeatures
combineInput = combineFeats.createInput(plate_b, plate_unit_angle)
combineInput.operation = adsk.fusion.FeatureOperations.JoinFeatureOperation
combineFeats.add(combineInput)
combineInput = combineFeats.createInput(support_b, support_unit_angle)
combineInput.operation = adsk.fusion.FeatureOperations.JoinFeatureOperation
combineFeats.add(combineInput)
plate_b = newComp.bRepBodies.item(0)
support_b = newComp.bRepBodies.item(1)
# Copy and paste in theta
plate = adsk.core.ObjectCollection.create()
support = adsk.core.ObjectCollection.create()
plate.add(plate_b)
support.add(support_b)
normal = baseSketch.xDirection.crossProduct(baseSketch.yDirection)
normal.transformBy(baseSketch.transform)
for it in range(1, nt):
theta = deltaAngle * it
transform = adsk.core.Matrix3D.create()
transform.setToRotation(theta, normal, baseSketch.origin)
new_plate = adsk.core.ObjectCollection.create()
new_support = adsk.core.ObjectCollection.create()
new_plate.add(plate_b.copyToComponent(newOcc));
new_support.add(support_b.copyToComponent(newOcc));
moveInput = newComp.features.moveFeatures.createInput(new_plate, transform);
newComp.features.moveFeatures.add(moveInput);
moveInput = newComp.features.moveFeatures.createInput(new_support, transform);
newComp.features.moveFeatures.add(moveInput);
for entity in new_plate:
plate.add(entity)
for entity in new_support:
support.add(entity)
plate_b = plate.item(0)
plate.removeByIndex(0)
support_b = support.item(0)
support.removeByIndex(0)
combineFeats = newComp.features.combineFeatures
combineInput = combineFeats.createInput(plate_b, plate)
combineInput.operation = adsk.fusion.FeatureOperations.JoinFeatureOperation
combineFeats.add(combineInput)
combineInput = combineFeats.createInput(support_b, support)
combineInput.operation = adsk.fusion.FeatureOperations.JoinFeatureOperation
combineFeats.add(combineInput)
plate_b = newComp.bRepBodies.item(0)
support_b = newComp.bRepBodies.item(1)
# Copy and paste in z
rot = adsk.core.Matrix3D.create()
rot.setToRotation(deltaAngle / 2, normal, baseSketch.origin)
bodies = adsk.core.ObjectCollection.create()
bodies.add(plate_b)
bodies.add(support_b)
for iz in range(1, nz):
transform = adsk.core.Matrix3D.create()
transform.translation = adsk.core.Vector3D.create(0, 0, meshSize * iz)
new_plate = adsk.core.ObjectCollection.create()
new_plate.add(plate_b.copyToComponent(newOcc));
moveInput = newComp.features.moveFeatures.createInput(new_plate, transform);
newComp.features.moveFeatures.add(moveInput);
bodies.add(newComp.bRepBodies.item(newComp.bRepBodies.count - 1))
if iz % 2 == 1:
moveInput = newComp.features.moveFeatures.createInput(new_plate, rot);
newComp.features.moveFeatures.add(moveInput);
if iz < nz - 1:
new_support = adsk.core.ObjectCollection.create()
new_support.add(support_b.copyToComponent(newOcc));
moveInput = newComp.features.moveFeatures.createInput(new_support, transform);
newComp.features.moveFeatures.add(moveInput);
if iz % 2 == 1:
moveInput = newComp.features.moveFeatures.createInput(new_support, rot);
newComp.features.moveFeatures.add(moveInput)
bodies.add(newComp.bRepBodies.item(newComp.bRepBodies.count - 1))
for i in range(0, bodies.count - 1):
combineFeats = newComp.features.combineFeatures
that = adsk.core.ObjectCollection.create()
that.add(bodies.item(i + 1))
combineInput = combineFeats.createInput(bodies.item(i), that)
combineInput.operation = adsk.fusion.FeatureOperations.JoinFeatureOperation
combineFeats.add(combineInput)
        # Group everything used to create the mesh in the timeline.
timelineGroups = design.timeline.timelineGroups
newOccIndex = newOcc.timelineObject.index
baseSketchIndex = baseSketch.timelineObject.index
timelineGroup = timelineGroups.add(newOccIndex, baseSketchIndex)
timelineGroup.name = 'VGmesh'
VGmeshValues = {}
VGmeshValues['outerRadius'] = str(outerRadius)
VGmeshValues['innerRadius'] = str(innerRadius)
VGmeshValues['memberRadius'] = str(memberRadius)
VGmeshValues['meshSize'] = str(meshSize)
VGmeshValues['deltaAngle'] = str(deltaAngle)
VGmeshValues['numLayer'] = str(numLayer)
attrib = newComp.attributes.add('VGmesh', 'Values',str(VGmeshValues))
newComp.name = 'VGmesh'
t_end = time.time()
_ui.messageBox('Elapsed time: %s' % str(t_end - t_begin))
return newComp
except Exception as error:
_ui.messageBox("drawVGmesh Failed : " + str(error))
return None
def create_bond(rootComp, rootSketch, container, start, end, r):
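    # Extrude a cylinder of radius r along the line from start to end (length padded by r*tan(t))
    # and add the resulting body to container.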
global t
planes = rootComp.constructionPlanes
planeInput = planes.createInput()
line_sketch = rootComp.sketches.add(rootComp.xYConstructionPlane)
lines = line_sketch.sketchCurves.sketchLines
line = lines.addByTwoPoints(start, end)
path = rootComp.features.createPath(line)
planeInput = rootComp.constructionPlanes.createInput()
planeInput.setByDistanceOnPath(path, adsk.core.ValueInput.createByReal(0))
plane1 = rootComp.constructionPlanes.add(planeInput)
sketch1 = rootComp.sketches.add(plane1)
circles = sketch1.sketchCurves.sketchCircles
circle1 = circles.addByCenterRadius(adsk.core.Point3D.create(0, 0, 0), r)
profile0 = sketch1.profiles.item(0)
extrudes = rootComp.features.extrudeFeatures
dist = adsk.core.ValueInput.createByReal(start.distanceTo(end) + r * math.tan(t))
extrude1 = extrudes.addSimple(profile0, dist, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
container.add(rootComp.bRepBodies.item(rootComp.bRepBodies.count - 1))
line_sketch.deleteMe()
    sketch1.deleteMe()
|
the-stack_0_5698 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import re
import time
import token
from tokenize import generate_tokens, untokenize
from robot.api import logger
from robot.errors import (ContinueForLoop, DataError, ExecutionFailed,
ExecutionFailures, ExecutionPassed, ExitForLoop,
PassExecution, ReturnFromKeyword)
from robot.running import Keyword, RUN_KW_REGISTER
from robot.running.context import EXECUTION_CONTEXTS
from robot.running.usererrorhandler import UserErrorHandler
from robot.utils import (DotDict, escape, format_assign_message,
get_error_message, get_time, html_escape, is_falsy, is_integer,
is_string, is_truthy, is_unicode, IRONPYTHON, JYTHON,
Matcher, normalize, NormalizedDict, parse_time, prepr,
RERAISED_EXCEPTIONS, plural_or_not as s, roundup,
secs_to_timestr, seq2str, split_from_equals, StringIO,
timestr_to_secs, type_name, unic, is_list_like)
from robot.utils.asserts import assert_equal, assert_not_equal
from robot.variables import (is_list_var, is_var, DictVariableTableValue,
VariableTableValue, VariableSplitter,
variable_not_found)
from robot.version import get_version
if JYTHON:
from java.lang import String, Number
# TODO: Clean-up registering run keyword variants in RF 3.1.
# https://github.com/robotframework/robotframework/issues/2190
def run_keyword_variant(resolve):
def decorator(method):
RUN_KW_REGISTER.register_run_keyword('BuiltIn', method.__name__,
resolve, deprecation_warning=False)
return method
return decorator
class _BuiltInBase(object):
@property
def _context(self):
return self._get_context()
def _get_context(self, top=False):
ctx = EXECUTION_CONTEXTS.current if not top else EXECUTION_CONTEXTS.top
if ctx is None:
raise RobotNotRunningError('Cannot access execution context')
return ctx
@property
def _namespace(self):
return self._get_context().namespace
@property
def _variables(self):
return self._namespace.variables
def _matches(self, string, pattern, caseless=False):
# Must use this instead of fnmatch when string may contain newlines.
matcher = Matcher(pattern, caseless=caseless, spaceless=False)
return matcher.match(string)
def _is_true(self, condition):
if is_string(condition):
condition = self.evaluate(condition, modules='os,sys')
return bool(condition)
def _log_types(self, *args):
self._log_types_at_level('DEBUG', *args)
def _log_types_at_level(self, level, *args):
msg = ["Argument types are:"] + [self._get_type(a) for a in args]
self.log('\n'.join(msg), level)
def _get_type(self, arg):
# In IronPython type(u'x') is str. We want to report unicode anyway.
if is_unicode(arg):
return "<type 'unicode'>"
return str(type(arg))
class _Converter(_BuiltInBase):
def convert_to_integer(self, item, base=None):
"""Converts the given item to an integer number.
If the given item is a string, it is by default expected to be an
integer in base 10. There are two ways to convert from other bases:
- Give base explicitly to the keyword as ``base`` argument.
- Prefix the given string with the base so that ``0b`` means binary
(base 2), ``0o`` means octal (base 8), and ``0x`` means hex (base 16).
The prefix is considered only when ``base`` argument is not given and
may itself be prefixed with a plus or minus sign.
The syntax is case-insensitive and possible spaces are ignored.
Examples:
| ${result} = | Convert To Integer | 100 | | # Result is 100 |
| ${result} = | Convert To Integer | FF AA | 16 | # Result is 65450 |
| ${result} = | Convert To Integer | 100 | 8 | # Result is 64 |
| ${result} = | Convert To Integer | -100 | 2 | # Result is -4 |
| ${result} = | Convert To Integer | 0b100 | | # Result is 4 |
| ${result} = | Convert To Integer | -0x100 | | # Result is -256 |
See also `Convert To Number`, `Convert To Binary`, `Convert To Octal`,
`Convert To Hex`, and `Convert To Bytes`.
"""
self._log_types(item)
return self._convert_to_integer(item, base)
def _convert_to_integer(self, orig, base=None):
try:
item = self._handle_java_numbers(orig)
item, base = self._get_base(item, base)
if base:
return int(item, self._convert_to_integer(base))
return int(item)
except:
raise RuntimeError("'%s' cannot be converted to an integer: %s"
% (orig, get_error_message()))
def _handle_java_numbers(self, item):
if not JYTHON:
return item
if isinstance(item, String):
return unic(item)
if isinstance(item, Number):
return item.doubleValue()
return item
def _get_base(self, item, base):
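        # Detect an optional 0b/0o/0x prefix (possibly signed) when no explicit base is given.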
if not is_string(item):
return item, base
item = normalize(item)
if item.startswith(('-', '+')):
sign = item[0]
item = item[1:]
else:
sign = ''
bases = {'0b': 2, '0o': 8, '0x': 16}
if base or not item.startswith(tuple(bases)):
return sign+item, base
return sign+item[2:], bases[item[:2]]
def convert_to_binary(self, item, base=None, prefix=None, length=None):
"""Converts the given item to a binary string.
The ``item``, with an optional ``base``, is first converted to an
integer using `Convert To Integer` internally. After that it
is converted to a binary number (base 2) represented as a
string such as ``1011``.
The returned value can contain an optional ``prefix`` and can be
required to be of minimum ``length`` (excluding the prefix and a
possible minus sign). If the value is initially shorter than
the required length, it is padded with zeros.
Examples:
| ${result} = | Convert To Binary | 10 | | | # Result is 1010 |
| ${result} = | Convert To Binary | F | base=16 | prefix=0b | # Result is 0b1111 |
| ${result} = | Convert To Binary | -2 | prefix=B | length=4 | # Result is -B0010 |
See also `Convert To Integer`, `Convert To Octal` and `Convert To Hex`.
"""
return self._convert_to_bin_oct_hex(item, base, prefix, length, 'b')
def convert_to_octal(self, item, base=None, prefix=None, length=None):
"""Converts the given item to an octal string.
The ``item``, with an optional ``base``, is first converted to an
integer using `Convert To Integer` internally. After that it
is converted to an octal number (base 8) represented as a
string such as ``775``.
The returned value can contain an optional ``prefix`` and can be
required to be of minimum ``length`` (excluding the prefix and a
possible minus sign). If the value is initially shorter than
the required length, it is padded with zeros.
Examples:
| ${result} = | Convert To Octal | 10 | | | # Result is 12 |
| ${result} = | Convert To Octal | -F | base=16 | prefix=0 | # Result is -017 |
| ${result} = | Convert To Octal | 16 | prefix=oct | length=4 | # Result is oct0020 |
See also `Convert To Integer`, `Convert To Binary` and `Convert To Hex`.
"""
return self._convert_to_bin_oct_hex(item, base, prefix, length, 'o')
def convert_to_hex(self, item, base=None, prefix=None, length=None,
lowercase=False):
"""Converts the given item to a hexadecimal string.
The ``item``, with an optional ``base``, is first converted to an
integer using `Convert To Integer` internally. After that it
is converted to a hexadecimal number (base 16) represented as
a string such as ``FF0A``.
The returned value can contain an optional ``prefix`` and can be
required to be of minimum ``length`` (excluding the prefix and a
possible minus sign). If the value is initially shorter than
the required length, it is padded with zeros.
        By default the value is returned as an upper case string, but giving
        the ``lowercase`` argument a true value (see `Boolean arguments`) turns
        the value (but not the given prefix) to lower case.
Examples:
| ${result} = | Convert To Hex | 255 | | | # Result is FF |
| ${result} = | Convert To Hex | -10 | prefix=0x | length=2 | # Result is -0x0A |
| ${result} = | Convert To Hex | 255 | prefix=X | lowercase=yes | # Result is Xff |
See also `Convert To Integer`, `Convert To Binary` and `Convert To Octal`.
"""
spec = 'x' if is_truthy(lowercase) else 'X'
return self._convert_to_bin_oct_hex(item, base, prefix, length, spec)
def _convert_to_bin_oct_hex(self, item, base, prefix, length, format_spec):
self._log_types(item)
ret = format(self._convert_to_integer(item, base), format_spec)
prefix = prefix or ''
if ret[0] == '-':
prefix = '-' + prefix
ret = ret[1:]
if length:
ret = ret.rjust(self._convert_to_integer(length), '0')
return prefix + ret
def convert_to_number(self, item, precision=None):
"""Converts the given item to a floating point number.
If the optional ``precision`` is positive or zero, the returned number
is rounded to that number of decimal digits. Negative precision means
that the number is rounded to the closest multiple of 10 to the power
of the absolute precision. If a number is equally close to a certain
precision, it is always rounded away from zero.
Examples:
| ${result} = | Convert To Number | 42.512 | | # Result is 42.512 |
| ${result} = | Convert To Number | 42.512 | 1 | # Result is 42.5 |
| ${result} = | Convert To Number | 42.512 | 0 | # Result is 43.0 |
| ${result} = | Convert To Number | 42.512 | -1 | # Result is 40.0 |
Notice that machines generally cannot store floating point numbers
accurately. This may cause surprises with these numbers in general
and also when they are rounded. For more information see, for example,
these resources:
- http://docs.python.org/tutorial/floatingpoint.html
- http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition
If you want to avoid possible problems with floating point numbers,
you can implement custom keywords using Python's
[http://docs.python.org/library/decimal.html|decimal] or
[http://docs.python.org/library/fractions.html|fractions] modules.
If you need an integer number, use `Convert To Integer` instead.
"""
self._log_types(item)
return self._convert_to_number(item, precision)
def _convert_to_number(self, item, precision=None):
number = self._convert_to_number_without_precision(item)
if precision is not None:
number = roundup(number, self._convert_to_integer(precision),
return_type=float)
return number
def _convert_to_number_without_precision(self, item):
try:
if JYTHON:
item = self._handle_java_numbers(item)
return float(item)
except:
error = get_error_message()
try:
return float(self._convert_to_integer(item))
except RuntimeError:
raise RuntimeError("'%s' cannot be converted to a floating "
"point number: %s" % (item, error))
def convert_to_string(self, item):
"""Converts the given item to a Unicode string.
Uses ``__unicode__`` or ``__str__`` method with Python objects and
``toString`` with Java objects.
Use `Encode String To Bytes` and `Decode Bytes To String` keywords
in ``String`` library if you need to convert between Unicode and byte
strings using different encodings. Use `Convert To Bytes` if you just
want to create byte strings.
"""
self._log_types(item)
return self._convert_to_string(item)
def _convert_to_string(self, item):
return unic(item)
def convert_to_boolean(self, item):
"""Converts the given item to Boolean true or false.
Handles strings ``True`` and ``False`` (case-insensitive) as expected,
otherwise returns item's
[http://docs.python.org/library/stdtypes.html#truth|truth value]
using Python's ``bool()`` method.
"""
self._log_types(item)
if is_string(item):
if item.upper() == 'TRUE':
return True
if item.upper() == 'FALSE':
return False
return bool(item)
def convert_to_bytes(self, input, input_type='text'):
u"""Converts the given ``input`` to bytes according to the ``input_type``.
Valid input types are listed below:
- ``text:`` Converts text to bytes character by character. All
characters with ordinal below 256 can be used and are converted to
bytes with same values. Many characters are easiest to represent
using escapes like ``\\x00`` or ``\\xff``. Supports both Unicode
strings and bytes.
- ``int:`` Converts integers separated by spaces to bytes. Similarly as
with `Convert To Integer`, it is possible to use binary, octal, or
hex values by prefixing the values with ``0b``, ``0o``, or ``0x``,
respectively.
- ``hex:`` Converts hexadecimal values to bytes. Single byte is always
two characters long (e.g. ``01`` or ``FF``). Spaces are ignored and
can be used freely as a visual separator.
- ``bin:`` Converts binary values to bytes. Single byte is always eight
characters long (e.g. ``00001010``). Spaces are ignored and can be
used freely as a visual separator.
In addition to giving the input as a string, it is possible to use
lists or other iterables containing individual characters or numbers.
In that case numbers do not need to be padded to certain length and
they cannot contain extra spaces.
Examples (last column shows returned bytes):
| ${bytes} = | Convert To Bytes | hyv\xe4 | | # hyv\\xe4 |
| ${bytes} = | Convert To Bytes | \\xff\\x07 | | # \\xff\\x07 |
| ${bytes} = | Convert To Bytes | 82 70 | int | # RF |
| ${bytes} = | Convert To Bytes | 0b10 0x10 | int | # \\x02\\x10 |
| ${bytes} = | Convert To Bytes | ff 00 07 | hex | # \\xff\\x00\\x07 |
| ${bytes} = | Convert To Bytes | 5246212121 | hex | # RF!!! |
| ${bytes} = | Convert To Bytes | 0000 1000 | bin | # \\x08 |
| ${input} = | Create List | 1 | 2 | 12 |
| ${bytes} = | Convert To Bytes | ${input} | int | # \\x01\\x02\\x0c |
| ${bytes} = | Convert To Bytes | ${input} | hex | # \\x01\\x02\\x12 |
Use `Encode String To Bytes` in ``String`` library if you need to
convert text to bytes using a certain encoding.
"""
try:
try:
ordinals = getattr(self, '_get_ordinals_from_%s' % input_type)
except AttributeError:
raise RuntimeError("Invalid input type '%s'." % input_type)
return bytes(bytearray(o for o in ordinals(input)))
except:
raise RuntimeError("Creating bytes failed: %s" % get_error_message())
def _get_ordinals_from_text(self, input):
# https://github.com/IronLanguages/main/issues/1237
if IRONPYTHON and isinstance(input, bytearray):
input = bytes(input)
for char in input:
ordinal = char if is_integer(char) else ord(char)
yield self._test_ordinal(ordinal, char, 'Character')
def _test_ordinal(self, ordinal, original, type):
if 0 <= ordinal <= 255:
return ordinal
raise RuntimeError("%s '%s' cannot be represented as a byte."
% (type, original))
def _get_ordinals_from_int(self, input):
if is_string(input):
input = input.split()
elif is_integer(input):
input = [input]
for integer in input:
ordinal = self._convert_to_integer(integer)
yield self._test_ordinal(ordinal, integer, 'Integer')
def _get_ordinals_from_hex(self, input):
for token in self._input_to_tokens(input, length=2):
ordinal = self._convert_to_integer(token, base=16)
yield self._test_ordinal(ordinal, token, 'Hex value')
def _get_ordinals_from_bin(self, input):
for token in self._input_to_tokens(input, length=8):
ordinal = self._convert_to_integer(token, base=2)
yield self._test_ordinal(ordinal, token, 'Binary value')
def _input_to_tokens(self, input, length):
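        # Split a whitespace-insensitive string into fixed-width tokens; non-string input is assumed to be pre-tokenized.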
if not is_string(input):
return input
input = ''.join(input.split())
if len(input) % length != 0:
raise RuntimeError('Expected input to be multiple of %d.' % length)
return (input[i:i+length] for i in range(0, len(input), length))
def create_list(self, *items):
"""Returns a list containing given items.
The returned list can be assigned both to ``${scalar}`` and ``@{list}``
variables.
Examples:
| @{list} = | Create List | a | b | c |
| ${scalar} = | Create List | a | b | c |
| ${ints} = | Create List | ${1} | ${2} | ${3} |
"""
return list(items)
@run_keyword_variant(resolve=0)
def create_dictionary(self, *items):
"""Creates and returns a dictionary based on the given ``items``.
Items are typically given using the ``key=value`` syntax same way as
``&{dictionary}`` variables are created in the Variable table. Both
keys and values can contain variables, and possible equal sign in key
can be escaped with a backslash like ``escaped\\=key=value``. It is
also possible to get items from existing dictionaries by simply using
them like ``&{dict}``.
Alternatively items can be specified so that keys and values are given
separately. This and the ``key=value`` syntax can even be combined,
but separately given items must be first.
If same key is used multiple times, the last value has precedence.
The returned dictionary is ordered, and values with strings as keys
can also be accessed using a convenient dot-access syntax like
``${dict.key}``.
Examples:
| &{dict} = | Create Dictionary | key=value | foo=bar | | | # key=value syntax |
| Should Be True | ${dict} == {'key': 'value', 'foo': 'bar'} |
| &{dict2} = | Create Dictionary | key | value | foo | bar | # separate key and value |
| Should Be Equal | ${dict} | ${dict2} |
| &{dict} = | Create Dictionary | ${1}=${2} | &{dict} | foo=new | | # using variables |
| Should Be True | ${dict} == {1: 2, 'key': 'value', 'foo': 'new'} |
| Should Be Equal | ${dict.key} | value | | | | # dot-access |
This keyword was changed in Robot Framework 2.9 in many ways:
- Moved from ``Collections`` library to ``BuiltIn``.
- Support also non-string keys in ``key=value`` syntax.
- Returned dictionary is ordered and dot-accessible.
- Old syntax to give keys and values separately was deprecated, but
deprecation was later removed in RF 3.0.1.
"""
separate, combined = self._split_dict_items(items)
result = DotDict(self._format_separate_dict_items(separate))
combined = DictVariableTableValue(combined).resolve(self._variables)
result.update(combined)
return result
def _split_dict_items(self, items):
separate = []
for item in items:
name, value = split_from_equals(item)
if value is not None or VariableSplitter(item).is_dict_variable():
break
separate.append(item)
return separate, items[len(separate):]
def _format_separate_dict_items(self, separate):
separate = self._variables.replace_list(separate)
if len(separate) % 2 != 0:
raise DataError('Expected even number of keys and values, got %d.'
% len(separate))
return [separate[i:i+2] for i in range(0, len(separate), 2)]
class _Verify(_BuiltInBase):
def _set_and_remove_tags(self, tags):
set_tags = [tag for tag in tags if not tag.startswith('-')]
remove_tags = [tag[1:] for tag in tags if tag.startswith('-')]
if remove_tags:
self.remove_tags(*remove_tags)
if set_tags:
self.set_tags(*set_tags)
def fail(self, msg=None, *tags):
"""Fails the test with the given message and optionally alters its tags.
The error message is specified using the ``msg`` argument.
It is possible to use HTML in the given error message, similarly
as with any other keyword accepting an error message, by prefixing
the error with ``*HTML*``.
It is possible to modify tags of the current test case by passing tags
after the message. Tags starting with a hyphen (e.g. ``-regression``)
are removed and others added. Tags are modified using `Set Tags` and
`Remove Tags` internally, and the semantics setting and removing them
are the same as with these keywords.
Examples:
| Fail | Test not ready | | | # Fails with the given message. |
| Fail | *HTML*<b>Test not ready</b> | | | # Fails using HTML in the message. |
| Fail | Test not ready | not-ready | | # Fails and adds 'not-ready' tag. |
| Fail | OS not supported | -regression | | # Removes tag 'regression'. |
| Fail | My message | tag | -t* | # Removes all tags starting with 't' except the newly added 'tag'. |
See `Fatal Error` if you need to stop the whole test execution.
"""
self._set_and_remove_tags(tags)
raise AssertionError(msg) if msg else AssertionError()
def fatal_error(self, msg=None):
"""Stops the whole test execution.
The test or suite where this keyword is used fails with the provided
message, and subsequent tests fail with a canned message.
Possible teardowns will nevertheless be executed.
See `Fail` if you only want to stop one test case unconditionally.
"""
error = AssertionError(msg) if msg else AssertionError()
error.ROBOT_EXIT_ON_FAILURE = True
raise error
def should_not_be_true(self, condition, msg=None):
"""Fails if the given condition is true.
See `Should Be True` for details about how ``condition`` is evaluated
and how ``msg`` can be used to override the default error message.
"""
if self._is_true(condition):
raise AssertionError(msg or "'%s' should not be true." % condition)
def should_be_true(self, condition, msg=None):
"""Fails if the given condition is not true.
If ``condition`` is a string (e.g. ``${rc} < 10``), it is evaluated as
a Python expression as explained in `Evaluating expressions` and the
keyword status is decided based on the result. If a non-string item is
given, the status is got directly from its
[http://docs.python.org/library/stdtypes.html#truth|truth value].
The default error message (``<condition> should be true``) is not very
informative, but it can be overridden with the ``msg`` argument.
Examples:
| Should Be True | ${rc} < 10 |
| Should Be True | '${status}' == 'PASS' | # Strings must be quoted |
| Should Be True | ${number} | # Passes if ${number} is not zero |
| Should Be True | ${list} | # Passes if ${list} is not empty |
Variables used like ``${variable}``, as in the examples above, are
replaced in the expression before evaluation. Variables are also
available in the evaluation namespace and can be accessed using special
syntax ``$variable``. This is a new feature in Robot Framework 2.9
and it is explained more thoroughly in `Evaluating expressions`.
Examples:
| Should Be True | $rc < 10 |
| Should Be True | $status == 'PASS' | # Expected string must be quoted |
`Should Be True` automatically imports Python's
[http://docs.python.org/library/os.html|os] and
[http://docs.python.org/library/sys.html|sys] modules that contain
several useful attributes:
| Should Be True | os.linesep == '\\n' | # Unixy |
| Should Be True | os.linesep == '\\r\\n' | # Windows |
| Should Be True | sys.platform == 'darwin' | # OS X |
| Should Be True | sys.platform.startswith('java') | # Jython |
"""
if not self._is_true(condition):
raise AssertionError(msg or "'%s' should be true." % condition)
def should_be_equal(self, first, second, msg=None, values=True,
ignore_case=False):
"""Fails if the given objects are unequal.
Optional ``msg`` and ``values`` arguments specify how to construct
the error message if this keyword fails:
- If ``msg`` is not given, the error message is ``<first> != <second>``.
- If ``msg`` is given and ``values`` gets a true value (default),
the error message is ``<msg>: <first> != <second>``.
- If ``msg`` is given and ``values`` gets a false value, the error
message is simply ``<msg>``. See `Boolean arguments` for more details
about using false values.
If ``ignore_case`` is given a true value (see `Boolean arguments`) and
arguments are strings, it indicates that comparison should be
case-insensitive. New option in Robot Framework 3.0.1.
If both arguments are multiline strings, the comparison is done using
`multiline string comparisons`.
Examples:
| Should Be Equal | ${x} | expected |
| Should Be Equal | ${x} | expected | Custom error message |
| Should Be Equal | ${x} | expected | Custom message | values=False |
| Should Be Equal | ${x} | expected | ignore_case=True |
"""
self._log_types_at_info_if_different(first, second)
if is_truthy(ignore_case) and is_string(first) and is_string(second):
first = first.lower()
second = second.lower()
self._should_be_equal(first, second, msg, values)
def _should_be_equal(self, first, second, msg, values):
if first == second:
return
include_values = self._include_values(values)
if include_values and is_string(first) and is_string(second):
self._raise_multi_diff(first, second)
assert_equal(first, second, msg, include_values)
def _log_types_at_info_if_different(self, first, second):
level = 'DEBUG' if type(first) == type(second) else 'INFO'
self._log_types_at_level(level, first, second)
def _raise_multi_diff(self, first, second):
first_lines, second_lines = first.splitlines(), second.splitlines()
if len(first_lines) < 3 or len(second_lines) < 3:
return
self.log("%s\n!=\n%s" % (first, second))
err = 'Multiline strings are different:\n'
for line in difflib.unified_diff(first_lines, second_lines,
fromfile='first', tofile='second',
lineterm=''):
err += line + '\n'
raise AssertionError(err)
def _include_values(self, values):
return is_truthy(values) and str(values).upper() != 'NO VALUES'
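    # Illustrative note, not part of the original library: the ``values``
    # option accepted by the equality keywords is interpreted by
    # _include_values() above roughly as follows (assuming the usual
    # is_truthy() semantics):
    #   _include_values(True)        -> True   (default, values shown in error)
    #   _include_values('False')     -> False  (only the custom message is shown)
    #   _include_values('No Values') -> False  (legacy way to hide the values)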
def should_not_be_equal(self, first, second, msg=None, values=True,
ignore_case=False):
"""Fails if the given objects are equal.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
If ``ignore_case`` is given a true value (see `Boolean arguments`) and
both arguments are strings, it indicates that comparison should be
case-insensitive. New option in Robot Framework 3.0.1.
"""
self._log_types_at_info_if_different(first, second)
if is_truthy(ignore_case) and is_string(first) and is_string(second):
first = first.lower()
second = second.lower()
self._should_not_be_equal(first, second, msg, values)
def _should_not_be_equal(self, first, second, msg, values):
assert_not_equal(first, second, msg, self._include_values(values))
def should_not_be_equal_as_integers(self, first, second, msg=None,
values=True, base=None):
"""Fails if objects are equal after converting them to integers.
        See `Convert To Integer` for information on how to convert integers from
        bases other than 10 using the ``base`` argument or ``0b/0o/0x`` prefixes.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
See `Should Be Equal As Integers` for some usage examples.
"""
self._log_types_at_info_if_different(first, second)
self._should_not_be_equal(self._convert_to_integer(first, base),
self._convert_to_integer(second, base),
msg, values)
def should_be_equal_as_integers(self, first, second, msg=None, values=True,
base=None):
"""Fails if objects are unequal after converting them to integers.
        See `Convert To Integer` for information on how to convert integers from
        bases other than 10 using the ``base`` argument or ``0b/0o/0x`` prefixes.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
Examples:
| Should Be Equal As Integers | 42 | ${42} | Error message |
| Should Be Equal As Integers | ABCD | abcd | base=16 |
| Should Be Equal As Integers | 0b1011 | 11 |
"""
self._log_types_at_info_if_different(first, second)
self._should_be_equal(self._convert_to_integer(first, base),
self._convert_to_integer(second, base),
msg, values)
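    # Illustrative sketch, not part of the original library: assuming the
    # integer conversion follows `Convert To Integer` semantics, the keywords
    # above would compare, for example:
    #   'ABCD' with base=16 -> 43981
    #   '0b1011'            -> 11   (the 0b prefix implies base 2)
    #   '42'                -> 42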
def should_not_be_equal_as_numbers(self, first, second, msg=None,
values=True, precision=6):
"""Fails if objects are equal after converting them to real numbers.
The conversion is done with `Convert To Number` keyword using the
given ``precision``.
See `Should Be Equal As Numbers` for examples on how to use
``precision`` and why it does not always work as expected. See also
`Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
"""
self._log_types_at_info_if_different(first, second)
first = self._convert_to_number(first, precision)
second = self._convert_to_number(second, precision)
self._should_not_be_equal(first, second, msg, values)
def should_be_equal_as_numbers(self, first, second, msg=None, values=True,
precision=6):
"""Fails if objects are unequal after converting them to real numbers.
The conversion is done with `Convert To Number` keyword using the
given ``precision``.
Examples:
| Should Be Equal As Numbers | ${x} | 1.1 | | # Passes if ${x} is 1.1 |
| Should Be Equal As Numbers | 1.123 | 1.1 | precision=1 | # Passes |
| Should Be Equal As Numbers | 1.123 | 1.4 | precision=0 | # Passes |
| Should Be Equal As Numbers | 112.3 | 75 | precision=-2 | # Passes |
As discussed in the documentation of `Convert To Number`, machines
generally cannot store floating point numbers accurately. Because of
this limitation, comparing floats for equality is problematic and
a correct approach to use depends on the context. This keyword uses
a very naive approach of rounding the numbers before comparing them,
which is both prone to rounding errors and does not work very well if
numbers are really big or small. For more information about comparing
floats, and ideas on how to implement your own context specific
comparison algorithm, see
http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/.
If you want to avoid possible problems with floating point numbers,
you can implement custom keywords using Python's
[http://docs.python.org/library/decimal.html|decimal] or
[http://docs.python.org/library/fractions.html|fractions] modules.
See `Should Not Be Equal As Numbers` for a negative version of this
keyword and `Should Be Equal` for an explanation on how to override
the default error message with ``msg`` and ``values``.
"""
self._log_types_at_info_if_different(first, second)
first = self._convert_to_number(first, precision)
second = self._convert_to_number(second, precision)
self._should_be_equal(first, second, msg, values)
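    # Illustrative sketch, not part of the original library: the comparison
    # above simply rounds both numbers to the given precision first, roughly
    #   round(1.123, 1) == round(1.1, 1)    # 1.1 == 1.1      -> passes
    #   round(112.3, -2) == round(75, -2)   # 100.0 == 100.0  -> passes
    # which is why it is prone to rounding errors near rounding boundaries.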
def should_not_be_equal_as_strings(self, first, second, msg=None,
values=True, ignore_case=False):
"""Fails if objects are equal after converting them to strings.
If ``ignore_case`` is given a true value (see `Boolean arguments`), it
indicates that comparison should be case-insensitive. New option in
Robot Framework 3.0.1.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
"""
self._log_types_at_info_if_different(first, second)
first = self._convert_to_string(first)
second = self._convert_to_string(second)
if is_truthy(ignore_case):
first = first.lower()
second = second.lower()
self._should_not_be_equal(first, second, msg, values)
def should_be_equal_as_strings(self, first, second, msg=None, values=True,
ignore_case=False):
"""Fails if objects are unequal after converting them to strings.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
If ``ignore_case`` is given a true value (see `Boolean arguments`), it
indicates that comparison should be case-insensitive. New option in
Robot Framework 3.0.1.
If both arguments are multiline strings, the comparison is done using
`multiline string comparisons`.
"""
self._log_types_at_info_if_different(first, second)
first = self._convert_to_string(first)
second = self._convert_to_string(second)
if is_truthy(ignore_case):
first = first.lower()
second = second.lower()
self._should_be_equal(first, second, msg, values)
def should_not_start_with(self, str1, str2, msg=None, values=True,
ignore_case=False):
"""Fails if the string ``str1`` starts with the string ``str2``.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``, as well as for semantics
of the ``ignore_case`` option.
"""
if is_truthy(ignore_case):
str1 = str1.lower()
str2 = str2.lower()
if str1.startswith(str2):
raise AssertionError(self._get_string_msg(str1, str2, msg, values,
'starts with'))
def should_start_with(self, str1, str2, msg=None, values=True,
ignore_case=False):
"""Fails if the string ``str1`` does not start with the string ``str2``.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``, as well as for semantics
of the ``ignore_case`` option.
"""
if is_truthy(ignore_case):
str1 = str1.lower()
str2 = str2.lower()
if not str1.startswith(str2):
raise AssertionError(self._get_string_msg(str1, str2, msg, values,
'does not start with'))
def should_not_end_with(self, str1, str2, msg=None, values=True,
ignore_case=False):
"""Fails if the string ``str1`` ends with the string ``str2``.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``, as well as for semantics
of the ``ignore_case`` option.
"""
if is_truthy(ignore_case):
str1 = str1.lower()
str2 = str2.lower()
if str1.endswith(str2):
raise AssertionError(self._get_string_msg(str1, str2, msg, values,
'ends with'))
def should_end_with(self, str1, str2, msg=None, values=True,
ignore_case=False):
"""Fails if the string ``str1`` does not end with the string ``str2``.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``, as well as for semantics
of the ``ignore_case`` option.
"""
if is_truthy(ignore_case):
str1 = str1.lower()
str2 = str2.lower()
if not str1.endswith(str2):
raise AssertionError(self._get_string_msg(str1, str2, msg, values,
'does not end with'))
def should_not_contain(self, container, item, msg=None, values=True,
ignore_case=False):
"""Fails if ``container`` contains ``item`` one or more times.
Works with strings, lists, and anything that supports Python's ``in``
operator.
See `Should Be Equal` for an explanation on how to override the default
error message with arguments ``msg`` and ``values``. ``ignore_case``
has exactly the same semantics as with `Should Contain`.
Examples:
| Should Not Contain | ${some list} | value |
| Should Not Contain | ${output} | FAILED | ignore_case=True |
"""
        # TODO: It is inconsistent that errors show the original case of 'container'
        # but 'item' in lower case. Should rather show the original case everywhere
        # and add a separate '(case-insensitive)' note to the error message.
# This same logic should be used with all keywords supporting
# case-insensitive comparisons.
orig_container = container
if is_truthy(ignore_case) and is_string(item):
item = item.lower()
if is_string(container):
container = container.lower()
elif is_list_like(container):
container = set(x.lower() if is_string(x) else x for x in container)
if item in container:
raise AssertionError(self._get_string_msg(orig_container, item, msg,
values, 'contains'))
def should_contain(self, container, item, msg=None, values=True,
ignore_case=False):
"""Fails if ``container`` does not contain ``item`` one or more times.
Works with strings, lists, and anything that supports Python's ``in``
operator.
See `Should Be Equal` for an explanation on how to override the default
error message with arguments ``msg`` and ``values``.
If ``ignore_case`` is given a true value (see `Boolean arguments`) and
compared items are strings, it indicates that comparison should be
case-insensitive. If the ``container`` is a list-like object, string
items in it are compared case-insensitively. New option in Robot
Framework 3.0.1.
Examples:
| Should Contain | ${output} | PASS |
| Should Contain | ${some list} | value | msg=Failure! | values=False |
| Should Contain | ${some list} | value | ignore_case=True |
"""
orig_container = container
if is_truthy(ignore_case) and is_string(item):
item = item.lower()
if is_string(container):
container = container.lower()
elif is_list_like(container):
container = set(x.lower() if is_string(x) else x for x in container)
if item not in container:
raise AssertionError(self._get_string_msg(orig_container, item, msg,
values, 'does not contain'))
def should_contain_any(self, container, *items, **configuration):
"""Fails if ``container`` does not contain any of the ``*items``.
Works with strings, lists, and anything that supports Python's ``in``
operator.
Supports additional configuration parameters ``msg``, ``values``
and ``ignore_case``, which have exactly the same semantics as arguments
        with the same names have with `Should Contain`. These arguments must
always be given using ``name=value`` syntax after all ``items``.
Note that possible equal signs in ``items`` must be escaped with
        a backslash (e.g. ``foo\\=bar``) to prevent them from being passed in
as ``**configuration``.
Examples:
| Should Contain Any | ${string} | substring 1 | substring 2 |
| Should Contain Any | ${list} | item 1 | item 2 | item 3 |
| Should Contain Any | ${list} | item 1 | item 2 | item 3 | ignore_case=True |
| Should Contain Any | ${list} | @{items} | msg=Custom message | values=False |
New in Robot Framework 3.0.1.
"""
msg = configuration.pop('msg', None)
values = configuration.pop('values', True)
ignore_case = configuration.pop('ignore_case', False)
if configuration:
raise RuntimeError("Unsupported configuration parameter%s: %s."
% (s(configuration),
seq2str(sorted(configuration))))
if not items:
raise RuntimeError('One or more items required.')
orig_container = container
if is_truthy(ignore_case):
items = [x.lower() if is_string(x) else x for x in items]
if is_string(container):
container = container.lower()
elif is_list_like(container):
container = set(x.lower() if is_string(x) else x for x in container)
if not any(item in container for item in items):
msg = self._get_string_msg(orig_container,
seq2str(items, lastsep=' or '),
msg, values,
'does not contain any of',
quote_item2=False)
raise AssertionError(msg)
def should_not_contain_any(self, container, *items, **configuration):
"""Fails if ``container`` contains one or more of the ``*items``.
Works with strings, lists, and anything that supports Python's ``in``
operator.
Supports additional configuration parameters ``msg``, ``values``
and ``ignore_case``, which have exactly the same semantics as arguments
        with the same names have with `Should Contain`. These arguments must
always be given using ``name=value`` syntax after all ``items``.
Note that possible equal signs in ``items`` must be escaped with
        a backslash (e.g. ``foo\\=bar``) to prevent them from being passed in
as ``**configuration``.
Examples:
| Should Not Contain Any | ${string} | substring 1 | substring 2 |
| Should Not Contain Any | ${list} | item 1 | item 2 | item 3 |
| Should Not Contain Any | ${list} | item 1 | item 2 | item 3 | ignore_case=True |
| Should Not Contain Any | ${list} | @{items} | msg=Custom message | values=False |
New in Robot Framework 3.0.1.
"""
msg = configuration.pop('msg', None)
values = configuration.pop('values', True)
ignore_case = configuration.pop('ignore_case', False)
if configuration:
raise RuntimeError("Unsupported configuration parameter%s: %s."
% (s(configuration),
seq2str(sorted(configuration))))
if not items:
raise RuntimeError('One or more items required.')
orig_container = container
if is_truthy(ignore_case):
items = [x.lower() if is_string(x) else x for x in items]
if is_string(container):
container = container.lower()
elif is_list_like(container):
container = set(x.lower() if is_string(x) else x for x in container)
if any(item in container for item in items):
msg = self._get_string_msg(orig_container,
seq2str(items, lastsep=' or '),
msg, values,
'contains one or more of',
quote_item2=False)
raise AssertionError(msg)
def should_contain_x_times(self, item1, item2, count, msg=None,
ignore_case=False):
"""Fails if ``item1`` does not contain ``item2`` ``count`` times.
Works with strings, lists and all objects that `Get Count` works
with. The default error message can be overridden with ``msg`` and
the actual count is always logged.
If ``ignore_case`` is given a true value (see `Boolean arguments`) and
compared items are strings, it indicates that comparison should be
case-insensitive. If the ``item1`` is a list-like object, string
items in it are compared case-insensitively. New option in Robot
Framework 3.0.1.
Examples:
| Should Contain X Times | ${output} | hello | 2 |
| Should Contain X Times | ${some list} | value | 3 | ignore_case=True |
"""
# TODO: Rename 'item1' and 'item2' to 'container' and 'item' in RF 3.1.
# Other 'contain' keywords use these names. And 'Get Count' should too.
# Cannot be done in minor release due to backwards compatibility.
# Remember to update it also in the docstring!!
count = self._convert_to_integer(count)
orig_item1 = item1
if is_truthy(ignore_case) and is_string(item2):
item2 = item2.lower()
if is_string(item1):
item1 = item1.lower()
elif is_list_like(item1):
item1 = [x.lower() if is_string(x) else x for x in item1]
x = self.get_count(item1, item2)
if not msg:
msg = "'%s' contains '%s' %d time%s, not %d time%s." \
% (unic(orig_item1), unic(item2), x, s(x), count, s(count))
self.should_be_equal_as_integers(x, count, msg, values=False)
def get_count(self, item1, item2):
"""Returns and logs how many times ``item2`` is found from ``item1``.
This keyword works with Python strings and lists and all objects
that either have ``count`` method or can be converted to Python lists.
Example:
| ${count} = | Get Count | ${some item} | interesting value |
| Should Be True | 5 < ${count} < 10 |
"""
if not hasattr(item1, 'count'):
try:
item1 = list(item1)
except:
raise RuntimeError("Converting '%s' to list failed: %s"
% (item1, get_error_message()))
count = item1.count(item2)
self.log('Item found from the first item %d time%s' % (count, s(count)))
return count
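    # Illustrative note, not part of the original library: get_count() uses the
    # item's own count() method when available (strings and lists have one) and
    # only falls back to a list() conversion for other iterables, e.g. roughly
    #   'hello world'.count('o')     -> 2
    #   list((1, 2, 2, 3)).count(2)  -> 2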
def should_not_match(self, string, pattern, msg=None, values=True,
ignore_case=False):
"""Fails if the given ``string`` matches the given ``pattern``.
        Pattern matching is similar to matching files in a shell with
``*``, ``?`` and ``[chars]`` acting as wildcards. See the
`Glob patterns` section for more information.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``, as well as for semantics
of the ``ignore_case`` option.
"""
if self._matches(string, pattern, caseless=is_truthy(ignore_case)):
raise AssertionError(self._get_string_msg(string, pattern, msg,
values, 'matches'))
def should_match(self, string, pattern, msg=None, values=True,
ignore_case=False):
"""Fails if the given ``string`` does not match the given ``pattern``.
        Pattern matching is similar to matching files in a shell with
``*``, ``?`` and ``[chars]`` acting as wildcards. See the
`Glob patterns` section for more information.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``, as well as for semantics
of the ``ignore_case`` option.
"""
if not self._matches(string, pattern, caseless=is_truthy(ignore_case)):
raise AssertionError(self._get_string_msg(string, pattern, msg,
values, 'does not match'))
def should_match_regexp(self, string, pattern, msg=None, values=True):
"""Fails if ``string`` does not match ``pattern`` as a regular expression.
See the `Regular expressions` section for more information about
        regular expressions and how to use them in Robot Framework test data.
Notice that the given pattern does not need to match the whole string.
For example, the pattern ``ello`` matches the string ``Hello world!``.
If a full match is needed, the ``^`` and ``$`` characters can be used
to denote the beginning and end of the string, respectively.
For example, ``^ello$`` only matches the exact string ``ello``.
Possible flags altering how the expression is parsed (e.g.
``re.IGNORECASE``, ``re.MULTILINE``) must be embedded to the
pattern like ``(?im)pattern``. The most useful flags are ``i``
(case-insensitive), ``m`` (multiline mode), ``s`` (dotall mode)
and ``x`` (verbose).
If this keyword passes, it returns the portion of the string that
matched the pattern. Additionally, the possible captured groups are
returned.
See the `Should Be Equal` keyword for an explanation on how to override
the default error message with the ``msg`` and ``values`` arguments.
Examples:
| Should Match Regexp | ${output} | \\\\d{6} | # Output contains six numbers |
| Should Match Regexp | ${output} | ^\\\\d{6}$ | # Six numbers and nothing more |
| ${ret} = | Should Match Regexp | Foo: 42 | (?i)foo: \\\\d+ |
| ${match} | ${group1} | ${group2} = |
| ... | Should Match Regexp | Bar: 43 | (Foo|Bar): (\\\\d+) |
=>
| ${ret} = 'Foo: 42'
| ${match} = 'Bar: 43'
| ${group1} = 'Bar'
| ${group2} = '43'
"""
res = re.search(pattern, string)
if res is None:
raise AssertionError(self._get_string_msg(string, pattern, msg,
values, 'does not match'))
match = res.group(0)
groups = res.groups()
if groups:
return [match] + list(groups)
return match
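    # Illustrative note, not part of the original library: the keyword above
    # returns just the matched string when the pattern has no groups, and a
    # list of [full match] + groups when it does. For example, matching
    # 'Bar: 43' against '(Foo|Bar): (\d+)' returns ['Bar: 43', 'Bar', '43'].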
def should_not_match_regexp(self, string, pattern, msg=None, values=True):
"""Fails if ``string`` matches ``pattern`` as a regular expression.
See `Should Match Regexp` for more information about arguments.
"""
if re.search(pattern, string) is not None:
raise AssertionError(self._get_string_msg(string, pattern, msg,
values, 'matches'))
def get_length(self, item):
"""Returns and logs the length of the given item as an integer.
The item can be anything that has a length, for example, a string,
a list, or a mapping. The keyword first tries to get the length with
the Python function ``len``, which calls the item's ``__len__`` method
internally. If that fails, the keyword tries to call the item's
possible ``length`` and ``size`` methods directly. The final attempt is
trying to get the value of the item's ``length`` attribute. If all
these attempts are unsuccessful, the keyword fails.
Examples:
| ${length} = | Get Length | Hello, world! | |
| Should Be Equal As Integers | ${length} | 13 |
| @{list} = | Create List | Hello, | world! |
| ${length} = | Get Length | ${list} | |
| Should Be Equal As Integers | ${length} | 2 |
See also `Length Should Be`, `Should Be Empty` and `Should Not Be
Empty`.
"""
length = self._get_length(item)
self.log('Length is %d' % length)
return length
def _get_length(self, item):
try:
return len(item)
except RERAISED_EXCEPTIONS:
raise
except:
try:
return item.length()
except RERAISED_EXCEPTIONS:
raise
except:
try:
return item.size()
except RERAISED_EXCEPTIONS:
raise
except:
try:
return item.length
except RERAISED_EXCEPTIONS:
raise
except:
raise RuntimeError("Could not get length of '%s'." % item)
def length_should_be(self, item, length, msg=None):
"""Verifies that the length of the given item is correct.
        The length of the item is obtained using the `Get Length` keyword. The
default error message can be overridden with the ``msg`` argument.
"""
length = self._convert_to_integer(length)
actual = self.get_length(item)
if actual != length:
raise AssertionError(msg or "Length of '%s' should be %d but is %d."
% (item, length, actual))
def should_be_empty(self, item, msg=None):
"""Verifies that the given item is empty.
        The length of the item is obtained using the `Get Length` keyword. The
default error message can be overridden with the ``msg`` argument.
"""
if self.get_length(item) > 0:
raise AssertionError(msg or "'%s' should be empty." % item)
def should_not_be_empty(self, item, msg=None):
"""Verifies that the given item is not empty.
        The length of the item is obtained using the `Get Length` keyword. The
default error message can be overridden with the ``msg`` argument.
"""
if self.get_length(item) == 0:
raise AssertionError(msg or "'%s' should not be empty." % item)
def _get_string_msg(self, item1, item2, custom_message, include_values,
delimiter, quote_item1=True, quote_item2=True):
if custom_message and not self._include_values(include_values):
return custom_message
item1 = "'%s'" % unic(item1) if quote_item1 else unic(item1)
item2 = "'%s'" % unic(item2) if quote_item2 else unic(item2)
default_message = '%s %s %s' % (item1, delimiter, item2)
if not custom_message:
return default_message
return '%s: %s' % (custom_message, default_message)
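    # Illustrative note, not part of the original library: _get_string_msg()
    # builds errors of the form "'item1' <delimiter> 'item2'", optionally
    # prefixed with a custom message, e.g. roughly
    #   _get_string_msg('foo', 'bar', None, True, 'does not contain')
    #       -> "'foo' does not contain 'bar'"
    #   _get_string_msg('foo', 'bar', 'Oops', True, 'does not contain')
    #       -> "Oops: 'foo' does not contain 'bar'"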
class _Variables(_BuiltInBase):
def get_variables(self, no_decoration=False):
"""Returns a dictionary containing all variables in the current scope.
Variables are returned as a special dictionary that allows accessing
variables in space, case, and underscore insensitive manner similarly
as accessing variables in the test data. This dictionary supports all
same operations as normal Python dictionaries and, for example,
Collections library can be used to access or modify it. Modifying the
returned dictionary has no effect on the variables available in the
current scope.
By default variables are returned with ``${}``, ``@{}`` or ``&{}``
decoration based on variable types. Giving a true value (see `Boolean
arguments`) to the optional argument ``no_decoration`` will return
the variables without the decoration. This option is new in Robot
Framework 2.9.
Example:
| ${example_variable} = | Set Variable | example value |
| ${variables} = | Get Variables | |
| Dictionary Should Contain Key | ${variables} | \\${example_variable} |
| Dictionary Should Contain Key | ${variables} | \\${ExampleVariable} |
| Set To Dictionary | ${variables} | \\${name} | value |
| Variable Should Not Exist | \\${name} | | |
| ${no decoration} = | Get Variables | no_decoration=Yes |
| Dictionary Should Contain Key | ${no decoration} | example_variable |
"""
return self._variables.as_dict(decoration=is_falsy(no_decoration))
@run_keyword_variant(resolve=0)
def get_variable_value(self, name, default=None):
"""Returns variable value or ``default`` if the variable does not exist.
The name of the variable can be given either as a normal variable name
(e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice
that the former has some limitations explained in `Set Suite Variable`.
Examples:
| ${x} = | Get Variable Value | ${a} | default |
| ${y} = | Get Variable Value | ${a} | ${b} |
| ${z} = | Get Variable Value | ${z} | |
=>
| ${x} gets value of ${a} if ${a} exists and string 'default' otherwise
| ${y} gets value of ${a} if ${a} exists and value of ${b} otherwise
| ${z} is set to Python None if it does not exist previously
See `Set Variable If` for another keyword to set variables dynamically.
"""
try:
return self._variables[self._get_var_name(name)]
except DataError:
return self._variables.replace_scalar(default)
def log_variables(self, level='INFO'):
"""Logs all variables in the current scope with given log level."""
variables = self.get_variables()
for name in sorted(variables, key=lambda s: s[2:-1].lower()):
msg = format_assign_message(name, variables[name], cut_long=False)
self.log(msg, level)
@run_keyword_variant(resolve=0)
def variable_should_exist(self, name, msg=None):
"""Fails unless the given variable exists within the current scope.
The name of the variable can be given either as a normal variable name
(e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice
that the former has some limitations explained in `Set Suite Variable`.
The default error message can be overridden with the ``msg`` argument.
See also `Variable Should Not Exist` and `Keyword Should Exist`.
"""
name = self._get_var_name(name)
msg = self._variables.replace_string(msg) if msg \
else "Variable %s does not exist." % name
try:
self._variables[name]
except DataError:
raise AssertionError(msg)
@run_keyword_variant(resolve=0)
def variable_should_not_exist(self, name, msg=None):
"""Fails if the given variable exists within the current scope.
The name of the variable can be given either as a normal variable name
(e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice
that the former has some limitations explained in `Set Suite Variable`.
The default error message can be overridden with the ``msg`` argument.
See also `Variable Should Exist` and `Keyword Should Exist`.
"""
name = self._get_var_name(name)
msg = self._variables.replace_string(msg) if msg \
else "Variable %s exists." % name
try:
self._variables[name]
except DataError:
pass
else:
raise AssertionError(msg)
def replace_variables(self, text):
"""Replaces variables in the given text with their current values.
If the text contains undefined variables, this keyword fails.
If the given ``text`` contains only a single variable, its value is
returned as-is and it can be any object. Otherwise this keyword
always returns a string.
Example:
The file ``template.txt`` contains ``Hello ${NAME}!`` and variable
``${NAME}`` has the value ``Robot``.
| ${template} = | Get File | ${CURDIR}/template.txt |
| ${message} = | Replace Variables | ${template} |
| Should Be Equal | ${message} | Hello Robot! |
"""
return self._variables.replace_scalar(text)
def set_variable(self, *values):
        Returns the given values, which can then be assigned to variables.
This keyword is mainly used for setting scalar variables.
Additionally it can be used for converting a scalar variable
containing a list to a list variable or to multiple scalar variables.
It is recommended to use `Create List` when creating new lists.
Examples:
| ${hi} = | Set Variable | Hello, world! |
| ${hi2} = | Set Variable | I said: ${hi} |
| ${var1} | ${var2} = | Set Variable | Hello | world |
| @{list} = | Set Variable | ${list with some items} |
| ${item1} | ${item2} = | Set Variable | ${list with 2 items} |
Variables created with this keyword are available only in the
scope where they are created. See `Set Global Variable`,
`Set Test Variable` and `Set Suite Variable` for information on how to
set variables so that they are available also in a larger scope.
"""
if len(values) == 0:
return ''
elif len(values) == 1:
return values[0]
else:
return list(values)
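    # Illustrative note, not part of the original library: the return value of
    # set_variable() depends only on the number of given values:
    #   set_variable()              -> ''               (empty string)
    #   set_variable('hi')          -> 'hi'             (single value as-is)
    #   set_variable('a', 'b', 'c') -> ['a', 'b', 'c']  (list of all values)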
@run_keyword_variant(resolve=0)
def set_test_variable(self, name, *values):
"""Makes a variable available everywhere within the scope of the current test.
Variables set with this keyword are available everywhere within the
scope of the currently executed test case. For example, if you set a
variable in a user keyword, it is available both in the test case level
and also in all other user keywords used in the current test. Other
test cases will not see variables set with this keyword.
See `Set Suite Variable` for more information and examples.
"""
name = self._get_var_name(name)
value = self._get_var_value(name, values)
self._variables.set_test(name, value)
self._log_set_variable(name, value)
@run_keyword_variant(resolve=0)
def set_task_variable(self, name, *values):
"""Makes a variable available everywhere within the scope of the current task.
This is an alias for `Set Test Variable` that is more applicable when
creating tasks, not tests. New in RF 3.1.
"""
self.set_test_variable(name, *values)
@run_keyword_variant(resolve=0)
def set_suite_variable(self, name, *values):
"""Makes a variable available everywhere within the scope of the current suite.
Variables set with this keyword are available everywhere within the
scope of the currently executed test suite. Setting variables with this
keyword thus has the same effect as creating them using the Variable
table in the test data file or importing them from variable files.
Possible child test suites do not see variables set with this keyword
by default. Starting from Robot Framework 2.9, that can be controlled
by using ``children=<option>`` as the last argument. If the specified
``<option>`` is a non-empty string or any other value considered true
in Python, the variable is set also to the child suites. Parent and
sibling suites will never see variables set with this keyword.
The name of the variable can be given either as a normal variable name
(e.g. ``${NAME}``) or in escaped format as ``\\${NAME}`` or ``$NAME``.
Variable value can be given using the same syntax as when variables
are created in the Variable table.
If a variable already exists within the new scope, its value will be
overwritten. Otherwise a new variable is created. If a variable already
exists within the current scope, the value can be left empty and the
variable within the new scope gets the value within the current scope.
Examples:
| Set Suite Variable | ${SCALAR} | Hello, world! |
| Set Suite Variable | ${SCALAR} | Hello, world! | children=true |
| Set Suite Variable | @{LIST} | First item | Second item |
| Set Suite Variable | &{DICT} | key=value | foo=bar |
| ${ID} = | Get ID |
| Set Suite Variable | ${ID} |
To override an existing value with an empty value, use built-in
variables ``${EMPTY}``, ``@{EMPTY}`` or ``&{EMPTY}``:
| Set Suite Variable | ${SCALAR} | ${EMPTY} |
| Set Suite Variable | @{LIST} | @{EMPTY} |
| Set Suite Variable | &{DICT} | &{EMPTY} |
        *NOTE:* If the variable has a value which itself is a variable (escaped
or not), you must always use the escaped format to set the variable:
Example:
| ${NAME} = | Set Variable | \\${var} |
| Set Suite Variable | ${NAME} | value | # Sets variable ${var} |
| Set Suite Variable | \\${NAME} | value | # Sets variable ${NAME} |
This limitation applies also to `Set Test Variable`, `Set Global
Variable`, `Variable Should Exist`, `Variable Should Not Exist` and
`Get Variable Value` keywords.
"""
name = self._get_var_name(name)
if (values and is_string(values[-1]) and
values[-1].startswith('children=')):
children = self._variables.replace_scalar(values[-1][9:])
children = is_truthy(children)
values = values[:-1]
else:
children = False
value = self._get_var_value(name, values)
self._variables.set_suite(name, value, children=children)
self._log_set_variable(name, value)
@run_keyword_variant(resolve=0)
def set_global_variable(self, name, *values):
"""Makes a variable available globally in all tests and suites.
Variables set with this keyword are globally available in all
subsequent test suites, test cases and user keywords. Also variables
in variable tables are overridden. Variables assigned locally based
on keyword return values or by using `Set Test Variable` and
`Set Suite Variable` override these variables in that scope, but
the global value is not changed in those cases.
In practice setting variables with this keyword has the same effect
as using command line options ``--variable`` and ``--variablefile``.
Because this keyword can change variables everywhere, it should be
used with care.
See `Set Suite Variable` for more information and examples.
"""
name = self._get_var_name(name)
value = self._get_var_value(name, values)
self._variables.set_global(name, value)
self._log_set_variable(name, value)
# Helpers
def _get_var_name(self, orig):
name = self._resolve_possible_variable(orig)
try:
return self._unescape_variable_if_needed(name)
except ValueError:
raise RuntimeError("Invalid variable syntax '%s'." % orig)
def _resolve_possible_variable(self, name):
try:
resolved = self._variables.replace_string(name)
return self._unescape_variable_if_needed(resolved)
except (KeyError, ValueError, DataError):
return name
def _unescape_variable_if_needed(self, name):
if name.startswith('\\'):
name = name[1:]
if len(name) < 2:
raise ValueError
if name[0] in '$@&' and name[1] != '{':
name = '%s{%s}' % (name[0], name[1:])
if is_var(name):
return name
# Support for possible internal variables (issue 397)
name = '%s{%s}' % (name[0], self.replace_variables(name[2:-1]))
if is_var(name):
return name
raise ValueError
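    # Illustrative sketch, not part of the original library: assuming is_var()
    # accepts the normal ${name}/@{name}/&{name} syntax, the helper above
    # normalizes escaped names roughly as follows:
    #   '\${NAME}' -> '${NAME}'   (leading backslash stripped)
    #   '$NAME'    -> '${NAME}'   (curly braces added around the base name)
    # and raises ValueError for anything that still is not a valid variable.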
def _get_var_value(self, name, values):
if not values:
return self._variables[name]
if name[0] == '$':
            # We could consider concatenating values similarly to when creating
# scalar variables in the variable table, but that would require
# handling non-string values somehow. For details see
# https://github.com/robotframework/robotframework/issues/1919
if len(values) != 1 or VariableSplitter(values[0]).is_list_variable():
raise DataError("Setting list value to scalar variable '%s' "
"is not supported anymore. Create list "
"variable '@%s' instead." % (name, name[1:]))
return self._variables.replace_scalar(values[0])
return VariableTableValue(values, name).resolve(self._variables)
def _log_set_variable(self, name, value):
self.log(format_assign_message(name, value))
class _RunKeyword(_BuiltInBase):
# If you use any of these run keyword variants from another library, you
# should register those keywords with 'register_run_keyword' method. See
# the documentation of that method at the end of this file. There are also
# other run keyword variant keywords in BuiltIn which can also be seen
# at the end of this file.
@run_keyword_variant(resolve=1)
def run_keyword(self, name, *args):
"""Executes the given keyword with the given arguments.
Because the name of the keyword to execute is given as an argument, it
can be a variable and thus set dynamically, e.g. from a return value of
another keyword or from the command line.
"""
if not is_string(name):
raise RuntimeError('Keyword name must be a string.')
kw = Keyword(name, args=args)
return kw.run(self._context)
@run_keyword_variant(resolve=0)
def run_keywords(self, *keywords):
"""Executes all the given keywords in a sequence.
This keyword is mainly useful in setups and teardowns when they need
to take care of multiple actions and creating a new higher level user
keyword would be an overkill.
By default all arguments are expected to be keywords to be executed.
Examples:
| `Run Keywords` | `Initialize database` | `Start servers` | `Clear logs` |
| `Run Keywords` | ${KW 1} | ${KW 2} |
| `Run Keywords` | @{KEYWORDS} |
Keywords can also be run with arguments using upper case ``AND`` as
a separator between keywords. The keywords are executed so that the
        first argument is the first keyword and the following arguments until
        the first ``AND`` are arguments to it. The first argument after the first
        ``AND`` is the second keyword and the following arguments until the next
``AND`` are its arguments. And so on.
Examples:
| `Run Keywords` | `Initialize database` | db1 | AND | `Start servers` | server1 | server2 |
| `Run Keywords` | `Initialize database` | ${DB NAME} | AND | `Start servers` | @{SERVERS} | AND | `Clear logs` |
| `Run Keywords` | ${KW} | AND | @{KW WITH ARGS} |
Notice that the ``AND`` control argument must be used explicitly and
cannot itself come from a variable. If you need to use literal ``AND``
string as argument, you can either use variables or escape it with
a backslash like ``\\AND``.
"""
self._run_keywords(self._split_run_keywords(list(keywords)))
def _run_keywords(self, iterable):
errors = []
for kw, args in iterable:
try:
self.run_keyword(kw, *args)
except ExecutionPassed as err:
err.set_earlier_failures(errors)
raise err
except ExecutionFailed as err:
errors.extend(err.get_errors())
if not err.can_continue(self._context.in_teardown):
break
if errors:
raise ExecutionFailures(errors)
def _split_run_keywords(self, keywords):
if 'AND' not in keywords:
for name in self._variables.replace_list(keywords):
yield name, ()
else:
for name, args in self._split_run_keywords_from_and(keywords):
yield name, args
def _split_run_keywords_from_and(self, keywords):
while 'AND' in keywords:
index = keywords.index('AND')
yield self._resolve_run_keywords_name_and_args(keywords[:index])
keywords = keywords[index+1:]
yield self._resolve_run_keywords_name_and_args(keywords)
def _resolve_run_keywords_name_and_args(self, kw_call):
kw_call = self._variables.replace_list(kw_call, replace_until=1)
if not kw_call:
raise DataError('Incorrect use of AND')
return kw_call[0], kw_call[1:]
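    # Illustrative sketch, not part of the original library: for a call like
    #   Run Keywords | Init DB | db1 | AND | Start Servers | s1 | s2
    # the splitting above yields ('Init DB', ['db1']) and
    # ('Start Servers', ['s1', 's2']), which _run_keywords() then executes
    # one by one, collecting continuable failures along the way.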
@run_keyword_variant(resolve=2)
def run_keyword_if(self, condition, name, *args):
"""Runs the given keyword with the given arguments, if ``condition`` is true.
The given ``condition`` is evaluated in Python as explained in
        `Evaluating expressions`, and ``name`` and ``*args`` have the same
semantics as with `Run Keyword`.
Example, a simple if/else construct:
| ${status} | ${value} = | `Run Keyword And Ignore Error` | `My Keyword` |
| `Run Keyword If` | '${status}' == 'PASS' | `Some Action` | arg |
| `Run Keyword Unless` | '${status}' == 'PASS' | `Another Action` |
In this example, only either `Some Action` or `Another Action` is
executed, based on the status of `My Keyword`. Instead of `Run Keyword
And Ignore Error` you can also use `Run Keyword And Return Status`.
Variables used like ``${variable}``, as in the examples above, are
replaced in the expression before evaluation. Variables are also
available in the evaluation namespace and can be accessed using special
syntax ``$variable``. This is a new feature in Robot Framework 2.9
and it is explained more thoroughly in `Evaluating expressions`.
Example:
| `Run Keyword If` | $result is None or $result == 'FAIL' | `Keyword` |
This keyword supports also optional ELSE and ELSE IF branches. Both
        of them are defined in ``*args`` and must use exactly the format ``ELSE``
or ``ELSE IF``, respectively. ELSE branches must contain first the
name of the keyword to execute and then its possible arguments. ELSE
IF branches must first contain a condition, like the first argument
to this keyword, and then the keyword to execute and its possible
arguments. It is possible to have ELSE branch after ELSE IF and to
have multiple ELSE IF branches. Nested `Run Keyword If` usage is not
supported when using ELSE and/or ELSE IF branches.
Given previous example, if/else construct can also be created like this:
| ${status} | ${value} = | `Run Keyword And Ignore Error` | `My Keyword` |
| `Run Keyword If` | '${status}' == 'PASS' | `Some Action` | arg | ELSE | `Another Action` |
The return value of this keyword is the return value of the actually
executed keyword or Python ``None`` if no keyword was executed (i.e.
if ``condition`` was false). Hence, it is recommended to use ELSE
and/or ELSE IF branches to conditionally assign return values from
        keywords to variables (see `Set Variable If` if you need to set fixed
values conditionally). This is illustrated by the example below:
| ${var1} = | `Run Keyword If` | ${rc} == 0 | `Some keyword returning a value` |
| ... | ELSE IF | 0 < ${rc} < 42 | `Another keyword` |
| ... | ELSE IF | ${rc} < 0 | `Another keyword with args` | ${rc} | arg2 |
| ... | ELSE | `Final keyword to handle abnormal cases` | ${rc} |
| ${var2} = | `Run Keyword If` | ${condition} | `Some keyword` |
In this example, ${var2} will be set to ``None`` if ${condition} is
false.
Notice that ``ELSE`` and ``ELSE IF`` control words must be used
explicitly and thus cannot come from variables. If you need to use
literal ``ELSE`` and ``ELSE IF`` strings as arguments, you can escape
them with a backslash like ``\\ELSE`` and ``\\ELSE IF``.
Python's [http://docs.python.org/library/os.html|os] and
[http://docs.python.org/library/sys.html|sys] modules are
automatically imported when evaluating the ``condition``.
Attributes they contain can thus be used in the condition:
| `Run Keyword If` | os.sep == '/' | `Unix Keyword` |
| ... | ELSE IF | sys.platform.startswith('java') | `Jython Keyword` |
| ... | ELSE | `Windows Keyword` |
"""
args, branch = self._split_elif_or_else_branch(args)
if self._is_true(condition):
return self.run_keyword(name, *args)
return branch()
def _split_elif_or_else_branch(self, args):
if 'ELSE IF' in args:
args, branch = self._split_branch(args, 'ELSE IF', 2,
'condition and keyword')
return args, lambda: self.run_keyword_if(*branch)
if 'ELSE' in args:
args, branch = self._split_branch(args, 'ELSE', 1, 'keyword')
return args, lambda: self.run_keyword(*branch)
return args, lambda: None
def _split_branch(self, args, control_word, required, required_error):
index = list(args).index(control_word)
branch = self._variables.replace_list(args[index+1:], required)
if len(branch) < required:
raise DataError('%s requires %s.' % (control_word, required_error))
return args[:index], branch
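    # Illustrative sketch, not part of the original library: for a call like
    #   Run Keyword If | ${rc} == 0 | KW 1 | ELSE IF | ${rc} > 0 | KW 2 | ELSE | KW 3
    # the helpers above split the trailing branch off, so 'KW 1' runs (without
    # extra arguments) when the condition is true, and otherwise the branch
    # re-enters run_keyword_if('${rc} > 0', 'KW 2', 'ELSE', 'KW 3'), which in
    # turn handles the remaining ELSE part the same way.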
@run_keyword_variant(resolve=2)
def run_keyword_unless(self, condition, name, *args):
"""Runs the given keyword with the given arguments if ``condition`` is false.
See `Run Keyword If` for more information and an example. Notice that
this keyword does not support ``ELSE`` or ``ELSE IF`` branches like
`Run Keyword If` does, though.
"""
if not self._is_true(condition):
return self.run_keyword(name, *args)
@run_keyword_variant(resolve=1)
def run_keyword_and_ignore_error(self, name, *args):
"""Runs the given keyword with the given arguments and ignores possible error.
This keyword returns two values, so that the first is either string
``PASS`` or ``FAIL``, depending on the status of the executed keyword.
The second value is either the return value of the keyword or the
        received error message. See `Run Keyword And Return Status` if you are
only interested in the execution status.
The keyword name and arguments work as in `Run Keyword`. See
`Run Keyword If` for a usage example.
Errors caused by invalid syntax, timeouts, or fatal exceptions are not
caught by this keyword. Otherwise this keyword itself never fails.
Since Robot Framework 2.9, variable errors are caught by this keyword.
"""
try:
return 'PASS', self.run_keyword(name, *args)
except ExecutionFailed as err:
if err.dont_continue:
raise
return 'FAIL', unic(err)
@run_keyword_variant(resolve=1)
def run_keyword_and_return_status(self, name, *args):
"""Runs the given keyword with given arguments and returns the status as a Boolean value.
This keyword returns Boolean ``True`` if the keyword that is executed
succeeds and ``False`` if it fails. This is useful, for example, in
combination with `Run Keyword If`. If you are interested in the error
message or return value, use `Run Keyword And Ignore Error` instead.
The keyword name and arguments work as in `Run Keyword`.
Example:
| ${passed} = | `Run Keyword And Return Status` | Keyword | args |
| `Run Keyword If` | ${passed} | Another keyword |
Errors caused by invalid syntax, timeouts, or fatal exceptions are not
caught by this keyword. Otherwise this keyword itself never fails.
"""
status, _ = self.run_keyword_and_ignore_error(name, *args)
return status == 'PASS'
@run_keyword_variant(resolve=1)
def run_keyword_and_continue_on_failure(self, name, *args):
"""Runs the keyword and continues execution even if a failure occurs.
The keyword name and arguments work as with `Run Keyword`.
Example:
| Run Keyword And Continue On Failure | Fail | This is a stupid example |
| Log | This keyword is executed |
The execution is not continued if the failure is caused by invalid syntax,
timeout, or fatal exception.
Since Robot Framework 2.9, variable errors are caught by this keyword.
"""
try:
return self.run_keyword(name, *args)
except ExecutionFailed as err:
if not err.dont_continue:
err.continue_on_failure = True
raise err
@run_keyword_variant(resolve=2)
def run_keyword_and_expect_error(self, expected_error, name, *args):
"""Runs the keyword and checks that the expected error occurred.
The keyword to execute and its arguments are specified using ``name``
and ``*args`` exactly like with `Run Keyword`.
The expected error must be given in the same format as in Robot
Framework reports. By default it is interpreted as a glob pattern
with ``*``, ``?`` and ``[chars]`` as wildcards, but starting from
Robot Framework 3.1 that can be changed by using various prefixes
explained in the table below. Prefixes are case-sensitive and they
must be separated from the actual message with a colon and an
optional space like ``PREFIX: Message`` or ``PREFIX:Message``.
| = Prefix = | = Explanation = |
| ``EQUALS`` | Exact match. Especially useful if the error contains glob wildcards. |
| ``STARTS`` | Error must start with the specified error. |
| ``REGEXP`` | Regular expression match. |
| ``GLOB`` | Same as the default behavior. |
See the `Pattern matching` section for more information about glob
patterns and regular expressions.
If the expected error occurs, the error message is returned and it can
be further processed or tested if needed. If there is no error, or the
error does not match the expected error, this keyword fails.
Examples:
| Run Keyword And Expect Error | My error | Keyword | arg |
| Run Keyword And Expect Error | ValueError: * | Some Keyword |
| Run Keyword And Expect Error | STARTS: ValueError: | Some Keyword |
| Run Keyword And Expect Error | EQUALS:No match for '//input[@type="text"]' |
| ... | Find Element | //input[@type="text"] |
| ${msg} = | Run Keyword And Expect Error | * |
| ... | Keyword | arg1 | arg2 |
| Log To Console | ${msg} |
Errors caused by invalid syntax, timeouts, or fatal exceptions are not
caught by this keyword.
Since Robot Framework 2.9, variable errors are caught by this keyword.
"""
try:
self.run_keyword(name, *args)
except ExecutionFailed as err:
if err.dont_continue:
raise
error = err.message
else:
raise AssertionError("Expected error '%s' did not occur."
% expected_error)
if not self._error_is_expected(error, expected_error):
raise AssertionError("Expected error '%s' but got '%s'."
% (expected_error, error))
return error
def _error_is_expected(self, error, expected_error):
glob = self._matches
matchers = {'GLOB': glob,
'EQUALS': lambda s, p: s == p,
'STARTS': lambda s, p: s.startswith(p),
'REGEXP': lambda s, p: re.match(p, s) is not None}
prefixes = tuple(prefix + ':' for prefix in matchers)
if not expected_error.startswith(prefixes):
return glob(error, expected_error)
prefix, expected_error = expected_error.split(':', 1)
return matchers[prefix](error, expected_error.lstrip())
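    # Illustrative note, not part of the original library: expected errors are
    # matched by _error_is_expected() roughly like this:
    #   'ValueError: *'            -> glob match (the default behaviour)
    #   'EQUALS:ValueError: oops'  -> exact string comparison
    #   'STARTS: ValueError:'      -> str.startswith() after stripping the prefix
    #   'REGEXP: ValueError: \d+'  -> re.match() against the error message
    # Prefixes are case-sensitive and must be followed by a colon.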
@run_keyword_variant(resolve=2)
def repeat_keyword(self, repeat, name, *args):
"""Executes the specified keyword multiple times.
``name`` and ``args`` define the keyword that is executed similarly as
with `Run Keyword`. ``repeat`` specifies how many times (as a count) or
how long time (as a timeout) the keyword should be executed.
If ``repeat`` is given as count, it specifies how many times the
keyword should be executed. ``repeat`` can be given as an integer or
as a string that can be converted to an integer. If it is a string,
it can have postfix ``times`` or ``x`` (case and space insensitive)
to make the expression more explicit.
If ``repeat`` is given as timeout, it must be in Robot Framework's
time format (e.g. ``1 minute``, ``2 min 3 s``). Using a number alone
(e.g. ``1`` or ``1.5``) does not work in this context.
If ``repeat`` is zero or negative, the keyword is not executed at
all. This keyword fails immediately if any of the execution
rounds fails.
Examples:
| Repeat Keyword | 5 times | Go to Previous Page |
| Repeat Keyword | ${var} | Some Keyword | arg1 | arg2 |
| Repeat Keyword | 2 minutes | Some Keyword | arg1 | arg2 |
Specifying ``repeat`` as a timeout is new in Robot Framework 3.0.
"""
try:
count = self._get_repeat_count(repeat)
except RuntimeError as err:
timeout = self._get_repeat_timeout(repeat)
if timeout is None:
raise err
keywords = self._keywords_repeated_by_timeout(timeout, name, args)
else:
keywords = self._keywords_repeated_by_count(count, name, args)
self._run_keywords(keywords)
def _get_repeat_count(self, times, require_postfix=False):
times = normalize(str(times))
if times.endswith('times'):
times = times[:-5]
elif times.endswith('x'):
times = times[:-1]
elif require_postfix:
raise ValueError
return self._convert_to_integer(times)
def _get_repeat_timeout(self, timestr):
try:
float(timestr)
except ValueError:
pass
else:
return None
try:
return timestr_to_secs(timestr)
except ValueError:
return None
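    # Illustrative note, not part of the original library: with the helpers
    # above, Repeat Keyword interprets its ``repeat`` argument roughly as
    #   '5 times', '5 x', '5' -> a count of five rounds
    #   '2 minutes'           -> a timeout of 120 seconds
    #   '1.5'                 -> an error (a bare number is only valid as a count)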
def _keywords_repeated_by_count(self, count, name, args):
if count <= 0:
self.log("Keyword '%s' repeated zero times." % name)
for i in range(count):
self.log("Repeating keyword, round %d/%d." % (i + 1, count))
yield name, args
def _keywords_repeated_by_timeout(self, timeout, name, args):
if timeout <= 0:
self.log("Keyword '%s' repeated zero times." % name)
repeat_round = 0
maxtime = time.time() + timeout
while time.time() < maxtime:
repeat_round += 1
self.log("Repeating keyword, round %d, %s remaining."
% (repeat_round,
secs_to_timestr(maxtime - time.time(), compact=True)))
yield name, args
@run_keyword_variant(resolve=3)
def wait_until_keyword_succeeds(self, retry, retry_interval, name, *args):
"""Runs the specified keyword and retries if it fails.
``name`` and ``args`` define the keyword that is executed similarly
as with `Run Keyword`. How long to retry running the keyword is
defined using ``retry`` argument either as timeout or count.
``retry_interval`` is the time to wait before trying to run the
keyword again after the previous run has failed.
If ``retry`` is given as timeout, it must be in Robot Framework's
time format (e.g. ``1 minute``, ``2 min 3 s``, ``4.5``) that is
explained in an appendix of Robot Framework User Guide. If it is
given as count, it must have ``times`` or ``x`` postfix (e.g.
``5 times``, ``10 x``). ``retry_interval`` must always be given in
Robot Framework's time format.
If the keyword does not succeed regardless of retries, this keyword
fails. If the executed keyword passes, its return value is returned.
Examples:
| Wait Until Keyword Succeeds | 2 min | 5 sec | My keyword | argument |
| ${result} = | Wait Until Keyword Succeeds | 3x | 200ms | My keyword |
All normal failures are caught by this keyword. Errors caused by
invalid syntax, test or keyword timeouts, or fatal exceptions (caused
e.g. by `Fatal Error`) are not caught.
Running the same keyword multiple times inside this keyword can create
lots of output and considerably increase the size of the generated
output files. It is possible to remove unnecessary keywords from
the outputs using ``--RemoveKeywords WUKS`` command line option.
Support for specifying ``retry`` as a number of times to retry is
a new feature in Robot Framework 2.9.
Since Robot Framework 2.9, variable errors are caught by this keyword.
"""
maxtime = count = -1
try:
count = self._get_repeat_count(retry, require_postfix=True)
except ValueError:
timeout = timestr_to_secs(retry)
maxtime = time.time() + timeout
message = 'for %s' % secs_to_timestr(timeout)
else:
if count <= 0:
raise ValueError('Retry count %d is not positive.' % count)
message = '%d time%s' % (count, s(count))
retry_interval = timestr_to_secs(retry_interval)
while True:
try:
return self.run_keyword(name, *args)
except ExecutionFailed as err:
if err.dont_continue:
raise
count -= 1
if time.time() > maxtime > 0 or count == 0:
raise AssertionError("Keyword '%s' failed after retrying "
"%s. The last error was: %s"
% (name, message, err))
self._sleep_in_parts(retry_interval)
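    # Illustrative note, not part of the original library: when ``retry`` is a
    # count, the loop above decrements ``count`` towards zero and gives up when
    # it reaches zero; when ``retry`` is a timeout, ``count`` stays at -1 and
    # only ``maxtime`` is checked. E.g. '3x' allows at most three runs, while
    # '2 min' keeps retrying until the time is up.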
@run_keyword_variant(resolve=1)
def set_variable_if(self, condition, *values):
"""Sets variable based on the given condition.
The basic usage is giving a condition and two values. The
given condition is first evaluated the same way as with the
`Should Be True` keyword. If the condition is true, then the
first value is returned, and otherwise the second value is
returned. The second value can also be omitted, in which case
it has a default value None. This usage is illustrated in the
examples below, where ``${rc}`` is assumed to be zero.
| ${var1} = | Set Variable If | ${rc} == 0 | zero | nonzero |
| ${var2} = | Set Variable If | ${rc} > 0 | value1 | value2 |
| ${var3} = | Set Variable If | ${rc} > 0 | whatever | |
=>
| ${var1} = 'zero'
| ${var2} = 'value2'
| ${var3} = None
It is also possible to have 'else if' support by replacing the
second value with another condition, and having two new values
after it. If the first condition is not true, the second is
evaluated and one of the values after it is returned based on
its truth value. This can be continued by adding more
conditions without a limit.
| ${var} = | Set Variable If | ${rc} == 0 | zero |
        | ... | ${rc} > 0 | greater than zero | less than zero |
| |
| ${var} = | Set Variable If |
| ... | ${rc} == 0 | zero |
| ... | ${rc} == 1 | one |
| ... | ${rc} == 2 | two |
| ... | ${rc} > 2 | greater than two |
| ... | ${rc} < 0 | less than zero |
Use `Get Variable Value` if you need to set variables
dynamically based on whether a variable exist or not.
"""
values = self._verify_values_for_set_variable_if(list(values))
if self._is_true(condition):
return self._variables.replace_scalar(values[0])
values = self._verify_values_for_set_variable_if(values[1:], True)
if len(values) == 1:
return self._variables.replace_scalar(values[0])
return self.run_keyword('BuiltIn.Set Variable If', *values[0:])
def _verify_values_for_set_variable_if(self, values, default=False):
if not values:
if default:
return [None]
raise RuntimeError('At least one value is required')
if is_list_var(values[0]):
values[:1] = [escape(item) for item in self._variables[values[0]]]
return self._verify_values_for_set_variable_if(values)
return values
@run_keyword_variant(resolve=1)
def run_keyword_if_test_failed(self, name, *args):
"""Runs the given keyword with the given arguments, if the test failed.
This keyword can only be used in a test teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
Prior to Robot Framework 2.9 failures in test teardown itself were
not detected by this keyword.
"""
test = self._get_test_in_teardown('Run Keyword If Test Failed')
if not test.passed:
return self.run_keyword(name, *args)
@run_keyword_variant(resolve=1)
def run_keyword_if_test_passed(self, name, *args):
"""Runs the given keyword with the given arguments, if the test passed.
This keyword can only be used in a test teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
Prior to Robot Framework 2.9 failures in test teardown itself were
not detected by this keyword.
"""
test = self._get_test_in_teardown('Run Keyword If Test Passed')
if test.passed:
return self.run_keyword(name, *args)
@run_keyword_variant(resolve=1)
def run_keyword_if_timeout_occurred(self, name, *args):
"""Runs the given keyword if either a test or a keyword timeout has occurred.
This keyword can only be used in a test teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
self._get_test_in_teardown('Run Keyword If Timeout Occurred')
if self._context.timeout_occurred:
return self.run_keyword(name, *args)
def _get_test_in_teardown(self, kwname):
ctx = self._context
if ctx.test and ctx.in_test_teardown:
return ctx.test
raise RuntimeError("Keyword '%s' can only be used in test teardown."
% kwname)
@run_keyword_variant(resolve=1)
def run_keyword_if_all_critical_tests_passed(self, name, *args):
"""Runs the given keyword with the given arguments, if all critical tests passed.
This keyword can only be used in suite teardown. Trying to use it in
any other place will result in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
suite = self._get_suite_in_teardown('Run Keyword If '
'All Critical Tests Passed')
if suite.statistics.critical.failed == 0:
return self.run_keyword(name, *args)
@run_keyword_variant(resolve=1)
def run_keyword_if_any_critical_tests_failed(self, name, *args):
"""Runs the given keyword with the given arguments, if any critical tests failed.
This keyword can only be used in a suite teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
suite = self._get_suite_in_teardown('Run Keyword If '
'Any Critical Tests Failed')
if suite.statistics.critical.failed > 0:
return self.run_keyword(name, *args)
@run_keyword_variant(resolve=1)
def run_keyword_if_all_tests_passed(self, name, *args):
"""Runs the given keyword with the given arguments, if all tests passed.
This keyword can only be used in a suite teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
suite = self._get_suite_in_teardown('Run Keyword If All Tests Passed')
if suite.statistics.all.failed == 0:
return self.run_keyword(name, *args)
@run_keyword_variant(resolve=1)
def run_keyword_if_any_tests_failed(self, name, *args):
"""Runs the given keyword with the given arguments, if one or more tests failed.
This keyword can only be used in a suite teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
suite = self._get_suite_in_teardown('Run Keyword If Any Tests Failed')
if suite.statistics.all.failed > 0:
return self.run_keyword(name, *args)
def _get_suite_in_teardown(self, kwname):
if not self._context.in_suite_teardown:
raise RuntimeError("Keyword '%s' can only be used in suite teardown."
% kwname)
return self._context.suite
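# Editor's note: a minimal, standalone sketch of the retry pattern that
# `Wait Until Keyword Succeeds` (documented above) implements: retry a
# callable a fixed number of times, sleeping between attempts, and re-raise
# the last error if every attempt fails. The helper name and the use of
# AssertionError are illustrative assumptions, not Robot Framework internals.
def _retry_example(func, retry_count=3, retry_interval=0.2):
    import time
    last_error = None
    for _ in range(retry_count):
        try:
            return func()                    # success: pass the return value on
        except AssertionError as err:        # normal failures are retried
            last_error = err
            time.sleep(retry_interval)       # wait before the next attempt
    raise AssertionError('Failed after %d attempts. The last error was: %s'
                         % (retry_count, last_error))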
class _Control(_BuiltInBase):
def continue_for_loop(self):
"""Skips the current for loop iteration and continues from the next.
Skips the remaining keywords in the current for loop iteration and
continues from the next one. Can be used directly in a for loop or
in a keyword that the loop uses.
Example:
| :FOR | ${var} | IN | @{VALUES} |
| | Run Keyword If | '${var}' == 'CONTINUE' | Continue For Loop |
| | Do Something | ${var} |
See `Continue For Loop If` to conditionally continue a for loop without
using `Run Keyword If` or other wrapper keywords.
"""
self.log("Continuing for loop from the next iteration.")
raise ContinueForLoop()
def continue_for_loop_if(self, condition):
"""Skips the current for loop iteration if the ``condition`` is true.
A wrapper for `Continue For Loop` to continue a for loop based on
the given condition. The condition is evaluated using the same
semantics as with `Should Be True` keyword.
Example:
| :FOR | ${var} | IN | @{VALUES} |
| | Continue For Loop If | '${var}' == 'CONTINUE' |
| | Do Something | ${var} |
"""
if self._is_true(condition):
self.continue_for_loop()
def exit_for_loop(self):
"""Stops executing the enclosing for loop.
Exits the enclosing for loop and continues execution after it.
Can be used directly in a for loop or in a keyword that the loop uses.
Example:
| :FOR | ${var} | IN | @{VALUES} |
| | Run Keyword If | '${var}' == 'EXIT' | Exit For Loop |
| | Do Something | ${var} |
See `Exit For Loop If` to conditionally exit a for loop without
using `Run Keyword If` or other wrapper keywords.
"""
self.log("Exiting for loop altogether.")
raise ExitForLoop()
def exit_for_loop_if(self, condition):
"""Stops executing the enclosing for loop if the ``condition`` is true.
A wrapper for `Exit For Loop` to exit a for loop based on
the given condition. The condition is evaluated using the same
semantics as with `Should Be True` keyword.
Example:
| :FOR | ${var} | IN | @{VALUES} |
| | Exit For Loop If | '${var}' == 'EXIT' |
| | Do Something | ${var} |
"""
if self._is_true(condition):
self.exit_for_loop()
@run_keyword_variant(resolve=0)
def return_from_keyword(self, *return_values):
"""Returns from the enclosing user keyword.
This keyword can be used to return from a user keyword with PASS status
without executing it fully. It is also possible to return values
similarly as with the ``[Return]`` setting. For more detailed information
about working with the return values, see the User Guide.
This keyword is typically wrapped to some other keyword, such as
`Run Keyword If` or `Run Keyword If Test Passed`, to return based
on a condition:
| Run Keyword If | ${rc} < 0 | Return From Keyword |
| Run Keyword If Test Passed | Return From Keyword |
It is possible to use this keyword to return from a keyword also inside
a for loop. That, as well as returning values, is demonstrated by the
`Find Index` keyword in the following somewhat advanced example.
Notice that it is often a good idea to move this kind of complicated
logic into a test library.
| ***** Variables *****
| @{LIST} = foo baz
|
| ***** Test Cases *****
| Example
| ${index} = Find Index baz @{LIST}
| Should Be Equal ${index} ${1}
| ${index} = Find Index non existing @{LIST}
| Should Be Equal ${index} ${-1}
|
| ***** Keywords *****
| Find Index
| [Arguments] ${element} @{items}
| ${index} = Set Variable ${0}
| :FOR ${item} IN @{items}
| \\ Run Keyword If '${item}' == '${element}' Return From Keyword ${index}
| \\ ${index} = Set Variable ${index + 1}
| Return From Keyword ${-1} # Also [Return] would work here.
The most common use case, returning based on an expression, can be
accomplished directly with `Return From Keyword If`. See also
`Run Keyword And Return` and `Run Keyword And Return If`.
"""
self._return_from_keyword(return_values)
def _return_from_keyword(self, return_values=None, failures=None):
self.log('Returning from the enclosing user keyword.')
raise ReturnFromKeyword(return_values, failures)
@run_keyword_variant(resolve=1)
def return_from_keyword_if(self, condition, *return_values):
"""Returns from the enclosing user keyword if ``condition`` is true.
A wrapper for `Return From Keyword` to return based on the given
condition. The condition is evaluated using the same semantics as
with `Should Be True` keyword.
Given the same example as in `Return From Keyword`, we can rewrite the
`Find Index` keyword as follows:
| ***** Keywords *****
| Find Index
| [Arguments] ${element} @{items}
| ${index} = Set Variable ${0}
| :FOR ${item} IN @{items}
| \\ Return From Keyword If '${item}' == '${element}' ${index}
| \\ ${index} = Set Variable ${index + 1}
| Return From Keyword ${-1} # Also [Return] would work here.
See also `Run Keyword And Return` and `Run Keyword And Return If`.
"""
if self._is_true(condition):
self._return_from_keyword(return_values)
@run_keyword_variant(resolve=1)
def run_keyword_and_return(self, name, *args):
"""Runs the specified keyword and returns from the enclosing user keyword.
The keyword to execute is defined with ``name`` and ``*args`` exactly
like with `Run Keyword`. After running the keyword, returns from the
enclosing user keyword and passes possible return value from the
executed keyword further. Returning from a keyword has exactly same
semantics as with `Return From Keyword`.
Example:
| `Run Keyword And Return` | `My Keyword` | arg1 | arg2 |
| # Above is equivalent to: |
| ${result} = | `My Keyword` | arg1 | arg2 |
| `Return From Keyword` | ${result} | | |
Use `Run Keyword And Return If` if you want to run keyword and return
based on a condition.
"""
try:
ret = self.run_keyword(name, *args)
except ExecutionFailed as err:
self._return_from_keyword(failures=[err])
else:
self._return_from_keyword(return_values=[escape(ret)])
@run_keyword_variant(resolve=2)
def run_keyword_and_return_if(self, condition, name, *args):
"""Runs the specified keyword and returns from the enclosing user keyword.
A wrapper for `Run Keyword And Return` to run and return based on
the given ``condition``. The condition is evaluated using the same
semantics as with `Should Be True` keyword.
Example:
| `Run Keyword And Return If` | ${rc} > 0 | `My Keyword` | arg1 | arg2 |
| # Above is equivalent to: |
| `Run Keyword If` | ${rc} > 0 | `Run Keyword And Return` | `My Keyword` | arg1 | arg2 |
Use `Return From Keyword If` if you want to return a certain value
based on a condition.
"""
if self._is_true(condition):
self.run_keyword_and_return(name, *args)
def pass_execution(self, message, *tags):
"""Skips rest of the current test, setup, or teardown with PASS status.
This keyword can be used anywhere in the test data, but the place where
used affects the behavior:
- When used in any setup or teardown (suite, test or keyword), passes
that setup or teardown. Possible keyword teardowns of the started
keywords are executed. Does not affect execution or statuses
otherwise.
- When used in a test outside setup or teardown, passes that particular
test case. Possible test and keyword teardowns are executed.
Possible continuable failures before this keyword is used, as well as
failures in executed teardowns, will fail the execution.
It is mandatory to give a message explaining why execution was passed.
By default the message is considered plain text, but starting it with
``*HTML*`` allows using HTML formatting.
It is also possible to modify test tags passing tags after the message
similarly as with `Fail` keyword. Tags starting with a hyphen
(e.g. ``-regression``) are removed and others added. Tags are modified
using `Set Tags` and `Remove Tags` internally, and the semantics
setting and removing them are the same as with these keywords.
Examples:
| Pass Execution | All features available in this version tested. |
| Pass Execution | Deprecated test. | deprecated | -regression |
This keyword is typically wrapped to some other keyword, such as
`Run Keyword If`, to pass based on a condition. The most common case
can be handled also with `Pass Execution If`:
| Run Keyword If | ${rc} < 0 | Pass Execution | Negative values are cool. |
| Pass Execution If | ${rc} < 0 | Negative values are cool. |
Passing execution in the middle of a test, setup or teardown should be
used with care. In the worst case it leads to tests that skip all the
parts that could actually uncover problems in the tested application.
In cases where execution cannot continue due to external factors,
it is often safer to fail the test case and make it non-critical.
"""
message = message.strip()
if not message:
raise RuntimeError('Message cannot be empty.')
self._set_and_remove_tags(tags)
log_message, level = self._get_logged_test_message_and_level(message)
self.log('Execution passed with message:\n%s' % log_message, level)
raise PassExecution(message)
@run_keyword_variant(resolve=1)
def pass_execution_if(self, condition, message, *tags):
"""Conditionally skips rest of the current test, setup, or teardown with PASS status.
A wrapper for `Pass Execution` to skip rest of the current test,
setup or teardown based on the given ``condition``. The condition is
evaluated similarly as with `Should Be True` keyword, and ``message``
and ``*tags`` have same semantics as with `Pass Execution`.
Example:
| :FOR | ${var} | IN | @{VALUES} |
| | Pass Execution If | '${var}' == 'EXPECTED' | Correct value was found |
| | Do Something | ${var} |
"""
if self._is_true(condition):
message = self._variables.replace_string(message)
tags = self._variables.replace_list(tags)
self.pass_execution(message, *tags)
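# Editor's note: a minimal sketch of the mechanism behind the control keywords
# above (`Continue For Loop`, `Exit For Loop`, `Return From Keyword`,
# `Pass Execution`): the keyword raises a marker exception and the framework's
# executor catches it at the appropriate level. The class and functions below
# are illustrative only, not Robot Framework internals.
class _ExitLoopExample(Exception):
    pass

def _keyword_body_example(item):
    if item == 'EXIT':
        raise _ExitLoopExample()             # like calling `Exit For Loop`

def _executor_loop_example(items):
    handled = []
    for item in items:
        try:
            _keyword_body_example(item)      # the "keyword" runs inside the loop
        except _ExitLoopExample:
            break                            # the "executor" catches the marker and stops
        handled.append(item)
    return handled                           # e.g. ['a', 'EXIT', 'b'] -> ['a']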
class _Misc(_BuiltInBase):
def no_operation(self):
"""Does absolutely nothing."""
def sleep(self, time_, reason=None):
"""Pauses the test executed for the given time.
``time`` may be either a number or a time string. Time strings are in
a format such as ``1 day 2 hours 3 minutes 4 seconds 5 milliseconds`` or
``1d 2h 3m 4s 5ms``, and they are fully explained in an appendix of
Robot Framework User Guide. Optional ``reason`` can be used to explain why
sleeping is necessary. Both the time slept and the reason are logged.
Examples:
| Sleep | 42 |
| Sleep | 1.5 |
| Sleep | 2 minutes 10 seconds |
| Sleep | 10s | Wait for a reply |
"""
seconds = timestr_to_secs(time_)
# Python hangs with negative values
if seconds < 0:
seconds = 0
self._sleep_in_parts(seconds)
self.log('Slept %s' % secs_to_timestr(seconds))
if reason:
self.log(reason)
def _sleep_in_parts(self, seconds):
# time.sleep can't be stopped in windows
# to ensure that we can signal stop (with timeout)
# split sleeping to small pieces
endtime = time.time() + float(seconds)
while True:
remaining = endtime - time.time()
if remaining <= 0:
break
time.sleep(min(remaining, 0.01))
def catenate(self, *items):
"""Catenates the given items together and returns the resulted string.
By default, items are catenated with spaces, but if the first item
contains the string ``SEPARATOR=<sep>``, the separator ``<sep>`` is
used instead. Items are converted into strings when necessary.
Examples:
| ${str1} = | Catenate | Hello | world | |
| ${str2} = | Catenate | SEPARATOR=--- | Hello | world |
| ${str3} = | Catenate | SEPARATOR= | Hello | world |
=>
| ${str1} = 'Hello world'
| ${str2} = 'Hello---world'
| ${str3} = 'Helloworld'
"""
if not items:
return ''
items = [unic(item) for item in items]
if items[0].startswith('SEPARATOR='):
sep = items[0][len('SEPARATOR='):]
items = items[1:]
else:
sep = ' '
return sep.join(items)
def log(self, message, level='INFO', html=False, console=False, repr=False):
u"""Logs the given message with the given level.
Valid levels are TRACE, DEBUG, INFO (default), HTML, WARN, and ERROR.
Messages below the current active log level are ignored. See
`Set Log Level` keyword and ``--loglevel`` command line option
for more details about setting the level.
Messages logged with the WARN or ERROR levels will be automatically
visible also in the console and in the Test Execution Errors section
in the log file.
Logging can be configured using optional ``html``, ``console`` and
``repr`` arguments. They are off by default, but can be enabled
by giving them a true value. See `Boolean arguments` section for more
information about true and false values.
If the ``html`` argument is given a true value, the message will be
considered HTML and special characters such as ``<`` in it are not
escaped. For example, logging ``<img src="image.png">`` creates an
image when ``html`` is true, but otherwise the message is that exact
string. An alternative to using the ``html`` argument is using the HTML
pseudo log level. It logs the message as HTML using the INFO level.
If the ``console`` argument is true, the message will be written to
the console where test execution was started from in addition to
the log file. This keyword always uses the standard output stream
and adds a newline after the written message. Use `Log To Console`
instead if either of these is undesirable.
If the ``repr`` argument is true, the given item will be passed through
a custom version of Python's ``pprint.pformat()`` function before
logging it. This is useful, for example, when working with strings or
bytes containing invisible characters, or when working with nested data
structures. The custom version differs from the standard one so that it
omits the ``u`` prefix from Unicode strings and adds ``b`` prefix to
byte strings on Python 2.
Examples:
| Log | Hello, world! | | | # Normal INFO message. |
| Log | Warning, world! | WARN | | # Warning. |
| Log | <b>Hello</b>, world! | html=yes | | # INFO message as HTML. |
| Log | <b>Hello</b>, world! | HTML | | # Same as above. |
| Log | <b>Hello</b>, world! | DEBUG | html=true | # DEBUG as HTML. |
| Log | Hello, console! | console=yes | | # Log also to the console. |
| Log | Hyv\xe4 \\x00 | repr=yes | | # Log ``'Hyv\\xe4 \\x00'``. |
See `Log Many` if you want to log multiple messages in one go, and
`Log To Console` if you only want to write to the console.
"""
if is_truthy(repr):
message = prepr(message, width=80)
logger.write(message, level, is_truthy(html))
if is_truthy(console):
logger.console(message)
@run_keyword_variant(resolve=0)
def log_many(self, *messages):
"""Logs the given messages as separate entries using the INFO level.
Supports also logging list and dictionary variable items individually.
Examples:
| Log Many | Hello | ${var} |
| Log Many | @{list} | &{dict} |
See `Log` and `Log To Console` keywords if you want to use alternative
log levels, use HTML, or log to the console.
"""
for msg in self._yield_logged_messages(messages):
self.log(msg)
def _yield_logged_messages(self, messages):
for msg in messages:
var = VariableSplitter(msg)
value = self._variables.replace_scalar(msg)
if var.is_list_variable():
for item in value:
yield item
elif var.is_dict_variable():
for name, value in value.items():
yield '%s=%s' % (name, value)
else:
yield value
def log_to_console(self, message, stream='STDOUT', no_newline=False):
"""Logs the given message to the console.
By default uses the standard output stream. Using the standard error
stream is possible by giving the ``stream`` argument value ``STDERR``
(case-insensitive).
By default appends a newline to the logged message. This can be
disabled by giving the ``no_newline`` argument a true value (see
`Boolean arguments`).
Examples:
| Log To Console | Hello, console! | |
| Log To Console | Hello, stderr! | STDERR |
| Log To Console | Message starts here and is | no_newline=true |
| Log To Console | continued without newline. | |
This keyword does not log the message to the normal log file. Use
`Log` keyword, possibly with argument ``console``, if that is desired.
"""
logger.console(message, newline=is_falsy(no_newline), stream=stream)
@run_keyword_variant(resolve=0)
def comment(self, *messages):
"""Displays the given messages in the log file as keyword arguments.
This keyword does nothing with the arguments it receives, but as they
are visible in the log, this keyword can be used to display simple
messages. Given arguments are ignored so thoroughly that they can even
contain non-existing variables. If you are interested in variable
values, you can use the `Log` or `Log Many` keywords.
"""
pass
def set_log_level(self, level):
"""Sets the log threshold to the specified level and returns the old level.
Messages below the level will not be logged. The default logging level is
INFO, but it can be overridden with the command line option
``--loglevel``.
The available levels: TRACE, DEBUG, INFO (default), WARN, ERROR and NONE (no
logging).
"""
try:
old = self._context.output.set_log_level(level)
except DataError as err:
raise RuntimeError(unic(err))
self._namespace.variables.set_global('${LOG_LEVEL}', level.upper())
self.log('Log level changed from %s to %s.' % (old, level.upper()))
return old
def reload_library(self, name_or_instance):
"""Rechecks what keywords the specified library provides.
Can be called explicitly in the test data or by a library itself
when keywords it provides have changed.
The library can be specified by its name or as the active instance of
the library. The latter is especially useful if the library itself
calls this keyword as a method.
New in Robot Framework 2.9.
"""
library = self._namespace.reload_library(name_or_instance)
self.log('Reloaded library %s with %s keywords.' % (library.name,
len(library)))
@run_keyword_variant(resolve=0)
def import_library(self, name, *args):
"""Imports a library with the given name and optional arguments.
This functionality allows dynamic importing of libraries while tests
are running. That may be necessary, if the library itself is dynamic
and not yet available when test data is processed. In a normal case,
libraries should be imported using the Library setting in the Setting
table.
This keyword supports importing libraries both using library
names and physical paths. When paths are used, they must be
given in absolute format or found from
[http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#pythonpath-jythonpath-and-ironpythonpath|
search path]. Forward slashes can be used as path separators in all
operating systems.
It is possible to pass arguments to the imported library and also
named argument syntax works if the library supports it. ``WITH NAME``
syntax can be used to give a custom name to the imported library.
Examples:
| Import Library | MyLibrary |
| Import Library | ${CURDIR}/../Library.py | arg1 | named=arg2 |
| Import Library | ${LIBRARIES}/Lib.java | arg | WITH NAME | JavaLib |
"""
try:
self._namespace.import_library(name, list(args))
except DataError as err:
raise RuntimeError(unic(err))
@run_keyword_variant(resolve=0)
def import_variables(self, path, *args):
"""Imports a variable file with the given path and optional arguments.
Variables imported with this keyword are set into the test suite scope
similarly as when importing them in the Setting table using the Variables
setting. These variables override possible existing variables with
the same names. This functionality can thus be used to import new
variables, for example, for each test in a test suite.
The given path must be absolute or found from
[http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#pythonpath-jythonpath-and-ironpythonpath|
search path]. Forward slashes can be used as the path separator regardless of
the operating system.
Examples:
| Import Variables | ${CURDIR}/variables.py | | |
| Import Variables | ${CURDIR}/../vars/env.py | arg1 | arg2 |
| Import Variables | file_from_pythonpath.py | | |
"""
try:
self._namespace.import_variables(path, list(args), overwrite=True)
except DataError as err:
raise RuntimeError(unic(err))
@run_keyword_variant(resolve=0)
def import_resource(self, path):
"""Imports a resource file with the given path.
Resources imported with this keyword are set into the test suite scope
similarly as when importing them in the Setting table using the Resource
setting.
The given path must be absolute or found from
[http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#pythonpath-jythonpath-and-ironpythonpath|
search path]. Forward slashes can be used as the path separator regardless of
the operating system.
Examples:
| Import Resource | ${CURDIR}/resource.txt |
| Import Resource | ${CURDIR}/../resources/resource.html |
| Import Resource | found_from_pythonpath.robot |
"""
try:
self._namespace.import_resource(path)
except DataError as err:
raise RuntimeError(unic(err))
def set_library_search_order(self, *search_order):
"""Sets the resolution order to use when a name matches multiple keywords.
The library search order is used to resolve conflicts when a keyword
name in the test data matches multiple keywords. The first library
(or resource, see below) containing the keyword is selected and that
keyword implementation is used. If the keyword is not found in any library
(or resource), test execution fails the same way as when the search
order is not set.
When this keyword is used, there is no need to use the long
``LibraryName.Keyword Name`` notation. For example, instead of
having
| MyLibrary.Keyword | arg |
| MyLibrary.Another Keyword |
| MyLibrary.Keyword | xxx |
you can have
| Set Library Search Order | MyLibrary |
| Keyword | arg |
| Another Keyword |
| Keyword | xxx |
This keyword can be used also to set the order of keywords in different
resource files. In this case resource names must be given without paths
or extensions like:
| Set Library Search Order | resource | another_resource |
*NOTE:*
- The search order is valid only in the suite where this keyword is used.
- Keywords in resources always have higher priority than
keywords in libraries regardless of the search order.
- The old order is returned and can be used to reset the search order later.
- Library and resource names in the search order are both case and space
insensitive.
"""
return self._namespace.set_search_order(search_order)
def keyword_should_exist(self, name, msg=None):
"""Fails unless the given keyword exists in the current scope.
Fails also if there is more than one keyword with the same name.
Works both with the short name (e.g. ``Log``) and the full name
(e.g. ``BuiltIn.Log``).
The default error message can be overridden with the ``msg`` argument.
See also `Variable Should Exist`.
"""
try:
runner = self._namespace.get_runner(name)
except DataError as error:
raise AssertionError(msg or error.message)
if isinstance(runner, UserErrorHandler):
raise AssertionError(msg or runner.error.message)
def get_time(self, format='timestamp', time_='NOW'):
"""Returns the given time in the requested format.
*NOTE:* DateTime library contains much more flexible keywords for
getting the current date and time and for date and time handling in
general.
How time is returned is determined based on the given ``format``
string as follows. Note that all checks are case-insensitive.
1) If ``format`` contains the word ``epoch``, the time is returned
in seconds after the UNIX epoch (1970-01-01 00:00:00 UTC).
The return value is always an integer.
2) If ``format`` contains any of the words ``year``, ``month``,
``day``, ``hour``, ``min``, or ``sec``, only the selected parts are
returned. The order of the returned parts is always the one
in the previous sentence and the order of words in ``format``
is not significant. The parts are returned as zero-padded
strings (e.g. May -> ``05``).
3) Otherwise (and by default) the time is returned as a
timestamp string in the format ``2006-02-24 15:08:31``.
By default this keyword returns the current local time, but
that can be altered using ``time`` argument as explained below.
Note that all checks involving strings are case-insensitive.
1) If ``time`` is a number, or a string that can be converted to
a number, it is interpreted as seconds since the UNIX epoch.
This documentation was originally written about 1177654467
seconds after the epoch.
2) If ``time`` is a timestamp, that time will be used. Valid
timestamp formats are ``YYYY-MM-DD hh:mm:ss`` and
``YYYYMMDD hhmmss``.
3) If ``time`` is equal to ``NOW`` (default), the current local
time is used.
4) If ``time`` is equal to ``UTC``, the current time in
[http://en.wikipedia.org/wiki/Coordinated_Universal_Time|UTC]
is used.
5) If ``time`` is in the format like ``NOW - 1 day`` or ``UTC + 1 hour
30 min``, the current local/UTC time plus/minus the time
specified with the time string is used. The time string format
is described in an appendix of Robot Framework User Guide.
Examples (expecting the current local time is 2006-03-29 15:06:21):
| ${time} = | Get Time | | | |
| ${secs} = | Get Time | epoch | | |
| ${year} = | Get Time | return year | | |
| ${yyyy} | ${mm} | ${dd} = | Get Time | year,month,day |
| @{time} = | Get Time | year month day hour min sec | | |
| ${y} | ${s} = | Get Time | seconds and year | |
=>
| ${time} = '2006-03-29 15:06:21'
| ${secs} = 1143637581
| ${year} = '2006'
| ${yyyy} = '2006', ${mm} = '03', ${dd} = '29'
| @{time} = ['2006', '03', '29', '15', '06', '21']
| ${y} = '2006'
| ${s} = '21'
Examples (expecting the current local time is 2006-03-29 15:06:21 and
UTC time is 2006-03-29 12:06:21):
| ${time} = | Get Time | | 1177654467 | # Time given as epoch seconds |
| ${secs} = | Get Time | sec | 2007-04-27 09:14:27 | # Time given as a timestamp |
| ${year} = | Get Time | year | NOW | # The local time of execution |
| @{time} = | Get Time | hour min sec | NOW + 1h 2min 3s | # 1h 2min 3s added to the local time |
| @{utc} = | Get Time | hour min sec | UTC | # The UTC time of execution |
| ${hour} = | Get Time | hour | UTC - 1 hour | # 1h subtracted from the UTC time |
=>
| ${time} = '2007-04-27 09:14:27'
| ${secs} = 27
| ${year} = '2006'
| @{time} = ['16', '08', '24']
| @{utc} = ['12', '06', '21']
| ${hour} = '11'
"""
return get_time(format, parse_time(time_))
def evaluate(self, expression, modules=None, namespace=None):
"""Evaluates the given expression in Python and returns the results.
``expression`` is evaluated in Python as explained in `Evaluating
expressions`.
``modules`` argument can be used to specify a comma separated
list of Python modules to be imported and added to the evaluation
namespace.
``namespace`` argument can be used to pass a custom evaluation
namespace as a dictionary. Possible ``modules`` are added to this
namespace.
Variables used like ``${variable}`` are replaced in the expression
before evaluation. Variables are also available in the evaluation
namespace and can be accessed using special syntax ``$variable``.
This is a new feature in Robot Framework 2.9 and it is explained more
thoroughly in `Evaluating expressions`.
Examples (expecting ``${result}`` is 3.14):
| ${status} = | Evaluate | 0 < ${result} < 10 | # Would also work with string '3.14' |
| ${status} = | Evaluate | 0 < $result < 10 | # Using variable itself, not string representation |
| ${random} = | Evaluate | random.randint(0, sys.maxint) | modules=random, sys |
| ${ns} = | Create Dictionary | x=${4} | y=${2} |
| ${result} = | Evaluate | x*10 + y | namespace=${ns} |
=>
| ${status} = True
| ${random} = <random integer>
| ${result} = 42
"""
if is_string(expression) and '$' in expression:
expression, variables = self._handle_variables_in_expression(expression)
else:
variables = {}
namespace = self._create_evaluation_namespace(namespace, modules)
try:
if not is_string(expression):
raise TypeError("Expression must be string, got %s."
% type_name(expression))
if not expression:
raise ValueError("Expression cannot be empty.")
return eval(expression, namespace, variables)
except:
raise RuntimeError("Evaluating expression '%s' failed: %s"
% (expression, get_error_message()))
def _handle_variables_in_expression(self, expression):
variables = None
variable_started = False
tokens = []
generated = generate_tokens(StringIO(expression).readline)
for toknum, tokval, _, _, _ in generated:
if variable_started:
if toknum == token.NAME:
if variables is None:
variables = self._variables.as_dict(decoration=False)
if tokval not in variables:
variable_not_found('$%s' % tokval, variables,
deco_braces=False)
tokval = 'RF_VAR_' + tokval
else:
tokens.append((token.ERRORTOKEN, '$'))
variable_started = False
if toknum == token.ERRORTOKEN and tokval == '$':
variable_started = True
else:
tokens.append((toknum, tokval))
if variables is None:
return expression, {}
decorated = [('RF_VAR_' + name, variables[name]) for name in variables]
return untokenize(tokens).strip(), NormalizedDict(decorated, ignore='_')
def _create_evaluation_namespace(self, namespace, modules):
namespace = dict(namespace or {})
modules = modules.replace(' ', '').split(',') if modules else []
namespace.update((m, __import__(m)) for m in modules if m)
return namespace
def call_method(self, object, method_name, *args, **kwargs):
"""Calls the named method of the given object with the provided arguments.
The possible return value from the method is returned and can be
assigned to a variable. The keyword fails if the object does not have
a method with the given name or if executing the method raises an
exception.
Support for ``**kwargs`` is new in Robot Framework 2.9. Since then,
possible equal signs in other arguments must be escaped with a
backslash like ``\\=``.
Examples:
| Call Method | ${hashtable} | put | myname | myvalue |
| ${isempty} = | Call Method | ${hashtable} | isEmpty | |
| Should Not Be True | ${isempty} | | | |
| ${value} = | Call Method | ${hashtable} | get | myname |
| Should Be Equal | ${value} | myvalue | | |
| Call Method | ${object} | kwargs | name=value | foo=bar |
| Call Method | ${object} | positional | escaped\\=equals |
"""
try:
method = getattr(object, method_name)
except AttributeError:
raise RuntimeError("Object '%s' does not have method '%s'."
% (object, method_name))
try:
return method(*args, **kwargs)
except:
raise RuntimeError("Calling method '%s' failed: %s"
% (method_name, get_error_message()))
def regexp_escape(self, *patterns):
"""Returns each argument string escaped for use as a regular expression.
This keyword can be used to escape strings to be used with
`Should Match Regexp` and `Should Not Match Regexp` keywords.
Escaping is done with Python's ``re.escape()`` function.
Examples:
| ${escaped} = | Regexp Escape | ${original} |
| @{strings} = | Regexp Escape | @{strings} |
"""
if len(patterns) == 0:
return ''
if len(patterns) == 1:
return re.escape(patterns[0])
return [re.escape(p) for p in patterns]
def set_test_message(self, message, append=False):
"""Sets message for the current test case.
If the optional ``append`` argument is given a true value (see `Boolean
arguments`), the given ``message`` is added after the possible earlier
message by joining the messages with a space.
In test teardown this keyword can alter the possible failure message,
but otherwise failures override messages set by this keyword. Notice
that in teardown the message is available as a built-in variable
``${TEST MESSAGE}``.
It is possible to use HTML format in the message by starting the message
with ``*HTML*``.
Examples:
| Set Test Message | My message | |
| Set Test Message | is continued. | append=yes |
| Should Be Equal | ${TEST MESSAGE} | My message is continued. |
| Set Test Message | `*`HTML`*` <b>Hello!</b> | |
This keyword can not be used in suite setup or suite teardown.
"""
test = self._context.test
if not test:
raise RuntimeError("'Set Test Message' keyword cannot be used in "
"suite setup or teardown.")
test.message = self._get_new_text(test.message, message,
append, handle_html=True)
if self._context.in_test_teardown:
self._variables.set_test("${TEST_MESSAGE}", test.message)
message, level = self._get_logged_test_message_and_level(test.message)
self.log('Set test message to:\n%s' % message, level)
def _get_new_text(self, old, new, append, handle_html=False):
if not is_unicode(new):
new = unic(new)
if not (is_truthy(append) and old):
return new
if handle_html:
if new.startswith('*HTML*'):
new = new[6:].lstrip()
if not old.startswith('*HTML*'):
old = '*HTML* %s' % html_escape(old)
elif old.startswith('*HTML*'):
new = html_escape(new)
return '%s %s' % (old, new)
def _get_logged_test_message_and_level(self, message):
if message.startswith('*HTML*'):
return message[6:].lstrip(), 'HTML'
return message, 'INFO'
def set_test_documentation(self, doc, append=False):
"""Sets documentation for the current test case.
By default the possible existing documentation is overwritten, but
this can be changed using the optional ``append`` argument similarly
as with `Set Test Message` keyword.
The current test documentation is available as a built-in variable
``${TEST DOCUMENTATION}``. This keyword can not be used in suite
setup or suite teardown.
"""
test = self._context.test
if not test:
raise RuntimeError("'Set Test Documentation' keyword cannot be "
"used in suite setup or teardown.")
test.doc = self._get_new_text(test.doc, doc, append)
self._variables.set_test('${TEST_DOCUMENTATION}', test.doc)
self.log('Set test documentation to:\n%s' % test.doc)
def set_suite_documentation(self, doc, append=False, top=False):
"""Sets documentation for the current test suite.
By default the possible existing documentation is overwritten, but
this can be changed using the optional ``append`` argument similarly
as with `Set Test Message` keyword.
This keyword sets the documentation of the current suite by default.
If the optional ``top`` argument is given a true value (see `Boolean
arguments`), the documentation of the top level suite is altered
instead.
The documentation of the current suite is available as a built-in
variable ``${SUITE DOCUMENTATION}``.
"""
top = is_truthy(top)
suite = self._get_context(top).suite
suite.doc = self._get_new_text(suite.doc, doc, append)
self._variables.set_suite('${SUITE_DOCUMENTATION}', suite.doc, top)
self.log('Set suite documentation to:\n%s' % suite.doc)
def set_suite_metadata(self, name, value, append=False, top=False):
"""Sets metadata for the current test suite.
By default possible existing metadata values are overwritten, but
this can be changed using the optional ``append`` argument similarly
as with `Set Test Message` keyword.
This keyword sets the metadata of the current suite by default.
If the optional ``top`` argument is given a true value (see `Boolean
arguments`), the metadata of the top level suite is altered instead.
The metadata of the current suite is available as a built-in variable
``${SUITE METADATA}`` in a Python dictionary. Notice that modifying this
variable directly has no effect on the actual metadata the suite has.
"""
top = is_truthy(top)
if not is_unicode(name):
name = unic(name)
metadata = self._get_context(top).suite.metadata
original = metadata.get(name, '')
metadata[name] = self._get_new_text(original, value, append)
self._variables.set_suite('${SUITE_METADATA}', metadata.copy(), top)
self.log("Set suite metadata '%s' to value '%s'." % (name, metadata[name]))
def set_tags(self, *tags):
"""Adds given ``tags`` for the current test or all tests in a suite.
When this keyword is used inside a test case, that test gets
the specified tags and other tests are not affected.
If this keyword is used in a suite setup, all test cases in
that suite, recursively, gets the given tags. It is a failure
to use this keyword in a suite teardown.
The current tags are available as a built-in variable ``@{TEST TAGS}``.
See `Remove Tags` if you want to remove certain tags and `Fail` if
you want to fail the test case after setting and/or removing tags.
"""
ctx = self._context
if ctx.test:
ctx.test.tags.add(tags)
ctx.variables.set_test('@{TEST_TAGS}', list(ctx.test.tags))
elif not ctx.in_suite_teardown:
ctx.suite.set_tags(tags, persist=True)
else:
raise RuntimeError("'Set Tags' cannot be used in suite teardown.")
self.log('Set tag%s %s.' % (s(tags), seq2str(tags)))
def remove_tags(self, *tags):
"""Removes given ``tags`` from the current test or all tests in a suite.
Tags can be given exactly or using a pattern with ``*``, ``?`` and
``[chars]`` acting as wildcards. See the `Glob patterns` section
for more information.
This keyword can affect either one test case or all test cases in a
test suite similarly as `Set Tags` keyword.
The current tags are available as a built-in variable ``@{TEST TAGS}``.
Example:
| Remove Tags | mytag | something-* | ?ython |
See `Set Tags` if you want to add certain tags and `Fail` if you want
to fail the test case after setting and/or removing tags.
"""
ctx = self._context
if ctx.test:
ctx.test.tags.remove(tags)
ctx.variables.set_test('@{TEST_TAGS}', list(ctx.test.tags))
elif not ctx.in_suite_teardown:
ctx.suite.set_tags(remove=tags, persist=True)
else:
raise RuntimeError("'Remove Tags' cannot be used in suite teardown.")
self.log('Removed tag%s %s.' % (s(tags), seq2str(tags)))
def get_library_instance(self, name=None, all=False):
"""Returns the currently active instance of the specified test library.
This keyword makes it easy for test libraries to interact with
other test libraries that have state. This is illustrated by
the Python example below:
| from robot.libraries.BuiltIn import BuiltIn
|
| def title_should_start_with(expected):
| seleniumlib = BuiltIn().get_library_instance('SeleniumLibrary')
| title = seleniumlib.get_title()
| if not title.startswith(expected):
| raise AssertionError("Title '%s' did not start with '%s'"
| % (title, expected))
It is also possible to use this keyword in the test data and
pass the returned library instance to another keyword. If a
library is imported with a custom name, the ``name`` used to get
the instance must be that name and not the original library name.
If the optional argument ``all`` is given a true value, then a
dictionary mapping all library names to instances will be returned.
This feature is new in Robot Framework 2.9.2.
Example:
| &{all libs} = | Get library instance | all=True |
"""
if is_truthy(all):
return self._namespace.get_library_instances()
try:
return self._namespace.get_library_instance(name)
except DataError as err:
raise RuntimeError(unic(err))
class BuiltIn(_Verify, _Converter, _Variables, _RunKeyword, _Control, _Misc):
"""An always available standard library with often needed keywords.
``BuiltIn`` is Robot Framework's standard library that provides a set
of generic keywords needed often. It is imported automatically and
thus always available. The provided keywords can be used, for example,
for verifications (e.g. `Should Be Equal`, `Should Contain`),
conversions (e.g. `Convert To Integer`) and for various other purposes
(e.g. `Log`, `Sleep`, `Run Keyword If`, `Set Global Variable`).
== Table of contents ==
- `HTML error messages`
- `Evaluating expressions`
- `Boolean arguments`
- `Pattern matching`
- `Multiline string comparisons`
- `Shortcuts`
- `Keywords`
= HTML error messages =
Many of the keywords accept an optional error message to use if the keyword
fails, and it is possible to use HTML in these messages by prefixing them
with ``*HTML*``. See `Fail` keyword for a usage example. Notice that using
HTML in messages is not limited to BuiltIn library but works with any
error message.
= Evaluating expressions =
Many keywords, such as `Evaluate`, `Run Keyword If` and `Should Be True`,
accept an expression that is evaluated in Python. These expressions are
evaluated using Python's
[http://docs.python.org/library/functions.html#eval|eval] function so
that all Python built-ins like ``len()`` and ``int()`` are available.
`Evaluate` allows configuring the execution namespace with custom modules,
and other keywords have [http://docs.python.org/library/os.html|os]
and [http://docs.python.org/library/sys.html|sys] modules available
automatically.
Examples:
| `Run Keyword If` | os.sep == '/' | Log | Not on Windows |
| ${random int} = | `Evaluate` | random.randint(0, 5) | modules=random |
When a variable is used in the expression using the normal ``${variable}``
syntax, its value is replaced before the expression is evaluated. This
means that the value used in the expression will be the string
representation of the variable value, not the variable value itself.
This is not a problem with numbers and other objects that have a string
representation that can be evaluated directly, but with other objects
the behavior depends on the string representation. Most importantly,
strings must always be quoted, and if they can contain newlines, they must
be triple quoted.
Examples:
| `Should Be True` | ${rc} < 10 | Return code greater than 10 |
| `Run Keyword If` | '${status}' == 'PASS' | Log | Passed |
| `Run Keyword If` | 'FAIL' in '''${output}''' | Log | Output contains FAIL |
Starting from Robot Framework 2.9, variables themselves are automatically
available in the evaluation namespace. They can be accessed using special
variable syntax without the curly braces like ``$variable``. These
variables should never be quoted, and in fact they are not even replaced
inside strings.
Examples:
| `Should Be True` | $rc < 10 | Return code greater than 10 |
| `Run Keyword If` | $status == 'PASS' | `Log` | Passed |
| `Run Keyword If` | 'FAIL' in $output | `Log` | Output contains FAIL |
| `Should Be True` | len($result) > 1 and $result[1] == 'OK' |
Using the ``$variable`` syntax slows down expression evaluation a little.
This should not typically matter, but should be taken into account if
complex expressions are evaluated often and there are strict time
constraints.
Notice that instead of creating complicated expressions, it is often better
to move the logic into a test library. That eases maintenance and can also
enhance execution speed.
= Boolean arguments =
Some keywords accept arguments that are handled as Boolean values true or
false. If such an argument is given as a string, it is considered false if
it is an empty string or equal to ``FALSE``, ``NONE``, ``NO``, ``OFF`` or
``0``, case-insensitively. Keywords that verify something and that allow dropping
actual and expected values from the possible error message also consider
the string ``no values`` to be false. Other strings are considered true
regardless their value, and other argument types are tested using the same
[http://docs.python.org/library/stdtypes.html#truth|rules as in Python].
True examples:
| `Should Be Equal` | ${x} | ${y} | Custom error | values=True | # Strings are generally true. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=yes | # Same as the above. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=${TRUE} | # Python ``True`` is true. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=${42} | # Numbers other than 0 are true. |
False examples:
| `Should Be Equal` | ${x} | ${y} | Custom error | values=False | # String ``false`` is false. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=no | # Also string ``no`` is false. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=${EMPTY} | # Empty string is false. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=${FALSE} | # Python ``False`` is false. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=no values | # ``no values`` works with ``values`` argument |
Considering string ``NONE`` false is new in Robot Framework 3.0.3 and
considering also ``OFF`` and ``0`` false is new in Robot Framework 3.1.
= Pattern matching =
Many keywords accept arguments as either glob or regular expression
patterns.
== Glob patterns ==
Some keywords, for example `Should Match`, support so called
[http://en.wikipedia.org/wiki/Glob_(programming)|glob patterns] where:
| ``*`` | matches any string, even an empty string |
| ``?`` | matches any single character |
| ``[chars]`` | matches one character in the bracket |
| ``[!chars]`` | matches one character not in the bracket |
| ``[a-z]`` | matches one character from the range in the bracket |
| ``[!a-z]`` | matches one character not from the range in the bracket |
Unlike with glob patterns normally, path separator characters ``/`` and
``\\`` and the newline character ``\\n`` are matched by the above
wildcards.
Support for brackets like ``[abc]`` and ``[!a-z]`` is new in
Robot Framework 3.1.
== Regular expressions ==
Some keywords, for example `Should Match Regexp`, support
[http://en.wikipedia.org/wiki/Regular_expression|regular expressions]
that are more powerful but also more complicated than glob patterns.
The regular expression support is implemented using Python's
[http://docs.python.org/library/re.html|re module] and its documentation
should be consulted for more information about the syntax.
Because the backslash character (``\\``) is an escape character in
Robot Framework test data, possible backslash characters in regular
expressions need to be escaped with another backslash like ``\\\\d\\\\w+``.
Strings that may contain special characters but should be handled
as literal strings, can be escaped with the `Regexp Escape` keyword.
= Multiline string comparisons =
`Should Be Equal` and `Should Be Equal As Strings` report the failures using
[http://en.wikipedia.org/wiki/Diff_utility#Unified_format|unified diff
format] if both strings have more than two lines. New in Robot Framework
2.9.1.
Example:
| ${first} = | `Catenate` | SEPARATOR=\\n | Not in second | Same | Differs | Same |
| ${second} = | `Catenate` | SEPARATOR=\\n | Same | Differs2 | Same | Not in first |
| `Should Be Equal` | ${first} | ${second} |
Results in the following error message:
| Multiline strings are different:
| --- first
| +++ second
| @@ -1,4 +1,4 @@
| -Not in second
| Same
| -Differs
| +Differs2
| Same
| +Not in first
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = get_version()
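# Editor's note: a minimal sketch of the unified diff failure message format
# described in the `Multiline string comparisons` section above, produced with
# Python's standard difflib module. Illustrative only; not the exact code that
# BuiltIn uses internally.
def _multiline_diff_example(first, second):
    import difflib
    diff = difflib.unified_diff(first.splitlines(), second.splitlines(),
                                fromfile='first', tofile='second', lineterm='')
    return 'Multiline strings are different:\n' + '\n'.join(diff)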
class RobotNotRunningError(AttributeError):
"""Used when something cannot be done because Robot is not running.
Based on AttributeError to be backwards compatible with RF < 2.8.5.
May later be based directly on Exception, so new code should except
this exception explicitly.
"""
pass
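# Editor's note: a hedged sketch of the "except this exception explicitly"
# advice given in the RobotNotRunningError docstring above. Library code that
# may also be imported outside a test run (for example by Libdoc) can fall
# back gracefully. The helper name and the fallback value are assumptions.
def _output_dir_or_none_example():
    try:
        return BuiltIn().get_variable_value('${OUTPUT DIR}')
    except RobotNotRunningError:
        return None                          # no execution context available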
def register_run_keyword(library, keyword, args_to_process=None,
deprecation_warning=True):
"""Registers 'run keyword' so that its arguments can be handled correctly.
*NOTE:* This API will change in RF 3.1. For more information see
https://github.com/robotframework/robotframework/issues/2190. Use with
`deprecation_warning=False` to avoid related deprecation warnings.
1) Why is this method needed
Keywords running other keywords internally (normally using `Run Keyword`
or some variants of it in BuiltIn) must have the arguments meant to the
internally executed keyword handled specially to prevent processing them
twice. This is done ONLY for keywords registered using this method.
If the registered keyword has the same name as any keyword from the Robot
Framework standard libraries, it can be used without getting warnings. Normally
there is a warning in such cases unless the keyword is used in long
format (e.g. MyLib.Keyword).
Keywords executed by registered run keywords can be tested in dry-run mode
if they have a 'name' argument which takes the name of the executed keyword.
2) How to use this method
`library` is the name of the library where the registered keyword is
implemented.
`keyword` can be either a function or method implementing the
keyword, or name of the implemented keyword as a string.
`args_to_process` is needed when `keyword` is given as a string, and it
defines how many of the arguments to the registered keyword must be
processed normally. When `keyword` is a method or function, this
information is got directly from it so that varargs (those specified with
syntax '*args') are not processed but others are.
3) Examples
from robot.libraries.BuiltIn import BuiltIn, register_run_keyword
def my_run_keyword(name, *args):
# do something
return BuiltIn().run_keyword(name, *args)
# Either one of these works
register_run_keyword(__name__, my_run_keyword)
register_run_keyword(__name__, 'My Run Keyword', 1)
-------------
from robot.libraries.BuiltIn import BuiltIn, register_run_keyword
class MyLibrary:
def my_run_keyword_if(self, expression, name, *args):
# do something
return BuiltIn().run_keyword_if(expression, name, *args)
# Either one of these works
register_run_keyword('MyLibrary', MyLibrary.my_run_keyword_if)
register_run_keyword('MyLibrary', 'my_run_keyword_if', 2)
"""
RUN_KW_REGISTER.register_run_keyword(library, keyword, args_to_process,
deprecation_warning)
|
the-stack_0_5702 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Convert caffe mean
"""
import argparse
import numpy as np
import mxnet as mx
import caffe_parser
def convert_mean(binaryproto_fname, output=None):
"""Convert caffe mean
Parameters
----------
binaryproto_fname : str
Filename of the mean
output : str, optional
Save the mean into mxnet's format
Returns
-------
NDArray
Mean in ndarray
"""
mean_blob = caffe_parser.caffe_pb2.BlobProto()
with open(binaryproto_fname, 'rb') as f:
mean_blob.ParseFromString(f.read())
img_mean_np = np.array(mean_blob.data)
img_mean_np = img_mean_np.reshape(
mean_blob.channels, mean_blob.height, mean_blob.width
)
# swap channels from Caffe BGR to RGB
img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :]
nd = mx.nd.array(img_mean_np)
if output is not None:
mx.nd.save(output, {"mean_image": nd})
return nd
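# Editor's note: a small usage sketch for convert_mean(). The file names are
# hypothetical; the saved file can be read back with mx.nd.load() and the
# array is stored under the "mean_image" key used above.
def _usage_example():
    mean = convert_mean('mean.binaryproto', output='mean.nd')
    reloaded = mx.nd.load('mean.nd')['mean_image']   # same array, loaded from disk
    return mean, reloaded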
def main():
parser = argparse.ArgumentParser(description='Convert caffe mean')
parser.add_argument('binaryproto_fname', help='Filename of the mean')
parser.add_argument('output', help='The name of the output file')
args = parser.parse_args()
convert_mean(args.binaryproto_fname, args.output)
if __name__ == '__main__':
main()
|
the-stack_0_5704 | import scapy.all as scapy
import sys
# Send 10 VLAN packets with data = "Test"
eth_src = "00:00:00:00:00:01" #Host 1
eth_dst = "00:00:00:00:00:02" #Host 2
eth_type = 0x8100 #VLAN
data = "Test" #Data to send
total_packets = 10 #Number of packets to send
l2packet = scapy.Ether(type=eth_type, src=eth_src, dst=eth_dst)/data  # Creates an L2 Ethernet frame with EtherType 0x8100 and the data as its payload
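# Editor's note: l2packet above only sets EtherType 0x8100 on the Ethernet
# header; it does not carry an actual 802.1Q tag. A hedged sketch of a fully
# tagged frame would use scapy's Dot1Q layer (VLAN ID 10 is an arbitrary
# example value); it is built here but not sent.
vlan_tagged = scapy.Ether(src=eth_src, dst=eth_dst) / scapy.Dot1Q(vlan=10) / data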
scapy.sendp(l2packet, count=total_packets) |
the-stack_0_5706 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import wx
from wx import Colour
from wx.adv import HyperlinkCtrl, EVT_HYPERLINK
from .preferences_dialogs import PreferencesPanel
from ..widgets import RIDEDialog, HtmlWindow
class ExcludePreferences(PreferencesPanel):
location = ('Excludes')
title = 'Excludes'
def __init__(self, settings, *args, **kwargs):
super(ExcludePreferences, self).__init__(*args, **kwargs)
self._settings = settings
self._general_settings = self._settings['General']
self.font = self.GetFont()
self.font.SetFaceName(self._general_settings['font face'])
self.font.SetPointSize(self._general_settings['font size'])
self.SetFont(self.font)
self.SetBackgroundColour(Colour(self._general_settings['background']))
self.color_secondary_background = Colour(self._general_settings['secondary background'])
self.SetForegroundColour(Colour(self._general_settings['foreground']))
self.color_secondary_foreground = Colour(self._general_settings['secondary foreground'])
self._create_sizer()
def _create_sizer(self):
sizer = wx.BoxSizer(orient=wx.VERTICAL)
self._add_help_dialog(sizer)
self._add_text_box(sizer)
self._add_button_and_status(sizer)
self.SetSizer(sizer)
def _add_help_dialog(self, sizer):
need_help = HyperlinkCtrl(self, wx.ID_ANY, '', 'Need help?')
need_help.SetBackgroundColour(Colour(self.color_secondary_background))
need_help.SetForegroundColour(Colour(self.color_secondary_foreground))
sizer.Add(need_help)
self.Bind(EVT_HYPERLINK, self.OnHelp)
def _add_text_box(self, sizer):
self._text_box = wx.TextCtrl(self,
style=wx.TE_MULTILINE|wx.TE_NOHIDESEL,
size=wx.Size(570, 100),
value=self._settings.excludes.get_excludes())
self._text_box.SetBackgroundColour(Colour(self.color_secondary_background))
self._text_box.SetForegroundColour(Colour(self.color_secondary_foreground))
sizer.Add(self._text_box, proportion=wx.EXPAND)
def _add_button_and_status(self, sizer):
# DEBUG wxPhoenix
status_and_button_sizer = wx.GridSizer(rows=1, cols=2, vgap=10, hgap=10)
save_button = wx.Button(self, id=wx.ID_SAVE)
save_button.SetBackgroundColour(Colour(self.color_secondary_background))
save_button.SetForegroundColour(Colour(self.color_secondary_foreground))
status_and_button_sizer.Add(save_button)
self.Bind(wx.EVT_BUTTON, self.OnSave)
self._status_label = wx.StaticText(self)
status_and_button_sizer.Add(self._status_label)
sizer.Add(status_and_button_sizer)
def OnSave(self, event):
text = self._text_box.GetValue()
self._settings.excludes.write_excludes(set(text.split('\n')))
        save_label = 'Saved at %s. Reload the project for changes to take effect.' %\
datetime.now().strftime('%H:%M:%S')
self._status_label.SetLabel(save_label)
def OnHelp(self, event):
dialog = ExcludeHelpDialog()
dialog.Show()
class ExcludeHelpDialog(RIDEDialog):
help = """<font size="5">
<h1>Excludes</h1>
<p>
Paths to excludes are described in the text box, one exclude per row.
These excludes are saved in a file which is located at $HOME/.robotframework/ride/excludes on POSIX-systems and
%APPDATA%\\RobotFramework\\ride\\excludes on Windows.
</p>
<p>
You can edit excludes yourself using either the text box or by editing the file with an editor. After hitting "Save", close
the Preferences window and reload the project to make the edited excludes take effect. You can reload the project by
selecting "File" from the main menu bar and then selecting your project from the list in view.
</p>
<h2>Patterns in paths</h2>
<p>
RIDE supports defining excludes with absolute paths. You can achieve relative paths with path patterns which are
also supported.
</p>
<p>
The following shell-style wildcards are supported:
<table width="100%" border="1">
<thead>
<th><b>Pattern</b></th>
<th><b>Meaning</b></th>
<th><b>Examples</b></th>
</thead>
<tbody>
<tr>
<td valign="top" align="center">*</td>
<td valign="top" align="center">matches everything</td>
<td valign="top" align="left">
Pattern /foo/*/quu matches:
<ul>
<li>/foo/bar/quu</li>
<li>/foo/corge/quu</li>
<li><i>etc.</i></li>
</ul>
</td>
</tr>
<tr>
<td valign="top" align="center">?</td>
<td valign="top" align="center">matches any single character</td>
<td valign="top" align="left">
Pattern C:\MyProject\?oo matches:
<ul>
<li>C:\MyProject\\foo</li>
<li>C:\MyProject\\boo</li>
<li><i>etc.</i></li>
</ul>
</td>
</tr>
<tr>
<td valign="top" align="center">[seq]</td>
<td valign="top" align="center">matches any character in <i>seq</i></td>
<td valign="top" align="left">
Pattern C:\MyProject\[bf]oo matches:
<ul>
<li>C:\MyProject\\foo</li>
<li>C:\MyProject\\boo</li>
<li><i>and nothing else</i></li>
</ul>
</td>
</tr>
<tr>
<td valign="top" align="center">[!seq]</td>
<td valign="top" align="center">matches any character not in <i>seq</i></td>
<td valign="top" align="left">
Pattern /foo/[!q]uu matches:
<ul>
<li>/foo/zuu</li>
<li><i>etc.</i></li>
</ul>
But does not match:
<ul>
<li>/foo/quu</li>
</ul>
</td>
</tr>
</tbody>
</table>
</p>
</font>"""
def __init__(self):
RIDEDialog.__init__(self, title='Help: excludes')
# set Left to Right direction (while we don't have localization)
self.SetLayoutDirection(wx.Layout_LeftToRight)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(HtmlWindow(self, (800, 600), self.help),
1,
flag=wx.EXPAND)
self.SetSizerAndFit(sizer)
def OnKey(self, *args):
pass
def close(self):
self.Destroy()
|
the-stack_0_5707 | import pygame as pg
import gym_gvgai as gvg
class Game:
def __init__(self, game, lvl):
self.env = gvg.make('gvgai-' + game + '-' + lvl + '-v0')
self.stateObs = self.env.reset()
size = (len(self.stateObs), len(self.stateObs[0]))
self.transpose = size[0] < size[1]
if self.transpose:
self.size = (size[1]*2, size[0]*2)
else:
self.size = (size[0]*2, size[1]*2)
self.done = False
self.score = 0
self.frame = 0
self.nAction = self.env.action_space.n
def start(self, agent, maxT=1000, printLog=True, visualized=True, fps=10):
if visualized:
clk = pg.time.Clock()
screen = pg.display.set_mode(self.size)
for i in range(maxT):
clk.tick(fps)
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
self.update(agent, printLog)
self.draw(screen)
pg.display.flip()
if self.done:
print('---------------------------\nFinish. Final score = %d' % self.score)
return
else:
for i in range(maxT):
self.update(agent, printLog)
if self.done:
print('---------------------------\nFinish. Final score = %d' % self.score)
return
def humanPlay(self):
        print('Use direction keys to move, z key to take other actions (if they exist in this game).')
screen = pg.display.set_mode(self.size)
while not self.done:
evt = pg.event.wait()
if evt.type == pg.QUIT:
pg.quit()
self.done = True
            elif evt.type == pg.KEYUP:  # key release (numeric value 3 under pygame 1.x)
self.playerAct(self.parseKey(evt))
if self.done:
print('---------------------------\nFinish. Final score = %d' % self.score)
return
self.draw(screen)
pg.display.flip()
def parseKey(self, evt):
if evt.key == pg.K_z:
if self.nAction > 5:
return 1
else:
return 0
if evt.key == pg.K_x:
if self.nAction > 6:
return 2
else:
return 0
elif evt.key == pg.K_UP:
return self.nAction-1
elif evt.key == pg.K_DOWN:
return self.nAction-2
elif evt.key == pg.K_RIGHT:
return self.nAction - 3
elif evt.key == pg.K_LEFT:
return self.nAction - 4
else:
return 0
def playerAct(self, actionID):
self.stateObs, reward, self.done, debug = self.env.step(actionID)
self.score += reward
self.frame += 1
print('frame%d, action:%d, reward:%d, score:%d' % (self.frame, actionID, reward, self.score))
def update(self, agent, printLog=True):
action_id = agent.act(self.stateObs, self.env.action_space)
self.stateObs, reward, self.done, debug = self.env.step(action_id)
self.score += reward
self.frame += 1
if printLog:
print('frame%d, action:%d, reward:%d, score:%d' % (self.frame, action_id, reward, self.score))
def draw(self, screen):
buffer = pg.pixelcopy.make_surface(self.stateObs[:, :, :3])
pa = pg.PixelArray(buffer)
if self.transpose:
pa = pa.transpose()
screen.blit(pg.transform.scale(pa.make_surface(), self.size), (0, 0))
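# Minimal usage sketch (the game/level names below are assumptions; an agent passed to
# start() only needs an act(stateObs, action_space) method, as used in Game.update above):
#   game = Game('aliens', 'lvl0')
#   game.humanPlay()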
|
the-stack_0_5708 | import collections
import copy
from builtins import range
from typing import Union
import numpy as np
from speclite.filters import FilterResponse, FilterSequence
from threeML.plugins.XYLike import XYLike
from threeML.utils.photometry import FilterSet, PhotometericObservation
__instrument_name = "Generic photometric data"
class BandNode(object):
def __init__(self, name, index, value, mask):
"""
        Container class that allows for turning bands on and off
"""
self._name = name
self._index = index
self._mask = mask
self._value = value
self._on = True
def _set_on(self, value=True):
self._on = value
self._mask[self._index] = self._on
def _get_on(self):
return self._on
on = property(_get_on, _set_on,
doc="Turn on or off the band. Use booleans, like: 'p.on = True' "
" or 'p.on = False'. ")
# Define property "fix"
def _set_off(self, value=True):
self._on = (not value)
self._mask[self._index] = self._on
def _get_off(self):
return not self._on
off = property(_get_off, _set_off,
doc="Turn on or off the band. Use booleans, like: 'p.off = True' "
" or 'p.off = False'. ")
def __repr__(self):
return f"on: {self._on}\nvalue: {self._value}"
class PhotometryLike(XYLike):
def __init__(self, name: str,
filters: Union[FilterSequence, FilterResponse],
observation: PhotometericObservation):
"""
        The photometry plugin is designed to fit optical/IR/UV photometric data from a given
        filter system. Filters are given in the form of speclite (http://speclite.readthedocs.io)
        FilterResponse or FilterSequence objects. 3ML contains a vast number of filters via the SVO
VO service: http://svo2.cab.inta-csic.es/svo/theory/fps/ and can be accessed via:
from threeML.utils.photometry import get_photometric_filter_library
filter_lib = get_photometric_filter_library()
Bands can be turned on and off by setting
plugin.band_<band name>.on = False/True
plugin.band_<band name>.off = False/True
:param name: plugin name
:param filters: speclite filters
:param observation: A PhotometricObservation instance
"""
assert isinstance(
observation, PhotometericObservation), "Observation must be PhotometricObservation"
# convert names so that only the filters are present
# speclite uses '-' to separate instrument and filter
if isinstance(filters, FilterSequence):
# we have a filter sequence
names = [fname.split("-")[1] for fname in filters.names]
elif isinstance(filters, FilterResponse):
# we have a filter response
names = [filters.name.split("-")[1]]
filters = FilterSequence([filters])
else:
            raise RuntimeError(
                "filters must be a FilterResponse or a FilterSequence")
# since we may only have a few of the filters in use
        # we will mask the filters not needed. They will stay fixed
# during the life of the plugin
assert observation.is_compatible_with_filter_set(
filters), "The data and filters are not congruent"
mask = observation.get_mask_from_filter_sequence(filters)
assert mask.sum() > 0, "There are no data in this observation!"
# create a filter set and use only the bands that were specified
self._filter_set = FilterSet(filters, mask)
self._magnitudes = np.zeros(self._filter_set.n_bands)
self._magnitude_errors = np.zeros(self._filter_set.n_bands)
# we want to fill the magnitudes in the same order as the
# the filters
for i, band in enumerate(self._filter_set.filter_names):
self._magnitudes[i] = observation[band][0]
self._magnitude_errors[i] = observation[band][1]
self._observation = observation
# pass thru to XYLike
super(PhotometryLike, self).__init__(
name=name,
x=self._filter_set.effective_wavelength, # dummy x values
y=self._magnitudes,
yerr=self._magnitude_errors,
poisson_data=False,
)
        # now set up the band nodes that toggle the mask
for i, band in enumerate(self._filter_set.filter_names):
node = BandNode(band, i, (self._magnitudes[i], self._magnitude_errors[i]),
self._mask)
setattr(self, f"band_{band}", node)
@classmethod
def from_kwargs(cls, name, filters, **kwargs):
"""
Example:
grond = PhotometryLike.from_kwargs('GROND',
filters=threeML_filter_library.ESO.GROND,
g=(20.93,.23),
r=(20.6,0.12),
i=(20.4,.07),
z=(20.3,.04),
J=(20.0,.03),
H=(19.8,.03),
K=(19.7,.04))
Magnitudes and errors are entered as keyword arguments where the key is the filter name and
the argument is a tuple containing the data. You can exclude data for individual filters and
they will be ignored during the fit.
NOTE: PhotometryLike expects apparent AB magnitudes. Please calibrate your data to this system
:param name: plugin name
:param filters: speclite filters
:param kwargs: keyword args of band name and tuple(mag, mag error)
"""
return cls(name, filters, PhotometericObservation.from_kwargs(**kwargs))
@classmethod
def from_file(cls, name: str, filters: Union[FilterResponse, FilterSequence], file_name: str):
"""
Create the a PhotometryLike plugin from a saved HDF5 data file
:param name: plugin name
:param filters: speclite filters
:param file_name: name of the observation file
"""
return cls(name, filters, PhotometericObservation.from_hdf5(file_name))
@property
def magnitudes(self):
return self._magnitudes
@property
def magnitude_errors(self):
return self._magnitude_errors
def set_model(self, likelihood_model):
"""
set the likelihood model
:param likelihood_model:
:return:
"""
super(PhotometryLike, self).set_model(likelihood_model)
n_point_sources = self._likelihood_model.get_number_of_point_sources()
# sum up the differential
def differential_flux(energies):
fluxes = self._likelihood_model.get_point_source_fluxes(
0, energies, tag=self._tag
)
# If we have only one point source, this will never be executed
for i in range(1, n_point_sources):
fluxes += self._likelihood_model.get_point_source_fluxes(
i, energies, tag=self._tag
)
return fluxes
self._filter_set.set_model(differential_flux)
def _get_total_expectation(self):
return self._filter_set.ab_magnitudes()
def display_filters(self):
"""
display the filter transmission curves
:return:
"""
return self._filter_set.plot_filters()
def _new_plugin(self, name, x, y, yerr):
"""
construct a new PhotometryLike plugin. allows for returning a new plugin
from simulated data set while customizing the constructor
further down the inheritance tree
:param name: new name
:param x: new x
:param y: new y
:param yerr: new yerr
:return: new XYLike
"""
bands = collections.OrderedDict()
for i, band in enumerate(self._filter_set.filter_names):
bands[band] = (y[i], yerr[i])
new_photo = PhotometryLike(
name, filters=self._filter_set.speclite_filters, **bands
)
# apply the current mask
new_photo._mask = copy.copy(self._mask)
return new_photo
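# Usage sketch for toggling bands (instrument, band names and magnitudes are illustrative,
# mirroring the from_kwargs docstring example above):
#   grond = PhotometryLike.from_kwargs('GROND',
#                                      filters=threeML_filter_library.ESO.GROND,
#                                      g=(20.93, .23), r=(20.6, 0.12))
#   grond.band_g.on = False  # exclude the g band from the fit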
|
the-stack_0_5710 | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import copy
import json
from collections import defaultdict
from django.dispatch import receiver
from django.template.loader import get_template
from django.urls import resolve, reverse
from django.utils.html import escape
from django.utils.translation import gettext_lazy as _
from pretix.base.models import Event, Order
from pretix.base.signals import (
event_copy_data, item_copy_data, logentry_display, logentry_object_link,
register_data_exporters,
)
from pretix.control.signals import (
item_forms, nav_event, order_info, order_position_buttons,
)
from pretix.plugins.badges.forms import BadgeItemForm
from pretix.plugins.badges.models import BadgeItem, BadgeLayout
@receiver(nav_event, dispatch_uid="badges_nav")
def control_nav_import(sender, request=None, **kwargs):
url = resolve(request.path_info)
p = (
request.user.has_event_permission(request.organizer, request.event, 'can_change_settings', request)
or request.user.has_event_permission(request.organizer, request.event, 'can_view_orders', request)
)
if not p:
return []
return [
{
'label': _('Badges'),
'url': reverse('plugins:badges:index', kwargs={
'event': request.event.slug,
'organizer': request.event.organizer.slug,
}),
'active': url.namespace == 'plugins:badges',
'icon': 'id-card',
}
]
@receiver(item_forms, dispatch_uid="badges_item_forms")
def control_item_forms(sender, request, item, **kwargs):
try:
inst = BadgeItem.objects.get(item=item)
except BadgeItem.DoesNotExist:
inst = BadgeItem(item=item)
return BadgeItemForm(
instance=inst,
event=sender,
data=(request.POST if request.method == "POST" else None),
prefix="badgeitem"
)
@receiver(item_copy_data, dispatch_uid="badges_item_copy")
def copy_item(sender, source, target, **kwargs):
try:
inst = BadgeItem.objects.get(item=source)
BadgeItem.objects.create(item=target, layout=inst.layout)
except BadgeItem.DoesNotExist:
pass
@receiver(signal=event_copy_data, dispatch_uid="badges_copy_data")
def event_copy_data_receiver(sender, other, question_map, item_map, **kwargs):
layout_map = {}
for bl in other.badge_layouts.all():
oldid = bl.pk
bl = copy.copy(bl)
bl.pk = None
bl.event = sender
layout = json.loads(bl.layout)
for o in layout:
if o['type'] == 'textarea':
if o['content'].startswith('question_'):
newq = question_map.get(int(o['content'][9:]))
if newq:
o['content'] = 'question_{}'.format(newq.pk)
bl.save()
if bl.background and bl.background.name:
bl.background.save('background.pdf', bl.background)
layout_map[oldid] = bl
for bi in BadgeItem.objects.filter(item__event=other):
BadgeItem.objects.create(item=item_map.get(bi.item_id), layout=layout_map.get(bi.layout_id))
@receiver(register_data_exporters, dispatch_uid="badges_export_all")
def register_pdf(sender, **kwargs):
from .exporters import BadgeExporter
return BadgeExporter
def _cached_rendermap(event):
if hasattr(event, '_cached_renderermap'):
return event._cached_renderermap
renderermap = {
bi.item_id: bi.layout_id
for bi in BadgeItem.objects.select_related('layout').filter(item__event=event)
}
try:
default_renderer = event.badge_layouts.get(default=True).pk
except BadgeLayout.DoesNotExist:
default_renderer = None
event._cached_renderermap = defaultdict(lambda: default_renderer)
event._cached_renderermap.update(renderermap)
return event._cached_renderermap
@receiver(order_position_buttons, dispatch_uid="badges_control_order_buttons")
def control_order_position_info(sender: Event, position, request, order: Order, **kwargs):
if _cached_rendermap(sender)[position.item_id] is None:
return ''
template = get_template('pretixplugins/badges/control_order_position_buttons.html')
ctx = {
'order': order,
'request': request,
'event': sender,
'position': position
}
return template.render(ctx, request=request).strip()
@receiver(order_info, dispatch_uid="badges_control_order_info")
def control_order_info(sender: Event, request, order: Order, **kwargs):
cm = _cached_rendermap(sender)
if all(cm[p.item_id] is None for p in order.positions.all()):
return ''
template = get_template('pretixplugins/badges/control_order_info.html')
ctx = {
'order': order,
'request': request,
'event': sender,
}
return template.render(ctx, request=request)
@receiver(signal=logentry_display, dispatch_uid="badges_logentry_display")
def badges_logentry_display(sender, logentry, **kwargs):
if not logentry.action_type.startswith('pretix.plugins.badges'):
return
plains = {
'pretix.plugins.badges.layout.added': _('Badge layout created.'),
'pretix.plugins.badges.layout.deleted': _('Badge layout deleted.'),
'pretix.plugins.badges.layout.changed': _('Badge layout changed.'),
}
if logentry.action_type in plains:
return plains[logentry.action_type]
@receiver(signal=logentry_object_link, dispatch_uid="badges_logentry_object_link")
def badges_logentry_object_link(sender, logentry, **kwargs):
if not logentry.action_type.startswith('pretix.plugins.badges.layout') or not isinstance(logentry.content_object,
BadgeLayout):
return
a_text = _('Badge layout {val}')
a_map = {
'href': reverse('plugins:badges:edit', kwargs={
'event': sender.slug,
'organizer': sender.organizer.slug,
'layout': logentry.content_object.id
}),
'val': escape(logentry.content_object.name),
}
a_map['val'] = '<a href="{href}">{val}</a>'.format_map(a_map)
return a_text.format_map(a_map)
|
the-stack_0_5711 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import glob
import salem
from combine2d.core.utils import NonRGIGlacierDirectory
from combine2d.core.test_cases import Borden, Giluwe
from combine2d.core.arithmetics import RMSE, mean_BIAS, percentiles
from combine2d.core.data_logging import load_pickle
from combine2d.sandbox.quick_n_dirty_eval.experiment_naming_engine import \
get_experiment_group, get_experiment_subgroup
from oggm import cfg
cfg.initialize()
basedir = '/home/philipp/final'
outputdir = '/home/philipp/final'
case = Giluwe
gdir = NonRGIGlacierDirectory(case, basedir)
ref_ice_mask = np.load(gdir.get_filepath('ref_ice_mask'))
true_bed = salem.GeoTiff(gdir.get_filepath('dem')).get_vardata()
true_surf = salem.GeoTiff(gdir.get_filepath('ref_dem')).get_vardata()
filepaths = glob.glob(os.path.join(gdir.dir, '*', 'data_logger.pkl'))
filepaths = sorted(filepaths)
columns = [
'experiment',
'experimentgroup',
'experimentsubgroup',
'subgroupindex',
'optimizedbed',
'optimizedsurf',
'optimizedicethick',
'firstguess',
'beds',
'surfs',
'costs',
'cterms',
'optimizedbederror',
'optimizedsurferror',
'optimizedbedrmse',
'optimizedsurfrmse',
'optimizedbedbias',
'optimizedsurfbias',
'firstguessrmse',
'firstguessbias',
'firstguess_5_percentile',
'firstguess_25_percentile',
'firstguess_75_percentile',
'firstguess_95_percentile',
'surfacenoise',
'surfacenoisermse',
'surfacenoisebias',
'surfacenoise_5_percentile',
'surfacenoise_25_percentile',
'surfacenoise_75_percentile',
'surfacenoise_95_percentile',
'bedmeasurements',
'bedmeasurementsrmse',
'bedmeasurementsbias',
'iterations',
'R',
'dV',
'warning',
'dir_path'
]
df = pd.DataFrame(columns=columns)
for path in filepaths:
dl = load_pickle(path)
inv_subdir = os.path.split(path)[0]
inv_settings = load_pickle(os.path.join(gdir.dir, inv_subdir,
'inversion_settings.pkl'))
experiment = inv_settings['inversion_subdir']
surface_noise = np.zeros(true_bed.shape)
if os.path.exists(os.path.join(gdir.dir, inv_subdir, 'dem_noise.npy')):
surface_noise = np.load(os.path.join(gdir.dir, inv_subdir,
'dem_noise.npy'))
bed_measurements = np.ma.masked_all(true_bed.shape)
if os.path.exists(os.path.join(gdir.dir, inv_subdir,
'bed_measurements.pkl')):
bed_measurements = np.load(os.path.join(gdir.dir, inv_subdir,
'bed_measurements.pkl'))
warning_found = False
# first_guessed_bed_noise = np.load(os.path.join(gdir.dir, inv_subdir,
# 'first_guessed_bed_noise.npy'))
if os.path.exists(os.path.join(gdir.dir, inv_subdir,
'warning.txt')):
warning_found = True
if len(dl.step_indices) > 0:
final_bed = dl.beds[-1]
final_surf = dl.surfs[-1]
final_it = dl.surfs[-1] - dl.beds[-1]
bed_rmse = RMSE(dl.beds[-1], true_bed, ref_ice_mask)
bed_bias = mean_BIAS(dl.beds[-1], true_bed, ref_ice_mask)
bed_error = final_bed - true_bed
surf_rmse = RMSE(dl.surfs[-1], true_surf, ref_ice_mask)
surf_bias = mean_BIAS(dl.surfs[-1], true_surf, ref_ice_mask)
surf_error = final_surf - true_surf
dV = (((dl.surfs[-1] - dl.beds[-1]).sum())
- (true_surf - true_bed).sum()) / (true_surf - true_bed).sum()
else:
final_bed = np.ma.masked_all(true_bed.shape)
final_surf = np.ma.masked_all(true_bed.shape)
final_it = np.ma.masked_all(true_bed.shape)
bed_error = np.ma.masked_all(true_bed.shape)
bed_rmse = np.nan
bed_bias = np.nan
surf_error = np.ma.masked_all(true_bed.shape)
surf_rmse = np.nan
surf_bias = np.nan
dV = np.nan
first_guess_percentiles = percentiles(dl.first_guessed_bed, true_bed,
ref_ice_mask)
surface_noise_percentiles = percentiles(surface_noise, 0, ref_ice_mask)
new_row = {
'experiment': experiment,
'experimentgroup': get_experiment_group(experiment),
'experimentsubgroup': get_experiment_subgroup(experiment),
'subgroupindex': '',
'optimizedbed': final_bed,
'optimizedsurf': final_surf,
'optimizedicethick': final_it,
'firstguess': dl.first_guessed_bed,
# 'beds': dl.beds,
# 'surfs': dl.surfs,
# 'costs': dl.costs,
# 'cterms': dl.c_terms,
'optimizedbederror': bed_error,
'optimizedbedrmse': bed_rmse,
'optimizedbedbias': bed_bias,
'optimizedsurferror': surf_error,
'optimizedsurfrmse': surf_rmse,
        'optimizedsurfbias': surf_bias,
'firstguessrmse': RMSE(dl.first_guessed_bed, true_bed, ref_ice_mask),
'firstguessbias': mean_BIAS(dl.first_guessed_bed, true_bed,
ref_ice_mask),
'firstguess_5_percentile': first_guess_percentiles[0],
'firstguess_25_percentile': first_guess_percentiles[1],
'firstguess_75_percentile': first_guess_percentiles[-2],
'firstguess_95_percentile': first_guess_percentiles[-1],
'surfacenoise': surface_noise,
'surfacenoisermse': RMSE(surface_noise, 0, ref_ice_mask),
'surfacenoisebias': mean_BIAS(surface_noise, 0, ref_ice_mask),
'surfacenoise_5_percentile': surface_noise_percentiles[0],
'surfacenoise_25_percentile': surface_noise_percentiles[1],
'surfacenoise_75_percentile': surface_noise_percentiles[-2],
'surfacenoise_95_percentile': surface_noise_percentiles[-1],
'bedmeasurements': bed_measurements,
'bedmeasurementsrmse': RMSE(bed_measurements, 0, ref_ice_mask),
'bedmeasurementsbias': mean_BIAS(bed_measurements, 0, ref_ice_mask),
'iterations': len(dl.step_indices),
'dx': case.dx,
'dV': dV,
'warning': warning_found,
'dir_path': inv_subdir
}
if new_row['experimentgroup'] == 'fg rmse':
new_row['subgroupindex'] = new_row['firstguessrmse']
elif new_row['experimentgroup'] == 'fg bias':
new_row['subgroupindex'] = new_row['firstguessbias']
elif new_row['experimentgroup'] == 'promised land':
new_row['subgroupindex'] = new_row['surfacenoisermse']
elif new_row['experimentgroup'] == 'bed measurements':
new_row['subgroupindex'] = new_row['bedmeasurementsrmse']
# df_row = pd.DataFrame(new_row)
df = df.append(new_row, ignore_index=True)
df = df.sort_values(['experimentgroup', 'experimentsubgroup', 'subgroupindex',
'experiment'])
df.to_pickle(os.path.join(basedir, '{:s} dataframe.pkl'.format(case.name)))
# store = pd.HDFStore(os.path.join(basedir,
# '{:s} dataframe.h5'.format(case.name)))
# store['df'] = df
# store.close()
cols_to_drop = [
'optimizedbed',
'optimizedsurf',
'optimizedicethick',
'firstguess',
'beds',
'surfs',
'costs',
'cterms',
'optimizedbederror',
'optimizedsurferror',
'surfacenoise',
'bedmeasurements'
]
small_df = df.copy()
small_df.drop(cols_to_drop, inplace=True, axis=1)
small_df.to_csv(
os.path.join(basedir, '{:s} dataframe small.csv'.format(case.name)))
print('end')
fig, ax = plt.subplots()
sdf = df.loc[df['experimentsubgroup'] == 'fg bias']
sdf.plot('subgroupindex', 'optimizedbedrmse', ax=ax)
sdf.plot('subgroupindex', 'firstguessrmse', ax=ax)
plt.show()
fig, ax = plt.subplots()
sdf = df.loc[df['experimentgroup'] == 'fg rmse']
for key, grp in sdf.groupby(['experimentsubgroup']):
grp.plot('subgroupindex', 'optimizedbedrmse', ax=ax, label='optimized '
+ str(key))
grp.plot('subgroupindex', 'firstguessrmse', ax=ax, label='fg rmse ' + str(
key))
grp.plot('subgroupindex', 'firstguessbias', ax=ax, label='fg bias ' + str(
key))
plt.show()
|
the-stack_0_5712 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# Copyright (C) 2020-2021 LuaVela Authors. See Copyright Notice in COPYRIGHT
# Copyright (C) 2015-2020 IPONWEB Ltd. See Copyright Notice in COPYRIGHT
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'Documentation'
copyright = u'2015-2019 IPONWEB Ltd.'
author = u'uJIT team'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
if tags.has('use_confluencebuilder'):
extensions.append('sphinxcontrib.confluencebuilder')
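# The Confluence builder above is only enabled when the build defines this tag, e.g.
# (the exact invocation below is an assumption about the intended workflow):
#   sphinx-build -b confluence -t use_confluencebuilder . _build/confluence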
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
]
# Single sourcing
rst_epilog = '''
.. |PROJECT| replace:: LuaVela
.. |PRJ_INT| replace:: uJIT
.. |CLI_BIN| replace:: ujit
'''
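# These substitutions can then be used in any .rst source of this documentation, e.g.
# (illustrative sentence, not taken from the docs):
#   |PROJECT| (internally |PRJ_INT|) ships the |CLI_BIN| command-line binary.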
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Documentationdoc'
# -- Options for LaTeX output ------------------------------------------------
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'uJIT documentation', u'Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Documentation', u'Documentation',
author, 'uJIT', 'Implementation of Lua 5.1, originally a fork of LuaJIT',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
confluence_publish = True
confluence_page_hierarchy = True
confluence_server_user = os.getenv("CONFLUENCE_USERNAME", "")
confluence_server_pass = os.getenv("CONFLUENCE_PASSWORD", "")
|