# file: comments.py
import logging
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from pylons.templating import render_mako_def
from kai.lib.base import BaseController, render
from kai.lib.helpers import textilize
from kai.lib.serialization import render_feed
from kai.model import Comment
log = logging.getLogger(__name__)
class CommentsController(BaseController):
def preview(self):
data = request.POST['content']
return textilize(data)
def create(self, doc_id):
if not c.user:
abort(401)
# Ensure the doc exists
doc = self.db.get(doc_id)
if not doc:
abort(404)
comment = Comment(doc_id=doc_id, displayname=c.user.displayname,
email=c.user.email, human_id=c.user.id,
content=request.POST['content'])
comment.store(self.db)
return ''
def delete(self, id):
if not c.user or not c.user.in_group('admin'):
abort(401)
# Ensure doc exists
doc = self.db.get(id)
if not doc:
abort(404)
# Make sure its a comment
if not doc['type'] == 'Comment':
abort(404)
self.db.delete(doc)
return ''
def index(self, format='html'):
if format == 'html':
abort(404)
elif format in ['atom', 'rss']:
# Pull comments and grab the docs with them for their info
comments = list(Comment.by_anytime(c.db, descending=True, limit=20))
commentdata = []
for comment_doc in comments:
comment = {}
displayname = comment_doc.displayname or 'Anonymous'
comment['created'] = comment_doc.created
id = comment_doc.id
doc = c.db.get(comment_doc.doc_id)
if doc['type'] == 'Traceback':
comment['title'] = '%s: %s' % (doc['exception_type'], doc['exception_value'])
else:
comment['title'] = doc.get('title', '-- No title --')
comment['type'] = doc['type']
comment['link'] = render_mako_def(
'/widgets.mako', 'comment_link', title=comment['title'],
comment_id=comment_doc.id, doc=doc, type=doc['type'],
urlonly=True).strip()
comment['doc_id'] = comment_doc.doc_id
comment['description'] = textilize(comment_doc.content)
commentdata.append(comment)
response.content_type = 'application/atom+xml'
return render_feed(
title="PylonsHQ Comment Feed", link=url.current(qualified=True),
description="Recent PylonsHQ comments", objects=commentdata,
            pub_date='created')
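# Note on the feed above: index() only serves the 'atom'/'rss' formats; each
# entry links back to the commented document, falling back to the exception
# summary for Tracebacks and '-- No title --' when a document has no title.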
// file: target_pool_suite_test.go
package targetpool_test
import (
"testing"<|fim▁hole|> . "github.com/onsi/gomega"
)
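// RegisterFailHandler wires Gomega assertion failures into Ginkgo's fail
// handling, and RunSpecs runs every spec registered in this package under the
// given suite name; the dot-imports above pull both DSLs into scope.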
func TestTargetPoolService(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Target Pool Service Suite")
}
# file: __init__.py
from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
from openpyxl.styles.alignment import Alignment
from openpyxl.styles.borders import Border, Side
from openpyxl.styles.colors import Color
from openpyxl.styles.fills import PatternFill, GradientFill, Fill
from openpyxl.styles.fonts import Font, DEFAULT_FONT
from openpyxl.styles.numbers import NumberFormatDescriptor, is_date_format, is_builtin
from openpyxl.styles.protection import Protection
# file: test_ports.py
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import netaddr
from tempest_lib.common.utils import data_utils
from neutron.tests.api import base
from neutron.tests.api import base_security_groups as sec_base
from neutron.tests.tempest.common import custom_matchers
from neutron.tests.tempest import config
from neutron.tests.tempest import test
CONF = config.CONF
class PortsTestJSON(sec_base.BaseSecGroupTest):
"""
Test the following operations for ports:
port create
port delete
port list
port show
port update
"""
@classmethod
def resource_setup(cls):
super(PortsTestJSON, cls).resource_setup()
cls.network = cls.create_network()
cls.port = cls.create_port(cls.network)
def _delete_port(self, port_id):
self.client.delete_port(port_id)
body = self.client.list_ports()
ports_list = body['ports']
self.assertNotIn(port_id, [n['id'] for n in ports_list])
@test.attr(type='smoke')
@test.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
def test_create_update_delete_port(self):
# Verify port creation
body = self.client.create_port(network_id=self.network['id'])
port = body['port']
# Schedule port deletion with verification upon test completion
self.addCleanup(self._delete_port, port['id'])
self.assertTrue(port['admin_state_up'])
# Verify port update
new_name = "New_Port"
body = self.client.update_port(port['id'],
name=new_name,
admin_state_up=False)
updated_port = body['port']
self.assertEqual(updated_port['name'], new_name)
self.assertFalse(updated_port['admin_state_up'])
@test.idempotent_id('67f1b811-f8db-43e2-86bd-72c074d4a42c')
def test_create_bulk_port(self):
network1 = self.network
name = data_utils.rand_name('network-')
network2 = self.create_network(network_name=name)
network_list = [network1['id'], network2['id']]
port_list = [{'network_id': net_id} for net_id in network_list]
body = self.client.create_bulk_port(port_list)
created_ports = body['ports']
port1 = created_ports[0]
port2 = created_ports[1]
self.addCleanup(self._delete_port, port1['id'])
self.addCleanup(self._delete_port, port2['id'])
self.assertEqual(port1['network_id'], network1['id'])
self.assertEqual(port2['network_id'], network2['id'])
self.assertTrue(port1['admin_state_up'])
self.assertTrue(port2['admin_state_up'])
@classmethod
def _get_ipaddress_from_tempest_conf(cls):
"""Return first subnet gateway for configured CIDR """
if cls._ip_version == 4:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
elif cls._ip_version == 6:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
return netaddr.IPAddress(cidr)
@test.attr(type='smoke')
@test.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
def test_create_port_in_allowed_allocation_pools(self):
network = self.create_network()
net_id = network['id']
address = self._get_ipaddress_from_tempest_conf()
allocation_pools = {'allocation_pools': [{'start': str(address + 4),
'end': str(address + 6)}]}
subnet = self.create_subnet(network, **allocation_pools)
self.addCleanup(self.client.delete_subnet, subnet['id'])
body = self.client.create_port(network_id=net_id)
self.addCleanup(self.client.delete_port, body['port']['id'])
port = body['port']
ip_address = port['fixed_ips'][0]['ip_address']
start_ip_address = allocation_pools['allocation_pools'][0]['start']
end_ip_address = allocation_pools['allocation_pools'][0]['end']
ip_range = netaddr.IPRange(start_ip_address, end_ip_address)
self.assertIn(ip_address, ip_range)
@test.attr(type='smoke')
@test.idempotent_id('c9a685bd-e83f-499c-939f-9f7863ca259f')
def test_show_port(self):
# Verify the details of port
body = self.client.show_port(self.port['id'])
port = body['port']
self.assertIn('id', port)
# TODO(Santosh)- This is a temporary workaround to compare create_port
        # and show_port dict elements. Remove this once the extra_dhcp_opts issue
        # gets fixed in neutron (bug 1365341).
self.assertThat(self.port,
custom_matchers.MatchesDictExceptForKeys
(port, excluded_keys=['extra_dhcp_opts']))
@test.attr(type='smoke')
@test.idempotent_id('45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd')
def test_show_port_fields(self):
# Verify specific fields of a port
fields = ['id', 'mac_address']
body = self.client.show_port(self.port['id'],
fields=fields)
port = body['port']
self.assertEqual(sorted(port.keys()), sorted(fields))
for field_name in fields:
self.assertEqual(port[field_name], self.port[field_name])
@test.attr(type='smoke')
@test.idempotent_id('cf95b358-3e92-4a29-a148-52445e1ac50e')
def test_list_ports(self):
# Verify the port exists in the list of all ports
body = self.client.list_ports()
ports = [port['id'] for port in body['ports']
if port['id'] == self.port['id']]
self.assertNotEmpty(ports, "Created port not found in the list")
@test.attr(type='smoke')
@test.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
def test_port_list_filter_by_router_id(self):
# Create a router
network = self.create_network()
self.addCleanup(self.client.delete_network, network['id'])
subnet = self.create_subnet(network)
self.addCleanup(self.client.delete_subnet, subnet['id'])
router = self.create_router(data_utils.rand_name('router-'))
self.addCleanup(self.client.delete_router, router['id'])
port = self.client.create_port(network_id=network['id'])
# Add router interface to port created above
self.client.add_router_interface_with_port_id(
router['id'], port['port']['id'])
self.addCleanup(self.client.remove_router_interface_with_port_id,
router['id'], port['port']['id'])
# List ports filtered by router_id
port_list = self.client.list_ports(device_id=router['id'])
ports = port_list['ports']
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0]['id'], port['port']['id'])
self.assertEqual(ports[0]['device_id'], router['id'])
@test.attr(type='smoke')
@test.idempotent_id('ff7f117f-f034-4e0e-abff-ccef05c454b4')
def test_list_ports_fields(self):
# Verify specific fields of ports
fields = ['id', 'mac_address']
body = self.client.list_ports(fields=fields)
ports = body['ports']
self.assertNotEmpty(ports, "Port list returned is empty")
# Asserting the fields returned are correct
for port in ports:
self.assertEqual(sorted(fields), sorted(port.keys()))
@test.attr(type='smoke')
@test.idempotent_id('63aeadd4-3b49-427f-a3b1-19ca81f06270')
def test_create_update_port_with_second_ip(self):
# Create a network with two subnets
network = self.create_network()
self.addCleanup(self.client.delete_network, network['id'])
subnet_1 = self.create_subnet(network)
self.addCleanup(self.client.delete_subnet, subnet_1['id'])
subnet_2 = self.create_subnet(network)
self.addCleanup(self.client.delete_subnet, subnet_2['id'])
fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
fixed_ips = fixed_ip_1 + fixed_ip_2
# Create a port with multiple IP addresses
port = self.create_port(network,
fixed_ips=fixed_ips)
self.addCleanup(self.client.delete_port, port['id'])
self.assertEqual(2, len(port['fixed_ips']))
check_fixed_ips = [subnet_1['id'], subnet_2['id']]
for item in port['fixed_ips']:
self.assertIn(item['subnet_id'], check_fixed_ips)
# Update the port to return to a single IP address
port = self.update_port(port, fixed_ips=fixed_ip_1)
self.assertEqual(1, len(port['fixed_ips']))
# Update the port with a second IP address from second subnet
port = self.update_port(port, fixed_ips=fixed_ips)
self.assertEqual(2, len(port['fixed_ips']))
def _update_port_with_security_groups(self, security_groups_names):
subnet_1 = self.create_subnet(self.network)
self.addCleanup(self.client.delete_subnet, subnet_1['id'])
fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
security_groups_list = list()
for name in security_groups_names:
group_create_body = self.client.create_security_group(
name=name)
self.addCleanup(self.client.delete_security_group,
group_create_body['security_group']['id'])
security_groups_list.append(group_create_body['security_group']
['id'])
# Create a port
sec_grp_name = data_utils.rand_name('secgroup')
security_group = self.client.create_security_group(name=sec_grp_name)
self.addCleanup(self.client.delete_security_group,
security_group['security_group']['id'])
post_body = {
"name": data_utils.rand_name('port-'),
"security_groups": [security_group['security_group']['id']],
"network_id": self.network['id'],
"admin_state_up": True,
"fixed_ips": fixed_ip_1}
body = self.client.create_port(**post_body)
self.addCleanup(self.client.delete_port, body['port']['id'])
port = body['port']
# Update the port with security groups
subnet_2 = self.create_subnet(self.network)
fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
update_body = {"name": data_utils.rand_name('port-'),
"admin_state_up": False,
"fixed_ips": fixed_ip_2,
"security_groups": security_groups_list}
body = self.client.update_port(port['id'], **update_body)
port_show = body['port']
        # Verify the security groups and other attributes updated to the port:
        # keys present in only one of the two dicts are excluded from the dict
        # comparison, and fixed_ips / security_groups are then checked separately.
exclude_keys = set(port_show).symmetric_difference(update_body)
exclude_keys.add('fixed_ips')
exclude_keys.add('security_groups')
self.assertThat(port_show, custom_matchers.MatchesDictExceptForKeys(
update_body, exclude_keys))
self.assertEqual(fixed_ip_2[0]['subnet_id'],
port_show['fixed_ips'][0]['subnet_id'])
for security_group in security_groups_list:
self.assertIn(security_group, port_show['security_groups'])
@test.attr(type='smoke')
@test.idempotent_id('58091b66-4ff4-4cc1-a549-05d60c7acd1a')
def test_update_port_with_security_group_and_extra_attributes(self):
self._update_port_with_security_groups(
[data_utils.rand_name('secgroup')])
@test.attr(type='smoke')
@test.idempotent_id('edf6766d-3d40-4621-bc6e-2521a44c257d')
def test_update_port_with_two_security_groups_and_extra_attributes(self):
self._update_port_with_security_groups(
[data_utils.rand_name('secgroup'),
data_utils.rand_name('secgroup')])
@test.attr(type='smoke')
@test.idempotent_id('13e95171-6cbd-489c-9d7c-3f9c58215c18')
def test_create_show_delete_port_user_defined_mac(self):
# Create a port for a legal mac
body = self.client.create_port(network_id=self.network['id'])
old_port = body['port']
free_mac_address = old_port['mac_address']
self.client.delete_port(old_port['id'])
# Create a new port with user defined mac
body = self.client.create_port(network_id=self.network['id'],
mac_address=free_mac_address)
self.addCleanup(self.client.delete_port, body['port']['id'])
port = body['port']
body = self.client.show_port(port['id'])
show_port = body['port']
self.assertEqual(free_mac_address,
show_port['mac_address'])
@test.attr(type='smoke')
@test.idempotent_id('4179dcb9-1382-4ced-84fe-1b91c54f5735')
def test_create_port_with_no_securitygroups(self):
network = self.create_network()
self.addCleanup(self.client.delete_network, network['id'])
subnet = self.create_subnet(network)
self.addCleanup(self.client.delete_subnet, subnet['id'])
port = self.create_port(network, security_groups=[])
self.addCleanup(self.client.delete_port, port['id'])
self.assertIsNotNone(port['security_groups'])
self.assertEmpty(port['security_groups'])
class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
@classmethod
def resource_setup(cls):
super(PortsAdminExtendedAttrsTestJSON, cls).resource_setup()
cls.identity_client = cls._get_identity_admin_client()
cls.tenant = cls.identity_client.get_tenant_by_name(
CONF.identity.tenant_name)
cls.network = cls.create_network()
cls.host_id = socket.gethostname()
@test.attr(type='smoke')
@test.idempotent_id('8e8569c1-9ac7-44db-8bc1-f5fb2814f29b')
def test_create_port_binding_ext_attr(self):
post_body = {"network_id": self.network['id'],
"binding:host_id": self.host_id}
body = self.admin_client.create_port(**post_body)
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
host_id = port['binding:host_id']
self.assertIsNotNone(host_id)
self.assertEqual(self.host_id, host_id)
@test.attr(type='smoke')
@test.idempotent_id('6f6c412c-711f-444d-8502-0ac30fbf5dd5')
def test_update_port_binding_ext_attr(self):
post_body = {"network_id": self.network['id']}
body = self.admin_client.create_port(**post_body)
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
update_body = {"binding:host_id": self.host_id}
body = self.admin_client.update_port(port['id'], **update_body)
updated_port = body['port']
host_id = updated_port['binding:host_id']
self.assertIsNotNone(host_id)
self.assertEqual(self.host_id, host_id)
@test.attr(type='smoke')
@test.idempotent_id('1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8')
def test_list_ports_binding_ext_attr(self):
# Create a new port
post_body = {"network_id": self.network['id']}
body = self.admin_client.create_port(**post_body)
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
        # Update the port's binding attributes so that it is now 'bound'
# to a host
update_body = {"binding:host_id": self.host_id}
self.admin_client.update_port(port['id'], **update_body)
# List all ports, ensure new port is part of list and its binding
# attributes are set and accurate
body = self.admin_client.list_ports()
ports_list = body['ports']
pids_list = [p['id'] for p in ports_list]
self.assertIn(port['id'], pids_list)
listed_port = [p for p in ports_list if p['id'] == port['id']]
self.assertEqual(1, len(listed_port),
'Multiple ports listed with id %s in ports listing: '
'%s' % (port['id'], ports_list))
self.assertEqual(self.host_id, listed_port[0]['binding:host_id'])
    @test.attr(type='smoke')
@test.idempotent_id('b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13')
def test_show_port_binding_ext_attr(self):
body = self.admin_client.create_port(network_id=self.network['id'])
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
body = self.admin_client.show_port(port['id'])
show_port = body['port']
self.assertEqual(port['binding:host_id'],
show_port['binding:host_id'])
self.assertEqual(port['binding:vif_type'],
show_port['binding:vif_type'])
self.assertEqual(port['binding:vif_details'],
show_port['binding:vif_details'])
class PortsIpV6TestJSON(PortsTestJSON):
_ip_version = 6
_tenant_network_cidr = CONF.network.tenant_network_v6_cidr
_tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
class PortsAdminExtendedAttrsIpV6TestJSON(PortsAdminExtendedAttrsTestJSON):
_ip_version = 6
_tenant_network_cidr = CONF.network.tenant_network_v6_cidr
    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
#!/usr/bin/env python
# file: setup.py
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
requirements = [ 'pyte', 'docopt' ]
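# asyncio is in the standard library from Python 3.4 onwards; on older
# interpreters the import below fails and the PyPI backport is added to the
# requirements instead.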
try:
import asyncio
except ImportError:
requirements.append('asyncio')
setup(
name='libpymux',
author='Jonathan Slenders',
version='0.1',
license='LICENSE.txt',
url='https://github.com/jonathanslenders/libpymux',
    description='Python terminal multiplexer (Pure Python tmux clone)',
    long_description=open("README.rst").read(),
packages=['libpymux'],
install_requires=requirements,
)
// file: GLSLNodeFunction.js
import NodeFunction from '../core/NodeFunction.js';
import NodeFunctionInput from '../core/NodeFunctionInput.js';
const declarationRegexp = /^\s*(highp|mediump|lowp)?\s*([a-z_0-9]+)\s*([a-z_0-9]+)?\s*\(([\s\S]*?)\)/i;
const propertiesRegexp = /[a-z_0-9]+/ig;
const pragmaMain = '#pragma main';
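// Illustrative example (not from the original source): for a declaration like
// 'lowp vec3 mul( in vec3 a, in float b )', declarationRegexp captures
// [1] 'lowp' (precision), [2] 'vec3' (return type), [3] 'mul' (name) and
// [4] 'in vec3 a, in float b' (the raw inputs), which parse() tokenizes below.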
const parse = ( source ) => {
const pragmaMainIndex = source.indexOf( pragmaMain );
const mainCode = pragmaMainIndex !== - 1 ? source.substr( pragmaMainIndex + pragmaMain.length ) : source;
const declaration = mainCode.match( declarationRegexp );
if ( declaration !== null && declaration.length === 5 ) {
// tokenizer
const inputsCode = declaration[ 4 ];
const propsMatches = [];
let nameMatch = null;
while ( ( nameMatch = propertiesRegexp.exec( inputsCode ) ) !== null ) {
propsMatches.push( nameMatch );
}
// parser
const inputs = [];
let i = 0;
while ( i < propsMatches.length ) {
const isConst = propsMatches[ i ][ 0 ] === 'const';
if ( isConst === true ) {
i ++;
}
let qualifier = propsMatches[ i ][ 0 ];
if ( qualifier === 'in' || qualifier === 'out' || qualifier === 'inout' ) {
i ++;
} else {
qualifier = '';
}
const type = propsMatches[ i ++ ][ 0 ];
let count = Number.parseInt( propsMatches[ i ][ 0 ] );
if ( Number.isNaN( count ) === false ) i ++;
else count = null;
const name = propsMatches[ i ++ ][ 0 ];
inputs.push( new NodeFunctionInput( type, name, count, qualifier, isConst ) );
}
//
const blockCode = mainCode.substring( declaration[ 0 ].length );
const name = declaration[ 3 ] !== undefined ? declaration[ 3 ] : '';
const type = declaration[ 2 ];
const presicion = declaration[ 1 ] !== undefined ? declaration[ 1 ] : '';
const headerCode = pragmaMainIndex !== - 1 ? source.substr( 0, pragmaMainIndex ) : '';
return {
type,
inputs,
name,
presicion,
inputsCode,
blockCode,
headerCode
};
} else {
throw new Error( 'FunctionNode: Function is not a GLSL code.' );
}
};
class GLSLNodeFunction extends NodeFunction {
constructor( source ) {
const { type, inputs, name, presicion, inputsCode, blockCode, headerCode } = parse( source );
super( type, inputs, name, presicion );
this.inputsCode = inputsCode;
this.blockCode = blockCode;
this.headerCode = headerCode;
}
getCode( name = this.name ) {
const headerCode = this.headerCode;
const presicion = this.presicion;
let declarationCode = `${ this.type } ${ name } ( ${ this.inputsCode.trim() } )`;
if ( presicion !== '' ) {
declarationCode = `${ presicion } ${ declarationCode }`;
}
return headerCode + declarationCode + this.blockCode;
}
}
export default GLSLNodeFunction;
# file: tests.py
from rapidsms.tests.scripted import TestScript
from apps.form.models import *
from apps.reporters.models import *
import apps.reporters.app as reporter_app
import apps.supply.app as supply_app
import apps.form.app as form_app
import apps.default.app as default_app
from app import App
from django.core.management.commands.dumpdata import Command
import time
import random
import os
from datetime import datetime
class TestApp (TestScript):
#apps = (reporter_app.App, App,form_app.App, supply_app.App, default_app.App )
apps = (reporter_app.App, App,form_app.App, supply_app.App )
# the test_backend script does the loading of the dummy backend that allows reporters
# to work properly in tests
fixtures = ['nigeria_llin', 'test_kano_locations', 'test_backend']
def setUp(self):
TestScript.setUp(self)
def testFixtures(self):
self._testKanoLocations()
self._testForms()
self._testRoles()
def testScript(self):
a = """
8005551219 > llin register 20 dl crummy user
8005551219 < Hello crummy! You are now registered as Distribution point team leader at KANO State.
"""
self.runScript(a)
# this should succeed because we just created him
reporters = Reporter.objects.all()
Reporter.objects.get(alias="cuser")
dict = {"alias":"fail"}
# make sure checking a non-existant user fails
self.assertRaises(Reporter.DoesNotExist, Reporter.objects.get, **dict)
testRegistration = """
8005551212 > llin my status
8005551212 < Please register your phone with RapidSMS.
8005551212 > llin register 20 dl dummy user
8005551212 < Hello dummy! You are now registered as Distribution point team leader at KANO State.
8005551212 > llin my status
8005551212 < I think you are dummy user.
#duplicate submission
test_reg_dup > llin register 20 dl duplicate user
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State.
# this one should be a duplicate
test_reg_dup > llin register 20 dl duplicate user
test_reg_dup < Hello again duplicate! You are already registered as a Distribution point team leader at KANO State.
# but all of these should create a new registration
test_reg_dup > llin register 20 dl duplicate user withanothername
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State.
test_reg_dup > llin register 20 dl duplicate userlonger
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State.
test_reg_dup > llin register 20 dl duplicated user
test_reg_dup < Hello duplicated! You are now registered as Distribution point team leader at KANO State.
test_reg_dup > llin register 20 sm duplicate user
test_reg_dup < Hello duplicate! You are now registered as Stock manager at KANO State.
test_reg_dup > llin register 2001 dl duplicate user
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at AJINGI LGA.
# case sensitivity
test_reg_2 > llin REGISTER 20 dl another user
test_reg_2 < Hello another! You are now registered as Distribution point team leader at KANO State.
# different name formats
test_reg_3 > llin register 20 dl onename
test_reg_3 < Hello onename! You are now registered as Distribution point team leader at KANO State.
# these fail
test_reg_4 > llin register 20 dl mister three names
test_reg_4 < Hello mister! You are now registered as Distribution point team leader at KANO State.
test_reg_5 > llin register 20 dl mister four name guy
test_reg_5 < Hello mister! You are now registered as Distribution point team leader at KANO State.
# some other spellings
test_reg_short > llin regstr 20 dl short user
test_reg_short < Hello short! You are now registered as Distribution point team leader at KANO State.
test_reg_short_2 > llin regs 20 dl short user
test_reg_short_2 < Hello short! You are now registered as Distribution point team leader at KANO State.
test_reg_short_3 > llin reg 20 dl short user
test_reg_short_3 < Hello short! You are now registered as Distribution point team leader at KANO State.
test_reg_long > llin registered 20 dl long user
test_reg_long < Hello long! You are now registered as Distribution point team leader at KANO State.
# extra spaces
test_reg_8 > llin register 20 dl space guy
test_reg_8 < Hello space! You are now registered as Distribution point team leader at KANO State.
# new tests for more flexible roles
test_reg_dl > llin register 20 dl distribution leader
test_reg_dl < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_dl_2 > llin register 20 ds distribution leader
test_reg_dl_2 < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_dl_3 > llin register 20 dm distribution leader
test_reg_dl_3 < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_dl_4 > llin register 20 dp distribution leader
test_reg_dl_4 < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_lf > llin register 20 lf lga focal person
test_reg_lf < Hello lga! You are now registered as LGA focal person at KANO State.
test_reg_lf > llin register 20 lp lga focal person
test_reg_lf < Hello again lga! You are already registered as a LGA focal person at KANO State.
# alas, we're not perfect
test_reg_fail > llin rgstr 20 dl sorry guy
test_reg_fail < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testRegistrationErrors = """
12345 > llin my status
12345 < Please register your phone with RapidSMS.
12345 > llin register 45 DL hello world
12345 < Invalid form. 45 not in list of location codes
12345 > llin my status
12345 < Please register your phone with RapidSMS.
12345 > llin register 20 pp hello world
12345 < Invalid form. Unknown role code: pp
12345 > llin my status
12345 < Please register your phone with RapidSMS.
12345 > llin register 6803 AL hello world
12345 < Invalid form. 6803 not in list of location codes. Unknown role code: AL
12345 > llin my status
12345 < Please register your phone with RapidSMS.
"""
testKeyword= """
tkw_1 > llin register 20 dl keyword tester
tkw_1 < Hello keyword! You are now registered as Distribution point team leader at KANO State.
# base case
tkw_1 > llin nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# capitalize the domain
tkw_1 > LLIN nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# drop an L
tkw_1 > lin nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# mix the order - this is no longer supported
#tkw_1 > ILLn nets 2001 123 456 78 90
#tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
#tkw_1 > ilin nets 2001 123 456 78 90
#tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# ll anything works?
tkw_1 > ll nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
tkw_1 > llan nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# don't support w/o keyword
tkw_1 > nets 2001 123 456 78 90
# the default app to the rescue!
tkw_1 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testNets= """
8005551213 > llin register 2001 lf net guy
8005551213 < Hello net! You are now registered as LGA focal person at AJINGI LGA.
8005551213 > llin nets 2001 123 456 78 90
8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
8005551213 > llin nets 2001 123 456 78
8005551213 < Invalid form. The following fields are required: discrepancy
# test some of the different form prefix options
# case sensitivity
8005551213 > llin NETS 2001 123 456 78 90
8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# no s
8005551213 > llin net 2001 123 456 78 90
8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# really? this works?
8005551213 > llin Nt 2001 123 456 78 90
8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# something's gotta fail
8005551213 > llin n 2001 123 456 78 90
8005551213 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
8005551213 > llin bednets 2001 123 456 78 90
8005551213 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
8005551213 > llin ents 2001 123 456 78 90
8005551213 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testNetCards= """
8005551214 > llin register 200201 lf card guy
8005551214 < Hello card! You are now registered as LGA focal person at ALBASU CENTRAL Ward.
8005551214 > llin net cards 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
8005551214 > llin net cards 200201 123 456
8005551214 < Invalid form. The following fields are required: issued
# test some of the different form prefix options
# case sensitivity
8005551214 > llin NET CARDS 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
# no s
8005551214 > llin net card 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
# one word
8005551214 > llin netcards 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
8005551214 > llin netcard 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
# he he
8005551214 > llin nt cd 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
8005551214 > llin ntcrds 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
# something's gotta fail
8005551214 > llin cards 200201 123 456 78
8005551214 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testUnregisteredSubmissions = """
tus_1 > llin net cards 200201 123 456 78
tus_1 < Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78. Please register your phone
tus_1 > llin my status
tus_1 < Please register your phone with RapidSMS.
tus_2 > llin nets 2001 123 456 78 90
tus_2 < Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90. Please register your phone
tus_2 > llin my status
tus_2 < Please register your phone with RapidSMS.
"""
def testGenerateNetFixtures(self):
""" This isn't actually a test. It just takes advantage
of the test harness to spam a bunch of messages to the
nigeria app and spit out the data in a format that can
be sucked into a fixture. It should be moved to some
data generator at some point, but is being left here
for laziness sake """
# this is the number of net reports that will be generated
count = 0
# the sender will always be the same, for now
phone = "55555"
expected_actual_match_percent = .8
# allow specifying the minimum and maximum dates for message generation
min_date = datetime(2009,4,1)
max_date = datetime(2009,4,30)
min_time = time.mktime(min_date.timetuple())
max_time = time.mktime(max_date.timetuple())
# these are the locations that will be chosen. The actual
# location will be a distribution point under one of these
# wards
wards = [200101, 200102, 200103, 200104, 200105, 200106, 200107, 200108, 200109, 200110, 200201]
all_net_strings = []
for i in range(count):
# this first part generates a net form at a random DP
date = datetime.fromtimestamp(random.randint(min_time, max_time))
ward = Location.objects.get(code=random.choice(wards))
dp = random.choice(ward.children.all())
distributed = random.randint(50,500)
expected = random.randint(0,2000)
# create an actual amount based on the likelihood of match
if random.random() < expected_actual_match_percent:
actual = expected
else:
actual = random.randint(0,2000)
discrepancy = random.randint(0,distributed/5)
net_string = "%s@%s > llin nets %s %s %s %s %s" % (phone, date.strftime("%Y%m%d%H%M"), dp.code, distributed, expected, actual, discrepancy)
all_net_strings.append(net_string)
# the second part generates a net card form at a random MT
date = datetime.fromtimestamp(random.randint(min_time, max_time))
ward = Location.objects.get(code=random.choice(wards))
dp = random.choice(ward.children.all())
mt = random.choice(dp.children.all())
settlements = random.randint(3, 50)
people = random.randint(50, 600)
coupons = random.randint(50, 600)
net_card_string = "%s@%s > llin net cards %s %s %s %s" % (phone, date.strftime("%Y%m%d%H%M"), mt.code, settlements, people, coupons )
all_net_strings.append(net_card_string)
script = "\n".join(all_net_strings)
self.runScript(script)
dumpdata = Command()
filename = os.path.abspath(os.path.join(os.path.dirname(__file__),"fixtures/test_net_data.json"))
options = { "indent" : 2 }
datadump = dumpdata.handle("bednets", **options)
# uncomment these lines to save the fixture
# file = open(filename, "w")
# file.write(datadump)
# file.close()
# print "=== Successfully wrote fixtures to %s ===" % filename
#
def _testKanoLocations(self):
#TODO test for DPs and MTs
loc_types = LocationType.objects.all()
self.assertEqual(6, len(loc_types))
state = LocationType.objects.get(name="State")
lga = LocationType.objects.get(name="LGA")
ward = LocationType.objects.get(name="Ward")
locations = Location.objects.all()
# 1 state
self.assertEqual(1, len(locations.filter(type=state)))
# 44 lgas
self.assertEqual(44, len(locations.filter(type=lga)))
# 484 wards
        self.assertEqual(484, len(locations.filter(type=ward)))
        kano = locations.get(type=state)
        self.assertEqual("KANO", kano.name)
        self.assertEqual(44, len(kano.children.all()))
for lga in locations.filter(type=lga):
self.assertEqual(kano, lga.parent)
def _testForms(self):
forms = Form.objects.all()
self.assertEqual(5, len(forms))
for form_name in ["register", "issue", "receive", "nets", "netcards"]:
# this will throw an error if it doesn't exist
Form.objects.get(code__abbreviation=form_name)
def _testRoles(self):
# add this when we have a fixture for roles
roles = Role.objects.all()
self.assertEqual(4, len(roles))
for role_name in ["LGA focal person", "Ward supervisor", "Stock manager", "Distribution point team leader"]:
# this will throw an error if it doesn't exist
            Role.objects.get(name=role_name)
// file: render.rs
use std::iter;
use brdgme_color::*;
use brdgme_game::Renderer;
use brdgme_markup::ast::{Col, Row};
use brdgme_markup::{Align as A, Node as N};
use crate::board::{Block, Board, BoardTile, Loc, TileOwner, BLOCKS};
use crate::card::casino_card_count;
use crate::casino::CASINOS;
use crate::tile::TILES;
use crate::PlayerState;
use crate::PubState;
use crate::CASINO_CARDS;
use crate::CASINO_TILES;
use crate::PLAYER_DICE;
use crate::PLAYER_OWNER_TOKENS;
use crate::POINT_STOPS;
const TILE_WIDTH: usize = 9;
const TILE_HEIGHT: usize = 4;
const INLAY_WIDTH: usize = 5;
const INLAY_HEIGHT: usize = 2;
const INLAY_TOP: usize = 1;
const INLAY_LEFT: usize = 2;
const ALLEY_FULL_HEIGHT: usize = 3;
const STRIP_FULL_WIDTH: usize = 9;
static UNBUILT_TILE_BG: Color = Color {
r: 200,
g: 200,
b: 200,
};
impl Renderer for PubState {
fn render(&self) -> Vec<N> {
vec![self.render_with_perspective(None)]
}
}
impl Renderer for PlayerState {
fn render(&self) -> Vec<N> {
vec![self.pub_state.render_with_perspective(Some(self.player))]
}
}
impl PubState {
pub fn render_with_perspective(&self, perspective: Option<usize>) -> N {
N::Table(vec![
vec![(
A::Center,
vec![N::Table(vec![vec![(A::Left, vec![self.board.render()])]])],
)],
vec![],
vec![(
A::Center,
vec![self.render_player_table(perspective.unwrap_or(0))],
)],
vec![],
vec![(A::Center, vec![self.render_casino_table()])],
])
}
pub fn render_player_table(&self, perspective: usize) -> N {
let mut rows: Vec<Row> = vec![];
rows.push(vec![
(A::Right, vec![N::Bold(vec![N::text("Player")])]),
(A::Left, vec![N::text(" ")]),
(A::Center, vec![N::Bold(vec![N::text("Cash")])]),
(A::Left, vec![N::text(" ")]),
(A::Center, vec![N::Bold(vec![N::text("Dice")])]),
(A::Left, vec![N::text(" ")]),
(A::Center, vec![N::Bold(vec![N::text("Tokens")])]),
(A::Left, vec![N::text(" ")]),
(A::Center, vec![N::Bold(vec![N::text("Points")])]),
]);
let p_len = self.players.len();
for i in 0..p_len {
let p = (perspective + i) % p_len;
let used = self.board.used_resources(p);
rows.push(vec![
(A::Right, vec![N::Player(p)]),
(A::Left, vec![]),
(A::Center, vec![render_cash(self.players[p].cash)]),
(A::Left, vec![]),
(
A::Center,
vec![N::text(format!("{}", PLAYER_DICE - used.dice))],<|fim▁hole|> (
A::Center,
vec![N::text(format!("{}", PLAYER_OWNER_TOKENS - used.tokens))],
),
(A::Left, vec![]),
(
A::Center,
vec![N::text(format!("{}", POINT_STOPS[self.players[p].points]))],
),
]);
}
N::Table(rows)
}
pub fn render_casino_table(&self) -> N {
let mut casino_names: Row = vec![(A::Right, vec![N::Bold(vec![N::text("Casino")])])];
let mut remaining_cards: Row = vec![(A::Right, vec![N::Bold(vec![N::text("Cards left")])])];
let mut remaining_tiles: Row = vec![(A::Right, vec![N::Bold(vec![N::text("Tiles left")])])];
for casino in CASINOS {
casino_names.push((A::Left, vec![N::text(" ")]));
casino_names.push((A::Center, vec![casino.render()]));
remaining_cards.push((A::Left, vec![]));
remaining_cards.push((
A::Center,
vec![N::text(format!(
"{}",
CASINO_CARDS - casino_card_count(&self.played, *casino)
))],
));
remaining_tiles.push((A::Left, vec![]));
remaining_tiles.push((
A::Center,
vec![N::text(format!(
"{}",
CASINO_TILES - self.board.casino_tile_count(*casino)
))],
));
}
N::Table(vec![casino_names, remaining_cards, remaining_tiles])
}
}
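// Layout note: blocks sit on a character canvas in two columns (A/C/E left,
// B/D/F right, separated by STRIP_FULL_WIDTH) with rows offset by multiples of
// TILE_HEIGHT plus ALLEY_FULL_HEIGHT; block_offset returns the top-left
// (x, y) character coordinate for a block.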
fn block_offset(block: Block) -> (usize, usize) {
(
match block {
Block::A | Block::C | Block::E => 0,
Block::B | Block::D | Block::F => TILE_WIDTH * 3 + STRIP_FULL_WIDTH,
},
match block {
Block::A | Block::B => 0,
Block::C | Block::D => TILE_HEIGHT * 2 + ALLEY_FULL_HEIGHT,
Block::E => TILE_HEIGHT * 6 + ALLEY_FULL_HEIGHT * 2,
Block::F => TILE_HEIGHT * 5 + ALLEY_FULL_HEIGHT * 2,
},
)
}
impl Board {
fn render(&self) -> N {
let mut layers = vec![];
for block in BLOCKS {
let (x, y) = block_offset(*block);
layers.push((x, y, vec![self.render_block(*block)]));
}
N::Canvas(layers)
}
fn render_block(&self, block: Block) -> N {
let mut layers = vec![];
for lot in 1..=block.max_lot() {
let loc = Loc { block, lot };
let x = (lot - 1) % 3;
let y = (lot - 1) / 3;
layers.push((
x * TILE_WIDTH,
y * TILE_HEIGHT,
vec![self.get(&loc).render(&loc)],
));
}
N::Canvas(layers)
}
}
impl BoardTile {
fn render(&self, loc: &Loc) -> N {
let bot_text = format!("{}{:2}", loc.block, loc.lot);
let player_color: Col = match *self {
BoardTile::Owned { player }
| BoardTile::Built {
owner: Some(TileOwner { player, .. }),
..
} => player.into(),
_ => WHITE.into(),
};
let player_color_fg = player_color.inv().mono();
let middle_text = match *self {
BoardTile::Built {
owner: Some(TileOwner { die, .. }),
..
} => vec![N::Bg(
player_color,
vec![N::Fg(
player_color_fg,
vec![N::Bold(vec![N::text(format!(" {} ", die))])],
)],
)],
_ => vec![
N::Bg(
player_color,
vec![N::Fg(
player_color_fg,
vec![N::text(format!("${:2}", TILES[loc].build_cost))],
)],
),
N::text(format!("\n({})", TILES[loc].die)),
],
};
let border_bg = match *self {
BoardTile::Built { casino, .. } => *casino.color(),
_ => UNBUILT_TILE_BG,
};
let inlay_bg = WHITE;
let border_fg = border_bg.inv().mono();
let inlay_fg = inlay_bg.inv().mono();
N::Canvas(vec![
// Tile background
(
0,
0,
vec![N::Bg(
border_bg.into(),
vec![N::text(rect(TILE_WIDTH, TILE_HEIGHT))],
)],
),
// Inlay background
(
INLAY_LEFT,
INLAY_TOP,
vec![N::Bg(
inlay_bg.into(),
vec![N::text(rect(INLAY_WIDTH, INLAY_HEIGHT))],
)],
),
// Middle text
(
INLAY_LEFT,
INLAY_TOP,
vec![N::Align(
A::Center,
INLAY_WIDTH,
vec![N::Fg(inlay_fg.into(), middle_text)],
)],
),
// Bot text
(
0,
TILE_HEIGHT - 1,
vec![N::Align(
A::Center,
TILE_WIDTH,
vec![N::Fg(
border_fg.into(),
vec![N::Bold(vec![N::text(bot_text)])],
)],
)],
),
])
}
}
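// Builds a w-by-h rectangle of space characters (rows joined by '\n'); used
// above as a solid background fill for tiles and inlays.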
fn rect(w: usize, h: usize) -> String {
let line: String = iter::repeat(" ").take(w).collect();
let mut r = line.clone();
for _ in 0..h - 1 {
r.push('\n');
r.push_str(&line);
}
r
}
pub fn render_cash(amount: usize) -> N {
N::Bold(vec![N::Fg(
GREEN.into(),
vec![N::text(format!("${}", amount))],
)])
}
// file: BarLineChart.js
/*!
* Copyright 2010 - 2015 Pentaho Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
define([
"cdf/lib/CCC/def",
"./AbstractBarChart",
"../util"
], function(def, AbstractBarChart, util) {
return AbstractBarChart.extend({
methods: {
_rolesToCccDimensionsMap: {
'measuresLine': 'value' // maps to same dim group as 'measures' role
},
_noRoleInTooltipMeasureRoles: {'measures': true, 'measuresLine': true},
_options: {
plot2: true,
secondAxisIndependentScale: false,
// prevent default of -1 (which means last series) // TODO: is this needed??
secondAxisSeriesIndexes: null
},
_setNullInterpolationMode: function(options, value) {
options.plot2NullInterpolationMode = value;
},
_initAxes: function() {
this.base();
this._measureDiscrimGem || def.assert("Must exist to distinguish measures.");
var measureDiscrimCccDimName = this._measureDiscrimGem.cccDimName,
meaAxis = this.axes.measure,
barGems = meaAxis.gemsByRole[meaAxis.defaultRole],
barGemsById = def.query(barGems) // bar: measures, line: measuresLine
.uniqueIndex(function(gem) { return gem.id; });
// Create the dataPart dimension calculation
this.options.calculations.push({
names: 'dataPart',
calculation: function(datum, atoms) {
var meaGemId = datum.atoms[measureDiscrimCccDimName].value;
// Data part codes
// 0 -> bars
// 1 -> lines
atoms.dataPart = def.hasOwn(barGemsById, meaGemId) ? '0' : '1';
}
});
},
_readUserOptions: function(options, drawSpec) {
this.base(options, drawSpec);
var shape = drawSpec.shape;
if(shape && shape === 'none') {
options.pointDotsVisible = false;
} else {
options.pointDotsVisible = true;
options.extensionPoints.pointDot_shape = shape;
}
},
_configure: function() {
this.base();
this._configureAxisRange(/*isPrimary*/false, 'ortho2');
this._configureAxisTitle('ortho2',"");
this.options.plot2OrthoAxis = 2;
// Plot2 uses same color scale
// options.plot2ColorAxis = 2;
// options.color2AxisTransform = null;
},
_configureLabels: function(options, drawSpec) {
this.base.apply(this, arguments);
// Plot2
var lineLabelsAnchor = drawSpec.lineLabelsOption;
if(lineLabelsAnchor && lineLabelsAnchor !== 'none') {
options.plot2ValuesVisible = true;
options.plot2ValuesAnchor = lineLabelsAnchor;
options.plot2ValuesFont = util.defaultFont(util.readFont(drawSpec, 'label'));
options.extensionPoints.plot2Label_textStyle = drawSpec.labelColor;
}
},
            _configureDisplayUnits: function() {
                this.base();
this._configureAxisDisplayUnits(/*isPrimary*/false, 'ortho2');
}
}
});
});
// file: data-table.spec.ts
import { assert } from 'chai';
import { DataTable } from '../../src/group';
import { Text } from '../../src/question/text';
import { Required } from '../../src/validation';
describe('AngularForms :: Group :: DataTable', () => {
it('should be instantiable', () => {
assert.ok(new DataTable('G-01', 'A simple group', [[], []], []));
});
it('should getQuestionByName method', () => {
const dataTable: DataTable = new DataTable('G-01', 'A simple group', [[new Text('Q-01', 'A simple question')]]);
assert.isTrue(dataTable.getQuestionByName('Q-01') instanceof Text);
});
  it('should isRequired method', () => {
    const dataTable1: DataTable = new DataTable('G-01', 'A simple group', [], [new Required('Required')]);
    const dataTable2: DataTable = new DataTable('G-01', 'A simple group', [], []);
assert.isTrue(dataTable1.isRequired());
assert.isFalse(dataTable2.isRequired());
});
});
// file: crel.js
/* Copyright (C) 2012 Kory Nunn
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTE:
This code is formatted for run-speed and to assist compilers.
This might make it harder to read at times, but the code's intention should be transparent. */
// IIFE our function
((exporter) => {
// Define our function and its properties
// These strings are used multiple times, so this makes things smaller once compiled
const func = 'function',
isNodeString = 'isNode',
d = document,
// Helper functions used throughout the script
isType = (object, type) => typeof object === type,
// Recursively appends children to given element. As a text node if not already an element
appendChild = (element, child) => {
if (child !== null) {
if (Array.isArray(child)) { // Support (deeply) nested child elements
child.map(subChild => appendChild(element, subChild));
} else {
if (!crel[isNodeString](child)) {
child = d.createTextNode(child);
}
element.appendChild(child);
}
}
};
//
function crel (element, settings) {
// Define all used variables / shortcuts here, to make things smaller once compiled
let args = arguments, // Note: assigned to a variable to assist compilers.
index = 1,
            key,
            attribute;
        // If first argument is an element, use it as is, otherwise treat it as a tagname
element = crel.isElement(element) ? element : d.createElement(element);
// Check if second argument is a settings object
if (isType(settings, 'object') && !crel[isNodeString](settings) && !Array.isArray(settings)) {
// Don't treat settings as a child
index++;
// Go through settings / attributes object, if it exists
for (key in settings) {
// Store the attribute into a variable, before we potentially modify the key
attribute = settings[key];
// Get mapped key / function, if one exists
key = crel.attrMap[key] || key;
// Note: We want to prioritise mapping over properties
if (isType(key, func)) {
key(element, attribute);
} else if (isType(attribute, func)) { // ex. onClick property
element[key] = attribute;
} else {
// Set the element attribute
element.setAttribute(key, attribute);
}
}
}
// Loop through all arguments, if any, and append them to our element if they're not `null`
for (; index < args.length; index++) {
appendChild(element, args[index]);
}
return element;
}
// Used for mapping attribute keys to supported versions in bad browsers, or to custom functionality
crel.attrMap = {};
crel.isElement = object => object instanceof Element;
crel[isNodeString] = node => node instanceof Node;
// Expose proxy interface
crel.proxy = new Proxy(crel, {
get: (target, key) => {
!(key in crel) && (crel[key] = crel.bind(null, key));
return crel[key];
}
});
// Export crel
exporter(crel, func);
})((product, func) => {
if (typeof exports === 'object') {
// Export for Browserify / CommonJS format
module.exports = product;
} else if (typeof define === func && define.amd) {
// Export for RequireJS / AMD format
define(() => product);
} else {
// Export as a 'global' function
this.crel = product;
}
});
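// Usage sketch (illustrative, based on the API above): nested calls build a
// detached DOM tree, e.g.
//   crel('div', { 'class': 'panel' },
//       crel('h1', 'Title'),
//       crel('p', 'Body text'));
// returns a <div> with the attribute set and both children appended as
// elements or text nodes.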
// file: read_test.go
package msgp
import (
"bytes"
"fmt"
"io"
"math"
"math/rand"
"reflect"
"testing"
"time"
)
func TestSanity(t *testing.T) {
if !isfixint(0) {
t.Fatal("WUT.")
}
}
func TestReadIntf(t *testing.T) {
// NOTE: if you include cases
// with, say, int32s, the test
// will fail, b/c integers are
// always read out as int64, and
// unsigned integers as uint64
var testCases = []interface{}{
float64(128.032),
float32(9082.092),
int64(-40),
uint64(9082981),
time.Now(),
"hello!",
[]byte("hello!"),
map[string]interface{}{
"thing-1": "thing-1-value",
"thing-2": int64(800),
"thing-3": []byte("some inner bytes..."),
"thing-4": false,
},
}
var buf bytes.Buffer
var v interface{}
dec := NewReader(&buf)
enc := NewWriter(&buf)
for i, ts := range testCases {
buf.Reset()
err := enc.WriteIntf(ts)
if err != nil {
t.Errorf("Test case %d: %s", i, err)
continue
}
err = enc.Flush()
if err != nil {
t.Fatal(err)
}
v, err = dec.ReadIntf()
if err != nil {
t.Errorf("Test case: %d: %s", i, err)
}
/* for time, use time.Equal instead of reflect.DeepEqual */
if tm, ok := v.(time.Time); ok {
			if !tm.Equal(ts.(time.Time)) {
t.Errorf("%v != %v", ts, v)
}
} else if !reflect.DeepEqual(v, ts) {
t.Errorf("%v in; %v out", ts, v)
}
}
}
func TestReadMapHeader(t *testing.T) {
tests := []struct {
Sz uint32
}{
{0},
{1},
{tuint16},
{tuint32},
}
var buf bytes.Buffer
var sz uint32
var err error
wr := NewWriter(&buf)
rd := NewReader(&buf)
for i, test := range tests {
buf.Reset()
err = wr.WriteMapHeader(test.Sz)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
sz, err = rd.ReadMapHeader()
if err != nil {
t.Errorf("Test case %d: got error %s", i, err)
}
if sz != test.Sz {
t.Errorf("Test case %d: wrote size %d; got size %d", i, test.Sz, sz)
}
}
}
func BenchmarkReadMapHeader(b *testing.B) {
sizes := []uint32{0, 1, tuint16, tuint32}
data := make([]byte, 0, len(sizes)*5)
for _, d := range sizes {
data = AppendMapHeader(data, d)
}
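	// NewEndlessReader is a test helper in this package that replays the byte
	// slice indefinitely, so the reader below never hits EOF inside the
	// benchmark loop.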
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data) / len(sizes)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
rd.ReadMapHeader()
}
}
func TestReadArrayHeader(t *testing.T) {
tests := []struct {
Sz uint32
}{
{0},
{1},
{tuint16},
{tuint32},
}
var buf bytes.Buffer
var sz uint32
var err error
wr := NewWriter(&buf)
rd := NewReader(&buf)
for i, test := range tests {
buf.Reset()
err = wr.WriteArrayHeader(test.Sz)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
sz, err = rd.ReadArrayHeader()
if err != nil {
t.Errorf("Test case %d: got error %s", i, err)
}
if sz != test.Sz {
t.Errorf("Test case %d: wrote size %d; got size %d", i, test.Sz, sz)
}
}
}
func BenchmarkReadArrayHeader(b *testing.B) {
sizes := []uint32{0, 1, tuint16, tuint32}
data := make([]byte, 0, len(sizes)*5)
for _, d := range sizes {
data = AppendArrayHeader(data, d)
}
rd := NewReader(NewEndlessReader(data, b))
b.ReportAllocs()
b.SetBytes(int64(len(data) / len(sizes)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
rd.ReadArrayHeader()
}
}
func TestReadNil(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
wr.WriteNil()
wr.Flush()
err := rd.ReadNil()
if err != nil {
t.Fatal(err)
}
}
func BenchmarkReadNil(b *testing.B) {
data := AppendNil(nil)
rd := NewReader(NewEndlessReader(data, b))
b.ReportAllocs()
b.SetBytes(1)
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := rd.ReadNil()
if err != nil {
b.Fatal(err)
}
}
}
func TestReadFloat64(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
for i := 0; i < 100; i++ {
buf.Reset()
flt := (rand.Float64() - 0.5) * math.MaxFloat64
err := wr.WriteFloat64(flt)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadFloat64()
if err != nil {
t.Errorf("Error reading %f: %s", flt, err)
continue
}
if out != flt {
t.Errorf("Put in %f but got out %f", flt, out)
}
}
}
func BenchmarkReadFloat64(b *testing.B) {
fs := []float64{rand.Float64(), rand.Float64(), rand.Float64(), rand.Float64()}
data := make([]byte, 0, 9*len(fs))
for _, f := range fs {
data = AppendFloat64(data, f)
}
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(9)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadFloat64()
if err != nil {
b.Fatal(err)
}
}
}
func TestReadFloat32(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
for i := 0; i < 10000; i++ {
buf.Reset()
flt := (rand.Float32() - 0.5) * math.MaxFloat32
err := wr.WriteFloat32(flt)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadFloat32()
if err != nil {
t.Errorf("Error reading %f: %s", flt, err)
continue
}
if out != flt {
t.Errorf("Put in %f but got out %f", flt, out)
}
}
}
func BenchmarkReadFloat32(b *testing.B) {
fs := []float32{rand.Float32(), rand.Float32(), rand.Float32(), rand.Float32()}
data := make([]byte, 0, 5*len(fs))
for _, f := range fs {
data = AppendFloat32(data, f)
}
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(5)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadFloat32()
if err != nil {
b.Fatal(err)
}
}
}
func TestReadInt64(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
ints := []int64{-100000, -5000, -5, 0, 8, 240, int64(tuint16), int64(tuint32), int64(tuint64)}
uints := []uint64{0, 8, 240, uint64(tuint16), uint64(tuint32), uint64(tuint64)}
all := make([]interface{}, 0, len(ints)+len(uints))
for _, v := range ints {
all = append(all, v)
}
for _, v := range uints {
all = append(all, v)
}
for i, num := range all {
buf.Reset()
var err error
var in int64
switch num := num.(type) {
case int64:
err = wr.WriteInt64(num)
in = num
case uint64:
err = wr.WriteUint64(num)
in = int64(num)
default:
panic(num)
}
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadInt64()
if err != nil {
t.Fatal(err)
}
if out != in {
			t.Errorf("Test case %d: put %d in and got %d out", i, in, out)
}
}
}
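// TestReadIntOverflows exercises reads that should fail: values too wide
// for the requested signed size, and negative values read as unsigned.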
func TestReadIntOverflows(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
i8, i16, i32, i64, u8, u16, u32, u64 := 1, 2, 3, 4, 5, 6, 7, 8
overflowErr := func(err error, failBits int) bool {
bits := 0
switch err := err.(type) {
case IntOverflow:
bits = err.FailedBitsize
case UintOverflow:
bits = err.FailedBitsize
}
if bits == failBits {
return true
}
return false
}
belowZeroErr := func(err error, failBits int) bool {
switch err.(type) {
case UintBelowZero:
return true
}
return false
}
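	// Each case writes v, reads it back at rdBits width, and expects
	// errCheck to report an error for failBits.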
vs := []struct {
v interface{}
rdBits int
failBits int
errCheck func(err error, failBits int) bool
}{
{uint64(math.MaxInt64), i32, 32, overflowErr},
{uint64(math.MaxInt64), i16, 16, overflowErr},
{uint64(math.MaxInt64), i8, 8, overflowErr},
{uint64(math.MaxUint64), i64, 64, overflowErr},
{uint64(math.MaxUint64), i32, 64, overflowErr},
{uint64(math.MaxUint64), i16, 64, overflowErr},
{uint64(math.MaxUint64), i8, 64, overflowErr},
{uint64(math.MaxUint32), i32, 32, overflowErr},
{uint64(math.MaxUint32), i16, 16, overflowErr},
{uint64(math.MaxUint32), i8, 8, overflowErr},
{int64(math.MinInt64), u64, 64, belowZeroErr},
{int64(math.MinInt64), u32, 64, belowZeroErr},
{int64(math.MinInt64), u16, 64, belowZeroErr},
{int64(math.MinInt64), u8, 64, belowZeroErr},
{int64(math.MinInt32), u64, 64, belowZeroErr},
{int64(math.MinInt32), u32, 32, belowZeroErr},
{int64(math.MinInt32), u16, 16, belowZeroErr},
{int64(math.MinInt32), u8, 8, belowZeroErr},
{int64(math.MinInt16), u64, 64, belowZeroErr},
{int64(math.MinInt16), u32, 32, belowZeroErr},
{int64(math.MinInt16), u16, 16, belowZeroErr},
{int64(math.MinInt16), u8, 8, belowZeroErr},
{int64(math.MinInt8), u64, 64, belowZeroErr},
{int64(math.MinInt8), u32, 32, belowZeroErr},
{int64(math.MinInt8), u16, 16, belowZeroErr},
{int64(math.MinInt8), u8, 8, belowZeroErr},
{-1, u64, 64, belowZeroErr},
{-1, u32, 32, belowZeroErr},
{-1, u16, 16, belowZeroErr},
{-1, u8, 8, belowZeroErr},
}
for i, v := range vs {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
switch num := v.v.(type) {
case int:
wr.WriteInt64(int64(num))
case int64:
wr.WriteInt64(num)
case uint64:
wr.WriteUint64(num)
default:
panic(num)
}
wr.Flush()
var err error
switch v.rdBits {
case i64:
_, err = rd.ReadInt64()
case i32:
_, err = rd.ReadInt32()
case i16:
_, err = rd.ReadInt16()
case i8:
_, err = rd.ReadInt8()
case u64:
_, err = rd.ReadUint64()
case u32:
_, err = rd.ReadUint32()
case u16:
_, err = rd.ReadUint16()
case u8:
_, err = rd.ReadUint8()
}
if !v.errCheck(err, v.failBits) {
t.Fatal(err)
}
})
}
}
func BenchmarkReadInt64(b *testing.B) {
is := []int64{0, 1, 65000, rand.Int63()}
data := make([]byte, 0, 9*len(is))
for _, n := range is {
data = AppendInt64(data, n)
}
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data) / len(is)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadInt64()
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkReadUintWithInt64(b *testing.B) {
us := []uint64{0, 1, 10000, uint64(rand.Uint32() * 4)}
data := make([]byte, 0, 9*len(us))
for _, n := range us {
data = AppendUint64(data, n)
}
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data) / len(us)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadInt64()
if err != nil {
b.Fatal(err)
}
}
}
func TestReadUint64(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
ints := []uint64{0, 8, 240, uint64(tuint16), uint64(tuint32), uint64(tuint64)}
for i, num := range ints {
buf.Reset()
err := wr.WriteUint64(num)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
		out, err := rd.ReadUint64()
		if err != nil {
			t.Fatal(err)
		}
		if out != num {
t.Errorf("Test case %d: put %d in and got %d out", i, num, out)
}
}
}
func BenchmarkReadUint64(b *testing.B) {
us := []uint64{0, 1, 10000, uint64(rand.Uint32() * 4)}
data := make([]byte, 0, 9*len(us))
for _, n := range us {
data = AppendUint64(data, n)
}
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data) / len(us)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadUint64()
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkReadIntWithUint64(b *testing.B) {
is := []int64{0, 1, 65000, rand.Int63()}
data := make([]byte, 0, 9*len(is))
for _, n := range is {
data = AppendInt64(data, n)
}
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data) / len(is)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadUint64()
if err != nil {
b.Fatal(err)
}
}
}
func TestReadBytes(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
sizes := []int{0, 1, 225, int(tuint32)}
var scratch []byte
for i, size := range sizes {
buf.Reset()
bts := RandBytes(size)
err := wr.WriteBytes(bts)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadBytes(scratch)
if err != nil {
t.Errorf("test case %d: %s", i, err)
continue
}
if !bytes.Equal(bts, out) {
t.Errorf("test case %d: Bytes not equal.", i)
}
}
}
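// benchBytes measures ReadBytes throughput for a payload of the given size.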
func benchBytes(size uint32, b *testing.B) {
data := make([]byte, 0, size+5)
data = AppendBytes(data, RandBytes(int(size)))
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data)))
b.ReportAllocs()
b.ResetTimer()
var scratch []byte
var err error
for i := 0; i < b.N; i++ {
scratch, err = rd.ReadBytes(scratch)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkRead16Bytes(b *testing.B) {
benchBytes(16, b)
}
func BenchmarkRead256Bytes(b *testing.B) {
benchBytes(256, b)
}
// This particular case creates
// an object larger than the default
// read buffer size, so it's a decent
// indicator of worst-case performance.
func BenchmarkRead2048Bytes(b *testing.B) {
benchBytes(2048, b)
}
func TestReadString(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
sizes := []int{0, 1, 225, int(math.MaxUint16 + 5)}
for i, size := range sizes {
buf.Reset()
in := string(RandBytes(size))
err := wr.WriteString(in)
if err != nil {
t.Fatal(err)
}
err = wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadString()
if err != nil {
t.Errorf("test case %d: %s", i, err)
}
if out != in {
t.Errorf("test case %d: strings not equal.", i)
t.Errorf("string (len = %d) in; string (len = %d) out", size, len(out))
}
}
}
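// benchString measures ReadString throughput for a payload of the given size.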
func benchString(size uint32, b *testing.B) {
str := string(RandBytes(int(size)))
data := make([]byte, 0, len(str)+5)
data = AppendString(data, str)
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadString()
if err != nil {
b.Fatal(err)
}
}
}
func benchStringAsBytes(size uint32, b *testing.B) {
str := string(RandBytes(int(size)))
data := make([]byte, 0, len(str)+5)
data = AppendString(data, str)
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data)))
b.ReportAllocs()
b.ResetTimer()
var scratch []byte
var err error
for i := 0; i < b.N; i++ {
scratch, err = rd.ReadStringAsBytes(scratch)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkRead16StringAsBytes(b *testing.B) {
benchStringAsBytes(16, b)
}
func BenchmarkRead256StringAsBytes(b *testing.B) {
benchStringAsBytes(256, b)
}
func BenchmarkRead16String(b *testing.B) {
benchString(16, b)
}
func BenchmarkRead256String(b *testing.B) {
benchString(256, b)
}
func TestReadComplex64(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
for i := 0; i < 100; i++ {
buf.Reset()
f := complex(rand.Float32()*math.MaxFloat32, rand.Float32()*math.MaxFloat32)
wr.WriteComplex64(f)
err := wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadComplex64()
if err != nil {
t.Error(err)
continue
}
if out != f {
t.Errorf("Wrote %f; read %f", f, out)
}
}
}
func BenchmarkReadComplex64(b *testing.B) {
f := complex(rand.Float32(), rand.Float32())
data := AppendComplex64(nil, f)
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadComplex64()
if err != nil {
b.Fatal(err)
}
}
}
func TestReadComplex128(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
for i := 0; i < 10; i++ {
buf.Reset()
f := complex(rand.Float64()*math.MaxFloat64, rand.Float64()*math.MaxFloat64)
wr.WriteComplex128(f)
err := wr.Flush()
if err != nil {
t.Fatal(err)
}
out, err := rd.ReadComplex128()
if err != nil {
t.Error(err)
continue
}
if out != f {
t.Errorf("Wrote %f; read %f", f, out)
}
}
}
func BenchmarkReadComplex128(b *testing.B) {
f := complex(rand.Float64(), rand.Float64())
data := AppendComplex128(nil, f)
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadComplex128()
if err != nil {
b.Fatal(err)
}
}
}
func TestTime(t *testing.T) {
var buf bytes.Buffer
now := time.Now()
en := NewWriter(&buf)
dc := NewReader(&buf)
err := en.WriteTime(now)
if err != nil {
t.Fatal(err)
}
err = en.Flush()
if err != nil {
t.Fatal(err)
}
out, err := dc.ReadTime()
if err != nil {
t.Fatal(err)
}
// check for equivalence
if !now.Equal(out) {
t.Fatalf("%s in; %s out", now, out)
}
}
func BenchmarkReadTime(b *testing.B) {
t := time.Now()
data := AppendTime(nil, t)
rd := NewReader(NewEndlessReader(data, b))
b.SetBytes(int64(len(data)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rd.ReadTime()
if err != nil {
b.Fatal(err)
}
}
}
func TestSkip(t *testing.T) {
var buf bytes.Buffer
wr := NewWriter(&buf)
rd := NewReader(&buf)
wr.WriteMapHeader(4)
wr.WriteString("key_1")
wr.WriteBytes([]byte("value_1"))
wr.WriteString("key_2")
wr.WriteFloat64(2.0)
wr.WriteString("key_3")
wr.WriteComplex128(3.0i)
wr.WriteString("key_4")
wr.WriteInt64(49080432189)
wr.Flush()
// this should skip the whole map
err := rd.Skip()
if err != nil {
t.Fatal(err)
}
tp, err := rd.NextType()
if err != io.EOF {
t.Errorf("expected %q; got %q", io.EOF, err)
t.Errorf("returned type %q", tp)
}
}
func BenchmarkSkip(b *testing.B) {
var buf bytes.Buffer
en := NewWriter(&buf)
en.WriteMapHeader(6)
en.WriteString("thing_one")
en.WriteString("value_one")
en.WriteString("thing_two")
en.WriteFloat64(3.14159)
en.WriteString("some_bytes")
en.WriteBytes([]byte("nkl4321rqw908vxzpojnlk2314rqew098-s09123rdscasd"))
en.WriteString("the_time")
en.WriteTime(time.Now())
en.WriteString("what?")
en.WriteBool(true)
en.WriteString("ext")
en.WriteExtension(&RawExtension{Type: 55, Data: []byte("raw data!!!")})
en.Flush()
bts := buf.Bytes()
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
rd := NewReader(NewEndlessReader(bts, b))
for i := 0; i < b.N; i++ {
err := rd.Skip()
if err != nil {
b.Fatal(err)
}
}
}
<|fim▁hole|> en := NewWriter(&buf)
en.WriteMapHeader(6)
en.WriteString("thing_one")
en.WriteString("value_one")
en.WriteString("thing_two")
en.WriteFloat64(3.14159)
en.WriteString("some_bytes")
en.WriteBytes([]byte("nkl4321rqw908vxzpojnlk2314rqew098-s09123rdscasd"))
en.WriteString("the_time")
en.WriteTime(time.Now())
en.WriteString("what?")
en.WriteBool(true)
en.WriteString("ext")
en.WriteExtension(&RawExtension{Type: 55, Data: []byte("raw data!!!")})
en.Flush()
// Read from a copy of the original buf.
de := NewReader(bytes.NewReader(buf.Bytes()))
w := new(bytes.Buffer)
n, err := de.CopyNext(w)
if err != nil {
t.Fatal(err)
}
if n != int64(buf.Len()) {
t.Fatalf("CopyNext returned the wrong value (%d != %d)",
n, buf.Len())
}
if !bytes.Equal(buf.Bytes(), w.Bytes()) {
t.Fatalf("not equal! %v, %v", buf.Bytes(), w.Bytes())
}
}<|fim▁end|> | func TestCopyNext(t *testing.T) {
var buf bytes.Buffer |
<|file_name|>dns_test.go<|end_file_name|><|fim▁begin|>package agent
import (
"fmt"
"net"
"os"
"strings"
"testing"
"time"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testutil"
"github.com/miekg/dns"
)
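// makeDNSServer starts a test agent and DNS server using default configs.
// Callers are responsible for removing the returned data dir and shutting
// down the agent.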
func makeDNSServer(t *testing.T) (string, *DNSServer) {
return makeDNSServerConfig(t, nil, nil)
}
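// makeDNSServerConfig is like makeDNSServer, but lets callers tweak the
// agent and DNS configs before the server is created.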
func makeDNSServerConfig(
t *testing.T,
agentFn func(c *Config),
dnsFn func(*DNSConfig)) (string, *DNSServer) {
// Create the configs and apply the functions
agentConf := nextConfig()
if agentFn != nil {
agentFn(agentConf)
}
dnsConf := &DNSConfig{}
if dnsFn != nil {
dnsFn(dnsConf)
}
// Add in the recursor if any
if r := agentConf.DNSRecursor; r != "" {
agentConf.DNSRecursors = append(agentConf.DNSRecursors, r)
}
// Start the server
addr, _ := agentConf.ClientListener(agentConf.Addresses.DNS, agentConf.Ports.DNS)
dir, agent := makeAgent(t, agentConf)
server, err := NewDNSServer(agent, dnsConf, agent.logOutput,
agentConf.Domain, addr.String(), agentConf.DNSRecursors)
if err != nil {
t.Fatalf("err: %v", err)
}
return dir, server
}
// makeRecursor creates a generic DNS server which always returns
// the provided reply. This is useful for mocking a DNS recursor with
// an expected result.
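// Typical usage (see TestDNS_Recurse below):
//
//	recursor := makeRecursor(t, []dns.RR{dnsA("apple.com", "1.2.3.4")})
//	defer recursor.Shutdown()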
func makeRecursor(t *testing.T, answer []dns.RR) *dns.Server {
dnsConf := nextConfig()
dnsAddr := fmt.Sprintf("%s:%d", dnsConf.Addresses.DNS, dnsConf.Ports.DNS)
mux := dns.NewServeMux()
mux.HandleFunc(".", func(resp dns.ResponseWriter, msg *dns.Msg) {
ans := &dns.Msg{Answer: answer[:]}
ans.SetReply(msg)
if err := resp.WriteMsg(ans); err != nil {
t.Fatalf("err: %s", err)
}
})
server := &dns.Server{
Addr: dnsAddr,
Net: "udp",
Handler: mux,
}
go server.ListenAndServe()
return server
}
// dnsCNAME returns a DNS CNAME record struct
func dnsCNAME(src, dest string) *dns.CNAME {
return &dns.CNAME{
Hdr: dns.RR_Header{
Name: dns.Fqdn(src),
Rrtype: dns.TypeCNAME,
Class: dns.ClassINET,
},
Target: dns.Fqdn(dest),
}
}
// dnsA returns a DNS A record struct
func dnsA(src, dest string) *dns.A {
return &dns.A{
Hdr: dns.RR_Header{
Name: dns.Fqdn(src),
Rrtype: dns.TypeA,
Class: dns.ClassINET,
},
A: net.ParseIP(dest),
}
}
func TestRecursorAddr(t *testing.T) {
addr, err := recursorAddr("8.8.8.8")
if err != nil {
t.Fatalf("err: %v", err)
}
if addr != "8.8.8.8:53" {
t.Fatalf("bad: %v", addr)
}
}
func TestDNS_NodeLookup(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("foo.node.consul.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
aRec, ok := in.Answer[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if aRec.A.String() != "127.0.0.1" {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if aRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
// Re-do the query, but specify the DC
m = new(dns.Msg)
m.SetQuestion("foo.node.dc1.consul.", dns.TypeANY)
c = new(dns.Client)
in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
aRec, ok = in.Answer[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if aRec.A.String() != "127.0.0.1" {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if aRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
// lookup a non-existing node, we should receive a SOA
m = new(dns.Msg)
m.SetQuestion("nofoo.node.dc1.consul.", dns.TypeANY)
c = new(dns.Client)
in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Ns) != 1 {
t.Fatalf("Bad: %#v %#v", in, len(in.Answer))
}
soaRec, ok := in.Ns[0].(*dns.SOA)
if !ok {
t.Fatalf("Bad: %#v", in.Ns[0])
}
if soaRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Ns[0])
}
}
func TestDNS_CaseInsensitiveNodeLookup(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "Foo",
Address: "127.0.0.1",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("fOO.node.dc1.consul.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("empty lookup: %#v", in)
}
}
func TestDNS_NodeLookup_PeriodName(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node with period in name
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo.bar",
Address: "127.0.0.1",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("foo.bar.node.consul.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
aRec, ok := in.Answer[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if aRec.A.String() != "127.0.0.1" {
t.Fatalf("Bad: %#v", in.Answer[0])
}
}
func TestDNS_NodeLookup_AAAA(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "::4242:4242",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("bar.node.consul.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
aRec, ok := in.Answer[0].(*dns.AAAA)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if aRec.AAAA.String() != "::4242:4242" {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if aRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
}
func TestDNS_NodeLookup_CNAME(t *testing.T) {
recursor := makeRecursor(t, []dns.RR{
dnsCNAME("www.google.com", "google.com"),
dnsA("google.com", "1.2.3.4"),
})
defer recursor.Shutdown()
dir, srv := makeDNSServerConfig(t, func(c *Config) {
c.DNSRecursor = recursor.Addr
}, nil)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "google",
Address: "www.google.com",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("google.node.consul.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
// Should have the service record, CNAME record + A record
if len(in.Answer) != 3 {
t.Fatalf("Bad: %#v", in)
}
cnRec, ok := in.Answer[0].(*dns.CNAME)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if cnRec.Target != "www.google.com." {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if cnRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
}
func TestDNS_ReverseLookup(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo2",
Address: "127.0.0.2",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("2.0.0.127.in-addr.arpa.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
ptrRec, ok := in.Answer[0].(*dns.PTR)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if ptrRec.Ptr != "foo2.node.dc1.consul." {
t.Fatalf("Bad: %#v", ptrRec)
}
}
func TestDNS_ReverseLookup_CustomDomain(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
srv.domain = dns.Fqdn("custom")
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo2",
Address: "127.0.0.2",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("2.0.0.127.in-addr.arpa.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
ptrRec, ok := in.Answer[0].(*dns.PTR)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if ptrRec.Ptr != "foo2.node.dc1.custom." {
t.Fatalf("Bad: %#v", ptrRec)
}
}
func TestDNS_ReverseLookup_IPV6(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "::4242:4242",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("2.4.2.4.2.4.2.4.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
ptrRec, ok := in.Answer[0].(*dns.PTR)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if ptrRec.Ptr != "bar.node.dc1.consul." {
t.Fatalf("Bad: %#v", ptrRec)
}
}
func TestDNS_ServiceLookup(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("db.service.consul.", dns.TypeSRV)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
srvRec, ok := in.Answer[0].(*dns.SRV)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if srvRec.Port != 12345 {
t.Fatalf("Bad: %#v", srvRec)
}
if srvRec.Target != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", srvRec)
}
if srvRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
aRec, ok := in.Extra[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.Hdr.Name != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.A.String() != "127.0.0.1" {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Extra[0])
}
// lookup a non-existing service, we should receive a SOA
m = new(dns.Msg)
m.SetQuestion("nodb.service.consul.", dns.TypeSRV)
c = new(dns.Client)
addr, _ = srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Ns) != 1 {
t.Fatalf("Bad: %#v", in)
}
soaRec, ok := in.Ns[0].(*dns.SOA)
if !ok {
t.Fatalf("Bad: %#v", in.Ns[0])
}
if soaRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Ns[0])
}
}
func TestDNS_ServiceLookup_ServiceAddress(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Address: "127.0.0.2",
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("db.service.consul.", dns.TypeSRV)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
srvRec, ok := in.Answer[0].(*dns.SRV)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if srvRec.Port != 12345 {
t.Fatalf("Bad: %#v", srvRec)
}
if srvRec.Target != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", srvRec)
}
if srvRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
aRec, ok := in.Extra[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.Hdr.Name != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.A.String() != "127.0.0.2" {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Extra[0])
}
}
func TestDNS_CaseInsensitiveServiceLookup(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "Db",
Tags: []string{"Master"},
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("mASTER.dB.service.consul.", dns.TypeSRV)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("empty lookup: %#v", in)
}
}
func TestDNS_ServiceLookup_TagPeriod(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"v1.master"},
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("v1.master.db.service.consul.", dns.TypeSRV)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
srvRec, ok := in.Answer[0].(*dns.SRV)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if srvRec.Port != 12345 {
t.Fatalf("Bad: %#v", srvRec)
}
if srvRec.Target != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", srvRec)
}
aRec, ok := in.Extra[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.Hdr.Name != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.A.String() != "127.0.0.1" {
t.Fatalf("Bad: %#v", in.Extra[0])
}
}
func TestDNS_ServiceLookup_Dedup(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
args = &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "db2",
Service: "db",
Tags: []string{"slave"},
Port: 12345,
},
}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
args = &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "db3",
Service: "db",
Tags: []string{"slave"},
Port: 12346,
},
}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("db.service.consul.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
aRec, ok := in.Answer[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if aRec.A.String() != "127.0.0.1" {
t.Fatalf("Bad: %#v", in.Answer[0])
}
}
func TestDNS_ServiceLookup_Dedup_SRV(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
args = &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "db2",
Service: "db",
Tags: []string{"slave"},
Port: 12345,
},
}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
args = &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "db3",
Service: "db",
Tags: []string{"slave"},
Port: 12346,
},
}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("db.service.consul.", dns.TypeSRV)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 2 {
t.Fatalf("Bad: %#v", in)
}
srvRec, ok := in.Answer[0].(*dns.SRV)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if srvRec.Port != 12345 && srvRec.Port != 12346 {
t.Fatalf("Bad: %#v", srvRec)
}
if srvRec.Target != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", srvRec)
}
srvRec, ok = in.Answer[1].(*dns.SRV)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[1])
}
if srvRec.Port != 12346 && srvRec.Port != 12345 {
t.Fatalf("Bad: %#v", srvRec)
}
if srvRec.Port == in.Answer[0].(*dns.SRV).Port {
t.Fatalf("should be a different port")
}
if srvRec.Target != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", srvRec)
}
aRec, ok := in.Extra[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.Hdr.Name != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.A.String() != "127.0.0.1" {
t.Fatalf("Bad: %#v", in.Extra[0])
}
}
func TestDNS_Recurse(t *testing.T) {
recursor := makeRecursor(t, []dns.RR{dnsA("apple.com", "1.2.3.4")})
defer recursor.Shutdown()
dir, srv := makeDNSServerConfig(t, func(c *Config) {
c.DNSRecursor = recursor.Addr
}, nil)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
m := new(dns.Msg)
m.SetQuestion("apple.com.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) == 0 {
t.Fatalf("Bad: %#v", in)
}
if in.Rcode != dns.RcodeSuccess {
t.Fatalf("Bad: %#v", in)
}
}
func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register nodes
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
Check: &structs.HealthCheck{
CheckID: "serf",
Name: "serf",
Status: structs.HealthCritical,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
args2 := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
Check: &structs.HealthCheck{
CheckID: "serf",
Name: "serf",
Status: structs.HealthCritical,
},
}
if err := srv.agent.RPC("Catalog.Register", args2, &out); err != nil {
t.Fatalf("err: %v", err)
}
args3 := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
Check: &structs.HealthCheck{
CheckID: "db",
Name: "db",
ServiceID: "db",
Status: structs.HealthCritical,
},
}
if err := srv.agent.RPC("Catalog.Register", args3, &out); err != nil {
t.Fatalf("err: %v", err)
}
args4 := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "baz",
Address: "127.0.0.3",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
}
if err := srv.agent.RPC("Catalog.Register", args4, &out); err != nil {
t.Fatalf("err: %v", err)
}
args5 := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "quux",
Address: "127.0.0.4",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
Check: &structs.HealthCheck{
CheckID: "db",
Name: "db",
ServiceID: "db",
Status: structs.HealthWarning,
},
}
if err := srv.agent.RPC("Catalog.Register", args5, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("db.service.consul.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
// Only 4 and 5 are not failing, so we should get 2 answers
if len(in.Answer) != 2 {
t.Fatalf("Bad: %#v", in)
}
ips := make(map[string]bool)
for _, resp := range in.Answer {
aRec := resp.(*dns.A)
ips[aRec.A.String()] = true
}
if !ips["127.0.0.3"] {
t.Fatalf("Bad: %#v should contain 127.0.0.3 (state healthy)", in)
}
if !ips["127.0.0.4"] {
t.Fatalf("Bad: %#v should contain 127.0.0.4 (state warning)", in)
}
}
func TestDNS_ServiceLookup_OnlyFailing(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register nodes
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
Check: &structs.HealthCheck{
CheckID: "serf",
Name: "serf",
Status: structs.HealthCritical,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
args2 := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
Check: &structs.HealthCheck{
CheckID: "serf",
Name: "serf",
Status: structs.HealthCritical,
},
}
if err := srv.agent.RPC("Catalog.Register", args2, &out); err != nil {
t.Fatalf("err: %v", err)
}
args3 := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
Check: &structs.HealthCheck{
CheckID: "db",
Name: "db",
ServiceID: "db",
Status: structs.HealthCritical,
},
}
if err := srv.agent.RPC("Catalog.Register", args3, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("db.service.consul.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
// All 3 are failing, so we should get 0 answers and an NXDOMAIN response
if len(in.Answer) != 0 {
t.Fatalf("Bad: %#v", in)
}
if in.Rcode != dns.RcodeNameError {
t.Fatalf("Bad: %#v", in)
}
}
func TestDNS_ServiceLookup_OnlyPassing(t *testing.T) {
dir, srv := makeDNSServerConfig(t, nil, func(c *DNSConfig) {
c.OnlyPassing = true
})
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register nodes
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
Check: &structs.HealthCheck{
CheckID: "db",
Name: "db",
ServiceID: "db",
Status: structs.HealthPassing,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
args2 := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
Check: &structs.HealthCheck{
CheckID: "db",
Name: "db",
ServiceID: "db",
Status: structs.HealthWarning,
},
}
if err := srv.agent.RPC("Catalog.Register", args2, &out); err != nil {
t.Fatalf("err: %v", err)
}
args3 := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "baz",
Address: "127.0.0.3",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
Check: &structs.HealthCheck{
CheckID: "db",
Name: "db",
ServiceID: "db",
Status: structs.HealthCritical,
},
}
if err := srv.agent.RPC("Catalog.Register", args3, &out); err != nil {
t.Fatalf("err: %v", err)
}
args4 := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "quux",
Address: "127.0.0.4",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
Check: &structs.HealthCheck{
CheckID: "db",
Name: "db",
ServiceID: "db",
Status: structs.HealthUnknown,
},
}
if err := srv.agent.RPC("Catalog.Register", args4, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("db.service.consul.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
// Only 1 is passing, so we should only get 1 answer
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
resp := in.Answer[0]
aRec := resp.(*dns.A)
if aRec.A.String() != "127.0.0.1" {
t.Fatalf("Bad: %#v", in.Answer[0])
}
}
func TestDNS_ServiceLookup_Randomize(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register nodes
for i := 0; i < 3*maxServiceResponses; i++ {
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: fmt.Sprintf("foo%d", i),
Address: fmt.Sprintf("127.0.0.%d", i+1),
Service: &structs.NodeService{
Service: "web",
Port: 8000,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
}
// Ensure the response is randomized each time.
uniques := map[string]struct{}{}
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
for i := 0; i < 10; i++ {
m := new(dns.Msg)
m.SetQuestion("web.service.consul.", dns.TypeANY)
c := new(dns.Client)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
// Response length should be truncated
// We should get an A record for each response
if len(in.Answer) != maxServiceResponses {
t.Fatalf("Bad: %#v", len(in.Answer))
}
// Collect all the names
var names []string
for _, rec := range in.Answer {
switch v := rec.(type) {
case *dns.SRV:
names = append(names, v.Target)
case *dns.A:
names = append(names, v.A.String())
}
}
nameS := strings.Join(names, "|")
// Tally the results
uniques[nameS] = struct{}{}
}
// Give some wiggle room. Since the responses are randomized and there
// is a finite number of combinations, requiring 0 duplicates every
// test run eventually gives us failures.
if len(uniques) < 2 {
t.Fatalf("unique response ratio too low: %d/10\n%v", len(uniques), uniques)
}
}
func TestDNS_ServiceLookup_Truncate(t *testing.T) {
dir, srv := makeDNSServerConfig(t, nil, func(c *DNSConfig) {
c.EnableTruncate = true
})<|fim▁hole|> testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register nodes
for i := 0; i < 3*maxServiceResponses; i++ {
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: fmt.Sprintf("foo%d", i),
Address: fmt.Sprintf("127.0.0.%d", i+1),
Service: &structs.NodeService{
Service: "web",
Port: 8000,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
}
	// The answer set exceeds what fits in a UDP response, so the reply should be truncated.
m := new(dns.Msg)
m.SetQuestion("web.service.consul.", dns.TypeANY)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
c := new(dns.Client)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
// Check for the truncate bit
if !in.Truncated {
t.Fatalf("should have truncate bit")
}
}
func TestDNS_ServiceLookup_MaxResponses(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register nodes
for i := 0; i < 6*maxServiceResponses; i++ {
nodeAddress := fmt.Sprintf("127.0.0.%d", i+1)
if i > 3 {
nodeAddress = fmt.Sprintf("fe80::%d", i+1)
}
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: fmt.Sprintf("foo%d", i),
Address: nodeAddress,
Service: &structs.NodeService{
Service: "web",
Port: 8000,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
}
	// Ensure the answers are capped at maxServiceResponses for each query type.
m := new(dns.Msg)
m.SetQuestion("web.service.consul.", dns.TypeANY)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
c := new(dns.Client)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 3 {
t.Fatalf("should receive 3 answers for ANY")
}
m.SetQuestion("web.service.consul.", dns.TypeA)
in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 3 {
t.Fatalf("should receive 3 answers for A")
}
m.SetQuestion("web.service.consul.", dns.TypeAAAA)
in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 3 {
t.Fatalf("should receive 3 answers for AAAA")
}
}
func TestDNS_ServiceLookup_CNAME(t *testing.T) {
recursor := makeRecursor(t, []dns.RR{
dnsCNAME("www.google.com", "google.com"),
dnsA("google.com", "1.2.3.4"),
})
defer recursor.Shutdown()
dir, srv := makeDNSServerConfig(t, func(c *Config) {
c.DNSRecursor = recursor.Addr
}, nil)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "google",
Address: "www.google.com",
Service: &structs.NodeService{
Service: "search",
Port: 80,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("search.service.consul.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
// Service CNAME, google CNAME, google A record
if len(in.Answer) != 3 {
t.Fatalf("Bad: %#v", in)
}
// Should have service CNAME
cnRec, ok := in.Answer[0].(*dns.CNAME)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if cnRec.Target != "www.google.com." {
t.Fatalf("Bad: %#v", in.Answer[0])
}
// Should have google CNAME
cnRec, ok = in.Answer[1].(*dns.CNAME)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[1])
}
if cnRec.Target != "google.com." {
t.Fatalf("Bad: %#v", in.Answer[1])
}
// Check we recursively resolve
if _, ok := in.Answer[2].(*dns.A); !ok {
t.Fatalf("Bad: %#v", in.Answer[2])
}
}
func TestDNS_NodeLookup_TTL(t *testing.T) {
recursor := makeRecursor(t, []dns.RR{
dnsCNAME("www.google.com", "google.com"),
dnsA("google.com", "1.2.3.4"),
})
defer recursor.Shutdown()
dir, srv := makeDNSServerConfig(t, func(c *Config) {
c.DNSRecursor = recursor.Addr
}, func(c *DNSConfig) {
c.NodeTTL = 10 * time.Second
c.AllowStale = true
c.MaxStale = time.Second
})
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("foo.node.consul.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
aRec, ok := in.Answer[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if aRec.A.String() != "127.0.0.1" {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if aRec.Hdr.Ttl != 10 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
// Register node with IPv6
args = &structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "::4242:4242",
}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Check an IPv6 record
m = new(dns.Msg)
m.SetQuestion("bar.node.consul.", dns.TypeANY)
in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
aaaaRec, ok := in.Answer[0].(*dns.AAAA)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if aaaaRec.AAAA.String() != "::4242:4242" {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if aaaaRec.Hdr.Ttl != 10 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
// Register node with CNAME
args = &structs.RegisterRequest{
Datacenter: "dc1",
Node: "google",
Address: "www.google.com",
}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m = new(dns.Msg)
m.SetQuestion("google.node.consul.", dns.TypeANY)
in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
// Should have the CNAME record + a few A records
if len(in.Answer) < 2 {
t.Fatalf("Bad: %#v", in)
}
cnRec, ok := in.Answer[0].(*dns.CNAME)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if cnRec.Target != "www.google.com." {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if cnRec.Hdr.Ttl != 10 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
}
func TestDNS_ServiceLookup_TTL(t *testing.T) {
confFn := func(c *DNSConfig) {
c.ServiceTTL = map[string]time.Duration{
"db": 10 * time.Second,
"*": 5 * time.Second,
}
c.AllowStale = true
c.MaxStale = time.Second
}
dir, srv := makeDNSServerConfig(t, nil, confFn)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node with 2 services
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
args = &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "api",
Port: 2222,
},
}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("db.service.consul.", dns.TypeSRV)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
srvRec, ok := in.Answer[0].(*dns.SRV)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if srvRec.Hdr.Ttl != 10 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
aRec, ok := in.Extra[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.Hdr.Ttl != 10 {
t.Fatalf("Bad: %#v", in.Extra[0])
}
m = new(dns.Msg)
m.SetQuestion("api.service.consul.", dns.TypeSRV)
in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
srvRec, ok = in.Answer[0].(*dns.SRV)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if srvRec.Hdr.Ttl != 5 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
aRec, ok = in.Extra[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.Hdr.Ttl != 5 {
t.Fatalf("Bad: %#v", in.Extra[0])
}
}
func TestDNS_ServiceLookup_SRV_RFC(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("_db._master.service.consul.", dns.TypeSRV)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
srvRec, ok := in.Answer[0].(*dns.SRV)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if srvRec.Port != 12345 {
t.Fatalf("Bad: %#v", srvRec)
}
if srvRec.Target != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", srvRec)
}
if srvRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
aRec, ok := in.Extra[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.Hdr.Name != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.A.String() != "127.0.0.1" {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Extra[0])
}
}
func TestDNS_ServiceLookup_SRV_RFC_TCP_Default(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("_db._tcp.service.consul.", dns.TypeSRV)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
srvRec, ok := in.Answer[0].(*dns.SRV)
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if srvRec.Port != 12345 {
t.Fatalf("Bad: %#v", srvRec)
}
if srvRec.Target != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", srvRec)
}
if srvRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Answer[0])
}
aRec, ok := in.Extra[0].(*dns.A)
if !ok {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.Hdr.Name != "foo.node.dc1.consul." {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.A.String() != "127.0.0.1" {
t.Fatalf("Bad: %#v", in.Extra[0])
}
if aRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Extra[0])
}
}
func TestDNS_ServiceLookup_FilterACL(t *testing.T) {
confFn := func(c *Config) {
c.ACLMasterToken = "root"
c.ACLDatacenter = "dc1"
c.ACLDownPolicy = "deny"
c.ACLDefaultPolicy = "deny"
}
dir, srv := makeDNSServerConfig(t, confFn, nil)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register a service
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "foo",
Port: 12345,
},
WriteRequest: structs.WriteRequest{Token: "root"},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Set up the DNS query
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
m := new(dns.Msg)
m.SetQuestion("foo.service.consul.", dns.TypeA)
// Query with the root token. Should get results.
srv.agent.config.ACLToken = "root"
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
// Query with a non-root token without access. Should get nothing.
srv.agent.config.ACLToken = "anonymous"
in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 0 {
t.Fatalf("Bad: %#v", in)
}
}
func TestDNS_NonExistingLookup(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
// lookup a non-existing node, we should receive a SOA
m := new(dns.Msg)
m.SetQuestion("nonexisting.consul.", dns.TypeANY)
c := new(dns.Client)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Ns) != 1 {
t.Fatalf("Bad: %#v %#v", in, len(in.Answer))
}
soaRec, ok := in.Ns[0].(*dns.SOA)
if !ok {
t.Fatalf("Bad: %#v", in.Ns[0])
}
if soaRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Ns[0])
}
}
func TestDNS_NonExistingLookupEmptyAorAAAA(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// register v6 only service
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foov6",
Address: "fe80::1",
Service: &structs.NodeService{
Service: "webv6",
Port: 8000,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
// register v4 only service
args = &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foov4",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "webv4",
Port: 8000,
},
}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
// check for ipv6 records on ipv4 only service
m := new(dns.Msg)
m.SetQuestion("webv4.service.consul.", dns.TypeAAAA)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
c := new(dns.Client)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Ns) != 1 {
t.Fatalf("Bad: %#v", in)
}
soaRec, ok := in.Ns[0].(*dns.SOA)
if !ok {
t.Fatalf("Bad: %#v", in.Ns[0])
}
if soaRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Ns[0])
}
// check for ipv4 records on ipv6 only service
m.SetQuestion("webv6.service.consul.", dns.TypeA)
in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Ns) != 1 {
t.Fatalf("Bad: %#v", in)
}
soaRec, ok = in.Ns[0].(*dns.SOA)
if !ok {
t.Fatalf("Bad: %#v", in.Ns[0])
}
if soaRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Ns[0])
}
}<|fim▁end|> | defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
|
<|file_name|>main.js<|end_file_name|><|fim▁begin|>if($.cookie('age') !== 'pass') {
$('body').addClass('uh-oh');
$('#age-verify').show();
}
<|fim▁hole|> //set locations
var location = new google.maps.LatLng(39.90658,-105.09859);
var stylesArray = [
{
"stylers": [
{ "saturation": -100 }
]
},{
"featureType": "road",
"elementType": "geometry",
"stylers": [
{ "visibility": "simplified" }
]
},{
"elementType": "labels.text.stroke",
"stylers": [
{ "visibility": "off" }
]
},{
"elementType": "labels.text.fill",
"stylers": [
{ "lightness": 3 }
]
},{
"featureType": "administrative.land_parcel",
"elementType": "geometry",
"stylers": [
{ "visibility": "simplified" }
]
}
];
//set options
var myOptions = {
center: location,
zoom: 14,
styles: stylesArray,
disableDefaultUI: true,
scrollwheel: false,
mapTypeId: google.maps.MapTypeId.ROADMAP
};
//create map
var map = new google.maps.Map(document.getElementById('map'),
myOptions);
//Add a marker and modal box
var image = new google.maps.MarkerImage('/assets/img/icn.map-marker.png',
//Marker size
new google.maps.Size(100, 160),
//Origin
new google.maps.Point(0,0),
//Anchor
new google.maps.Point(50, 160));
//place a custom marker
var marker = new google.maps.Marker({
position: location,
map: map,
icon: image
});
map.controls[google.maps.ControlPosition.TOP_RIGHT].push(new ZoomPanControl(map));
	//helper: create a DOM element and apply the given class, style, or attributes
function CreateElement(tagName, properties) {
var elem = document.createElement(tagName);
for (var prop in properties) {
if (prop == "style")
elem.style.cssText = properties[prop];
else if (prop == "class")
elem.className = properties[prop];
else
elem.setAttribute(prop, properties[prop]);
}
return elem;
}
	//custom map controls: zoom in, recenter, zoom out
function ZoomPanControl(map) {
this.map = map
var t = this
var zoomPanContainer = CreateElement("div", { 'class':'map-controls' })
//Map Controls
		var div = CreateElement("div", {'title': 'Zoom in', 'class':'zoom-in' })
google.maps.event.addDomListener(div, "click", function() { t.zoom(ZoomDirection.IN); })
zoomPanContainer.appendChild(div)
div = CreateElement("div", {'title': 'Center', 'class':'center-map' })
google.maps.event.addDomListener(div, "click", function() { map.setCenter(location); })
zoomPanContainer.appendChild(div)
div = CreateElement("div", {'title': 'Zoom out', 'class':'zoom-out' })
google.maps.event.addDomListener(div, "click", function() { t.zoom(ZoomDirection.OUT); })
zoomPanContainer.appendChild(div)
return zoomPanContainer
}
ZoomPanControl.prototype.zoom = function(direction) {
var zoom = this.map.getZoom();
if (direction == ZoomDirection.IN && zoom < 19)
this.map.setZoom(zoom + 1);
else if (direction == ZoomDirection.OUT && zoom > 1)
this.map.setZoom(zoom - 1);
}
var ZoomDirection = {
IN: 0,
OUT: 1
}
}
//load articles
function loadPost(href) {
$('#beer').fadeIn(400)
$('#beer').load(href + ' #beer > *', function(response, status, xhr) {
if ( status == "error" ) {
console.log(xhr.status)
} else {
$('body').addClass('beer')
}
})
}
(function() {
// Highlight current section while scrolling DOWN
$('.section').waypoint(function(direction) {
if (direction === 'down') {
var $link = $('a[href="/' + this.id + '"]');
$('ul.nav.navbar-nav li').removeClass('active');
$link.parent().addClass('active');
}
}, { offset: '50%' });
// Highlight current section while scrolling UP
$('.section').waypoint(function(direction) {
if (direction === 'up') {
var $link = $('a[href="/' + this.id + '"]');
$('ul.nav.navbar-nav li').removeClass('active');
$link.parent().addClass('active');
}
}, {
offset: function() {
// This is the calculation that would give you
// "bottom of element hits middle of window"
return $.waypoints('viewportHeight') / 2 - $(this).outerHeight();
}
});
// history.js
if(!$('body').hasClass('blog')) {
$('ul.nav.navbar-nav li:not(".external") a, .beer > a').on('click', addressUpdate)
}
function addressUpdate(ev) {
ev.preventDefault()
var $that = $(this)
var separator = ' | '
var title = $(document).find('title').text()
title = title.substring(title.indexOf(separator), title.length)
//set title
if($('h3', this).length) {
title = $('h3', this).text() + title
} else {
title = $that.text() + title
}
//update url + title
var href = $that.attr('href')
History.pushState(null, title, href)
//load post
if($that.parent().hasClass('beer')) {
loadPost(href)
//scroll to section
} else {
$('#beer').fadeOut(400, function() {
$('body').removeClass('beer')
}).empty()
$('html, body').stop().animate({
scrollTop: ($('#' + href.replace('/', '')).offset().top-88)
}, 2000,'easeInOutExpo')
}
}
})();
//on page load
$(window).load(function() {
//scroll to section
var path = document.location.pathname
var article = path.split('/')[2]
path = path.substring(path.lastIndexOf('/') + 1, path.length)
if ($('#' + path).length && !article) {
$('html, body').stop().animate({
scrollTop: ($('#' + path).offset().top-88)
}, 2000,'easeInOutExpo')
} else if (typeof article !== 'undefined') {
$(document).scrollTop($('#blog').offset().top)
$('body').addClass('fixed')
}
})
google.maps.event.addDomListener(window, "load", showMap);
$(document).ready(function() {
// resizeContent()
// //attach on resize event
// $(window).resize(function() {
// resizeContent()
// });
// $('.hero-photo').each(function() {
// var img = $('> img', this)
// $(this).backstretch(img.attr('src'))
// img.hide()
// })
//setup full-screen images
$('.full-img').each(function() {
var img = $('> img', this)
$(this).backstretch(img.attr('src'))
img.hide()
})
//slider
$('.slider').slick({
autoplay: true,
infinite: true,
arrows: false,
speed: 800,
cssEase: 'cubic-bezier(0.86, 0, 0.07, 1)',
slidesToShow: 1,
slidesToScroll: 1,
responsive: [
{
breakpoint: 480,
settings: {
dots: true,
arrows: false
}
}
]
})
$('.trucks, .beers').slick({
infinite: true,
arrows: true,
prevArrow: '<button type="button" class="slick-prev"><i class="fa fa-angle-left"></i></button>',
nextArrow: '<button type="button" class="slick-next"><i class="fa fa-angle-right"></i></button>',
speed: 800,
cssEase: 'cubic-bezier(0.86, 0, 0.07, 1)',
slidesToShow: 5,
responsive: [
{
breakpoint: 1100,
settings: {
slidesToShow: 4,
slidesToScroll: 1
}
},
{
breakpoint: 900,
settings: {
slidesToShow: 3,
slidesToScroll: 1
}
},
{
breakpoint: 640,
settings: {
slidesToShow: 2,
slidesToScroll: 1
}
},
{
breakpoint: 480,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
arrows: false,
dots: true
}
}
]
})
//truck hover text
// $('.truck').on('hover', function () {
// //stuff to do on mouse enter
// $('.overlay', this).stop().fadeIn(200)
// },
// function () {
// //stuff to do on mouse leave
// $('.overlay', this).stop().fadeOut(200)
// });
$(document).on({
mouseenter: function () {
//stuff to do on mouse enter
$('.overlay', this).stop().fadeIn(200)
},
mouseleave: function () {
//stuff to do on mouse leave
$('.overlay', this).stop().fadeOut(200)
}
}, '.truck');
//mobile help
	// jQuery's delegated form is .on(events, selector, handler); the selector
	// must come before the handler so `this` is the matched .truck element
	$(document).on('touchstart touchend', '.truck', function(e) {
		e.preventDefault()
		$('.overlay', this).stop().fadeToggle(200)
	});
$('#mc-form').ajaxChimp({
url: 'http://nickdimatteo.us2.list-manage.com/subscribe/post?u=3d3f46742d639b10307a1d9d8&id=a1365fcbea'
});
//age verification
$('#age-verify #enter').on('click', function(e) {
e.preventDefault();
$.cookie('age', 'pass');
$('#age-verify').fadeOut(400, function() {
$('body').removeClass('uh-oh');
});
});
//go to admin
$(document).keyup(function(e) {
if (e.keyCode == 27) {
window.location.href = '/admin';
}
});
})<|fim▁end|> | //generate map
function showMap() { |
<|file_name|>reporter.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Adapted from a contribution of Johan Dahlin
import collections
import errno
import re
import sys
try:
import multiprocessing
except ImportError: # Python 2.5
multiprocessing = None
import pep8
__all__ = ['multiprocessing', 'BaseQReport', 'QueueReport']
class BaseQReport(pep8.BaseReport):
"""Base Queue Report."""
_loaded = False # Windows support
# Reasoning for ignored error numbers is in-line below
ignored_errors = set([
# EPIPE: Added by sigmavirus24
# > If output during processing is piped to something that may close
# > its own stdin before we've finished printing results, we need to
# > catch a Broken pipe error and continue on.
# > (See also: https://gitlab.com/pycqa/flake8/issues/69)
errno.EPIPE,
# NOTE(sigmavirus24): When adding to this list, include the reasoning
# on the lines before the error code and always append your error
# code. Further, please always add a trailing `,` to reduce the visual
# noise in diffs.
])
def __init__(self, options):
assert options.jobs > 0
super(BaseQReport, self).__init__(options)
self.counters = collections.defaultdict(int)
self.n_jobs = options.jobs
# init queues
self.task_queue = multiprocessing.Queue()
self.result_queue = multiprocessing.Queue()
if sys.platform == 'win32':
# Work around http://bugs.python.org/issue10845
sys.modules['__main__'].__file__ = __file__
def _cleanup_queue(self, queue):
while not queue.empty():
queue.get_nowait()
def _put_done(self):
# collect queues
for i in range(self.n_jobs):
self.task_queue.put('DONE')
self.update_state(self.result_queue.get())
def _process_main(self):
if not self._loaded:
# Windows needs to parse again the configuration
from flake8.main import get_style_guide, DEFAULT_CONFIG
get_style_guide(parse_argv=True, config_file=DEFAULT_CONFIG)
for filename in iter(self.task_queue.get, 'DONE'):
self.input_file(filename)
def start(self):
super(BaseQReport, self).start()
self.__class__._loaded = True
# spawn processes
for i in range(self.n_jobs):
p = multiprocessing.Process(target=self.process_main)
p.daemon = True
p.start()
def stop(self):
try:
self._put_done()
except KeyboardInterrupt:
pass
finally:
# cleanup queues to unlock threads
self._cleanup_queue(self.result_queue)
self._cleanup_queue(self.task_queue)
super(BaseQReport, self).stop()
def process_main(self):
try:
self._process_main()
except KeyboardInterrupt:
pass
except IOError as ioerr:
# If we happen across an IOError that we aren't certain can/should
# be ignored, we should re-raise the exception.
if ioerr.errno not in self.ignored_errors:
raise
finally:
# ensure all output is flushed before main process continues
sys.stdout.flush()
sys.stderr.flush()
self.result_queue.put(self.get_state())
def get_state(self):
return {'total_errors': self.total_errors,
'counters': self.counters,
'messages': self.messages}
def update_state(self, state):
self.total_errors += state['total_errors']
for key, value in state['counters'].items():
self.counters[key] += value
self.messages.update(state['messages'])
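# Illustrative sketch (hedged, not part of flake8 itself) of the merge done by
# update_state(): each worker pushes its get_state() dict onto the result
# queue, and the parent sums totals/counters and unions messages, e.g.
#
#     report.update_state({'total_errors': 2,
#                          'counters': {'E501': 2},
#                          'messages': {'E501': 'line too long'}})
#     # report.total_errors grew by 2 and report.counters['E501'] by 2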
class FileQReport(BaseQReport):
"""File Queue Report."""
print_filename = True
class QueueReport(pep8.StandardReport, BaseQReport):
"""Standard Queue Report."""
def get_file_results(self):<|fim▁hole|> """Print the result and return the overall count for this file."""
self._deferred_print.sort()
for line_number, offset, code, text, doc in self._deferred_print:
print(self._fmt % {
'path': self.filename,
'row': self.line_offset + line_number, 'col': offset + 1,
'code': code, 'text': text,
})
# stdout is block buffered when not stdout.isatty().
# line can be broken where buffer boundary since other processes
# write to same file.
# flush() after print() to avoid buffer boundary.
# Typical buffer size is 8192. line written safely when
# len(line) < 8192.
sys.stdout.flush()
if self._show_source:
if line_number > len(self.lines):
line = ''
else:
line = self.lines[line_number - 1]
print(line.rstrip())
sys.stdout.flush()
print(re.sub(r'\S', ' ', line[:offset]) + '^')
sys.stdout.flush()
if self._show_pep8 and doc:
print(' ' + doc.strip())
sys.stdout.flush()
return self.file_errors<|fim▁end|> | |
<|file_name|>camera_controller.hpp<|end_file_name|><|fim▁begin|>/*!
\file
\author Igor Mironchik (igor.mironchik at gmail dot com).
Copyright (c) 2017 Igor Mironchik
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of<|fim▁hole|>
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TREE__CAMERA_CONTROLLER_HPP__INCLUDED
#define TREE__CAMERA_CONTROLLER_HPP__INCLUDED
// Qt include.
#include <Qt3DCore/QEntity>
// C++ include.
#include <memory>
QT_BEGIN_NAMESPACE
namespace Qt3DRender {
class QCamera;
}
QT_END_NAMESPACE
//
// CameraController
//
class CameraControllerPrivate;
class CameraController Q_DECL_FINAL
: public Qt3DCore::QEntity
{
Q_OBJECT
public:
CameraController( Qt3DRender::QCamera * camera,
Qt3DCore::QEntity * parent );
~CameraController();
private slots:
void _q_onTriggered( float );
private:
friend class CameraControllerPrivate;
Q_DISABLE_COPY( CameraController )
std::unique_ptr< CameraControllerPrivate > d;
}; // class CameraController
#endif // TREE__CAMERA_CONTROLLER_HPP__INCLUDED<|fim▁end|> | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. |
<|file_name|>vehicle_type.py<|end_file_name|><|fim▁begin|>from enum import Enum
from typing import List, Union
import logging
import math
try:
from flask_babel import _
except ModuleNotFoundError:
    # Fallback stub so the module-level _() extraction calls below still run
    # when flask_babel is not installed.
    def _(s):
        return s
class VehicleType(Enum):
CAR = 1
TRUCK_UPTO_4 = 2
PICKUP_UPTO_4 = 3
TRUCK_4_TO_10 = 4
TRUCK_12_TO_16 = 5
TRUCK_16_TO_34 = 6
TRUCK_ABOVE_34 = 7
MOTORCYCLE_UPTO_50 = 8
MOTORCYCLE_50_TO_250 = 9
MOTORCYCLE_250_TO_500 = 10
BUS = 11
TAXI = 12
WORK = 13
TRACTOR = 14
BIKE = 15
TRAIN = 16
OTHER_AND_UNKNOWN = 17
MINIBUS = 18
MOTORCYCLE_ABOVE_500 = 19
ELECTRIC_SCOOTER = 21
MOBILITY_SCOOTER = 22
ELECTRIC_BIKE = 23
TRUCK_3_5_TO_10 = 24
TRUCK_10_TO_12 = 25
    def get_categories(self) -> List["VehicleCategory"]:
        res = []
        for t in list(VehicleCategory):
            # membership is checked against the category's type codes, since
            # VehicleCategory values are plain ints
            if self.value in t.get_codes():
                res.append(t)
        return res
def get_english_display_name(self):
english_vehicle_type_display_names = {<|fim▁hole|> VehicleType.CAR: "private car",
VehicleType.TRUCK_UPTO_4: "truck upto 4 tons",
VehicleType.PICKUP_UPTO_4: "pickup upto 4 tons",
VehicleType.TRUCK_4_TO_10: "truck 4 to 10 tons",
VehicleType.TRUCK_12_TO_16: "truck 12 to 16 tons",
VehicleType.TRUCK_16_TO_34: "truck 16 to 34 tons",
VehicleType.TRUCK_ABOVE_34: "truck above 34 tons",
VehicleType.MOTORCYCLE_UPTO_50: "motorcycle upto 50 cc",
VehicleType.MOTORCYCLE_50_TO_250: "motorcycle 50 to 250 cc",
VehicleType.MOTORCYCLE_250_TO_500: "motorcycle 250 to 500 cc",
VehicleType.BUS: "bus",
VehicleType.TAXI: "taxi",
VehicleType.WORK: "work vehicle",
VehicleType.TRACTOR: "tractor",
VehicleType.BIKE: "bike",
VehicleType.TRAIN: "train",
VehicleType.OTHER_AND_UNKNOWN: "other and unknown",
VehicleType.MINIBUS: "minibus",
VehicleType.MOTORCYCLE_ABOVE_500: "motorcycle above 500 cc",
VehicleType.ELECTRIC_SCOOTER: "electric scooter",
VehicleType.MOBILITY_SCOOTER: "mobility scooter",
VehicleType.ELECTRIC_BIKE: "electric bike",
VehicleType.TRUCK_3_5_TO_10: "truck 3.5 to 10 tons",
VehicleType.TRUCK_10_TO_12: "truck 10 to 12 tons",
}
try:
return english_vehicle_type_display_names[self]
except (KeyError, TypeError):
logging.exception(f"VehicleType.get_display_name: {self}: no display string defined")
return "no display name defined"
@staticmethod
def to_type_code(db_val: Union[float, int]) -> int:
"""Values read from DB may arrive as float, and empty values come as nan"""
if isinstance(db_val, float):
if math.isnan(db_val):
return VehicleType.OTHER_AND_UNKNOWN.value
else:
return int(db_val)
elif isinstance(db_val, int):
return db_val
else:
logging.error(
f"VehicleType.fo_type_code: unknown value: {db_val}({type(db_val)})"
". returning OTHER_AND_UNKNOWN"
)
return VehicleType.OTHER_AND_UNKNOWN.value
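# A quick sketch of the NaN handling in to_type_code (input values are
# illustrative):
#
#     VehicleType.to_type_code(11.0)          # -> 11 (BUS)
#     VehicleType.to_type_code(float("nan"))  # -> 17 (OTHER_AND_UNKNOWN)
#     VehicleType.to_type_code(8)             # -> 8  (MOTORCYCLE_UPTO_50)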
VT = VehicleType
class VehicleCategory(Enum):
PROFESSIONAL_DRIVER = 1
PRIVATE_DRIVER = 2
LIGHT_ELECTRIC = 3
CAR = 4
LARGE = 5
MOTORCYCLE = 6
BICYCLE_AND_SMALL_MOTOR = 7
OTHER = 8
def get_codes(self) -> List[int]:
"""returns VehicleType codes of category"""
category_vehicle_types = {
VehicleCategory.PROFESSIONAL_DRIVER: [
VehicleType.TRUCK_UPTO_4,
VehicleType.PICKUP_UPTO_4,
VehicleType.TRUCK_4_TO_10,
VehicleType.TRUCK_12_TO_16,
VehicleType.TRUCK_16_TO_34,
VehicleType.TRUCK_ABOVE_34,
VehicleType.BUS,
VehicleType.TAXI,
VehicleType.WORK,
VehicleType.TRACTOR,
VehicleType.MINIBUS,
VehicleType.TRUCK_3_5_TO_10,
VehicleType.TRUCK_10_TO_12,
],
VehicleCategory.PRIVATE_DRIVER: [
VehicleType.CAR,
VehicleType.MOTORCYCLE_UPTO_50,
VehicleType.MOTORCYCLE_50_TO_250,
VehicleType.MOTORCYCLE_250_TO_500,
VehicleType.MOTORCYCLE_ABOVE_500,
],
VehicleCategory.LIGHT_ELECTRIC: [
VehicleType.ELECTRIC_SCOOTER,
VehicleType.MOBILITY_SCOOTER,
VehicleType.ELECTRIC_BIKE,
],
VehicleCategory.CAR: [VehicleType.CAR, VehicleType.TAXI],
VehicleCategory.LARGE: [
VehicleType.TRUCK_UPTO_4,
VehicleType.PICKUP_UPTO_4,
VehicleType.TRUCK_4_TO_10,
VehicleType.TRUCK_12_TO_16,
VehicleType.TRUCK_16_TO_34,
VehicleType.TRUCK_ABOVE_34,
VehicleType.BUS,
VehicleType.WORK,
VehicleType.TRACTOR,
VehicleType.MINIBUS,
VehicleType.TRUCK_3_5_TO_10,
VehicleType.TRUCK_10_TO_12,
],
VehicleCategory.MOTORCYCLE: [
VehicleType.MOTORCYCLE_UPTO_50,
VehicleType.MOTORCYCLE_50_TO_250,
VehicleType.MOTORCYCLE_250_TO_500,
VehicleType.MOTORCYCLE_ABOVE_500,
],
VehicleCategory.BICYCLE_AND_SMALL_MOTOR: [
VehicleType.BIKE,
VehicleType.ELECTRIC_SCOOTER,
VehicleType.ELECTRIC_BIKE,
],
VehicleCategory.OTHER: [
VehicleType.BIKE,
VehicleType.TRAIN,
VehicleType.OTHER_AND_UNKNOWN,
],
}
return list(map(lambda x: x.value, category_vehicle_types[self]))
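    # Round-trip sketch between categories and type codes, which contains()
    # and VehicleType.get_categories rely on:
    #
    #     VehicleCategory.CAR.get_codes()   # -> [1, 12]  (CAR, TAXI)
    #     VehicleCategory.CAR.contains(1)   # -> True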
def contains(self, vt_code: int) -> bool:
        if not isinstance(vt_code, int):
logging.warning(f"VehicleCategory.contains: {vt_code}:{type(vt_code)}: not int")
return False
return vt_code in self.get_codes()
def get_english_display_name(self):
english_vehicle_type_display_names = {
VehicleCategory.PROFESSIONAL_DRIVER: "professional driver",
VehicleCategory.PRIVATE_DRIVER: "private driver",
VehicleCategory.LIGHT_ELECTRIC: "light electric vehicles",
VehicleCategory.CAR: "private car",
VehicleCategory.LARGE: "large vehicle",
VehicleCategory.MOTORCYCLE: "motorcycle",
VehicleCategory.BICYCLE_AND_SMALL_MOTOR: "bicycle and small motor vehicles",
VehicleCategory.OTHER: "other vehicle",
}
try:
return english_vehicle_type_display_names[self]
except (KeyError, TypeError):
logging.exception(f"VehicleType.get_display_name: {self}: no display string defined")
return "no display name defined"
_("professional driver")
_("private driver")
_("light electric vehicles")
_("private car")
_("large vehicle")
_("motorcycle")
_("bicycle and small motor vehicles")
_("other vehicle")<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>####################################################################################################
# neuropythy/datasets/__init__.py
# Datasets for neuropythy.<|fim▁hole|># mainly just to force these to load when datasets is loaded:
from .benson_winawer_2018 import (BensonWinawer2018Dataset)
from .hcp import (HCPDataset, HCPRetinotopyDataset, HCPMetaDataset)
from .visual_performance_fields import (VisualPerformanceFieldsDataset)
from .hcp_lines import (HCPLinesDataset)
# TODO: https://openneuro.org/crn/datasets/ds001499/snapshots/1.1.0/download -- add the BOLD5000
# : dataset to neuropythy (see bold5000.org)
# import this last so that we get the most updated version of data
from .core import (data, Dataset)<|fim▁end|> | # by Noah C. Benson
|
<|file_name|>AbilityEvent.js<|end_file_name|><|fim▁begin|>import GameEvent from './GameEvent';
import Creature from '../entities/creatures/Creature';
import Ability from '../abilities/Ability';
import Tile from '../tiles/Tile';
export default class AbilityEvent extends GameEvent {
/**
* @class AbilityEvent
* @description Fired whenever a creature attacks
*/
constructor(dungeon, creature, ability, tile) {
super(dungeon);
if(!(creature instanceof Creature)) {
throw new Error('Second parameter must be a Creature');
} else if(!(ability instanceof Ability)) {
throw new Error('Third parameter must be an Ability');
} else if((tile instanceof Tile) !== ability.isTargetted()) {
throw new Error('Fourth parameter must be a Tile iff ability is targetted');
}
this._creature = creature;
this._ability = ability;
this._tile = tile;
}
getCreature() {
return this._creature;
}
getAbility() {
return this._ability;
}
getTile() {
return this._tile;
}
<|fim▁hole|> var tile = dungeon.getTile(creature);
return `${creature} used ${ability}` + (tile ? ` on ${tile}` : '');
}
}<|fim▁end|> | getText(dungeon) {
var creature = this.getCreature();
var ability = this.getAbility(); |
<|file_name|>time.rs<|end_file_name|><|fim▁begin|>//! Temporal quantification.
//!
//! # Examples:
//!
//! There are multiple ways to create a new [`Duration`]:
//!
//! ```
//! # use std::time::Duration;
//! let five_seconds = Duration::from_secs(5);
//! assert_eq!(five_seconds, Duration::from_millis(5_000));
//! assert_eq!(five_seconds, Duration::from_micros(5_000_000));
//! assert_eq!(five_seconds, Duration::from_nanos(5_000_000_000));
//!
//! let ten_seconds = Duration::from_secs(10);
//! let seven_nanos = Duration::from_nanos(7);
//! let total = ten_seconds + seven_nanos;
//! assert_eq!(total, Duration::new(10, 7));
//! ```
//!
//! Using [`Instant`] to calculate how long a function took to run:
//!
//! ```ignore (incomplete)
//! let now = Instant::now();
//!
//! // Calling a slow function, it may take a while
//! slow_function();
//!
//! let elapsed_time = now.elapsed();
//! println!("Running slow_function() took {} seconds.", elapsed_time.as_secs());
//! ```
#![stable(feature = "time", since = "1.3.0")]
mod monotonic;
#[cfg(test)]
mod tests;
use crate::error::Error;
use crate::fmt;
use crate::ops::{Add, AddAssign, Sub, SubAssign};
use crate::sys::time;
use crate::sys_common::FromInner;
#[stable(feature = "time", since = "1.3.0")]
pub use core::time::Duration;
/// A measurement of a monotonically nondecreasing clock.
/// Opaque and useful only with [`Duration`].
///
/// Instants are always guaranteed to be no less than any previously measured
/// instant when created, and are often useful for tasks such as measuring
/// benchmarks or timing how long an operation takes.
///
/// Note, however, that instants are not guaranteed to be **steady**. In other
/// words, each tick of the underlying clock might not be the same length (e.g.
/// some seconds may be longer than others). An instant may jump forwards or
/// experience time dilation (slow down or speed up), but it will never go
/// backwards.
///
/// Instants are opaque types that can only be compared to one another. There is
/// no method to get "the number of seconds" from an instant. Instead, it only
/// allows measuring the duration between two instants (or comparing two
/// instants).
///
/// The size of an `Instant` struct may vary depending on the target operating
/// system.
///
/// Example:
///
/// ```no_run
/// use std::time::{Duration, Instant};
/// use std::thread::sleep;
///
/// fn main() {
/// let now = Instant::now();
///
/// // we sleep for 2 seconds
/// sleep(Duration::new(2, 0));
/// // it prints '2'
/// println!("{}", now.elapsed().as_secs());
/// }
/// ```
///
/// # OS-specific behaviors
///
/// An `Instant` is a wrapper around system-specific types and it may behave
/// differently depending on the underlying operating system. For example,
/// the following snippet is fine on Linux but panics on macOS:
///
/// ```no_run
/// use std::time::{Instant, Duration};
///
/// let now = Instant::now();
/// let max_nanoseconds = u64::MAX / 1_000_000_000;
/// let duration = Duration::new(max_nanoseconds, 0);
/// println!("{:?}", now + duration);
/// ```
///
/// # Underlying System calls
/// Currently, the following system calls are being used to get the current time using `now()`:
///
/// | Platform | System call |
/// |-----------|----------------------------------------------------------------------|
/// | SGX | [`insecure_time` usercall]. More information on [timekeeping in SGX] |
/// | UNIX | [clock_gettime (Monotonic Clock)] |
/// | Darwin | [mach_absolute_time] |
/// | VXWorks | [clock_gettime (Monotonic Clock)] |
/// | WASI | [__wasi_clock_time_get (Monotonic Clock)] |
/// | Windows | [QueryPerformanceCounter] |
///
/// [QueryPerformanceCounter]: https://docs.microsoft.com/en-us/windows/win32/api/profileapi/nf-profileapi-queryperformancecounter
/// [`insecure_time` usercall]: https://edp.fortanix.com/docs/api/fortanix_sgx_abi/struct.Usercalls.html#method.insecure_time
/// [timekeeping in SGX]: https://edp.fortanix.com/docs/concepts/rust-std/#codestdtimecode
/// [__wasi_clock_time_get (Monotonic Clock)]: https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/docs.md#clock_time_get
/// [clock_gettime (Monotonic Clock)]: https://linux.die.net/man/3/clock_gettime
/// [mach_absolute_time]: https://developer.apple.com/library/archive/documentation/Darwin/Conceptual/KernelProgramming/services/services.html
///
/// **Disclaimer:** These system calls might change over time.
///
/// > Note: mathematical operations like [`add`] may panic if the underlying
/// > structure cannot represent the new point in time.
///
/// [`add`]: Instant::add
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[stable(feature = "time2", since = "1.8.0")]
pub struct Instant(time::Instant);
/// A measurement of the system clock, useful for talking to
/// external entities like the file system or other processes.
///
/// Distinct from the [`Instant`] type, this time measurement **is not
/// monotonic**. This means that you can save a file to the file system, then
/// save another file to the file system, **and the second file has a
/// `SystemTime` measurement earlier than the first**. In other words, an
/// operation that happens after another operation in real time may have an
/// earlier `SystemTime`!
///
/// Consequently, comparing two `SystemTime` instances to learn about the
/// duration between them returns a [`Result`] instead of an infallible [`Duration`]
/// to indicate that this sort of time drift may happen and needs to be handled.
///
/// Although a `SystemTime` cannot be directly inspected, the [`UNIX_EPOCH`]
/// constant is provided in this module as an anchor in time to learn
/// information about a `SystemTime`. By calculating the duration from this
/// fixed point in time, a `SystemTime` can be converted to a human-readable time,
/// or perhaps some other string representation.
///
/// The size of a `SystemTime` struct may vary depending on the target operating
/// system.
///
/// Example:
///
/// ```no_run
/// use std::time::{Duration, SystemTime};
/// use std::thread::sleep;
///
/// fn main() {
/// let now = SystemTime::now();
///
/// // we sleep for 2 seconds
/// sleep(Duration::new(2, 0));
/// match now.elapsed() {
/// Ok(elapsed) => {
/// // it prints '2'
/// println!("{}", elapsed.as_secs());
/// }
/// Err(e) => {
/// // an error occurred!
/// println!("Error: {:?}", e);
/// }
/// }
/// }
/// ```
///
/// # Underlying System calls
/// Currently, the following system calls are being used to get the current time using `now()`:
///
/// | Platform | System call |
/// |-----------|----------------------------------------------------------------------|
/// | SGX | [`insecure_time` usercall]. More information on [timekeeping in SGX] |
/// | UNIX | [clock_gettime (Realtime Clock)] |
/// | Darwin | [gettimeofday] |
/// | VXWorks | [clock_gettime (Realtime Clock)] |
/// | WASI | [__wasi_clock_time_get (Realtime Clock)] |
/// | Windows | [GetSystemTimePreciseAsFileTime] / [GetSystemTimeAsFileTime] |
///
/// [`insecure_time` usercall]: https://edp.fortanix.com/docs/api/fortanix_sgx_abi/struct.Usercalls.html#method.insecure_time
/// [timekeeping in SGX]: https://edp.fortanix.com/docs/concepts/rust-std/#codestdtimecode
/// [gettimeofday]: https://man7.org/linux/man-pages/man2/gettimeofday.2.html
/// [clock_gettime (Realtime Clock)]: https://linux.die.net/man/3/clock_gettime
/// [__wasi_clock_time_get (Realtime Clock)]: https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/docs.md#clock_time_get
/// [GetSystemTimePreciseAsFileTime]: https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getsystemtimepreciseasfiletime
/// [GetSystemTimeAsFileTime]: https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getsystemtimeasfiletime
///
/// **Disclaimer:** These system calls might change over time.
///
/// > Note: mathematical operations like [`add`] may panic if the underlying
/// > structure cannot represent the new point in time.
///
/// [`add`]: SystemTime::add
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[stable(feature = "time2", since = "1.8.0")]
pub struct SystemTime(time::SystemTime);
/// An error returned from the `duration_since` and `elapsed` methods on
/// `SystemTime`, used to learn how far in the opposite direction a system time
/// lies.
///
/// # Examples
///
/// ```no_run
/// use std::thread::sleep;
/// use std::time::{Duration, SystemTime};
///
/// let sys_time = SystemTime::now();
/// sleep(Duration::from_secs(1));
/// let new_sys_time = SystemTime::now();
/// match sys_time.duration_since(new_sys_time) {
/// Ok(_) => {}
/// Err(e) => println!("SystemTimeError difference: {:?}", e.duration()),
/// }
/// ```
#[derive(Clone, Debug)]
#[stable(feature = "time2", since = "1.8.0")]
pub struct SystemTimeError(Duration);
impl Instant {
/// Returns an instant corresponding to "now".
///
/// # Examples
///
/// ```
/// use std::time::Instant;
///
/// let now = Instant::now();
/// ```
#[stable(feature = "time2", since = "1.8.0")]
pub fn now() -> Instant {
let os_now = time::Instant::now();
// And here we come upon a sad state of affairs. The whole point of
// `Instant` is that it's monotonically increasing. We've found in the
// wild, however, that it's not actually monotonically increasing for
// one reason or another. These appear to be OS and hardware level bugs,
// and there's not really a whole lot we can do about them. Here's a
// taste of what we've found:
//
// * #48514 - OpenBSD, x86_64
// * #49281 - linux arm64 and s390x
// * #51648 - windows, x86
// * #56560 - windows, x86_64, AWS
// * #56612 - windows, x86, vm (?)
// * #56940 - linux, arm64
// * https://bugzilla.mozilla.org/show_bug.cgi?id=1487778 - a similar
// Firefox bug
//
// It seems that this just happens a lot in the wild.
// We're seeing panics across various platforms where consecutive calls
// to `Instant::now`, such as via the `elapsed` function, are panicking
// as they're going backwards. Placed here is a last-ditch effort to try
// to fix things up. We keep a global "latest now" instance which is
// returned instead of what the OS says if the OS goes backwards.
//
// To hopefully mitigate the impact of this, a few platforms are
// excluded as "these at least haven't gone backwards yet".
if time::Instant::actually_monotonic() {
return Instant(os_now);
}
Instant(monotonic::monotonize(os_now))
}
/// Returns the amount of time elapsed from another instant to this one.
///
/// # Panics
///
/// This function will panic if `earlier` is later than `self`.
///
/// # Examples
///
/// ```no_run
/// use std::time::{Duration, Instant};
/// use std::thread::sleep;
///
/// let now = Instant::now();
/// sleep(Duration::new(1, 0));
/// let new_now = Instant::now();
/// println!("{:?}", new_now.duration_since(now));
/// ```
#[stable(feature = "time2", since = "1.8.0")]
pub fn duration_since(&self, earlier: Instant) -> Duration {
self.0.checked_sub_instant(&earlier.0).expect("supplied instant is later than self")
}
/// Returns the amount of time elapsed from another instant to this one,
/// or None if that instant is later than this one.
///
/// # Examples
///
/// ```no_run
/// use std::time::{Duration, Instant};
/// use std::thread::sleep;
///
/// let now = Instant::now();
/// sleep(Duration::new(1, 0));
/// let new_now = Instant::now();
/// println!("{:?}", new_now.checked_duration_since(now));
/// println!("{:?}", now.checked_duration_since(new_now)); // None
/// ```
#[stable(feature = "checked_duration_since", since = "1.39.0")]
pub fn checked_duration_since(&self, earlier: Instant) -> Option<Duration> {
self.0.checked_sub_instant(&earlier.0)
}
/// Returns the amount of time elapsed from another instant to this one,
/// or zero duration if that instant is later than this one.
///
/// # Examples
///
/// ```no_run
/// use std::time::{Duration, Instant};
/// use std::thread::sleep;
///
/// let now = Instant::now();
/// sleep(Duration::new(1, 0));
/// let new_now = Instant::now();
/// println!("{:?}", new_now.saturating_duration_since(now));
/// println!("{:?}", now.saturating_duration_since(new_now)); // 0ns
/// ```
#[stable(feature = "checked_duration_since", since = "1.39.0")]
pub fn saturating_duration_since(&self, earlier: Instant) -> Duration {
self.checked_duration_since(earlier).unwrap_or_default()
}
/// Returns the amount of time elapsed since this instant was created.
///
/// # Panics
///
/// This function may panic if the current time is earlier than this
/// instant, which is something that can happen if an `Instant` is
/// produced synthetically.
///
/// # Examples
///
/// ```no_run
/// use std::thread::sleep;
/// use std::time::{Duration, Instant};
///
/// let instant = Instant::now();
/// let three_secs = Duration::from_secs(3);
/// sleep(three_secs);
/// assert!(instant.elapsed() >= three_secs);
/// ```
#[stable(feature = "time2", since = "1.8.0")]
pub fn elapsed(&self) -> Duration {
Instant::now() - *self
}
/// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be represented as
/// `Instant` (which means it's inside the bounds of the underlying data structure), `None`
/// otherwise.
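    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```no_run
    /// use std::time::{Duration, Instant};
    ///
    /// let now = Instant::now();
    /// // A small duration is always representable, so `Some` is returned.
    /// assert!(now.checked_add(Duration::from_secs(1)).is_some());
    /// ```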
#[stable(feature = "time_checked_add", since = "1.34.0")]
pub fn checked_add(&self, duration: Duration) -> Option<Instant> {
self.0.checked_add_duration(&duration).map(Instant)
}
/// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be represented as
/// `Instant` (which means it's inside the bounds of the underlying data structure), `None`
/// otherwise.
#[stable(feature = "time_checked_add", since = "1.34.0")]
pub fn checked_sub(&self, duration: Duration) -> Option<Instant> {
self.0.checked_sub_duration(&duration).map(Instant)
}
}
#[stable(feature = "time2", since = "1.8.0")]
impl Add<Duration> for Instant {
type Output = Instant;
/// # Panics
///
/// This function may panic if the resulting point in time cannot be represented by the
/// underlying data structure. See [`Instant::checked_add`] for a version without panic.
fn add(self, other: Duration) -> Instant {
self.checked_add(other).expect("overflow when adding duration to instant")
}
}
#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
impl AddAssign<Duration> for Instant {
fn add_assign(&mut self, other: Duration) {
*self = *self + other;
}
}
#[stable(feature = "time2", since = "1.8.0")]
impl Sub<Duration> for Instant {
type Output = Instant;
fn sub(self, other: Duration) -> Instant {
self.checked_sub(other).expect("overflow when subtracting duration from instant")
}
}
#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
impl SubAssign<Duration> for Instant {
fn sub_assign(&mut self, other: Duration) {
*self = *self - other;
}
}
#[stable(feature = "time2", since = "1.8.0")]
impl Sub<Instant> for Instant {
type Output = Duration;
fn sub(self, other: Instant) -> Duration {
self.duration_since(other)
}
}
#[stable(feature = "time2", since = "1.8.0")]
impl fmt::Debug for Instant {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
impl SystemTime {
/// An anchor in time which can be used to create new `SystemTime` instances or
/// learn about where in time a `SystemTime` lies.
///
/// This constant is defined to be "1970-01-01 00:00:00 UTC" on all systems with
/// respect to the system clock. Using `duration_since` on an existing
/// `SystemTime` instance can tell how far away from this point in time a
/// measurement lies, and using `UNIX_EPOCH + duration` can be used to create a
/// `SystemTime` instance to represent another fixed point in time.
///
/// # Examples
///
/// ```no_run
/// use std::time::SystemTime;
///
/// match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
/// Ok(n) => println!("1970-01-01 00:00:00 UTC was {} seconds ago!", n.as_secs()),
/// Err(_) => panic!("SystemTime before UNIX EPOCH!"),
/// }
/// ```
#[stable(feature = "assoc_unix_epoch", since = "1.28.0")]
pub const UNIX_EPOCH: SystemTime = UNIX_EPOCH;
/// Returns the system time corresponding to "now".
///
/// # Examples
///
/// ```
/// use std::time::SystemTime;
///
/// let sys_time = SystemTime::now();
/// ```
#[stable(feature = "time2", since = "1.8.0")]
pub fn now() -> SystemTime {
SystemTime(time::SystemTime::now())
}
/// Returns the amount of time elapsed from an earlier point in time.
///
/// This function may fail because measurements taken earlier are not
/// guaranteed to always be before later measurements (due to anomalies such
/// as the system clock being adjusted either forwards or backwards).
/// [`Instant`] can be used to measure elapsed time without this risk of failure.
///
/// If successful, [`Ok`]`(`[`Duration`]`)` is returned where the duration represents
/// the amount of time elapsed from the specified measurement to this one.
///
/// Returns an [`Err`] if `earlier` is later than `self`, and the error
/// contains how far from `self` the time is.
///
/// # Examples
///
/// ```no_run
/// use std::time::SystemTime;
///
/// let sys_time = SystemTime::now();
/// let new_sys_time = SystemTime::now();
/// let difference = new_sys_time.duration_since(sys_time)
/// .expect("Clock may have gone backwards");
/// println!("{:?}", difference);
/// ```
#[stable(feature = "time2", since = "1.8.0")]
pub fn duration_since(&self, earlier: SystemTime) -> Result<Duration, SystemTimeError> {
self.0.sub_time(&earlier.0).map_err(SystemTimeError)
}
/// Returns the difference between the clock time when this
/// system time was created, and the current clock time.
///
/// This function may fail as the underlying system clock is susceptible to
/// drift and updates (e.g., the system clock could go backwards), so this
/// function might not always succeed. If successful, [`Ok`]`(`[`Duration`]`)` is
/// returned where the duration represents the amount of time elapsed from
/// this time measurement to the current time.
///
/// To measure elapsed time reliably, use [`Instant`] instead.
///
/// Returns an [`Err`] if `self` is later than the current system time, and
/// the error contains how far from the current system time `self` is.
///
/// # Examples
///
/// ```no_run
/// use std::thread::sleep;
/// use std::time::{Duration, SystemTime};
///
/// let sys_time = SystemTime::now();
/// let one_sec = Duration::from_secs(1);
/// sleep(one_sec);
/// assert!(sys_time.elapsed().unwrap() >= one_sec);
/// ```
#[stable(feature = "time2", since = "1.8.0")]
pub fn elapsed(&self) -> Result<Duration, SystemTimeError> {
SystemTime::now().duration_since(*self)
}
/// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be represented as
/// `SystemTime` (which means it's inside the bounds of the underlying data structure), `None`
/// otherwise.
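    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```no_run
    /// use std::time::{Duration, SystemTime};
    ///
    /// let now = SystemTime::now();
    /// assert!(now.checked_add(Duration::from_secs(1)).is_some());
    /// ```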
#[stable(feature = "time_checked_add", since = "1.34.0")]
pub fn checked_add(&self, duration: Duration) -> Option<SystemTime> {
self.0.checked_add_duration(&duration).map(SystemTime)
}
/// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be represented as
/// `SystemTime` (which means it's inside the bounds of the underlying data structure), `None`
/// otherwise.
#[stable(feature = "time_checked_add", since = "1.34.0")]
pub fn checked_sub(&self, duration: Duration) -> Option<SystemTime> {
self.0.checked_sub_duration(&duration).map(SystemTime)
}
}
#[stable(feature = "time2", since = "1.8.0")]
impl Add<Duration> for SystemTime {
type Output = SystemTime;
/// # Panics
///
/// This function may panic if the resulting point in time cannot be represented by the
/// underlying data structure. See [`SystemTime::checked_add`] for a version without panic.
fn add(self, dur: Duration) -> SystemTime {
self.checked_add(dur).expect("overflow when adding duration to instant")
}
}
#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
impl AddAssign<Duration> for SystemTime {
fn add_assign(&mut self, other: Duration) {
*self = *self + other;
}
}
#[stable(feature = "time2", since = "1.8.0")]
impl Sub<Duration> for SystemTime {
type Output = SystemTime;
fn sub(self, dur: Duration) -> SystemTime {
self.checked_sub(dur).expect("overflow when subtracting duration from instant")
}
}
#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
impl SubAssign<Duration> for SystemTime {
fn sub_assign(&mut self, other: Duration) {
*self = *self - other;
}
}
#[stable(feature = "time2", since = "1.8.0")]
impl fmt::Debug for SystemTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
/// An anchor in time which can be used to create new `SystemTime` instances or
/// learn about where in time a `SystemTime` lies.
///
/// This constant is defined to be "1970-01-01 00:00:00 UTC" on all systems with
/// respect to the system clock. Using `duration_since` on an existing
/// [`SystemTime`] instance can tell how far away from this point in time a
/// measurement lies, and using `UNIX_EPOCH + duration` can be used to create a
/// [`SystemTime`] instance to represent another fixed point in time.
///
/// # Examples<|fim▁hole|>/// ```no_run
/// use std::time::{SystemTime, UNIX_EPOCH};
///
/// match SystemTime::now().duration_since(UNIX_EPOCH) {
/// Ok(n) => println!("1970-01-01 00:00:00 UTC was {} seconds ago!", n.as_secs()),
/// Err(_) => panic!("SystemTime before UNIX EPOCH!"),
/// }
/// ```
#[stable(feature = "time2", since = "1.8.0")]
pub const UNIX_EPOCH: SystemTime = SystemTime(time::UNIX_EPOCH);
impl SystemTimeError {
/// Returns the positive duration which represents how far forward the
/// second system time was from the first.
///
/// A `SystemTimeError` is returned from the [`SystemTime::duration_since`]
/// and [`SystemTime::elapsed`] methods whenever the second system time
/// represents a point later in time than the `self` of the method call.
///
/// # Examples
///
/// ```no_run
/// use std::thread::sleep;
/// use std::time::{Duration, SystemTime};
///
/// let sys_time = SystemTime::now();
/// sleep(Duration::from_secs(1));
/// let new_sys_time = SystemTime::now();
/// match sys_time.duration_since(new_sys_time) {
/// Ok(_) => {}
/// Err(e) => println!("SystemTimeError difference: {:?}", e.duration()),
/// }
/// ```
#[stable(feature = "time2", since = "1.8.0")]
pub fn duration(&self) -> Duration {
self.0
}
}
#[stable(feature = "time2", since = "1.8.0")]
impl Error for SystemTimeError {
#[allow(deprecated)]
fn description(&self) -> &str {
"other time was not earlier than self"
}
}
#[stable(feature = "time2", since = "1.8.0")]
impl fmt::Display for SystemTimeError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "second time provided was later than self")
}
}
impl FromInner<time::SystemTime> for SystemTime {
fn from_inner(time: time::SystemTime) -> SystemTime {
SystemTime(time)
}
}<|fim▁end|> | /// |
<|file_name|>exp.rs<|end_file_name|><|fim▁begin|>use ast::Op;
use environment::Environment;
use errors::{EvalError, YamlError};
use ast::lit::Lit;
#[derive(Debug, PartialEq, Clone)]
pub enum Exp {
/// A unary operator like !
UnaryOp(Op, Box<Exp>),
/// A binary operator like + or -
BinaryOp(Op, Box<Exp>, Box<Exp>),
/// A variable to retrieve from the environment
Variable(String),
/// Bind a variable name to the evaluated expression
Declare(String, Box<Exp>),
/// Set an existing variable name to the evaluated expression
Assign(String, Box<Exp>),
/// A literal like 2 or "hello"
Lit(Lit),
}
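// A minimal usage sketch (mirroring the tests below): evaluating 5 * (3 - 2)
// against a fresh environment reduces to a literal.
//
//     let mut env = ASTEnvironment::new();
//     let exp = Exp::BinaryOp(Op::Times,
//                             Box::new(Exp::Lit(Lit::Number(5))),
//                             Box::new(Exp::BinaryOp(Op::Minus,
//                                                    Box::new(Exp::Lit(Lit::Number(3))),
//                                                    Box::new(Exp::Lit(Lit::Number(2))))));
//     assert_eq!(exp.eval(&mut env), Ok(Exp::Lit(Lit::Number(5))));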
impl Exp {
/// Evaluates a expression and returns a Result type wrapping an expression
pub fn eval(&self, env: &mut Environment) -> Result<Exp, YamlError> {
match *self {
Exp::Variable(ref name) => {
match env.get(name.as_str()) {
Some(lit) => Ok(Exp::Lit(lit)),
None => Err(YamlError::EvalError(EvalError::VarNotInEnv(name.clone()))),
}
}
Exp::Declare(ref name, ref exp) => {
if let Exp::Lit(value) = try!(exp.eval(env)) {
env.set(name.as_str(), value.clone());
Ok(Exp::Lit(value))
} else {
Err(YamlError::EvalError(EvalError::CannotReduceDeclare(exp.clone())))
}
}
Exp::Assign(ref name, ref exp) => {
if let Exp::Lit(value) = try!(exp.eval(env)) {
env.assign(name.as_str(), value.clone());
Ok(Exp::Lit(value))
} else {
Err(YamlError::EvalError(EvalError::CannotReduceAssign(exp.clone())))
}
}
Exp::UnaryOp(ref op, ref exp) => {
if let Exp::Lit(value) = try!(exp.eval(env)) {
match *op {
Op::Not => Ok(Exp::Lit(try!(!value))),
// Non-unary operators (for exhaustiveness checking)
Op::Plus | Op::Minus | Op::Times | Op::Divide | Op::Modulo |
Op::Exponent | Op::And | Op::Or | Op::Equal | Op::NotEqual => {
return Err(YamlError::EvalError(EvalError::NotUnOp(op.clone())))
}
}
} else {
Err(YamlError::EvalError(EvalError::CannotReduceUnOp(op.clone(), exp.clone())))
}
}
Exp::BinaryOp(ref op, ref exp1, ref exp2) => {
if let (Exp::Lit(val1), Exp::Lit(val2)) = (try!(exp1.eval(env)),
try!(exp2.eval(env))) {
Ok(Exp::Lit(match *op {
Op::Plus => try!(val1 + val2),
Op::Minus => try!(val1 - val2),
Op::Times => try!(val1 * val2),
Op::Divide => try!(val1 / val2),
Op::Modulo => try!(val1 % val2),
Op::Exponent => try!(val1.exp(val2)),
Op::And => try!(val1.and(val2)),
Op::Or => try!(val1.or(val2)),
Op::Equal => Lit::Bool(val1 == val2),
Op::NotEqual => Lit::Bool(val1 != val2),
// Non-binary operators (for exhaustiveness checking)
Op::Not => {
return Err(YamlError::EvalError(EvalError::NotBinOp(op.clone())))
}
}))
} else {
Err(YamlError::EvalError(EvalError::CannotReduceBinOp(op.clone(),
exp1.clone(),
exp2.clone())))
}
}
ref lit @ Exp::Lit(_) => Ok(lit.clone()),
}
}
}
#[cfg(test)]
mod tests {
use environment::{ASTEnvironment, Environment};
use ast::{Exp, Lit, Op};
#[test]
fn test_arith_ast() {
// Test that the result for ast:
// *
// / \
// 5 +
// / \
// - 6
// / \
// 3 2
// is "35"
let mut env = ASTEnvironment::new();
let sub_tree = Exp::BinaryOp(Op::Minus,
Box::new(Exp::Lit(Lit::Number(3))),
Box::new(Exp::Lit(Lit::Number(2))));
let add_tree = Exp::BinaryOp(Op::Plus,
Box::new(sub_tree),
Box::new(Exp::Lit(Lit::Number(6))));<|fim▁hole|> assert_eq!(times_tree.eval(&mut env), Ok(Exp::Lit(Lit::Number(35))));
}
#[test]
fn test_variable_ast() {
// Test that the result for ast:
// *
// / \
// a +
// / \
// - d
// / \
// b c
// is "35" when a is 5, b is 3, c is 2, and d is 6
let mut env = ASTEnvironment::new();
env.set("a", Lit::Number(5));
env.set("b", Lit::Number(3));
env.set("c", Lit::Number(2));
env.set("d", Lit::Number(6));
let (a, b, c, d) = ("a".to_owned(), "b".to_owned(), "c".to_owned(), "d".to_owned());
let sub_tree = Exp::BinaryOp(Op::Minus,
Box::new(Exp::Variable(b)),
Box::new(Exp::Variable(c)));
let add_tree = Exp::BinaryOp(Op::Plus, Box::new(sub_tree), Box::new(Exp::Variable(d)));
let times_tree = Exp::BinaryOp(Op::Times, Box::new(Exp::Variable(a)), Box::new(add_tree));
assert_eq!(times_tree.eval(&mut env), Ok(Exp::Lit(Lit::Number(35))));
}
#[test]
fn test_float_ast() {
// Test that the result for ast:
// *
// / \
// a +
// / \
// - c
// / \
// 1.5 b
// is "27.5" when a is 5, b is 2, and c is 6
let mut env = ASTEnvironment::new();
env.set("a", Lit::Number(5));
env.set("b", Lit::Number(2));
env.set("c", Lit::Number(6));
let (a, b, c) = ("a".to_owned(), "b".to_owned(), "c".to_owned());
let sub_tree = Exp::BinaryOp(Op::Minus,
Box::new(Exp::Lit(Lit::Decimal(1.5))),
Box::new(Exp::Variable(b)));
let add_tree = Exp::BinaryOp(Op::Plus, Box::new(sub_tree), Box::new(Exp::Variable(c)));
let times_tree = Exp::BinaryOp(Op::Times, Box::new(Exp::Variable(a)), Box::new(add_tree));
assert_eq!(times_tree.eval(&mut env), Ok(Exp::Lit(Lit::Decimal(27.5))));
}
#[test]
fn test_declare_assign() {
// Test that evaluating ast:
// :=
// / \
// x *
// / \
// 10 +
// / \
// 2 3
// results in x being bound to 50 in the current scope
// then after pushing a new scope and evaluating ast:
// =
// / \
// x +
// / \
// 1 2
// results in x being set to 3 in the original scope
let mut env = ASTEnvironment::new();
let add_tree = Exp::BinaryOp(Op::Plus,
Box::new(Exp::Lit(Lit::Number(2))),
Box::new(Exp::Lit(Lit::Number(3))));
let times_tree = Exp::BinaryOp(Op::Times,
Box::new(Exp::Lit(Lit::Number(10))),
Box::new(add_tree));
let declare_tree = Exp::Declare("x".to_owned(), Box::new(times_tree));
assert_eq!(declare_tree.eval(&mut env), Ok(Exp::Lit(Lit::Number(50))));
assert_eq!(env.get("x"), Some(Lit::Number(50)));
env.push();
let add_tree = Exp::BinaryOp(Op::Plus,
Box::new(Exp::Lit(Lit::Number(1))),
Box::new(Exp::Lit(Lit::Number(2))));
let assign_tree = Exp::Assign("x".to_owned(), Box::new(add_tree));
assert_eq!(assign_tree.eval(&mut env), Ok(Exp::Lit(Lit::Number(3))));
env.pop();
assert_eq!(env.get("x"), Some(Lit::Number(3)));
}
#[test]
fn test_equality() {
let mut env = ASTEnvironment::new();
// Test number equality
let ast = Exp::BinaryOp(Op::Equal,
Box::new(Exp::Lit(Lit::Number(5))),
Box::new(Exp::Lit(Lit::Number(5))));
assert_eq!(ast.eval(&mut env), Ok(Exp::Lit(Lit::Bool(true))));
let ast = Exp::BinaryOp(Op::Equal,
Box::new(Exp::Lit(Lit::Number(5))),
Box::new(Exp::Lit(Lit::Number(4))));
assert_eq!(ast.eval(&mut env), Ok(Exp::Lit(Lit::Bool(false))));
// Test decimal equality
let ast = Exp::BinaryOp(Op::Equal,
Box::new(Exp::Lit(Lit::Decimal(2.56))),
Box::new(Exp::Lit(Lit::Decimal(2.56))));
assert_eq!(ast.eval(&mut env), Ok(Exp::Lit(Lit::Bool(true))));
let ast = Exp::BinaryOp(Op::Equal,
Box::new(Exp::Lit(Lit::Decimal(2.56))),
Box::new(Exp::Lit(Lit::Decimal(2.55))));
assert_eq!(ast.eval(&mut env), Ok(Exp::Lit(Lit::Bool(false))));
// Test string equality
let ast = Exp::BinaryOp(Op::Equal,
Box::new(Exp::Lit(Lit::Str("Hello".to_owned()))),
Box::new(Exp::Lit(Lit::Str("Hello".to_owned()))));
assert_eq!(ast.eval(&mut env), Ok(Exp::Lit(Lit::Bool(true))));
let ast = Exp::BinaryOp(Op::Equal,
Box::new(Exp::Lit(Lit::Str("Hello".to_owned()))),
Box::new(Exp::Lit(Lit::Str("hello".to_owned()))));
assert_eq!(ast.eval(&mut env), Ok(Exp::Lit(Lit::Bool(false))));
}
#[test]
fn test_boolean_operators() {
let mut env = ASTEnvironment::new();
// Test and operator
let ast = Exp::BinaryOp(Op::And,
Box::new(Exp::Lit(Lit::Bool(true))),
Box::new(Exp::Lit(Lit::Bool(true))));
assert_eq!(ast.eval(&mut env), Ok(Exp::Lit(Lit::Bool(true))));
let ast = Exp::BinaryOp(Op::And,
Box::new(Exp::Lit(Lit::Bool(true))),
Box::new(Exp::Lit(Lit::Bool(false))));
assert_eq!(ast.eval(&mut env), Ok(Exp::Lit(Lit::Bool(false))));
// Test or operator
let ast = Exp::BinaryOp(Op::Or,
Box::new(Exp::Lit(Lit::Bool(true))),
Box::new(Exp::Lit(Lit::Bool(false))));
assert_eq!(ast.eval(&mut env), Ok(Exp::Lit(Lit::Bool(true))));
let ast = Exp::BinaryOp(Op::Or,
Box::new(Exp::Lit(Lit::Bool(false))),
Box::new(Exp::Lit(Lit::Bool(false))));
assert_eq!(ast.eval(&mut env), Ok(Exp::Lit(Lit::Bool(false))));
}
}<|fim▁end|> | let times_tree = Exp::BinaryOp(Op::Times,
Box::new(Exp::Lit(Lit::Number(5))),
Box::new(add_tree));
|
<|file_name|>DirectoryWatcher.js<|end_file_name|><|fim▁begin|>module('lively.ide.DirectoryWatcher').requires('lively.Network').toRun(function() {
// depends on the DirectoryWatcherServer
Object.extend(lively.ide.DirectoryWatcher, {
watchServerURL: new URL(Config.nodeJSURL+'/DirectoryWatchServer/'),
dirs: {},
reset: function() {
// lively.ide.DirectoryWatcher.reset()
this.dirs = {};
this.watchServerURL.withFilename('reset').asWebResource().post();
},
request: function(url, thenDo) {
return url.asWebResource().beAsync().withJSONWhenDone(function(json, status) {
thenDo(!json || json.error, json); }).get();
},
getFiles: function(dir, thenDo) {
this.request(this.watchServerURL.withFilename('files').withQuery({dir: dir}), thenDo);
},
getChanges: function(dir, since, startWatchTime, thenDo) {
this.request(this.watchServerURL.withFilename('changes').withQuery({
startWatchTime: startWatchTime, since: since, dir: dir}), thenDo);
},
withFilesOfDir: function(dir, doFunc) {
// Retrieves efficiently the files of dir. Uses a server side watcher that
// sends infos about file changes, deletions, creations.
// This methods synchs those with the cached state held in this object
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
// dir = lively.shell.exec('pwd', {sync:true}).resultString()
// lively.ide.DirectoryWatcher.dirs
// lively.ide.DirectoryWatcher.withFilesOfDir(dir, function(files) { show(Object.keys(files).length); })
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
var watchState = this.dirs[dir] || (this.dirs[dir] = {updateInProgress: false, callbacks: []});
doFunc && watchState.callbacks.push(doFunc);
if (watchState.updateInProgress) { return; }
watchState.updateInProgress = true;
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
if (!watchState.files) { // first time called
this.getFiles(dir, function(err, result) {
if (err) show("dir watch error: %s", err);
result.files && Properties.forEachOwn(result.files, function(path, stat) { extend(stat); })
Object.extend(watchState, {
files: result.files,
lastUpdated: result.startTime,
startTime: result.startTime
});
whenDone();
});
return;
}
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
var timeSinceLastUpdate = Date.now() - (watchState.lastUpdated || 0);
if (timeSinceLastUpdate < 10 * 1000) { whenDone(); } // recently updated
// get updates
    this.getChanges(dir, watchState.lastUpdated, watchState.startTime, function(err, result) {
        if (err) { show("dir watch error: %s", err); whenDone(); return; }
        if (!result.changes || result.changes.length === 0) { whenDone(); return; }
watchState.lastUpdated = result.changes[0].time;
console.log('%s files changed in %s: %s', result.changes.length, dir, result.changes.pluck('path').join('\n'));
result.changes.forEach(function(change) {
switch (change.type) {
case 'removal': delete watchState.files[change.path]; break;
case 'creation': case 'change': watchState.files[change.path] = extend(change.stat); break;<|fim▁hole|> }
});
whenDone();
});
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
function whenDone() {
watchState.updateInProgress = false;
var cb;
while ((cb = watchState.callbacks.shift())) cb(watchState.files);
}
function extend(statObj) { // convert date string into a date object
if (!statObj) statObj = {};
statObj.isDirectory = !!(statObj.mode & 0x4000);
['atime', 'mtime', 'ctime'].forEach(function(field) {
if (statObj[field]) statObj[field] = new Date(statObj[field]); });
return statObj;
}
}
});
}) // end of module<|fim▁end|> | |
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()<|fim▁hole|>
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Token'
db.create_table('authtoken_token', (
('key', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='auth_token', unique=True, to=orm['%s.%s' % (User._meta.app_label, User._meta.object_name)])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('authtoken', ['Token'])
def backwards(self, orm):
# Deleting model 'Token'
db.delete_table('authtoken_token')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
"%s.%s" % (User._meta.app_label, User._meta.module_name): {
'Meta': {'object_name': User._meta.module_name, 'db_table': repr(User._meta.db_table)},
},
'authtoken.token': {
'Meta': {'object_name': 'Token'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'auth_token'", 'unique': 'True', 'to': "orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['authtoken']<|fim▁end|> | |
<|file_name|>motor_primitive.py<|end_file_name|><|fim▁begin|>from numpy import linspace, array, arange, tile, dot, zeros
from .gaussian import Gaussian
from ..utils import rk4
class BasisFunctions(object):
def __init__(self, n_basis, duration, dt, sigma):
self.n_basis = n_basis
means = linspace(0, duration, n_basis)
# FIXME:
variances = duration / (sigma * n_basis)**2
gaussians = [Gaussian(array([means[k]]), array([[variances]]))
for k in range(len(means))]
self.x = arange(0., duration, dt)
y = array([gaussians[k].normal(self.x.reshape(-1, 1)) for k in range(len(means))])
self.z = y / tile(sum(y, 0), (n_basis, 1))
def trajectory(self, weights):
return dot(weights, self.z)
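# A minimal usage sketch (parameter values are illustrative): n_basis
# normalized Gaussians tile the movement duration, so a weight vector maps
# linearly to an acceleration profile. With numpy.ones the profile is flat,
# because the basis functions sum to one at every time step:
#
#     basis = BasisFunctions(n_basis=10, duration=1.0, dt=0.01, sigma=2.)
#     acc = basis.trajectory(ones(10))  # ~constant array of ones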
class MovementPrimitive(object):
def __init__(self, duration, n_basis, dt, stiffness=0., damping=0.):
"""
:param float duration: duration of the movement in seconds
:param list dt: time step used for numerical integration
"""
self.dt = dt
self.duration = duration
self.stiffness = stiffness
self.damping = damping
self.basis = BasisFunctions(n_basis, self.duration, dt, 2.)
        self.traj = zeros((int(self.duration/dt), 3))
        self.acc = zeros(int(self.duration/dt)) # +1 due to ..utils.rk4 implementation
def acceleration(self, t, state):
intrinsic_acc = - self.stiffness*state[0] - self.damping*state[1]
        return array([state[1], self.acc[int(t / self.dt)] + intrinsic_acc])
def trajectory(self, x0, command):
self.acc = self.basis.trajectory(command)
# self.acc[-1] = self.acc[-2] # still due to ..utils.rk4 implementation
t = 0.
self.traj[0, :] = [x0[0], x0[1], self.acc[0]]
i_t = 1
state = x0<|fim▁hole|> self.traj[i_t, :] = [state[0], state[1], self.acc[i_t]]
i_t += 1
return self.traj<|fim▁end|> | while i_t < self.duration / self.dt:
# print i_t, t, self.duration - self.dt
t, state = rk4(t, self.dt, state, self.acceleration)
# print state |
<|file_name|>Sample.py<|end_file_name|><|fim▁begin|>import math
from param import *
class Sample:
def __init__(self):
self.time = 0
self.home1_x = 0.0
self.home1_y = 0.0
self.home1_theta = 0.0
self.home2_x = 0.0
self.home2_y = 0.0
self.home2_theta = 0.0
self.away1_x = 0.0
self.away1_y = 0.0
self.away1_theta = 0.0
self.away2_x = 0.0
self.away2_y = 0.0
self.away2_theta = 0.0
self.ball_x = 0.0
self.ball_y = 0.0
self.kill = 0.0
    def setDataFromSample(self, data):
self.time = round(timeToInt(data.header.stamp),2)<|fim▁hole|> mag = math.sqrt(home1_x**2+home1_y**2)
angleCamera = math.atan(HEIGHT_CAMERA/mag)
offset = HEIGHT_ROBOT / math.tan(angleCamera)
home1_x = home1_x - offset * math.cos(angleField)
home1_y = home1_y - offset * math.sin(angleField)
self.home1_x = round(home1_x,3)
self.home1_y = round(home1_y,3)
self.home2_x = pixelToMeter(data.home2_x)
self.home2_y = pixelToMeter(data.home2_y)
self.home2_theta = degreeToRadian(data.home2_theta)
self.away1_x = pixelToMeter(data.away1_x)
self.away1_y = pixelToMeter(data.away1_y)
self.away1_theta = degreeToRadian(data.away1_theta)
self.away2_x = pixelToMeter(data.away2_x)
self.away2_y = pixelToMeter(data.away2_y)
self.away2_theta = degreeToRadian(data.away2_theta)
self.ball_x = pixelToMeter(data.ball_x)
self.ball_y = pixelToMeter(data.ball_y)
def getDiscreteSample(self):
        home1_x = meterToPixel(self.home1_x)
        home1_y = meterToPixel(self.home1_y)
        home1_theta = radianToDegree(self.home1_theta)
        home2_x = meterToPixel(self.home2_x)
        home2_y = meterToPixel(self.home2_y)
        home2_theta = radianToDegree(self.home2_theta)
        away1_x = meterToPixel(self.away1_x)
        away1_y = meterToPixel(self.away1_y)
        away1_theta = radianToDegree(self.away1_theta)
        away2_x = meterToPixel(self.away2_x)
        away2_y = meterToPixel(self.away2_y)
        away2_theta = radianToDegree(self.away2_theta)
        ball_x = meterToPixel(self.ball_x)
        ball_y = meterToPixel(self.ball_y)
return (home1_x, home1_y, home1_theta,
home2_x, home2_y, home2_theta,
away1_x, away1_y, away1_theta,
                away2_x, away2_y, away2_theta,
ball_x, ball_y)<|fim▁end|> | self.home1_theta = round(degreeToRadian(data.home1_theta),3)
home1_x = pixelToMeter(data.home1_x)
home1_y = pixelToMeter(data.home1_y)
angleField = math.atan2(home1_y, home1_x) |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>import numpy as np
import tensorflow as tf<|fim▁hole|>
from misc import *<|fim▁end|> |
import dists |
<|file_name|>resnet-v1.py<|end_file_name|><|fim▁begin|>'''
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
(Original author Wei Wu) by Antti-Pekka Hynninen
Implementing the original resnet ILSVRC 2015 winning network from:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Deep Residual Learning for Image Recognition"
'''
import mxnet as mx
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
bnf : int
Bottle neck channels factor with regard to num_filter
stride : tuple
Stride used in convolution
dim_match : Boolean
True means channel number between input and output is the same, otherwise means differ
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
"""
if bottle_neck:
conv1 = mx.sym.Convolution(data=data, num_filter=int(num_filter*0.25), kernel=(1,1), stride=stride, pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv2 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv3 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
bn3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if dim_match:
shortcut = data
else:
conv1sc = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_conv1sc')
shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return mx.sym.Activation(data=bn3 + shortcut, act_type='relu', name=name + '_relu3')
else:
conv1 = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv2 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
if dim_match:
shortcut = data
else:
conv1sc = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_conv1sc')
shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return mx.sym.Activation(data=bn2 + shortcut, act_type='relu', name=name + '_relu3')
def resnet(units, num_stages, filter_list, num_classes, image_shape, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False):
"""Return ResNet symbol of
Parameters
----------
units : list
Number of units in each stage
num_stages : int
Number of stage
filter_list : list
Channel size of each stage
num_classes : int
        Output size of symbol
dataset : str
        Dataset type; only cifar10 and imagenet are supported
workspace : int
Workspace used in convolution operator
"""
num_unit = len(units)
assert(num_unit == num_stages)
data = mx.sym.Variable(name='data')
data = mx.sym.identity(data=data, name='id')
(nchannel, height, width) = image_shape
if height <= 32: # such as cifar10
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
no_bias=True, name="conv0", workspace=workspace)
# Is this BatchNorm supposed to be here?
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
else: # often expected to be 224 such as imagenet
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
body = mx.symbol.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')
for i in range(num_stages):
body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, workspace=workspace,
memonger=memonger)
for j in range(units[i]-1):
body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
bottle_neck=bottle_neck, workspace=workspace, memonger=memonger)
# bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
# relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
# Although kernel is not used here when global_pool=True, we should put one
pool1 = mx.symbol.Pooling(data=body, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
flat = mx.symbol.Flatten(data=pool1)
fc1 = mx.symbol.FullyConnected(data=flat, num_hidden=num_classes, name='fc1')
return mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
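# Hypothetical call (shapes assumed): a 50-layer ImageNet network would be
# requested as:
#   sym = get_symbol(num_classes=1000, num_layers=50, image_shape='3,224,224')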
def get_symbol(num_classes, num_layers, image_shape, conv_workspace=256, **kwargs):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
(Original author Wei Wu) by Antti-Pekka Hynninen
Implementing the original resnet ILSVRC 2015 winning network from:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Deep Residual Learning for Image Recognition"
"""
image_shape = [int(l) for l in image_shape.split(',')]
(nchannel, height, width) = image_shape
if height <= 28:<|fim▁hole|> bottle_neck = True
elif (num_layers-2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers-2)//6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}, you can do it youself".format(num_layers))
units = per_unit * num_stages
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}, you can do it youself".format(num_layers))
return resnet(units = units,
num_stages = num_stages,
filter_list = filter_list,
num_classes = num_classes,
image_shape = image_shape,
bottle_neck = bottle_neck,
workspace = conv_workspace)<|fim▁end|> | num_stages = 3
if (num_layers-2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers-2)//9]
filter_list = [16, 64, 128, 256] |
<|file_name|>Config.cpp<|end_file_name|><|fim▁begin|>//
// Created by Giacomo Tanganelli on 04/01/17.
//
#include <FS.h> //must be the first include
#include "Config.h"
#include <ArduinoJson.h>
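// Expected shape of /config.json (illustrative values only; the keys must
// match the lookups in ReadConfig below):
//   {"portal_user":"admin","portal_pass":"secret","network":"MySSID",
//    "network_password":"...","ip":"192.168.1.10","port":"5683","id":"cse1",
//    "name":"cse","app":"dimmerApp","dimmer":"dim0","switch":"sw0",
//    "user":"admin","pass":"admin"}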
int Config::ReadConfig() {
#if DEBUG
Serial.println("mounting FS...");
#endif
if (SPIFFS.begin()) {
#if DEBUG
Serial.println("mounted file system");
#endif
if (SPIFFS.exists("/config.json")) {
//file exists, reading and loading
#if DEBUG
Serial.println("reading config file");
#endif
File configFile = SPIFFS.open(CONFIG_FILE, "r");
if (configFile) {
#if DEBUG
Serial.println("opened config file");
#endif
size_t size = configFile.size();
// Allocate a buffer to store contents of the file.
std::unique_ptr<char[]> buf(new char[size]);
configFile.readBytes(buf.get(), size);
DynamicJsonBuffer jsonBuffer;
JsonObject &json = jsonBuffer.parseObject(buf.get());
if (json.success()) {
if(json.containsKey("portal_user"))
_portal_user = json["portal_user"].as<String>();
else
return PARAMETERS_ERROR;
if(json.containsKey("portal_pass"))
_portal_pass = json["portal_pass"].as<String>();
else
return PARAMETERS_ERROR;
if(json.containsKey("network"))
_network = json["network"].as<String>();
else
return PARAMETERS_ERROR;
if(json.containsKey("network_password"))
_network_password = json["network_password"].as<String>();
else
return PARAMETERS_ERROR;
if(json.containsKey("ip"))
_cse_ip.fromString(json["ip"].as<String>());
else
return PARAMETERS_ERROR;
if(json.containsKey("port"))
_cse_port = json["port"].as<int>();
else
return PARAMETERS_ERROR;
if(json.containsKey("id"))
_cse_id = json["id"].as<String>();
else
return PARAMETERS_ERROR;
if(json.containsKey("name"))
_cse_name = json["name"].as<String>();
else
return PARAMETERS_ERROR;
if(json.containsKey("app"))
_app_name = json["app"].as<String>();
else
return PARAMETERS_ERROR;
if(json.containsKey("dimmer"))
_dimmer_name = json["dimmer"].as<String>();
else
return PARAMETERS_ERROR;
if(json.containsKey("switch"))
_switch_name = json["switch"].as<String>();
else
return PARAMETERS_ERROR;
if(json.containsKey("user"))
_user = json["user"].as<String>();
else
return PARAMETERS_ERROR;
if(json.containsKey("pass"))
_pass = json["pass"].as<String>();
else
return PARAMETERS_ERROR;
} else {
return PARSING_FAILED;
}
} else {
return ERROR_OPENING;
}
} else {
return FILE_NOT_FOUND;
}
} else {
return FAILED_MOUNT_FS;
}
return 0;
}
const String& Config::get_network() const {
return _network;
}
const String& Config::get_network_password() const {
return _network_password;
}
void Config::handleChange(AsyncWebServerRequest *request) {
//TODO check confirm password
String page = FPSTR(HTML_HEAD);
page.replace("{v}", "Config Eagle Dimmer Switch");
page += FPSTR(HTML_SCRIPT);
page += FPSTR(HTML_STYLE);
page += FPSTR(HTML_HEAD_END);
page += FPSTR(HTML_FORM_CHANGE_START);
page += addParam("USER:", "user", "user", "user", "200", _portal_user, "text", "Only characters, numbers, _ and - are allowed", "pattern='[A-Za-z0-9_-]*'");
page += addParam("PASSWORD:", "password", "password", "password", "200", _portal_pass, "password", "Only characters, numbers, _ and - are allowed", "pattern='[A-Za-z0-9_-]*'");
page += addParam("CONFIRM PASSWORD:", "confirm_password", "confirm_password", "confirm_password", "200", "", "password", "Only characters, numbers, _ and - are allowed", "pattern='[A-Za-z0-9_-]*'");
page += FPSTR(HTML_FORM_CHANGE_END);
// page += FPSTR(HTML_SCAN_LINK);
page += FPSTR(HTML_END);
request->send(200, "text/html", page);
}
void Config::handleConfig(AsyncWebServerRequest *request) {
String page = FPSTR(HTML_HEAD);
page.replace("{v}", "Config Eagle Dimmer Switch");
page += FPSTR(HTML_SCRIPT);
page += FPSTR(HTML_STYLE);
page += FPSTR(HTML_HEAD_END);
/*WiFi.mode(WIFI_AP_STA);
delay(100);
int n = WiFi.scanNetworks();
if (n == 0) {
page += F("No networks found. Refresh to scan again.");
} else {
//sort networks
int indices[n];
for (int i = 0; i < n; i++) {
indices[i] = i;
}
for (int i = 0; i < n; i++) {
for (int j = i + 1; j < n; j++) {
if (WiFi.RSSI(indices[j]) > WiFi.RSSI(indices[i])) {
std::swap(indices[i], indices[j]);
}
}
}
// remove duplicates
String cssid;
for (int i = 0; i < n; i++) {
if (indices[i] == -1) continue;
cssid = WiFi.SSID(indices[i]);
for (int j = i + 1; j < n; j++) {
if (cssid == WiFi.SSID(indices[j])) {
indices[j] = -1; // set dup aps to index -1
}
}
}
//display networks in page
for (int i = 0; i < n; i++) {
if (indices[i] == -1) continue; // skip dups
int quality = WiFi.RSSI(indices[i]);
String item = FPSTR(HTML_ITEM);
String rssiQ;
rssiQ += quality;
item.replace("{v}", WiFi.SSID(indices[i]));
item.replace("{r}", rssiQ);
if (WiFi.encryptionType(indices[i]) != ENC_TYPE_NONE) {
item.replace("{i}", "l");
} else {
item.replace("{i}", "");
}
page += item;
}
page += "<br/>";
}*/
//TODO validate input
page += FPSTR(HTML_FORM_CHANGE);
page += FPSTR(HTML_FORM_START);
page += addParam("NETWORK:", "network", "network", "network", "200", _network, "text", "Network SSID", "");
page += addParam("NETWORK PASSWORD:", "network_password", "network_password", "network_password", "200", _network_password, "password", "Network Password", "");
page += addParam("CSE IP:", "ip", "ip", "cseIP", "15", _cse_ip.toString(), "text", "IP is not valid", "pattern='^(25[0-5]|2[0-4][0-9]|[1][0-9]?[0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'");
page += addParam("CSE PORT:", "port", "port", "csePort", "5", String(_cse_port), "text", "Port must be a number", "pattern='[0-9]*'");
page += addParam("CSE ID:", "id", "id", "cseId", "32", _cse_id, "text", "Only characters, numbers, _ and - are allowed", "pattern='[A-Za-z0-9_-]*'");
page += addParam("CSE NAME:", "name", "name", "cseName", "32", _cse_name, "text", "Only characters, numbers, _ and - are allowed", "pattern='[A-Za-z0-9_-]*'");
page += addParam("CSE USER:", "user", "user", "user", "32", _user, "text", "Only characters, numbers, _ and - are allowed", "pattern='[A-Za-z0-9_-]*'");
page += addParam("CSE PASSWORD:", "password", "password", "pass", "32", _pass, "password", "Only characters, numbers, _ and - are allowed", "pattern='[A-Za-z0-9_-]*'");
page += addParam("APPLICATION NAME:", "app", "app", "appName", "32", _app_name, "text", "Only characters, numbers, _ and - are allowed", "pattern='[A-Za-z0-9_-]*'");
/* page += addParam("DIMMER CONTAINER NAME:", "dimmer", "dimmer", "dimmerName", "32", _dimmer_name, "text");
page += addParam("SWITCH CONTAINER NAME:", "switch", "switch", "switchName", "32", _switch_name, "text");*/
page += FPSTR(HTML_FORM_END);
// page += FPSTR(HTML_SCAN_LINK);
page += FPSTR(HTML_FORM_RESET);
page += FPSTR(HTML_END);
request->send(200, "text/html", page);
}
String Config::addParam(String label, String i, String n, String p, String l, String v, String t, String title, String pattern) {
String pitem = FPSTR(HTML_FORM_PARAM);
pitem.replace("{label}", label);
pitem.replace("{i}", i);
pitem.replace("{n}", n);
pitem.replace("{p}", p);
pitem.replace("{l}", l);
pitem.replace("{v}", v);
pitem.replace("{t}", t);
pitem.replace("{title}", title);
pitem.replace("{pattern}", pattern);
return pitem;
}
void Config::handleChangeSave(AsyncWebServerRequest *request) {
if(request->hasParam("user")){
AsyncWebParameter* p = request->getParam("user");
_portal_user = p->value();
}
if(request->hasParam("password")){
AsyncWebParameter* p = request->getParam("password");
_portal_pass = p->value();
}
int check = WriteConfig();
String page = FPSTR(HTML_HEAD);
page.replace("{v}", "Config ESP");
//page += FPSTR(HTML_SCRIPT);
page += FPSTR(HTML_STYLE);
page += FPSTR(HTML_HEAD_END);
if(check == 0)
page += FPSTR(HTML_SAVED);
else
page += FPSTR(HTML_SAVED_ERROR);
page += FPSTR(HTML_END);
request->send(200, "text/html", page);
}
void Config::handleSave(AsyncWebServerRequest *request) {
if(request->hasParam("network")){
AsyncWebParameter* p = request->getParam("network");
_network = p->value();
}
if(request->hasParam("network_password")){
AsyncWebParameter* p = request->getParam("network_password");
_network_password = p->value();
}
if(request->hasParam("ip")){
AsyncWebParameter* p = request->getParam("ip");
_cse_ip.fromString(p->value());
}
if(request->hasParam("port")){
AsyncWebParameter* p = request->getParam("port");
_cse_port = p->value().toInt();
}
if(request->hasParam("id")){
AsyncWebParameter* p = request->getParam("id");
_cse_id = p->value();
}
if(request->hasParam("name")){
AsyncWebParameter* p = request->getParam("name");
_cse_name = p->value();
}
if(request->hasParam("user")){
AsyncWebParameter* p = request->getParam("user");
_user = p->value();
}
if(request->hasParam("password")){
AsyncWebParameter* p = request->getParam("password");
_pass = p->value();
}
/*if(request->hasParam("dimmer")){
AsyncWebParameter* p = request->getParam("dimmer");
_dimmer_name = p->value();
}
if(request->hasParam("switch")){
AsyncWebParameter* p = request->getParam("switch");
_switch_name = p->value();
}*/
if(request->hasParam("app")){
AsyncWebParameter* p = request->getParam("app");
_app_name = p->value();
}
int check = WriteConfig();
String page = FPSTR(HTML_HEAD);
page.replace("{v}", "Config ESP");
//page += FPSTR(HTML_SCRIPT);
page += FPSTR(HTML_STYLE);
page += FPSTR(HTML_HEAD_END);
if(check == 0)
page += FPSTR(HTML_SAVED);
else
page += FPSTR(HTML_SAVED_ERROR);
page += FPSTR(HTML_END);
request->send(200, "text/html", page);
}
void Config::handleReset(AsyncWebServerRequest *request) {
String page = FPSTR(HTML_HEAD);
page.replace("{v}", "Config ESP");
page += FPSTR(HTML_SCRIPT);
page += FPSTR(HTML_STYLE);
page += FPSTR(HTML_HEAD_END);
page += FPSTR(HTML_SAVED_RESET);
page += FPSTR(HTML_END);
request->send(200, "text/html", page);
DelConfig();
}
int Config::WriteConfig() {
if (SPIFFS.begin()) {
DynamicJsonBuffer jsonBuffer;<|fim▁hole|> json["name"] = _cse_name;
json["user"] = _user;
json["pass"] = _pass;
json["dimmer"] = _dimmer_name;
json["switch"] = _switch_name;
json["app"] = _app_name;
json["network"] = _network;
json["network_password"] = _network_password;
json["portal_user"] = _portal_user;
json["portal_pass"] = _portal_pass;
File configFile = SPIFFS.open(CONFIG_FILE, "w");
if (!configFile) {
return ERROR_OPENING;
}
json.printTo(configFile);
configFile.close();
} else
return FAILED_MOUNT_FS;
return 0;
}
int Config::DelConfig() {
if (SPIFFS.begin()) {
SPIFFS.remove(CONFIG_FILE);
} else
return FAILED_MOUNT_FS;
return 0;
}<|fim▁end|> | JsonObject &json = jsonBuffer.createObject();
json["ip"] = _cse_ip.toString();
json["port"] = String(_cse_port);
json["id"] = _cse_id; |
<|file_name|>train_imagenet.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""Example code of learning a large scale convnet from ILSVRC2012 dataset.
Prerequisite: To run this example, crop the center of ILSVRC2012 training and
validation images and scale them to 256x256, and make two lists of space-
separated CSV whose first column is full path to image and second column is
zero-origin label (this format is same as that used by Caffe's ImageDataLayer).
"""
import argparse
import cPickle as pickle
from datetime import timedelta
import json
import math
from multiprocessing import Pool
from Queue import Queue
import random
import sys
from threading import Thread
import time
import cv2
import numpy as np
from chainer import cuda, Variable, FunctionSet, optimizers
import chainer.functions as F
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Path to the mean file (computed by compute_mean.py)')
parser.add_argument('--arch', '-a', default='nin',
help='Convnet architecture (nin, alexbn, googlenet, googlenetbn)')
parser.add_argument('--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.add_argument('--epoch', '-E', default=10, type=int,
help='Number of epochs to learn')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--loaderjob', '-j', default=20, type=int,
help='Number of parallel data loading processes')
parser.add_argument('--out', '-o', default='model',
help='Path to save model on each validation')
args = parser.parse_args()
assert 50000 % args.val_batchsize == 0
# Prepare dataset
def load_image_list(path):
tuples = []
for line in open(path):
pair = line.strip().split()
tuples.append((pair[0], np.int32(pair[1])))
return tuples
train_list = load_image_list(args.train)
val_list = load_image_list(args.val)
mean_image = pickle.load(open(args.mean, 'rb'))
# Prepare model
if args.arch == 'nin':
import nin
model = nin.NIN()
elif args.arch == 'alexbn':
import alexbn
model = alexbn.AlexBN()
elif args.arch == 'googlenet':
import inception
model = inception.GoogLeNet()
elif args.arch == 'googlenetbn':
import inceptionbn
model = inceptionbn.GoogLeNetBN()
else:
raise ValueError('Invalid architecture name')
if args.gpu >= 0:
cuda.init(args.gpu)
model.to_gpu()
# Setup optimizer
optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model.collect_parameters())
# ------------------------------------------------------------------------------
# This example consists of three threads: data feeder, logger and trainer. These
# communicate with each other via Queue.
data_q = Queue(maxsize=1)
res_q = Queue()
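# Protocol: feed_data pushes the mode markers 'train'/'val' and (x, y) batch
# tuples into data_q; train_loop echoes the markers into res_q along with
# per-batch (loss, accuracy) pairs, and 'end' shuts everything down.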
# Data loading routine
cropwidth = 256 - model.insize
def read_image(path, center=False, flip=False):
image = cv2.imread(path).transpose(2, 0, 1)
if center:
top = left = cropwidth / 2
else:
top = random.randint(0, cropwidth - 1)
left = random.randint(0, cropwidth - 1)
bottom = model.insize + top
right = model.insize + left
image = image[[2, 1, 0], top:bottom, left:right].astype(np.float32)
image -= mean_image[:, top:bottom, left:right]
image /= 255
if flip and random.randint(0, 1) == 0:
return image[:, :, ::-1]
else:
return image
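# Training batches use random crops plus random horizontal flips; validation
# uses deterministic center crops without flipping (see feed_data below).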
# Data feeder
def feed_data():
i = 0
count = 0
x_batch = np.ndarray(
(args.batchsize, 3, model.insize, model.insize), dtype=np.float32)
y_batch = np.ndarray((args.batchsize,), dtype=np.int32)
val_x_batch = np.ndarray(
(args.val_batchsize, 3, model.insize, model.insize), dtype=np.float32)
val_y_batch = np.ndarray((args.val_batchsize,), dtype=np.int32)
batch_pool = [None] * args.batchsize
val_batch_pool = [None] * args.val_batchsize
pool = Pool(args.loaderjob)
data_q.put('train')
for epoch in xrange(1, 1 + args.epoch):
print >> sys.stderr, 'epoch', epoch
print >> sys.stderr, 'learning rate', optimizer.lr
perm = np.random.permutation(len(train_list))
for idx in perm:
path, label = train_list[idx]
batch_pool[i] = pool.apply_async(read_image, (path, False, True))
y_batch[i] = label
i += 1
if i == args.batchsize:
for j, x in enumerate(batch_pool):
x_batch[j] = x.get()
data_q.put((x_batch.copy(), y_batch.copy()))
i = 0
count += 1
if count % 100000 == 0:
data_q.put('val')
j = 0
for path, label in val_list:
val_batch_pool[j] = pool.apply_async(
read_image, (path, True, False))
val_y_batch[j] = label
j += 1
if j == args.val_batchsize:
for k, x in enumerate(val_batch_pool):
val_x_batch[k] = x.get()
data_q.put((val_x_batch.copy(), val_y_batch.copy()))
j = 0
data_q.put('train')
optimizer.lr *= 0.97
pool.close()
pool.join()
data_q.put('end')
# Logger
def log_result():
train_count = 0
train_cur_loss = 0
train_cur_accuracy = 0
begin_at = time.time()
val_begin_at = None
while True:
result = res_q.get()
if result == 'end':
print >> sys.stderr, ''
break
elif result == 'train':
print >> sys.stderr, ''
train = True
if val_begin_at is not None:
begin_at += time.time() - val_begin_at
val_begin_at = None
continue
elif result == 'val':
print >> sys.stderr, ''
train = False
val_count = val_loss = val_accuracy = 0
val_begin_at = time.time()
continue
loss, accuracy = result
if train:
train_count += 1
duration = time.time() - begin_at
throughput = train_count * args.batchsize / duration
sys.stderr.write(
'\rtrain {} updates ({} samples) time: {} ({} images/sec)'<|fim▁hole|> train_cur_loss += loss
train_cur_accuracy += accuracy
if train_count % 1000 == 0:
mean_loss = train_cur_loss / 1000
mean_error = 1 - train_cur_accuracy / 1000
print >> sys.stderr, ''
print json.dumps({'type': 'train', 'iteration': train_count,
'error': mean_error, 'loss': mean_loss})
sys.stdout.flush()
train_cur_loss = 0
train_cur_accuracy = 0
else:
val_count += args.val_batchsize
duration = time.time() - val_begin_at
throughput = val_count / duration
sys.stderr.write(
'\rval {} batches ({} samples) time: {} ({} images/sec)'
.format(val_count / args.val_batchsize, val_count,
timedelta(seconds=duration), throughput))
val_loss += loss
val_accuracy += accuracy
if val_count == 50000:
mean_loss = val_loss * args.val_batchsize / 50000
mean_error = 1 - val_accuracy * args.val_batchsize / 50000
print >> sys.stderr, ''
print json.dumps({'type': 'val', 'iteration': train_count,
'error': mean_error, 'loss': mean_loss})
sys.stdout.flush()
# Trainer
def train_loop():
while True:
while data_q.empty():
time.sleep(0.1)
inp = data_q.get()
if inp == 'end': # quit
res_q.put('end')
break
elif inp == 'train': # restart training
res_q.put('train')
train = True
continue
elif inp == 'val': # start validation
res_q.put('val')
pickle.dump(model, open('model', 'wb'), -1)
train = False
continue
x, y = inp
if args.gpu >= 0:
x = cuda.to_gpu(x)
y = cuda.to_gpu(y)
if train:
optimizer.zero_grads()
loss, accuracy = model.forward(x, y)
loss.backward()
optimizer.update()
else:
loss, accuracy = model.forward(x, y, train=False)
res_q.put((float(cuda.to_cpu(loss.data)),
float(cuda.to_cpu(accuracy.data))))
del loss, accuracy, x, y
# Invoke threads
feeder = Thread(target=feed_data)
feeder.daemon = True
feeder.start()
logger = Thread(target=log_result)
logger.daemon = True
logger.start()
train_loop()
feeder.join()
logger.join()
# Save final model
pickle.dump(model, open('model', 'wb'), -1)<|fim▁end|> | .format(train_count, train_count * args.batchsize,
timedelta(seconds=duration), throughput))
|
<|file_name|>watch.go<|end_file_name|><|fim▁begin|>// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import (
"io"
"sync"
"time"
"golang.org/x/net/context"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/mvccpb"
)
type watchServer struct {
clusterID int64
memberID int64
raftTimer etcdserver.RaftTimer
watchable mvcc.Watchable
}
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
return &watchServer{
clusterID: int64(s.Cluster().ID()),
memberID: int64(s.ID()),
raftTimer: s,
watchable: s.Watchable(),
}
}
var (
// External test can read this with GetProgressReportInterval()
// and change this to a small value to finish fast with
// SetProgressReportInterval().
progressReportInterval = 10 * time.Minute
progressReportIntervalMu sync.RWMutex
)
func GetProgressReportInterval() time.Duration {
progressReportIntervalMu.RLock()
defer progressReportIntervalMu.RUnlock()
return progressReportInterval
}
func SetProgressReportInterval(newTimeout time.Duration) {
progressReportIntervalMu.Lock()
defer progressReportIntervalMu.Unlock()
progressReportInterval = newTimeout
}
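// Test-side sketch (hypothetical): shrink the interval so progress
// notifications arrive quickly, restoring the default afterwards.
//
//	old := GetProgressReportInterval()
//	SetProgressReportInterval(100 * time.Millisecond)
//	defer SetProgressReportInterval(old)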
const (
	// We send ctrl responses inside the read loop. We do not want
	// sends to block reads, but we still want the ctrl responses we
	// send to be serialized. Thus we use a buffered chan to solve
	// the problem. A small buffer should be OK for most cases, since
	// we expect ctrl requests to be infrequent.
ctrlStreamBufLen = 16
)
// serverWatchStream is an etcd server side stream. It receives requests
// from client side gRPC stream. It receives watch events from mvcc.WatchStream,
// and creates responses that are forwarded to the gRPC stream.
// It also forwards control messages like watch created and canceled.
type serverWatchStream struct {
clusterID int64
memberID int64
raftTimer etcdserver.RaftTimer
gRPCStream pb.Watch_WatchServer
watchStream mvcc.WatchStream
ctrlStream chan *pb.WatchResponse
// progress tracks the watchID that stream might need to send
// progress to.
progress map[mvcc.WatchID]bool
// mu protects progress
mu sync.Mutex
// closec indicates the stream is closed.
closec chan struct{}
}
func (ws *watchServer) Watch(stream pb.Watch_WatchServer) error {
sws := serverWatchStream{
clusterID: ws.clusterID,
memberID: ws.memberID,
raftTimer: ws.raftTimer,
gRPCStream: stream,
watchStream: ws.watchable.NewWatchStream(),
// chan for sending control response like watcher created and canceled.
ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
progress: make(map[mvcc.WatchID]bool),
closec: make(chan struct{}),
}
go sws.sendLoop()
errc := make(chan error, 1)
go func() {
errc <- sws.recvLoop()
sws.close()
}()
select {
case err := <-errc:
return err
case <-stream.Context().Done():
err := stream.Context().Err()
// the only server-side cancellation is noleader for now.
if err == context.Canceled {
return rpctypes.ErrGRPCNoLeader
}
return err
}
}
func (sws *serverWatchStream) recvLoop() error {
for {
req, err := sws.gRPCStream.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
switch uv := req.RequestUnion.(type) {
case *pb.WatchRequest_CreateRequest:
if uv.CreateRequest == nil {
break
}
creq := uv.CreateRequest
if len(creq.Key) == 0 {
// \x00 is the smallest key
creq.Key = []byte{0}
}
if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
// support >= key queries
creq.RangeEnd = []byte{}
}
wsrev := sws.watchStream.Rev()
rev := creq.StartRevision
if rev == 0 {
rev = wsrev + 1
}
id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev)
if id != -1 && creq.ProgressNotify {
sws.progress[id] = true
}
sws.ctrlStream <- &pb.WatchResponse{
Header: sws.newResponseHeader(wsrev),
WatchId: int64(id),
Created: true,
Canceled: id == -1,
}
case *pb.WatchRequest_CancelRequest:
if uv.CancelRequest != nil {
id := uv.CancelRequest.WatchId
err := sws.watchStream.Cancel(mvcc.WatchID(id))
if err == nil {
sws.ctrlStream <- &pb.WatchResponse{
Header: sws.newResponseHeader(sws.watchStream.Rev()),
WatchId: id,
Canceled: true,
}
sws.mu.Lock()
delete(sws.progress, mvcc.WatchID(id))
sws.mu.Unlock()
}
}
// TODO: do we need to return error back to client?
default:
panic("not implemented")
}
}
}
func (sws *serverWatchStream) sendLoop() {
// watch ids that are currently active
ids := make(map[mvcc.WatchID]struct{})
// watch responses pending on a watch id creation message
pending := make(map[mvcc.WatchID][]*pb.WatchResponse)
interval := GetProgressReportInterval()
progressTicker := time.NewTicker(interval)
defer progressTicker.Stop()
for {
select {
case wresp, ok := <-sws.watchStream.Chan():
if !ok {
return
}
// TODO: evs is []mvccpb.Event type
// either return []*mvccpb.Event from the mvcc package
// or define protocol buffer with []mvccpb.Event.
evs := wresp.Events
events := make([]*mvccpb.Event, len(evs))
for i := range evs {
events[i] = &evs[i]
}
wr := &pb.WatchResponse{
Header: sws.newResponseHeader(wresp.Revision),
WatchId: int64(wresp.WatchID),
Events: events,
CompactRevision: wresp.CompactRevision,
}
if _, hasId := ids[wresp.WatchID]; !hasId {
// buffer if id not yet announced
wrs := append(pending[wresp.WatchID], wr)
pending[wresp.WatchID] = wrs
continue
}
mvcc.ReportEventReceived()
if err := sws.gRPCStream.Send(wr); err != nil {
return
}
sws.mu.Lock()
if _, ok := sws.progress[wresp.WatchID]; ok {
sws.progress[wresp.WatchID] = false
}
sws.mu.Unlock()
case c, ok := <-sws.ctrlStream:
if !ok {
return
}
if err := sws.gRPCStream.Send(c); err != nil {
return
}
// track id creation
wid := mvcc.WatchID(c.WatchId)
if c.Canceled {
delete(ids, wid)
continue
}
if c.Created {
// flush buffered events
ids[wid] = struct{}{}
for _, v := range pending[wid] {
mvcc.ReportEventReceived()
if err := sws.gRPCStream.Send(v); err != nil {
return
}
}
delete(pending, wid)
}
case <-progressTicker.C:
for id, ok := range sws.progress {
if ok {
sws.watchStream.RequestProgress(id)
}<|fim▁hole|> sws.progress[id] = true
}
case <-sws.closec:
// drain the chan to clean up pending events
for range sws.watchStream.Chan() {
mvcc.ReportEventReceived()
}
for _, wrs := range pending {
for range wrs {
mvcc.ReportEventReceived()
}
}
}
}
}
func (sws *serverWatchStream) close() {
sws.watchStream.Close()
close(sws.closec)
close(sws.ctrlStream)
}
func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
return &pb.ResponseHeader{
ClusterId: uint64(sws.clusterID),
MemberId: uint64(sws.memberID),
Revision: rev,
RaftTerm: sws.raftTimer.Term(),
}
}<|fim▁end|> | |
<|file_name|>testcase_base.py<|end_file_name|><|fim▁begin|>import os
import sys
import argparse
import ConfigParser
import testcase_service
from bantorra.util import define
from bantorra.util.log import LOG as L
class TestCase_Base(testcase_service.TestCaseUnit):
config = {}
"""
TestCase_Base.
    - Parse command line arguments.
    - Create service instances.
    - Read the config file and fetch values.
"""
def __init__(self, *args, **kwargs):
super(TestCase_Base, self).__init__(*args, **kwargs)
self.parse()
self.get_config()
self.service_check()
self.get_service()
@classmethod
def set(cls, name, value):<|fim▁hole|> @classmethod
def get(cls, name):
return cls.config[name]
def parse(self):
"""
Parse Command Line Arguments.
"""
return None
@classmethod
def get_service(cls):
"""
Get Service.
        In the wifi branch, these are the services in use.
"""
cls.core = cls.service["core"].get()
cls.picture = cls.service["picture"].get()
@classmethod
def get_config(cls, conf=""):
"""
Get Config File.
:arg string conf: config file path.
"""
cls.config = {}
if conf == "":
conf = os.path.join(define.APP_SCRIPT, "config.ini")
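        # Flatten every section into one dict keyed "section.option"; e.g. a
        # hypothetical "[core] host = ..." entry becomes config["core.host"].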
try:
config = ConfigParser.ConfigParser()
config.read(conf)
for section in config.sections():
for option in config.options(section):
cls.config["%s.%s" % (section, option)] = config.get(section, option)
except Exception as e:
L.warning('error: could not read config file: %s' % e)<|fim▁end|> | cls.config[name] = value
|
<|file_name|>get_all_test.go<|end_file_name|><|fim▁begin|>/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main<|fim▁hole|>import (
"testing"
"helm.sh/helm/v3/pkg/release"
)
func TestGetCmd(t *testing.T) {
tests := []cmdTestCase{{
name: "get all with a release",
cmd: "get all thomas-guide",
golden: "output/get-release.txt",
rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide"})},
}, {
name: "get all with a formatted release",
cmd: "get all elevated-turkey --template {{.Release.Chart.Metadata.Version}}",
golden: "output/get-release-template.txt",
rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "elevated-turkey"})},
}, {
name: "get all requires release name arg",
cmd: "get all",
golden: "output/get-all-no-args.txt",
wantError: true,
}}
runTestCmd(t, tests)
}
func TestGetAllRevisionCompletion(t *testing.T) {
revisionFlagCompletionTest(t, "get all")
}
func TestGetAllFileCompletion(t *testing.T) {
checkFileCompletion(t, "get all", false)
checkFileCompletion(t, "get all myrelease", false)
}<|fim▁end|> | |
<|file_name|>osx.py<|end_file_name|><|fim▁begin|><|fim▁hole|>raise NotImplementedError<|fim▁end|> | from btcommon import *
|
<|file_name|>mappers.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy import Table
def map(engine, models):
meta = MetaData()
meta.bind = engine
if mapping_exists(models['instance']):
return
orm.mapper(models['instance'], Table('instances', meta, autoload=True))
orm.mapper(models['root_enabled_history'],
Table('root_enabled_history', meta, autoload=True))
orm.mapper(models['datastore'],
Table('datastores', meta, autoload=True))
orm.mapper(models['datastore_version'],
Table('datastore_versions', meta, autoload=True))
orm.mapper(models['capabilities'],
Table('capabilities', meta, autoload=True))
orm.mapper(models['capability_overrides'],
Table('capability_overrides', meta, autoload=True))
orm.mapper(models['service_statuses'],
Table('service_statuses', meta, autoload=True))
orm.mapper(models['dns_records'],
Table('dns_records', meta, autoload=True))
orm.mapper(models['agent_heartbeats'],
Table('agent_heartbeats', meta, autoload=True))
orm.mapper(models['quotas'],
Table('quotas', meta, autoload=True))
orm.mapper(models['quota_usages'],
Table('quota_usages', meta, autoload=True))
orm.mapper(models['reservations'],
Table('reservations', meta, autoload=True))
orm.mapper(models['backups'],
Table('backups', meta, autoload=True))
orm.mapper(models['security_group'],
Table('security_groups', meta, autoload=True))
orm.mapper(models['security_group_rule'],
Table('security_group_rules', meta, autoload=True))
orm.mapper(models['security_group_instance_association'],
Table('security_group_instance_associations', meta,
autoload=True))
orm.mapper(models['configurations'],
Table('configurations', meta, autoload=True))
orm.mapper(models['configuration_parameters'],
Table('configuration_parameters', meta, autoload=True))
orm.mapper(models['conductor_lastseen'],
Table('conductor_lastseen', meta, autoload=True))
orm.mapper(models['clusters'],
Table('clusters', meta, autoload=True))
orm.mapper(models['datastore_configuration_parameters'],
Table('datastore_configuration_parameters', meta,
autoload=True))
def mapping_exists(model):
try:
orm.class_mapper(model)
return True
except orm_exc.UnmappedClassError:
return False<|fim▁end|> | # Copyright 2011 OpenStack Foundation
# All Rights Reserved. |
<|file_name|>gis_visualization.js<|end_file_name|><|fim▁begin|>/* vim: set expandtab sw=4 ts=4 sts=4: */
/**
* @fileoverview functions used for visualizing GIS data
*
* @requires jquery
* @requires vendor/jquery/jquery.svg.js
* @requires vendor/jquery/jquery.mousewheel.js
* @requires vendor/jquery/jquery.event.drag-2.2.js
*/
/* global drawOpenLayers */ // templates/table/gis_visualization/gis_visualization.twig
// Constants
var zoomFactor = 1.5;
var defaultX = 0;
var defaultY = 0;
// Variables
var x = 0;
var y = 0;
var scale = 1;
var svg;
/**
* Zooms and pans the visualization.
*/
function zoomAndPan () {
var g = svg.getElementById('groupPanel');
if (!g) {
return;
}
g.setAttribute('transform', 'translate(' + x + ', ' + y + ') scale(' + scale + ')');
var id;
var circle;
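    // Re-apply radii and stroke widths divided by the current scale so the
    // vector outlines keep a constant on-screen thickness while zooming.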
$('circle.vector').each(function () {
id = $(this).attr('id');
circle = svg.getElementById(id);
        svg.change(circle, {
r : (3 / scale),
'stroke-width' : (2 / scale)
});
});
var line;
$('polyline.vector').each(function () {
id = $(this).attr('id');
line = svg.getElementById(id);
        svg.change(line, {
'stroke-width' : (2 / scale)
});
});
var polygon;
$('path.vector').each(function () {
id = $(this).attr('id');
polygon = svg.getElementById(id);
        svg.change(polygon, {
'stroke-width' : (0.5 / scale)
});
});
}
/**
* Initially loads either SVG or OSM visualization based on the choice.
*/
function selectVisualization () {
if ($('#choice').prop('checked') !== true) {
$('#openlayersmap').hide();
} else {
$('#placeholder').hide();
}
}
/**
 * Adds necessary styles to the div that contains the openStreetMap.
*/
function styleOSM () {
var $placeholder = $('#placeholder');
var cssObj = {
'border' : '1px solid #aaa',
'width' : $placeholder.width(),
'height' : $placeholder.height(),
'float' : 'right'
};
$('#openlayersmap').css(cssObj);
}
/**
* Loads the SVG element and make a reference to it.
*/
function loadSVG () {
var $placeholder = $('#placeholder');
$placeholder.svg({
onLoad: function (svgRef) {
svg = svgRef;
}
});
// Removes the second SVG element unnecessarily added due to the above command
$placeholder.find('svg:nth-child(2)').remove();
}
/**
* Adds controllers for zooming and panning.
*/
function addZoomPanControllers () {
var $placeholder = $('#placeholder');
if ($('#placeholder').find('svg').length > 0) {
var pmaThemeImage = $('#pmaThemeImage').val();
// add panning arrows
$('<img class="button" id="left_arrow" src="' + pmaThemeImage + 'west-mini.png">').appendTo($placeholder);
$('<img class="button" id="right_arrow" src="' + pmaThemeImage + 'east-mini.png">').appendTo($placeholder);
$('<img class="button" id="up_arrow" src="' + pmaThemeImage + 'north-mini.png">').appendTo($placeholder);
$('<img class="button" id="down_arrow" src="' + pmaThemeImage + 'south-mini.png">').appendTo($placeholder);
// add zooming controls
$('<img class="button" id="zoom_in" src="' + pmaThemeImage + 'zoom-plus-mini.png">').appendTo($placeholder);
$('<img class="button" id="zoom_world" src="' + pmaThemeImage + 'zoom-world-mini.png">').appendTo($placeholder);
$('<img class="button" id="zoom_out" src="' + pmaThemeImage + 'zoom-minus-mini.png">').appendTo($placeholder);
}
}
/**
* Resizes the GIS visualization to fit into the space available.
*/
function resizeGISVisualization () {
var $placeholder = $('#placeholder');
var oldWidth = $placeholder.width();
var visWidth = $('#div_view_options').width() - 48;
// Assign new value for width
$placeholder.width(visWidth);
$('svg').attr('width', visWidth);
// Assign the offset created due to resizing to defaultX and center the svg.
defaultX = (visWidth - oldWidth) / 2;
x = defaultX;
y = 0;
scale = 1;<|fim▁hole|>/**
* Initialize the GIS visualization.
*/
function initGISVisualization () {
// Loads either SVG or OSM visualization based on the choice
selectVisualization();
// Resizes the GIS visualization to fit into the space available
resizeGISVisualization();
if (typeof OpenLayers !== 'undefined') {
// Configure OpenLayers
// eslint-disable-next-line no-underscore-dangle
OpenLayers._getScriptLocation = function () {
return './js/vendor/openlayers/';
};
// Adds necessary styles to the div that coontains the openStreetMap
styleOSM();
// Draws openStreetMap with openLayers
drawOpenLayers();
}
// Loads the SVG element and make a reference to it
loadSVG();
// Adds controllers for zooming and panning
addZoomPanControllers();
zoomAndPan();
}
function getRelativeCoords (e) {
var position = $('#placeholder').offset();
return {
x : e.pageX - position.left,
y : e.pageY - position.top
};
}
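// The zoom handlers below keep the point under the cursor stationary:
// new_offset = p - (p - old_offset) * factor, with p the cursor position.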
/**
* Ajax handlers for GIS visualization page
*
* Actions Ajaxified here:
*
* Zooming in and zooming out on mousewheel movement.
* Panning the visualization on dragging.
* Zooming in on double clicking.
* Zooming out on clicking the zoom out button.
* Panning on clicking the arrow buttons.
* Displaying tooltips for GIS objects.
*/
/**
* Unbind all event handlers before tearing down a page
*/
AJAX.registerTeardown('table/gis_visualization.js', function () {
$(document).off('click', '#choice');
$(document).off('mousewheel', '#placeholder');
$(document).off('dragstart', 'svg');
$(document).off('mouseup', 'svg');
$(document).off('drag', 'svg');
$(document).off('dblclick', '#placeholder');
$(document).off('click', '#zoom_in');
$(document).off('click', '#zoom_world');
$(document).off('click', '#zoom_out');
$(document).off('click', '#left_arrow');
$(document).off('click', '#right_arrow');
$(document).off('click', '#up_arrow');
$(document).off('click', '#down_arrow');
$('.vector').off('mousemove').off('mouseout');
});
AJAX.registerOnload('table/gis_visualization.js', function () {
// If we are in GIS visualization, initialize it
if ($('#gis_div').length > 0) {
initGISVisualization();
}
if (typeof OpenLayers === 'undefined') {
$('#choice, #labelChoice').hide();
}
$(document).on('click', '#choice', function () {
if ($(this).prop('checked') === false) {
$('#placeholder').show();
$('#openlayersmap').hide();
} else {
$('#placeholder').hide();
$('#openlayersmap').show();
}
});
$(document).on('mousewheel', '#placeholder', function (event, delta) {
event.preventDefault();
var relCoords = getRelativeCoords(event);
if (delta > 0) {
// zoom in
scale *= zoomFactor;
// zooming in keeping the position under mouse pointer unmoved.
x = relCoords.x - (relCoords.x - x) * zoomFactor;
y = relCoords.y - (relCoords.y - y) * zoomFactor;
zoomAndPan();
} else {
// zoom out
scale /= zoomFactor;
// zooming out keeping the position under mouse pointer unmoved.
x = relCoords.x - (relCoords.x - x) / zoomFactor;
y = relCoords.y - (relCoords.y - y) / zoomFactor;
zoomAndPan();
}
return true;
});
var dragX = 0;
var dragY = 0;
$(document).on('dragstart', 'svg', function (event, dd) {
$('#placeholder').addClass('placeholderDrag');
dragX = Math.round(dd.offsetX);
dragY = Math.round(dd.offsetY);
});
$(document).on('mouseup', 'svg', function () {
$('#placeholder').removeClass('placeholderDrag');
});
$(document).on('drag', 'svg', function (event, dd) {
var newX = Math.round(dd.offsetX);
x += newX - dragX;
dragX = newX;
var newY = Math.round(dd.offsetY);
y += newY - dragY;
dragY = newY;
zoomAndPan();
});
$(document).on('dblclick', '#placeholder', function (event) {
scale *= zoomFactor;
// zooming in keeping the position under mouse pointer unmoved.
var relCoords = getRelativeCoords(event);
x = relCoords.x - (relCoords.x - x) * zoomFactor;
y = relCoords.y - (relCoords.y - y) * zoomFactor;
zoomAndPan();
});
$(document).on('click', '#zoom_in', function (e) {
e.preventDefault();
// zoom in
scale *= zoomFactor;
var $placeholder = $('#placeholder').find('svg');
var width = $placeholder.attr('width');
var height = $placeholder.attr('height');
// zooming in keeping the center unmoved.
x = width / 2 - (width / 2 - x) * zoomFactor;
y = height / 2 - (height / 2 - y) * zoomFactor;
zoomAndPan();
});
$(document).on('click', '#zoom_world', function (e) {
e.preventDefault();
scale = 1;
x = defaultX;
y = defaultY;
zoomAndPan();
});
$(document).on('click', '#zoom_out', function (e) {
e.preventDefault();
// zoom out
scale /= zoomFactor;
var $placeholder = $('#placeholder').find('svg');
var width = $placeholder.attr('width');
var height = $placeholder.attr('height');
// zooming out keeping the center unmoved.
x = width / 2 - (width / 2 - x) / zoomFactor;
y = height / 2 - (height / 2 - y) / zoomFactor;
zoomAndPan();
});
$(document).on('click', '#left_arrow', function (e) {
e.preventDefault();
x += 100;
zoomAndPan();
});
$(document).on('click', '#right_arrow', function (e) {
e.preventDefault();
x -= 100;
zoomAndPan();
});
$(document).on('click', '#up_arrow', function (e) {
e.preventDefault();
y += 100;
zoomAndPan();
});
$(document).on('click', '#down_arrow', function (e) {
e.preventDefault();
y -= 100;
zoomAndPan();
});
/**
* Detect the mousemove event and show tooltips.
*/
$('.vector').on('mousemove', function (event) {
var contents = $.trim(Functions.escapeHtml($(this).attr('name')));
$('#tooltip').remove();
if (contents !== '') {
$('<div id="tooltip">' + contents + '</div>').css({
position : 'absolute',
top : event.pageY + 10,
left : event.pageX + 10,
border : '1px solid #fdd',
padding : '2px',
'background-color' : '#fee',
opacity : 0.90
}).appendTo('body').fadeIn(200);
}
});
/**
* Detect the mouseout event and hide tooltips.
*/
$('.vector').on('mouseout', function () {
$('#tooltip').remove();
});
});<|fim▁end|> | }
|
<|file_name|>project.rs<|end_file_name|><|fim▁begin|>// Examples to illustrate project loading
extern crate r2api;
extern crate r2pipe;
extern crate radeco_lib;
use r2api::api_trait::R2Api;
use r2pipe::R2;
use radeco_lib::frontend::radeco_containers::{FunctionLoader, ModuleLoader, ProjectLoader};
use radeco_lib::frontend::radeco_source::Source;
use std::cell::RefCell;
use std::rc::Rc;
fn main() {
{
let mut r2 = R2::new(Some("/bin/ls")).expect("Failed to load r2");
r2.analyze();
let src: Rc<Source> = Rc::new(Rc::new(RefCell::new(r2)));
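        // Builder-style configuration: load modules in parallel, construct
        // SSA and the callgraph eagerly, include data references, and pull
        // in the default function set.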
let p = ProjectLoader::default()
.path("/bin/ls")
.source(Rc::clone(&src))
.module_loader(
ModuleLoader::default()
.parallel()
.build_ssa()
.build_callgraph()
.load_datarefs()
.function_loader(FunctionLoader::default().include_defaults()),
)
.load();
for m in p.iter() {
for rfn in m.module.iter() {
println!("{:#X}", rfn.function.0);<|fim▁hole|>}<|fim▁end|> | }
}
} |
<|file_name|>timelines_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for the Timelines flow."""
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
# pylint: disable=unused-import
from grr.lib.flows.general import timelines as _
# pylint: enable=unused-import
from grr.lib.rdfvalues import paths as rdf_paths
class TestTimelines(test_lib.FlowTestsBaseclass):
"""Test the timelines flow."""
client_id = "C.0000000000000005"
def testMACTimes(self):
"""Test that the timelining works with files."""
with test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.OS, test_lib.ClientVFSHandlerFixture):
client_mock = action_mocks.ActionMock("ListDirectory")
output_path = "analysis/Timeline/MAC"
pathspec = rdf_paths.PathSpec(path="/",
pathtype=rdf_paths.PathSpec.PathType.OS)
for _ in test_lib.TestFlowHelper(
"RecursiveListDirectory", client_mock, client_id=self.client_id,
pathspec=pathspec, token=self.token):
pass
<|fim▁hole|> for _ in test_lib.TestFlowHelper(
"MACTimes", client_mock, client_id=self.client_id, token=self.token,
path="/", output=output_path):
pass
fd = aff4.FACTORY.Open(self.client_id.Add(output_path), token=self.token)
timestamp = 0
events = list(fd.Query("event.stat.pathspec.path contains grep"))
for event in events:
# Check the times are monotonously increasing.
self.assert_(event.event.timestamp >= timestamp)
timestamp = event.event.timestamp
self.assert_("grep" in event.event.stat.pathspec.path)
# 9 files, each having mac times = 27 events.
self.assertEqual(len(events), 27)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)<|fim▁end|> | # Now make a timeline |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//! Basic 2D object and shader.
use std::rc::Rc;
use glium::{ Display, Frame };
use texture::{ Texture, TextureRef };
use mesh::Mesh;
use shader::ShaderType;
use transform::Transform;
use renderer::{ Renderer, Renderable };<|fim▁hole|>
/// 2D Sprite vertex type
#[derive(Copy, Clone)]
pub struct Vertex
{
pub position: [f32; 2],
pub texture_position: [f32; 2],
}
implement_vertex!{ Vertex, position, texture_position }
/// Simple sprite shader
pub struct Shader;
impl ShaderType for Shader
{
type Vertex = Vertex;
type Uniforms = Uniforms;
fn vertex() -> &'static str
{
include_str!("sprite.vert")
}
fn fragment() -> &'static str
{
include_str!("sprite.frag")
}
}
/// Sprite rendering uniforms.
pub struct Uniforms
{
pub matrix: [[f32; 4]; 4],
pub opacity: f32,
pub image: TextureRef,
}
implement_uniforms! { Uniforms, matrix, opacity, image }
/// Basic 2D object.
pub struct Sprite
{
pub id: Id,
texture: TextureRef,
width: f32,
height: f32,
opacity: f32,
rect: Rect,
mesh: Mesh<Vertex>,
pub transform: Transform,
}
impl Sprite
{
pub fn new(display: &Display, tex: Rc<Texture>, width: u32, height: u32)
-> Sprite
{
let rect = Rect::new(0, 0, tex.width as i32, tex.height as i32);
Sprite::with_rect(display, tex, rect, width, height)
}
/// Create a sprite with size and
/// [texture atlas](https://en.wikipedia.org/wiki/Texture_atlas).
pub fn with_rect(display: &Display, tex: Rc<Texture>,
rect: Rect, width: u32, height: u32)
-> Sprite
{
let (w, h) = (width as f32, height as f32);
Sprite
{
id: Id::new(),
texture: TextureRef(tex),
width: w,
height: h,
transform: Transform::new(),
opacity: 1.0,
mesh: Sprite::build_mesh(display, w, h, &rect),
rect: rect,
}
}
fn build_mesh(display: &Display, width: f32, height: f32, rect: &Rect)
-> Mesh<Vertex>
{
let x = rect.x as f32;
let y = rect.y as f32;
let w = rect.width as f32;
let h = rect.height as f32;
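        // Two triangles (indices 0-1-2 and 2-3-0) form the textured quad;
        // texture_position addresses the atlas sub-rectangle.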
        let vertices = [
Vertex { position: [ 0.0, height], texture_position: [ x, y+h] },
Vertex { position: [ 0.0, 0.0], texture_position: [ x, y] },
Vertex { position: [width, 0.0], texture_position: [x+w, y] },
Vertex { position: [width, height], texture_position: [x+w, y+h] },
];
        Mesh::with_indices(display, &vertices, &[0, 1, 2, 2, 3, 0])
}
pub fn resize(&mut self, display: &Display, width: f32, height: f32)
{
self.mesh = Sprite::build_mesh(display, width, height, &self.rect);
}
/// Change the texture atlas rectangle.
pub fn rect(&mut self, display: &Display, rect: Rect)
{
self.mesh = Sprite::build_mesh(display, self.width, self.height, &rect);
self.rect = rect;
}
}
impl Renderable<Shader> for Sprite
{
fn draw(&self, renderer: &Renderer<Shader>, target: &mut Frame, parent: &Matrix4<f32>)
{
let uniforms = Uniforms
{
image: self.texture.clone(),
opacity: self.opacity,
matrix: (parent * self.transform.matrix()).into(),
};
renderer.draw(target, &self.mesh, &uniforms);
}
}<|fim▁end|> | use rect::Rect;
use cgmath::Matrix4;
use id::Id; |
<|file_name|>is-extensible.js<|end_file_name|><|fim▁begin|><|fim▁hole|>description: The [[IsExtensible]] internal method returns `false`
flags: [module]
---*/
import * as ns from './is-extensible.js';
assert.sameValue(Object.isExtensible(ns), false);<|fim▁end|> | // Copyright (C) 2016 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
esid: sec-module-namespace-exotic-objects-isextensible |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
# guaranteed to exist even on RHEL 5 because we now require python-hashlib
import hashlib
import re
import shutil
import pwd
import urlparse
import inspect
from config_common.rhn_log import log_debug
hashlib_has_usedforsecurity = False
if 'usedforsecurity' in inspect.getargspec(hashlib.new)[0]:
hashlib_has_usedforsecurity = True
_normpath_re = re.compile("^(%s)+" % os.sep)
def normalize_path(path):
"""
os.path.normpath does not remove path separator duplicates at the
beginning of the path
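    e.g. (on POSIX): normalize_path('//usr//bin') == '/usr/bin'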
"""
return _normpath_re.sub(os.sep, os.path.normpath(path))
def join_path(*args):
return normalize_path(os.sep.join(args))
def path_full_split(path):
"""
Given a path, it fully splits it into constituent path
components (as opposed to os.path.split which splits it into
    trailing component and preceding path
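    For example, on POSIX: path_full_split('/usr/local/bin')
    returns ['/', 'usr', 'local', 'bin'].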
"""
path = normalize_path(path)
splitpath = []
while 1:
path, current = os.path.split(path)
if current == '':
if path:
# Absolute path
splitpath.append(os.sep)
break
splitpath.append(current)
splitpath.reverse()
return splitpath
def copyfile_p(src, dst):
"""
Simple util function, copies src path to dst path, making
directories as necessary. File permissions are not preserved.
"""
directory = os.path.split(dst)[0]
try:
os.makedirs(directory)
except OSError, e:
if e.errno != 17:
# not File exists
raise
if os.path.isdir(src):
if not os.path.exists(dst):
os.mkdir(dst)
elif os.path.islink(src):
exists = hasattr(os.path, "lexists") and os.path.lexists or os.path.exists
if exists(dst):
os.remove(dst)
os.symlink(os.readlink(src), dst)
else:
shutil.copyfile(src, dst)
def mkdir_p(path, mode=None, symlinks=None, allfiles=None):
"""
Similar to 'mkdir -p' -- makes all directories necessary to ensure
the 'path' is a directory, and return the list of directories that were
made as a result
"""
if mode is None:
mode = 0700
dirs_created = []
components = path_full_split(path)
for i in range(1,len(components)):
d = os.path.join(*components[:i+1])
if symlinks:
for symlink in symlinks:
if symlink['path'] == d:
# create symlink and remove it from symlink list
os.symlink(symlink['symlink'], symlink['path'])
symlinks.remove(symlink)
allfiles.remove(symlink)
dirs_created.append(symlink)
continue
log_debug(8, "testing",d)
try:
os.mkdir(d, mode)
except OSError, e:
if e.errno != 17:
raise
else:
log_debug(8, "created",d)
dirs_created.append(d)
log_debug(6, "dirs_created:",dirs_created)
return dirs_created
def rmdir_p(path, stoppath):
"""
if rmdir had a -p option, this would be it. remove dir and up
until empty dir is hit, or stoppath is reached
path and stoppath have to be absolute paths
"""
# First normalize both paths
stoppath = normalize_path(os.sep + stoppath)
path = normalize_path(os.sep + path)
# stoppath has to be a prefix of path
if path[:len(stoppath)] != stoppath:
raise OSError, "Could not remove %s: %s is not a prefix" % (
path, stoppath)
while 1:
if stoppath == path:
# We're done
break
# Try to remove the directory
try:
os.rmdir(path)
except OSError:
# Either the directory is full, or we don't have permissions; stop
break
path, current = os.path.split(path)
if current == '':
# We're done - reached the root
break
# Returns slashstring with any trailing slash removed.
def rm_trailing_slash(slashstring):
if slashstring[-1] == "/":
slashstring = slashstring[0:-1]
return slashstring
def getContentChecksum(checksum_type, contents):<|fim▁hole|> engine = hashlib.new(checksum_type, usedforsecurity=False)
else:
engine = hashlib.new(checksum_type)
engine.update(contents)
return engine.hexdigest()
def sha256_file(filename):
engine = hashlib.new('sha256')
fh = open(filename, "r")
while 1:
buf = fh.read(4096)
if not buf:
break
engine.update(buf)
return engine.hexdigest()
def parse_url(server_url, scheme="https"):
return urlparse.urlparse(server_url, scheme=scheme)
def unparse_url(url_tuple):
return urlparse.urlunparse(url_tuple)
def get_home_dir():
uid = os.getuid()
ent = pwd.getpwuid(uid)
return ent[5]<|fim▁end|> | if hashlib_has_usedforsecurity: |
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>// Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md><|fim▁hole|>}<|fim▁end|> | extern crate build;
fn main() {
build::link("nddeapi", true) |
<|file_name|>_version.py<|end_file_name|><|fim▁begin|>"""Utility to compare (Numpy) version strings.
The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
import re
from scipy._lib.six import string_types
__all__ = ['NumpyVersion']
class NumpyVersion():
"""Parse and compare numpy version strings.
Numpy has the following versioning scheme (numbers given are examples; they
can be >9) in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
- Beta: '1.8.0b1', '1.8.0b2', etc.
- Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
- Development versions after a1: '1.8.0a1.dev-f1234afa',
'1.8.0b2.dev-f1234afa',
'1.8.1rc1.dev-f1234afa', etc.
- Development versions (no git hash available): '1.8.0.dev-Unknown'
Comparing needs to be done against a valid version string or other
`NumpyVersion` instance.
Parameters
----------
vstring : str
Numpy version string (``np.__version__``).
Notes
-----
All dev versions of the same (pre-)release compare equal.
Examples
--------
>>> from scipy._lib._version import NumpyVersion
>>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
def __init__(self, vstring):
self.vstring = vstring
        ver_main = re.match(r'\d+[.]\d+[.]\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
self.version = ver_main.group()
self.major, self.minor, self.bugfix = [int(x) for x in
self.version.split('.')]
if len(vstring) == ver_main.end():
self.pre_release = 'final'
else:
alpha = re.match(r'a\d', vstring[ver_main.end():])
beta = re.match(r'b\d', vstring[ver_main.end():])
rc = re.match(r'rc\d', vstring[ver_main.end():])
pre_rel = [m for m in [alpha, beta, rc] if m is not None]
if pre_rel:
self.pre_release = pre_rel[0].group()
else:
self.pre_release = ''
self.is_devversion = bool(re.search(r'.dev', vstring))
def _compare_version(self, other):
"""Compare major.minor.bugfix"""
if self.major == other.major:
if self.minor == other.minor:
if self.bugfix == other.bugfix:
vercmp = 0
elif self.bugfix > other.bugfix:
vercmp = 1
else:
vercmp = -1
elif self.minor > other.minor:
vercmp = 1<|fim▁hole|> vercmp = 1
else:
vercmp = -1
return vercmp
def _compare_pre_release(self, other):
"""Compare alpha/beta/rc/final."""
if self.pre_release == other.pre_release:
vercmp = 0
elif self.pre_release == 'final':
vercmp = 1
elif other.pre_release == 'final':
vercmp = -1
elif self.pre_release > other.pre_release:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare(self, other):
if not isinstance(other, (string_types, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
if isinstance(other, string_types):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
if vercmp == 0:
# Same x.y.z version, check for alpha/beta/rc
vercmp = self._compare_pre_release(other)
if vercmp == 0:
# Same version and same pre-release, check if dev version
if self.is_devversion is other.is_devversion:
vercmp = 0
elif self.is_devversion:
vercmp = -1
else:
vercmp = 1
return vercmp
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __eq__(self, other):
return self._compare(other) == 0
def __ne__(self, other):
return self._compare(other) != 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
def __repr__(self):
return "NumpyVersion(%s)" % self.vstring<|fim▁end|> | else:
vercmp = -1
elif self.major > other.major: |
<|file_name|>parallel_stmts_test.go<|end_file_name|><|fim▁begin|>// Copyright 2017 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Nathan VanBenschoten ([email protected])
package sql
import (
"fmt"
"reflect"
"testing"
"golang.org/x/net/context"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/internal/client"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/pkg/errors"
)
func newPlanNode() planNode {
return &emptyNode{}
}
// assertLen asserts the number of plans in the ParallelizeQueue.
func assertLen(t *testing.T, pq *ParallelizeQueue, exp int) {
if l := pq.Len(); l != exp {
t.Errorf("expected plan count of %d, found %d", exp, l)
}
}
// assertLenEventually is like assertLen, but can be used in racy situations
// where proper synchronization cannot be performed.
func assertLenEventually(t *testing.T, pq *ParallelizeQueue, exp int) {
testutils.SucceedsSoon(t, func() error {
if l := pq.Len(); l != exp {
return errors.Errorf("expected plan count of %d, found %d", exp, l)
}
return nil
})
}
// waitAndAssertEmptyWithErr waits for the ParallelizeQueue to drain, then asserts
// that the queue is empty. It returns the error produced by ParallelizeQueue.Wait.
func waitAndAssertEmptyWithErr(t *testing.T, pq *ParallelizeQueue) error {
err := pq.Wait()
if l := pq.Len(); l != 0 {
t.Errorf("expected empty ParallelizeQueue, found %d plans remaining", l)
}
return err
}
func waitAndAssertEmpty(t *testing.T, pq *ParallelizeQueue) {
if err := waitAndAssertEmptyWithErr(t, pq); err != nil {
t.Fatalf("unexpected error waiting for ParallelizeQueue to drain: %v", err)
}
}
// TestParallelizeQueueNoDependencies tests three plans run through a ParallelizeQueue
// when none of the plans are dependent on each other. Because of their independence,
// we use channels to guarantee deterministic execution.
func TestParallelizeQueueNoDependencies(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
var res []int
run1, run2, run3 := make(chan struct{}), make(chan struct{}), make(chan struct{})
// Executes: plan3 -> plan1 -> plan2.
pq := MakeParallelizeQueue(NoDependenciesAnalyzer)
pq.Add(ctx, newPlanNode(), func(plan planNode) error {
<-run1
res = append(res, 1)
assertLen(t, &pq, 3)
close(run3)
return nil
})
pq.Add(ctx, newPlanNode(), func(plan planNode) error {
<-run2
res = append(res, 2)
assertLenEventually(t, &pq, 1)
return nil
})
pq.Add(ctx, newPlanNode(), func(plan planNode) error {
<-run3
res = append(res, 3)
assertLenEventually(t, &pq, 2)
close(run2)
return nil
})
close(run1)
waitAndAssertEmpty(t, &pq)
exp := []int{1, 3, 2}
if !reflect.DeepEqual(res, exp) {
t.Fatalf("expected parallel execution side effects %v, found %v", exp, res)
}
}
// TestParallelizeQueueAllDependent tests three plans run through a ParallelizeQueue
// when all of the plans are dependent on each other. Because of their dependence, we
// need no extra synchronization to guarantee deterministic execution.
func TestParallelizeQueueAllDependent(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
var res []int
run := make(chan struct{})
analyzer := dependencyAnalyzerFunc(func(p1 planNode, p2 planNode) bool {
return false
})
// Executes: plan1 -> plan2 -> plan3.
pq := MakeParallelizeQueue(analyzer)
pq.Add(ctx, newPlanNode(), func(plan planNode) error {
<-run
res = append(res, 1)
assertLen(t, &pq, 3)
return nil
})
pq.Add(ctx, newPlanNode(), func(plan planNode) error {
res = append(res, 2)
assertLen(t, &pq, 2)
return nil
})
pq.Add(ctx, newPlanNode(), func(plan planNode) error {
res = append(res, 3)
assertLen(t, &pq, 1)
return nil
})
close(run)
waitAndAssertEmpty(t, &pq)
exp := []int{1, 2, 3}
if !reflect.DeepEqual(res, exp) {
t.Fatalf("expected parallel execution side effects %v, found %v", exp, res)
}
}
// TestParallelizeQueueSingleDependency tests three plans where one is dependent on
// another. Because one plan is dependent, it will be held in the pending queue
// until the prerequisite plan completes execution.
func TestParallelizeQueueSingleDependency(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
var res []int
plan1, plan2, plan3 := newPlanNode(), newPlanNode(), newPlanNode()
run1, run3 := make(chan struct{}), make(chan struct{})
analyzer := dependencyAnalyzerFunc(func(p1 planNode, p2 planNode) bool {
if (p1 == plan1 && p2 == plan2) || (p1 == plan2 && p2 == plan1) {
// plan1 and plan2 are dependent
return false
}
return true
})
// Executes: plan3 -> plan1 -> plan2.
pq := MakeParallelizeQueue(analyzer)
pq.Add(ctx, plan1, func(plan planNode) error {
<-run1
res = append(res, 1)
assertLenEventually(t, &pq, 2)
return nil
})
pq.Add(ctx, plan2, func(plan planNode) error {
res = append(res, 2)
assertLen(t, &pq, 1)
return nil
})
pq.Add(ctx, plan3, func(plan planNode) error {
<-run3
res = append(res, 3)
assertLen(t, &pq, 3)
close(run1)
return nil
})
close(run3)
waitAndAssertEmpty(t, &pq)
exp := []int{3, 1, 2}
if !reflect.DeepEqual(res, exp) {
t.Fatalf("expected parallel execution side effects %v, found %v", exp, res)
}
}
// TestParallelizeQueueError tests three plans where one is dependent on another
// and the prerequisite plan throws an error.
func TestParallelizeQueueError(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
var res []int
plan1, plan2, plan3 := newPlanNode(), newPlanNode(), newPlanNode()
run1, run3 := make(chan struct{}), make(chan struct{})
planErr := errors.Errorf("plan1 will throw this error")
analyzer := dependencyAnalyzerFunc(func(p1 planNode, p2 planNode) bool {
if (p1 == plan1 && p2 == plan2) || (p1 == plan2 && p2 == plan1) {
// plan1 and plan2 are dependent
return false
}
return true
})
// Executes: plan3 -> plan1 (error!) -> plan2 (dropped).
pq := MakeParallelizeQueue(analyzer)
pq.Add(ctx, plan1, func(plan planNode) error {
<-run1
res = append(res, 1)
assertLenEventually(t, &pq, 2)
return planErr
})
pq.Add(ctx, plan2, func(plan planNode) error {
// Should never be called. We assert this using the res slice, because
// we can't call t.Fatalf in a different goroutine.
res = append(res, 2)
return nil
})
pq.Add(ctx, plan3, func(plan planNode) error {
<-run3
res = append(res, 3)
assertLen(t, &pq, 3)
close(run1)
return nil
})
close(run3)
<|fim▁hole|> }
exp := []int{3, 1}
if !reflect.DeepEqual(res, exp) {
t.Fatalf("expected parallel execution side effects %v, found %v", exp, res)
}
}
// TestParallelizeQueueAddAfterError tests that if a plan is added to a ParallelizeQueue
// after an error has been produced but before Wait has been called, that the plan
// will never be run. It then tests that once Wait has been called, the error state
// will be cleared.
func TestParallelizeQueueAddAfterError(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
var res []int
plan1, plan2, plan3 := newPlanNode(), newPlanNode(), newPlanNode()
planErr := errors.Errorf("plan1 will throw this error")
// Executes: plan1 (error!) -> plan2 (dropped) -> plan3.
pq := MakeParallelizeQueue(NoDependenciesAnalyzer)
pq.Add(ctx, plan1, func(plan planNode) error {
res = append(res, 1)
assertLen(t, &pq, 1)
return planErr
})
testutils.SucceedsSoon(t, func() error {
// We need this, because any signal from within plan1's execution could
// race with the beginning of plan2.
if pqErr := pq.Err(); pqErr == nil {
return errors.Errorf("plan1 not yet run")
}
return nil
})
pq.Add(ctx, plan2, func(plan planNode) error {
// Should never be called. We assert this using the res slice, because
// we can't call t.Fatalf in a different goroutine.
res = append(res, 2)
return nil
})
// Wait for the ParallelizeQueue to clear and assert that we see the
// correct error.
resErr := waitAndAssertEmptyWithErr(t, &pq)
if resErr != planErr {
t.Fatalf("expected plan1 to throw error %v, found %v", planErr, resErr)
}
pq.Add(ctx, plan3, func(plan planNode) error {
// Will be called, because the error is cleared when Wait is called.
res = append(res, 3)
assertLen(t, &pq, 1)
return nil
})
waitAndAssertEmpty(t, &pq)
exp := []int{1, 3}
if !reflect.DeepEqual(res, exp) {
t.Fatalf("expected parallel execution side effects %v, found %v", exp, res)
}
}
func planNodeForQuery(
t *testing.T, s serverutils.TestServerInterface, sql string,
) (planNode, func()) {
kvDB := s.KVClient().(*client.DB)
txn := client.NewTxn(kvDB)
txn.Proto().OrigTimestamp = s.Clock().Now()
p := makeInternalPlanner("plan", txn, security.RootUser, &MemoryMetrics{})
p.session.leases.leaseMgr = s.LeaseManager().(*LeaseManager)
p.session.Database = "test"
stmts, err := p.parser.Parse(sql)
if err != nil {
t.Fatal(err)
}
if len(stmts) != 1 {
t.Fatalf("expected to parse 1 statement, got: %d", len(stmts))
}
stmt := stmts[0]
plan, err := p.makePlan(context.TODO(), stmt)
if err != nil {
t.Fatal(err)
}
return plan, func() {
finishInternalPlanner(p)
}
}
func TestSpanBasedDependencyAnalyzer(t *testing.T) {
defer leaktest.AfterTest(t)()
s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop(context.TODO())
if _, err := db.Exec(`CREATE DATABASE test`); err != nil {
t.Fatal(err)
}
if _, err := db.Exec(`SET DATABASE = test`); err != nil {
t.Fatal(err)
}
if _, err := db.Exec(`CREATE TABLE foo (k INT PRIMARY KEY)`); err != nil {
t.Fatal(err)
}
if _, err := db.Exec(`
CREATE TABLE bar (
k INT PRIMARY KEY,
v INT,
a INT,
UNIQUE INDEX idx(v)
)
`); err != nil {
t.Fatal(err)
}
if _, err := db.Exec(`CREATE TABLE fks (f INT REFERENCES foo)`); err != nil {
t.Fatal(err)
}
for _, test := range []struct {
query1, query2 string
independent bool
}{
// The dependency analyzer is only used for RETURNING NOTHING statements
// at the moment, but it should work on all statement types.
{`SELECT * FROM foo`, `SELECT * FROM bar`, true},
{`SELECT * FROM foo`, `SELECT * FROM bar@idx`, true},
{`SELECT * FROM foo`, `SELECT * FROM foo`, true},
{`SELECT * FROM foo`, `SELECT * FROM fks`, true},
{`DELETE FROM foo`, `DELETE FROM bar`, true},
{`DELETE FROM foo`, `DELETE FROM foo`, false},
{`DELETE FROM foo`, `DELETE FROM bar WHERE (SELECT k = 1 FROM foo)`, false},
{`DELETE FROM foo`, `DELETE FROM bar WHERE (SELECT f = 1 FROM fks)`, true},
{`DELETE FROM foo`, `DELETE FROM fks`, false},
{`DELETE FROM bar`, `DELETE FROM fks`, true},
{`DELETE FROM foo`, `SELECT * FROM foo`, false},
{`DELETE FROM foo`, `SELECT * FROM bar`, true},
{`DELETE FROM bar`, `SELECT * FROM bar`, false},
{`DELETE FROM bar`, `SELECT * FROM bar@idx`, false},
{`INSERT INTO foo VALUES (1)`, `INSERT INTO bar VALUES (1)`, true},
{`INSERT INTO foo VALUES (1)`, `INSERT INTO foo VALUES (1)`, false},
{`INSERT INTO foo VALUES (1)`, `INSERT INTO bar SELECT k FROM foo`, false},
{`INSERT INTO foo VALUES (1)`, `INSERT INTO bar SELECT f FROM fks`, true},
{`INSERT INTO foo VALUES (1)`, `INSERT INTO fks VALUES (1)`, false},
{`INSERT INTO bar VALUES (1)`, `INSERT INTO fks VALUES (1)`, true},
{`INSERT INTO foo VALUES (1)`, `SELECT * FROM foo`, false},
{`INSERT INTO foo VALUES (1)`, `SELECT * FROM bar`, true},
{`INSERT INTO bar VALUES (1)`, `SELECT * FROM bar`, false},
{`INSERT INTO bar VALUES (1)`, `SELECT * FROM bar@idx`, false},
{`INSERT INTO foo VALUES (1)`, `DELETE FROM foo`, false},
{`INSERT INTO foo VALUES (1)`, `DELETE FROM bar`, true},
{`UPDATE foo SET k = 1`, `UPDATE bar SET k = 1`, true},
{`UPDATE foo SET k = 1`, `UPDATE foo SET k = 1`, false},
{`UPDATE foo SET k = 1`, `UPDATE bar SET k = (SELECT k FROM foo)`, false},
{`UPDATE foo SET k = 1`, `UPDATE bar SET k = (SELECT f FROM fks)`, true},
{`UPDATE foo SET k = 1`, `INSERT INTO fks VALUES (1)`, false},
{`UPDATE bar SET k = 1`, `INSERT INTO fks VALUES (1)`, true},
{`UPDATE foo SET k = 1`, `SELECT * FROM foo`, false},
{`UPDATE foo SET k = 1`, `SELECT * FROM bar`, true},
{`UPDATE bar SET k = 1`, `SELECT * FROM bar`, false},
{`UPDATE bar SET k = 1`, `SELECT * FROM bar@idx`, false},
{`UPDATE foo SET k = 1`, `DELETE FROM foo`, false},
{`UPDATE foo SET k = 1`, `DELETE FROM bar`, true},
// Statements like statement_timestamp enforce a strict ordering on
// statements, restricting reordering and thus independence.
{`SELECT * FROM foo`, `SELECT *, statement_timestamp() FROM bar`, false},
{`DELETE FROM foo`, `DELETE FROM bar WHERE '2015-10-01'::TIMESTAMP = statement_timestamp()`, true},
} {
for _, reverse := range []bool{false, true} {
q1, q2 := test.query1, test.query2
if reverse {
// Verify commutativity.
q1, q2 = q2, q1
}
name := fmt.Sprintf("%s | %s", q1, q2)
t.Run(name, func(t *testing.T) {
da := NewSpanBasedDependencyAnalyzer()
plan1, finish1 := planNodeForQuery(t, s, q1)
defer finish1()
plan2, finish2 := planNodeForQuery(t, s, q2)
defer finish2()
indep := da.Independent(context.TODO(), plan1, plan2)
if exp := test.independent; indep != exp {
t.Errorf("expected da.Independent(%q, %q) = %t, but found %t",
q1, q2, exp, indep)
}
})
}
}
}<|fim▁end|> | resErr := waitAndAssertEmptyWithErr(t, &pq)
if resErr != planErr {
t.Fatalf("expected plan1 to throw error %v, found %v", planErr, resErr) |
<|file_name|>input.js<|end_file_name|><|fim▁begin|><|fim▁hole|>var Obj = {
myMethod(a, b) {
},
*myGenerator(a, b) {
}
}<|fim▁end|> | // methods (functions of objects)
// see: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Functions/Method_definitions
// http://www.ecma-international.org/ecma-262/6.0/#sec-method-definitions |
<|file_name|>data.js<|end_file_name|><|fim▁begin|>var chartPrevious = [
{"Name":"", "Description":"charts"},
{"Name":"1 hour", "Description":"1 hour"},
{"Name":"2 hours", "Description":"2 hours"},
{"Name":"10 hours", "Description":"10 hours"},
{"Name":"1 day", "Description":"1 day"},
{"Name":"2 days", "Description":"2 days"},
{"Name":"3 days", "Description":"3 days"},
{"Name":"7 days", "Description":"7 days"},
{"Name":"14 days", "Description":"14 days"},
{"Name":"30 days", "Description":"30 days"}
];
var employees = [
{"name":"Employee10", "surname":"Employee1", "filename" : "sav3.png", "department":"marketing"},
{"name":"Employee11", "surname":"Employee1", "filename" : "who3.png", "department":"marketing"},
{"name":"Employee12", "surname":"Employee1", "filename" : "jac4.png", "department":"digital"},
{"name":"Employee13", "surname":"Employee1", "filename" : "mart4.png", "department":"digital"},
{"name":"Employee14", "surname":"Employee1", "filename" : "sav4.png", "department":"digital"},
{"name":"Employee15", "surname":"Employee1", "filename" : "who4.png", "department":"digital"},
{"name":"Employee16", "surname":"Employee1", "filename" : "jac5.png", "department":"digital"},
{"name":"Employee17", "surname":"Employee1", "filename" : "mart5.png", "department":"marketing"},<|fim▁hole|>{"name":"Employee18", "surname":"Employee1", "filename" : "sav5.png", "department":"digital"},
{"name":"Employee19", "surname":"Employee1", "filename" : "who5.png", "department":"digital"},
{"name":"Employee1", "surname":"Employee1", "filename" : "jac1.png", "department":"marketing"},
{"name":"Employee2", "surname":"Employee1", "filename" : "mart1.png", "department":"marketing"},
{"name":"Employee3", "surname":"Employee1", "filename" : "sav1.png", "department":"marketing"},
{"name":"Employee4", "surname":"Employee1", "filename" : "who1.png", "department":"marketing"},
{"name":"Employee5", "surname":"Employee1", "filename" : "jac2.png", "department":"marketing"},
{"name":"Employee6", "surname":"Employee1", "filename" : "mart2.png", "department":"marketing"},
{"name":"Employee6", "surname":"Employee1", "filename" : "sav2.png", "department":"marketing"},
{"name":"Employee7", "surname":"Employee1", "filename" : "who2.png", "department":"marketing"},
{"name":"Employee8", "surname":"Employee1", "filename" : "jac3.png", "department":"marketing"},
{"name":"Employee9", "surname":"Employee1", "filename" : "mart3.png", "department":"marketing"},
{"name":"Employee20", "surname":"Employee1", "filename" : "jac6.png", "department":"marketing"},
{"name":"Employee21", "surname":"Employee1", "filename" : "mart6.png", "department":"marketing"},
{"name":"Employee22", "surname":"Employee1", "filename" : "sav6.png", "department":"marketing"},
{"name":"Employee23", "surname":"Employee1", "filename" : "who6.png", "department":"marketing"},
{"name":"Employee24", "surname":"Employee1", "filename" : "jac7.png", "department":"marketing"},
{"name":"Employee25", "surname":"Employee1", "filename" : "mart7.png", "department":"marketing"},
{"name":"Employee26", "surname":"Employee1", "filename" : "sav7.png", "department":"marketing"},
{"name":"Employee27", "surname":"Employee1", "filename" : "who7.png", "department":"marketing"},
{"name":"Employee28", "surname":"Employee1", "filename" : "jac8.png", "department":"staff"},
{"name":"Employee29", "surname":"Employee1", "filename" : "mart8.png", "department":"staff"},
{"name":"Employee40", "surname":"Employee1", "filename" : "jac11.png", "department":"staff"},
{"name":"Employee41", "surname":"Employee1", "filename" : "mart11.png", "department":"marketing"},
{"name":"Employee42", "surname":"Employee1", "filename" : "sav11.png", "department":"marketing"},
{"name":"Employee43", "surname":"Employee1", "filename" : "who11.png", "department":"marketing"},
{"name":"Employee44", "surname":"Employee1", "filename" : "jac12.png", "department":"staff"},
{"name":"Employee45", "surname":"Employee1", "filename" : "mart12.png", "department":"staff"},
{"name":"Employee46", "surname":"Employee1", "filename" : "sav12.png", "department":"staff"},
{"name":"Employee47", "surname":"Employee1", "filename" : "who12.png", "department":"staff"},
{"name":"Employee48", "surname":"Employee1", "filename" : "jac13.png", "department":"staff"},
{"name":"Employee49", "surname":"Employee1", "filename" : "mart13.png", "department":"staff"},
{"name":"Employee30", "surname":"Employee1", "filename" : "sav8.png", "department":"marketing"},
{"name":"Employee31", "surname":"Employee1", "filename" : "who8.png", "department":"marketing"},
{"name":"Employee32", "surname":"Employee1", "filename" : "jac9.png", "department":"marketing"},
{"name":"Employee33", "surname":"Employee1", "filename" : "mart9.png", "department":"staff"},
{"name":"Employee34", "surname":"Employee1", "filename" : "sav9.png", "department":"staff"},
{"name":"Employee35", "surname":"Employee1", "filename" : "who9.png", "department":"marketing"},
{"name":"Employee36", "surname":"Employee1", "filename" : "jac10.png", "department":"marketing"},
{"name":"Employee37", "surname":"Employee1", "filename" : "mart10.png", "department":"staff"},
{"name":"Employee38", "surname":"Employee1", "filename" : "sav10.png", "department":"staff"},
{"name":"Employee39", "surname":"Employee1", "filename" : "who10.png", "department":"staff"}
];<|fim▁end|> | |
<|file_name|>acrobot.py<|end_file_name|><|fim▁begin|>'''
Created on Jan 11, 2016
@author: Lucas Lehnert ([email protected])
Control on the Acrobot domain.
'''
import numpy as np
import matplotlib.pyplot as plt
from mdp import MDPContinuousState
from basisfunction import getTiledStateActionBasisFunction
from policy import BoltzmannPolicy
from qlearning import Q, GQ, PGQ, SARSA
from experiment import experimentSimulateTransitions
from util.numpy_json import numpyDictionaryToJson
def rk4( derivatives, x_t, t, stepSize ):
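    '''
    One step of the classical fourth-order Runge-Kutta method: four slope
    estimates k1..k4 are blended as (k1 + 2*k2 + 2*k3 + k4) / 6.
    '''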
k1 = derivatives( x_t, t )
k2 = derivatives( x_t + stepSize / 2.0 * k1, t + stepSize / 2.0 )
k3 = derivatives( x_t + stepSize / 2.0 * k2, t + stepSize / 2.0 )
k4 = derivatives( x_t + stepSize * k3, t + stepSize )
x_t1 = x_t + stepSize / 6.0 * ( k1 + 2 * k2 + 2 * k3 + k4 )
return x_t1
def wrap( x, m, M ):
'''
also from rlpy
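    Wraps x into [m, M] by shifting in multiples of (M - m),
    e.g. wrap(1.5 * np.pi, -np.pi, np.pi) == -0.5 * np.pi.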
'''
diff = M - m
while x > M:
x -= diff
while x < m:
x += diff
return x
def bound( x, m, M ):
return min( max( x, m ), M )
def createAcrobotMDP():
thetaRange = np.linspace( 0, 2 * np.pi, 10 )
thetaDot1Range = np.linspace( -4 * np.pi, 4 * np.pi, 9 )
thetaDot2Range = np.linspace( -9 * np.pi, 9 * np.pi, 19 )
t1, t2, t1d, t2d = np.meshgrid( thetaRange, thetaRange, thetaDot1Range, thetaDot2Range )
discretizedStateSpace = np.array( [ t1.flatten(), t2.flatten(), t1d.flatten(), t2d.flatten() ], \
dtype=np.double ).T
torque = np.array( [ -1, 0, 1 ] )
m1 = m2 = 1
l1 = 1
lc1 = lc2 = 0.5
I1 = I2 = 1
g = 9.8
def derivatives( sa, t ):
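        # Acrobot equations of motion in the standard RL formulation
        # (cf. Sutton & Barto): sa packs [theta1, theta2, dtheta1, dtheta2, tau],
        # and the returned vector is the time derivative of sa (tau is constant).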
theta1 = sa[0]
theta2 = sa[1]
dtheta1 = sa[2]
dtheta2 = sa[3]
tau = sa[4]
d1 = m1 * lc1 ** 2 + m2 \
* ( l1 ** 2 + lc2 ** 2 + 2 * l1 * lc2 * np.cos( theta2 ) ) + I1 + I2
d2 = m2 * ( lc2 ** 2 + l1 * lc2 * np.cos( theta2 ) ) + I2
phi2 = m2 * lc2 * g * np.cos( theta1 + theta2 - np.pi / 2. )
phi1 = -m2 * l1 * lc2 * dtheta2 ** 2 * np.sin( theta2 ) \
- 2 * m2 * l1 * lc2 * dtheta2 * dtheta1 * np.sin( theta2 ) \
+ ( m1 * lc1 + m2 * l1 ) * g * np.cos( theta1 - np.pi / 2 ) + phi2
ddtheta2 = ( tau + d2 / d1 * phi1 - m2 * l1 * lc2 * dtheta1 ** 2 * np.sin( theta2 ) - phi2 ) \
/ ( m2 * lc2 ** 2 + I2 - d2 ** 2 / d1 )
ddtheta1 = -( d2 * ddtheta2 + phi1 ) / d1
return np.array( [ dtheta1, dtheta2, ddtheta1, ddtheta2, 0. ] )
def transitionSampler( state, action ):
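        # One environment step: integrate the dynamics for 0.2s with RK4, then
        # wrap both joint angles into [-pi, pi] and clamp the joint velocities.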
sa = np.append( state, [action] )
sa_next = rk4( derivatives, sa, 0, .2 )
staten = sa_next[:4]
staten[0] = wrap( staten[0], -np.pi, np.pi )
staten[1] = wrap( staten[1], -np.pi, np.pi )
staten[2] = bound( staten[2], -4 * np.pi, 4 * np.pi )
staten[3] = bound( staten[3], -9 * np.pi, 9 * np.pi )
return staten
    isGoalState = lambda s: -np.cos( s[0] ) - np.cos( s[1] + s[0] ) > 1.
def rewardFunction( state, action, staten ):
return -1.0 if not isGoalState( staten ) else 0.0
gamma = 1.0
def startStateSampler():
return np.zeros( 4 )
discretizedStartStateDistribution = np.zeros( len( discretizedStateSpace ) )
startInd = np.where( np.all( discretizedStateSpace == np.zeros( 4 ), axis=1 ) )[0][0]
discretizedStartStateDistribution[startInd] = 1.0
return MDPContinuousState( torque, transitionSampler, rewardFunction, gamma, startStateSampler, \
isGoalState, discretizedStateSpace, discretizedStartStateDistribution )
#def getBasisFunction( mdp ):
# return tileCodedStateActionBinaryBasisFunction( \
# mdp.getStateSpace(), mdp.getActionSpace(), \
# [2. / 5. * np.pi, 2. / 5. * np.pi, 8. / 6. * np.pi, 18. / 6. * np.pi], 1 )
def getBasisFunction( mdp ):
return getTiledStateActionBasisFunction( mdp, [12, 14, 12, 14] )
# minS = np.array( map( lambda s: np.min( s ), mdp.getStateSpace().T ) )
# maxS = np.array( map( lambda s: np.max( s ), mdp.getStateSpace().T ) )
#
## tileNum = np.array([2.,2.,2.,2.])
# tileLen = ( maxS - minS ) / tileNum
## phiLen = int( np.prod(tileNum) )
## print 'number of features: ' + str(phiLen)
#
# def phis( s ):
# stripeInd = np.array( np.floor( ( s - minS ) / tileLen - ( 10 ** -10 ) ), dtype=np.int )
# phiv = []
#
# for i in range( len( tileNum ) ):
# stripe = np.zeros( tileNum[i] )
# stripe[stripeInd[i]] = 1.0
# # print str(i) + ':' + str(stripe)
# if len( phiv ) == 0:
# phiv = stripe
# else:
# phiv = np.outer( phiv, stripe ).flatten()
# return phiv
#
# actionSet = mdp.getActionSpace()
# def phia( a ):
# aInd = np.where( a == actionSet )[0][0]
# phiv = np.zeros( len( actionSet ) )
# phiv[aInd] = 1.0
# return phiv
#
# def phi( s, a ):
# ps = phis( s )
# pa = phia( a )
# return np.outer( pa, ps ).flatten()
#
# return phi
def testAcrobot():
mdp = createAcrobotMDP()
# phi = getBasisFunction( mdp )
# phi = tileCodedStateActionBinaryBasisFunction( \
# mdp.getStateSpace(), mdp.getActionSpace(), \
# [2. / 5. * np.pi, 2. / 5. * np.pi, 8. / 6. * np.pi, 18. / 6. * np.pi], 4 )
phi = getBasisFunction( mdp )
alpha = 0.05
beta = 0.01
# traceLambda = 0.0
temp = 0.5
controlPolicy = BoltzmannPolicy( mdp.getActionSpace(), temperature=temp )
# controlPolicy = GreedyPolicy( mdp.getActionSpace() )
initTheta = np.zeros( len( phi( mdp.getStateSpace()[0], mdp.getActionSpace()[0] ) ) )
# agent = SARSA( mdp.getGamma(), controlPolicy, traceLambda=traceLambda, basisFunction=phi, \
# initTheta=initTheta, alpha=alpha )
# agent = Q( initTheta=initTheta, basisFunction=phi, gamma=mdp.getGamma(), \
# alpha=alpha, actionSpace=mdp.getActionSpace(), \
# behaviorPolicy=controlPolicy, targetPolicy=controlPolicy )
agent = GQ( initTheta=initTheta, basisFunction=phi, gamma=mdp.getGamma(), \
alpha=alpha, beta=beta, actionSpace=mdp.getActionSpace(), \
behaviorPolicy=controlPolicy, targetPolicy=controlPolicy )
iterations = 5000
def rewardListener( s, a, snext, reward ):
return reward
tnorm = lambda t : np.linalg.norm( t.getTheta() )
thetaNormLog = []
episodeLengthLog = []
theta = agent.getTheta()
for epInd in range( 800 ):
print 'Episode ' + str( epInd + 1 )
errorBenchmarksRep, rewardLog = experimentSimulateTransitions( \
iterations, mdp, controlPolicy, agent, \
errorMeasures=[tnorm], transitionListener=[rewardListener], actionFromAgent=False )
thetaNorm = map( lambda n: n[-1], errorBenchmarksRep )
episodeLength = map( lambda e: len( e ), rewardLog )
thetaNext = agent.getTheta()
print theta - thetaNext
theta = thetaNext
thetaNormLog.append( thetaNorm[0] )
episodeLengthLog.append( episodeLength[0] )
print '\tLength: ' + str( episodeLength[0] )
        print '\ttheta norm: ' + str( thetaNorm[0] )
thetaNormLog = np.array( thetaNormLog )
episodeLengthLog = np.array( episodeLengthLog )
# episodeLengthMean = np.mean( episodeLengthLog, axis=0 )
# episodeLengthStd = np.std( episodeLengthLog, axis=0 )
# episodeStep = range( 0, len( episodeLengthMean ) )
#
# episodeLengthMeanSub = episodeLengthMean[0:len( episodeLengthMean ):50]
# episodeLengthStdSub = episodeLengthStd[0:len( episodeLengthStd ):50]
# episodeStepSub = episodeStep[0:len( episodeStep ):50]
print episodeLengthLog
plt.plot( range( 1, len( episodeLengthLog ) + 1 ), episodeLengthLog )
plt.xlabel( 'Episode' )
plt.ylabel( 'Episode Length' )
# plt.gca().setyscale( 'log' )
plt.ylim( [0, 2000] )
plt.show()
def runExperiment( **configuration ):
mdp = createAcrobotMDP()
phi = getBasisFunction( mdp )
initTheta = np.zeros( len( phi( mdp.getStateSpace()[0], mdp.getActionSpace()[0] ) ) )
piBehavior = BoltzmannPolicy( mdp.getActionSpace(), temperature=configuration['behaviorTemperature'] )
piTarget = BoltzmannPolicy( mdp.getActionSpace(), temperature=configuration['targetTemperature'] )
alpha = configuration['alpha']
beta = configuration['beta']
iterations = configuration['iterations']
repeats = configuration['repeats']
def rewardListener( s, a, snext, reward ):
return reward
tnorm = lambda t : np.linalg.norm( t.getTheta() )
thetaNormExp = []
episodeLengthExp = []
actionFromAgent = True if configuration['agent'] == 'SARSA' else False
successfulRepeats = 0
for rep in range( repeats ):
if configuration['agent'] == 'Q':
agent = Q( initTheta=initTheta, basisFunction=phi, gamma=mdp.getGamma(), \
alpha=alpha, actionSpace=mdp.getActionSpace(), \
behaviorPolicy=piBehavior, targetPolicy=piTarget )
elif configuration['agent'] == 'SARSA':
agent = SARSA( mdp.getGamma(), behaviorPolicy=piBehavior, traceLambda=configuration['traceLambda'], \
basisFunction=phi, initTheta=initTheta, alpha=alpha )
elif configuration['agent'] == 'GQ':
agent = GQ( initTheta=initTheta, basisFunction=phi, gamma=mdp.getGamma(), \
alpha=alpha, beta=beta, actionSpace=mdp.getActionSpace(), \
behaviorPolicy=piBehavior, targetPolicy=piTarget )
elif configuration['agent'] == 'PGQ':
agent = PGQ( initTheta=initTheta, basisFunction=phi, gamma=mdp.getGamma(), \
alpha=alpha, beta=beta, actionSpace=mdp.getActionSpace(), \
behaviorPolicy=piBehavior, targetPolicy=piTarget )
<|fim▁hole|> try:
thetaNormLog = []
episodeLengthLog = []
for _ in range( configuration['episodes'] ):
errorBenchmarksRep, rewardLog, _ = experimentSimulateTransitions( \
iterations, mdp, piBehavior, agent, \
errorMeasures=[tnorm], transitionListener=[rewardListener], \
actionFromAgent=actionFromAgent )
thetaNorm = map( lambda n: n[-1], errorBenchmarksRep )
episodeLength = map( lambda e: len( e ), rewardLog )
thetaNormLog.append( thetaNorm[0] )
episodeLengthLog.append( episodeLength[0] )
thetaNormLog = np.array( thetaNormLog )
episodeLengthLog = np.array( episodeLengthLog )
thetaNormExp.append( thetaNormLog )
episodeLengthExp.append( episodeLengthLog )
successfulRepeats += 1
except Exception as e:
print e
continue
experimentResults = { 'thetaNorm' : thetaNormExp, 'episodeLength' : episodeLengthExp, \
'successfulRepeats' : successfulRepeats }
return experimentResults
def main():
import datetime
startTime = datetime.datetime.now()
print 'Started at ' + str( startTime )
import argparse
parser = argparse.ArgumentParser( description='Acrobot Experiments', \
formatter_class=argparse.ArgumentDefaultsHelpFormatter )
parser.add_argument( '-r', '--resultFile', type=str, default='../experiment/test.json', help='Result file path.' )
parser.add_argument( '-e', '--episodes', type=int, default=1, help='Number of episodes to run.' )
parser.add_argument( '-i', '--iterations', type=int, default=10, help='Number of iterations to run.' )
parser.add_argument( '-R', '--repeats', type=int, default=1, help='Number of repeats to run.' )
parser.add_argument( '-a', '--alpha', type=float, default=0.1, help='Alpha learning rate to run.' )
parser.add_argument( '-b', '--beta', type=float, default=0.1, help='Beta learning rate to run.' )
parser.add_argument( '-A', '--agent', type=str, default='GQ', help='Algorithm to run.' )
parser.add_argument( '--behaviorTemperature', type=float, default=1.0, help='Behavior temperature.' )
parser.add_argument( '--targetTemperature', type=float, default=1.0, help='Target temperature.' )
args = parser.parse_args()
configuration = {}
configuration['episodes'] = args.episodes
configuration['iterations'] = args.iterations
configuration['repeats'] = args.repeats
configuration['agent'] = args.agent
configuration['alpha'] = args.alpha
configuration['beta'] = args.beta
configuration['behaviorTemperature'] = args.behaviorTemperature
configuration['targetTemperature'] = args.targetTemperature
experimentResults = runExperiment( **configuration )
result = {'configuration' : configuration,
              'experiment' : 'acrobot',
'results' : experimentResults }
numpyDictionaryToJson( result, args.resultFile )
stopTime = datetime.datetime.now()
print 'Done at ' + str( stopTime )
print 'Durection ' + str( stopTime - startTime )
if __name__ == '__main__':
main()
pass<|fim▁end|> | print 'Running repeat ' + str( rep ) |
<|file_name|>face_quad9.C<|end_file_name|><|fim▁begin|>// The libMesh Finite Element Library.
// Copyright (C) 2002-2018 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
// Local includes
#include "libmesh/side.h"
#include "libmesh/edge_edge3.h"
#include "libmesh/face_quad9.h"
#include "libmesh/enum_io_package.h"
#include "libmesh/enum_order.h"
namespace libMesh
{
// ------------------------------------------------------------
// Quad9 class static member initializations
const int Quad9::num_nodes;
const int Quad9::num_sides;
const int Quad9::num_children;
const int Quad9::nodes_per_side;
const unsigned int Quad9::side_nodes_map[Quad9::num_sides][Quad9::nodes_per_side] =
{
{0, 1, 4}, // Side 0
{1, 2, 5}, // Side 1
{2, 3, 6}, // Side 2
{3, 0, 7} // Side 3
};
#ifdef LIBMESH_ENABLE_AMR
const float Quad9::_embedding_matrix[Quad9::num_children][Quad9::num_nodes][Quad9::num_nodes] =
{
// embedding matrix for child 0
{
// 0 1 2 3 4 5 6 7 8
{ 1.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000 }, // 0
{ 0.00000, 0.00000, 0.00000, 0.00000, 1.00000, 0.00000, 0.00000, 0.00000, 0.00000 }, // 1
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 1.00000 }, // 2
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 1.00000, 0.00000 }, // 3
{ 0.375000, -0.125000, 0.00000, 0.00000, 0.750000, 0.00000, 0.00000, 0.00000, 0.00000 }, // 4
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.375000, 0.00000, -0.125000, 0.00000, 0.750000 }, // 5
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, -0.125000, 0.00000, 0.375000, 0.750000 }, // 6
{ 0.375000, 0.00000, 0.00000, -0.125000, 0.00000, 0.00000, 0.00000, 0.750000, 0.00000 }, // 7
{ 0.140625, -0.0468750, 0.0156250, -0.0468750, 0.281250, -0.0937500, -0.0937500, 0.281250, 0.562500 } // 8
},
// embedding matrix for child 1
{
// 0 1 2 3 4 5 6 7 8
{ 0.00000, 0.00000, 0.00000, 0.00000, 1.00000, 0.00000, 0.00000, 0.00000, 0.00000 }, // 0
{ 0.00000, 1.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000 }, // 1
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 1.00000, 0.00000, 0.00000, 0.00000 }, // 2
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 1.00000 }, // 3
{ -0.125000, 0.375000, 0.00000, 0.00000, 0.750000, 0.00000, 0.00000, 0.00000, 0.00000 }, // 4
{ 0.00000, 0.375000, -0.125000, 0.00000, 0.00000, 0.750000, 0.00000, 0.00000, 0.00000 }, // 5
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.375000, 0.00000, -0.125000, 0.750000 }, // 6
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.375000, 0.00000, -0.125000, 0.00000, 0.750000 }, // 7
{ -0.0468750, 0.140625, -0.0468750, 0.0156250, 0.281250, 0.281250, -0.0937500, -0.0937500, 0.562500 } // 8
},
// embedding matrix for child 2
{
// 0 1 2 3 4 5 6 7 8
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 1.00000, 0.00000 }, // 0
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 1.00000 }, // 1
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 1.00000, 0.00000, 0.00000 }, // 2
{ 0.00000, 0.00000, 0.00000, 1.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000 }, // 3
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, -0.125000, 0.00000, 0.375000, 0.750000 }, // 4
{ 0.00000, 0.00000, 0.00000, 0.00000, -0.125000, 0.00000, 0.375000, 0.00000, 0.750000 }, // 5
{ 0.00000, 0.00000, -0.125000, 0.375000, 0.00000, 0.00000, 0.750000, 0.00000, 0.00000 }, // 6
{ -0.125000, 0.00000, 0.00000, 0.375000, 0.00000, 0.00000, 0.00000, 0.750000, 0.00000 }, // 7
{ -0.0468750, 0.0156250, -0.0468750, 0.140625, -0.0937500, -0.0937500, 0.281250, 0.281250, 0.562500 } // 8
},
// embedding matrix for child 3
{
// 0 1 2 3 4 5 6 7 8
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 1.00000 }, // 0
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 1.00000, 0.00000, 0.00000, 0.00000 }, // 1
{ 0.00000, 0.00000, 1.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000 }, // 2
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 1.00000, 0.00000, 0.00000 }, // 3
{ 0.00000, 0.00000, 0.00000, 0.00000, 0.00000, 0.375000, 0.00000, -0.125000, 0.750000 }, // 4
{ 0.00000, -0.125000, 0.375000, 0.00000, 0.00000, 0.750000, 0.00000, 0.00000, 0.00000 }, // 5
{ 0.00000, 0.00000, 0.375000, -0.125000, 0.00000, 0.00000, 0.750000, 0.00000, 0.00000 }, // 6
{ 0.00000, 0.00000, 0.00000, 0.00000, -0.125000, 0.00000, 0.375000, 0.00000, 0.750000 }, // 7
{ 0.0156250, -0.0468750, 0.140625, -0.0468750, -0.0937500, 0.281250, 0.281250, -0.0937500, 0.562500 } // 8
}
};
#endif
// ------------------------------------------------------------
// Quad9 class member functions
bool Quad9::is_vertex(const unsigned int i) const
{
if (i < 4)
return true;
return false;
}
bool Quad9::is_edge(const unsigned int i) const
{
if (i < 4)
return false;
if (i > 7)
return false;
return true;
}
bool Quad9::is_face(const unsigned int i) const
{
if (i > 7)
return true;
return false;
}
bool Quad9::is_node_on_side(const unsigned int n,
const unsigned int s) const
{
libmesh_assert_less (s, n_sides());
return std::find(std::begin(side_nodes_map[s]),
std::end(side_nodes_map[s]),
n) != std::end(side_nodes_map[s]);
}
std::vector<unsigned>
Quad9::nodes_on_side(const unsigned int s) const
{
libmesh_assert_less(s, n_sides());
return {std::begin(side_nodes_map[s]), std::end(side_nodes_map[s])};
}
bool Quad9::has_affine_map() const
{
// make sure corners form a parallelogram
Point v = this->point(1) - this->point(0);
if (!v.relative_fuzzy_equals(this->point(2) - this->point(3)))
return false;
// make sure "horizontal" sides are straight
v /= 2;
if (!v.relative_fuzzy_equals(this->point(4) - this->point(0)) ||
!v.relative_fuzzy_equals(this->point(6) - this->point(3)))
return false;
// make sure "vertical" sides are straight
// and the center node is centered
v = (this->point(3) - this->point(0))/2;
if (!v.relative_fuzzy_equals(this->point(7) - this->point(0)) ||
!v.relative_fuzzy_equals(this->point(5) - this->point(1)) ||
!v.relative_fuzzy_equals(this->point(8) - this->point(4)))
return false;
return true;<|fim▁hole|>Order Quad9::default_order() const
{
return SECOND;
}
dof_id_type Quad9::key (const unsigned int s) const
{
libmesh_assert_less (s, this->n_sides());
switch (s)
{
case 0:
return
this->compute_key (this->node_id(4));
case 1:
return
this->compute_key (this->node_id(5));
case 2:
return
this->compute_key (this->node_id(6));
case 3:
return
this->compute_key (this->node_id(7));
default:
libmesh_error_msg("Invalid side s = " << s);
}
}
dof_id_type Quad9::key () const
{
return this->compute_key(this->node_id(8));
}
unsigned int Quad9::which_node_am_i(unsigned int side,
unsigned int side_node) const
{
libmesh_assert_less (side, this->n_sides());
libmesh_assert_less (side_node, Quad9::nodes_per_side);
return Quad9::side_nodes_map[side][side_node];
}
std::unique_ptr<Elem> Quad9::build_side_ptr (const unsigned int i,
bool proxy)
{
libmesh_assert_less (i, this->n_sides());
if (proxy)
return libmesh_make_unique<Side<Edge3,Quad9>>(this,i);
else
{
std::unique_ptr<Elem> edge = libmesh_make_unique<Edge3>();
edge->subdomain_id() = this->subdomain_id();
// Set the nodes
for (unsigned n=0; n<edge->n_nodes(); ++n)
edge->set_node(n) = this->node_ptr(Quad9::side_nodes_map[i][n]);
return edge;
}
}
void Quad9::connectivity(const unsigned int sf,
const IOPackage iop,
std::vector<dof_id_type> & conn) const
{
libmesh_assert_less (sf, this->n_sub_elem());
libmesh_assert_not_equal_to (iop, INVALID_IO_PACKAGE);
conn.resize(4);
switch (iop)
{
case TECPLOT:
{
switch(sf)
{
case 0:
// linear sub-quad 0
conn[0] = this->node_id(0)+1;
conn[1] = this->node_id(4)+1;
conn[2] = this->node_id(8)+1;
conn[3] = this->node_id(7)+1;
return;
case 1:
// linear sub-quad 1
conn[0] = this->node_id(4)+1;
conn[1] = this->node_id(1)+1;
conn[2] = this->node_id(5)+1;
conn[3] = this->node_id(8)+1;
return;
case 2:
// linear sub-quad 2
conn[0] = this->node_id(7)+1;
conn[1] = this->node_id(8)+1;
conn[2] = this->node_id(6)+1;
conn[3] = this->node_id(3)+1;
return;
case 3:
// linear sub-quad 3
conn[0] = this->node_id(8)+1;
conn[1] = this->node_id(5)+1;
conn[2] = this->node_id(2)+1;
conn[3] = this->node_id(6)+1;
return;
default:
libmesh_error_msg("Invalid sf = " << sf);
}
}
case VTK:
{
conn.resize(9);
conn[0] = this->node_id(0);
conn[1] = this->node_id(1);
conn[2] = this->node_id(2);
conn[3] = this->node_id(3);
conn[4] = this->node_id(4);
conn[5] = this->node_id(5);
conn[6] = this->node_id(6);
conn[7] = this->node_id(7);
conn[8] = this->node_id(8);
return;
/*
switch(sf)
{
case 0:
// linear sub-quad 0
conn[0] = this->node_id(0);
conn[1] = this->node_id(4);
conn[2] = this->node_id(8);
conn[3] = this->node_id(7);
return;
case 1:
// linear sub-quad 1
conn[0] = this->node_id(4);
conn[1] = this->node_id(1);
conn[2] = this->node_id(5);
conn[3] = this->node_id(8);
return;
case 2:
// linear sub-quad 2
conn[0] = this->node_id(7);
conn[1] = this->node_id(8);
conn[2] = this->node_id(6);
conn[3] = this->node_id(3);
return;
case 3:
// linear sub-quad 3
conn[0] = this->node_id(8);
conn[1] = this->node_id(5);
conn[2] = this->node_id(2);
conn[3] = this->node_id(6);
return;
default:
libmesh_error_msg("Invalid sf = " << sf);
}*/
}
default:
libmesh_error_msg("Unsupported IO package " << iop);
}
}
BoundingBox Quad9::loose_bounding_box () const
{
// This might have curved edges, or might be a curved surface in
// 3-space, in which case the full bounding box can be larger than
// the bounding box of just the nodes.
//
//
// FIXME - I haven't yet proven the formula below to be correct for
// biquadratics - RHS
Point pmin, pmax;
for (unsigned d=0; d<LIBMESH_DIM; ++d)
{
const Real center = this->point(8)(d);
Real hd = std::abs(center - this->point(0)(d));
for (unsigned int p=0; p != 8; ++p)
hd = std::max(hd, std::abs(center - this->point(p)(d)));
pmin(d) = center - hd;
pmax(d) = center + hd;
}
return BoundingBox(pmin, pmax);
}
Real Quad9::volume () const
{
// Make copies of our points. It makes the subsequent calculations a bit
// shorter and avoids dereferencing the same pointer multiple times.
Point
x0 = point(0), x1 = point(1), x2 = point(2),
x3 = point(3), x4 = point(4), x5 = point(5),
x6 = point(6), x7 = point(7), x8 = point(8);
// Construct constant data vectors.
  // \vec{x}_{\xi}  = \vec{a1}*xi*eta^2 + \vec{b1}*eta^2 + \vec{c1}*xi*eta + \vec{d1}*xi + \vec{e1}*eta + \vec{f1}
  // \vec{x}_{\eta} = \vec{a2}*xi^2*eta + \vec{b2}*xi^2  + \vec{c2}*xi*eta + \vec{d2}*xi + \vec{e2}*eta + \vec{f2}
// This is copy-pasted directly from the output of a Python script.
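  // The element area is then the integral of |x_xi cross x_eta| over the
  // reference square, evaluated below with a tensor-product Gauss rule.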
Point
a1 = x0/2 + x1/2 + x2/2 + x3/2 - x4 - x5 - x6 - x7 + 2*x8,
b1 = -x0/4 + x1/4 + x2/4 - x3/4 - x5/2 + x7/2,
c1 = -x0/2 - x1/2 + x2/2 + x3/2 + x4 - x6,
d1 = x5 + x7 - 2*x8,
e1 = x0/4 - x1/4 + x2/4 - x3/4,
f1 = x5/2 - x7/2,
a2 = a1,
b2 = -x0/4 - x1/4 + x2/4 + x3/4 + x4/2 - x6/2,
c2 = -x0/2 + x1/2 + x2/2 - x3/2 - x5 + x7,
d2 = x0/4 - x1/4 + x2/4 - x3/4,
e2 = x4 + x6 - 2*x8,
f2 = -x4/2 + x6/2;
// 3x3 quadrature, exact for bi-quintics
const unsigned int N = 3;
const Real q[N] = {-std::sqrt(15)/5., 0., std::sqrt(15)/5.};
const Real w[N] = {5./9, 8./9, 5./9};
Real vol=0.;
for (unsigned int i=0; i<N; ++i)
for (unsigned int j=0; j<N; ++j)
vol += w[i] * w[j] *
cross_norm(q[i]*q[j]*q[j]*a1 + q[j]*q[j]*b1 + q[j]*q[i]*c1 + q[i]*d1 + q[j]*e1 + f1,
q[i]*q[i]*q[j]*a2 + q[i]*q[i]*b2 + q[j]*q[i]*c2 + q[i]*d2 + q[j]*e2 + f2);
return vol;
}
unsigned int Quad9::n_second_order_adjacent_vertices (const unsigned int n) const
{
switch (n)
{
case 4:
case 5:
case 6:
case 7:
return 2;
case 8:
return 4;
default:
libmesh_error_msg("Invalid n = " << n);
}
}
unsigned short int Quad9::second_order_adjacent_vertex (const unsigned int n,
const unsigned int v) const
{
libmesh_assert_greater_equal (n, this->n_vertices());
libmesh_assert_less (n, this->n_nodes());
switch (n)
{
case 8:
{
libmesh_assert_less (v, 4);
return static_cast<unsigned short int>(v);
}
default:
{
libmesh_assert_less (v, 2);
// use the matrix that we inherited from \p Quad
return _second_order_adjacent_vertices[n-this->n_vertices()][v];
}
}
}
std::pair<unsigned short int, unsigned short int>
Quad9::second_order_child_vertex (const unsigned int n) const
{
libmesh_assert_greater_equal (n, this->n_vertices());
libmesh_assert_less (n, this->n_nodes());
/*
* the _second_order_vertex_child_* vectors are
* stored in face_quad.C, since they are identical
* for Quad8 and Quad9 (for the first 4 higher-order nodes)
*/
return std::pair<unsigned short int, unsigned short int>
(_second_order_vertex_child_number[n],
_second_order_vertex_child_index[n]);
}
} // namespace libMesh<|fim▁end|> | }
|
<|file_name|>connector-definition-visitor.js<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
define(['lodash', 'log', 'event_channel', './abstract-source-gen-visitor', './connector-action-visitor',
'./variable-declaration-visitor', './connector-declaration-visitor', './statement-visitor-factory'],
function(_, log, EventChannel, AbstractSourceGenVisitor, ConnectorActionVisitor,
VariableDeclarationVisitor, ConnectorDeclarationVisitor, StatementVisitorFactory) {
/**
* @param {ASTVisitor} parent - parent visitor
* @constructor
*/
var ConnectorDefinitionVisitor = function (parent) {
AbstractSourceGenVisitor.call(this, parent);
};
ConnectorDefinitionVisitor.prototype = Object.create(AbstractSourceGenVisitor.prototype);
ConnectorDefinitionVisitor.prototype.constructor = ConnectorDefinitionVisitor;
ConnectorDefinitionVisitor.prototype.canVisitConnectorDefinition = function(connectorDefinition){
return true;
};<|fim▁hole|> * @param {ConnectorDefinition} connectorDefinition - Connector Definition
*/
ConnectorDefinitionVisitor.prototype.beginVisitConnectorDefinition = function(connectorDefinition){
/**
             * Generate the source for the opening of the connector definition construct.
             * If additional parameters are dynamically added to the construct's opening,
             * that part of the source generation has to be handled here.
*/
var self = this;
var argumentsSrc = "";
_.forEach(connectorDefinition.getAnnotations(), function(annotation) {
if (!_.isEmpty(annotation.value)) {
var constructedPathAnnotation;
if (annotation.key.indexOf(":") === -1) {
constructedPathAnnotation = '@' + annotation.key + '("' + annotation.value + '")\n';
} else {
constructedPathAnnotation = '@' + annotation.key.split(":")[0] + '(' + annotation.key.split(":")[1] +
' = "' + annotation.value + '")\n';
}
self.appendSource(constructedPathAnnotation);
}
});
_.forEach(connectorDefinition.getArguments(), function(argument, index){
argumentsSrc += argument.type + " ";
argumentsSrc += argument.identifier;
                if (connectorDefinition.getArguments().length - 1 !== index) {
argumentsSrc += ", ";
}
});
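            // e.g. a connector "Foo" taking (message m) opens as:
            //   connector Foo (message m) {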
var constructedSourceSegment = 'connector ' + connectorDefinition.getConnectorName() +
' (' + argumentsSrc + ')' + ' {\n';
this.appendSource(constructedSourceSegment);
log.debug('Begin Visit Connector Definition');
};
ConnectorDefinitionVisitor.prototype.visitConnectorDefinition = function(connectorDefinition){
log.debug('Visit Connector Definition');
};
/**
* End visiting the connector definition
* @param {ConnectorDefinition} connectorDefinition - Connector Definition
*/
ConnectorDefinitionVisitor.prototype.endVisitConnectorDefinition = function(connectorDefinition){
this.appendSource("}\n");
this.getParent().appendSource(this.getGeneratedSource());
log.debug('End Visit Connector Definition');
};
/**
* Visit Connector Action
* @param {ConnectorAction} connectorAction
*/
ConnectorDefinitionVisitor.prototype.visitConnectorAction = function(connectorAction){
var connectorActionVisitor = new ConnectorActionVisitor(this);
connectorAction.accept(connectorActionVisitor);
};
/**
* Visit Connector Declaration
* @param {ConnectorDeclaration} connectorDeclaration
*/
ConnectorDefinitionVisitor.prototype.visitConnectorDeclaration = function(connectorDeclaration){
var connectorDeclarationVisitor = new ConnectorDeclarationVisitor(this);
connectorDeclaration.accept(connectorDeclarationVisitor);
};
/**
* Visit Variable Declaration
* @param {VariableDeclaration} variableDeclaration
*/
ConnectorDefinitionVisitor.prototype.visitVariableDeclaration = function(variableDeclaration){
var variableDeclarationVisitor = new VariableDeclarationVisitor(this);
variableDeclaration.accept(variableDeclarationVisitor);
};
/**
* Visit Statements
* @param {Statement} statement
*/
ConnectorDefinitionVisitor.prototype.visitStatement = function (statement) {
var statementVisitorFactory = new StatementVisitorFactory();
var statementVisitor = statementVisitorFactory.getStatementVisitor(statement, this);
statement.accept(statementVisitor);
};
return ConnectorDefinitionVisitor;
});<|fim▁end|> |
/**
* Begin the visit and generate the source |
<|file_name|>harfbuzz.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
use app_units::Au;
use euclid::Point2D;
use font::{ShapingFlags, Font, FontTableMethods, FontTableTag, ShapingOptions, KERN};
use harfbuzz::{HB_DIRECTION_LTR, HB_DIRECTION_RTL, HB_MEMORY_MODE_READONLY};
use harfbuzz::{hb_blob_create, hb_face_create_for_tables};
use harfbuzz::{hb_buffer_create, hb_font_destroy};
use harfbuzz::{hb_buffer_get_glyph_infos, hb_shape};
use harfbuzz::{hb_buffer_set_direction, hb_buffer_set_script};
use harfbuzz::{hb_buffer_t, hb_codepoint_t, hb_font_funcs_t};
use harfbuzz::{hb_face_t, hb_font_t};
use harfbuzz::{hb_position_t, hb_tag_t};
use harfbuzz::hb_blob_t;
use harfbuzz::hb_bool_t;
use harfbuzz::hb_buffer_add_utf8;
use harfbuzz::hb_buffer_destroy;
use harfbuzz::hb_buffer_get_glyph_positions;
use harfbuzz::hb_buffer_get_length;
use harfbuzz::hb_face_destroy;
use harfbuzz::hb_feature_t;
use harfbuzz::hb_font_create;
use harfbuzz::hb_font_funcs_create;
use harfbuzz::hb_font_funcs_set_glyph_h_advance_func;
use harfbuzz::hb_font_funcs_set_glyph_h_kerning_func;
use harfbuzz::hb_font_funcs_set_nominal_glyph_func;
use harfbuzz::hb_font_set_funcs;
use harfbuzz::hb_font_set_ppem;
use harfbuzz::hb_font_set_scale;
use harfbuzz::hb_glyph_info_t;
use harfbuzz::hb_glyph_position_t;
use platform::font::FontTable;
use std::{char, cmp, ptr};
use std::os::raw::{c_char, c_int, c_uint, c_void};
use text::glyph::{ByteIndex, GlyphData, GlyphId, GlyphStore};
use text::shaping::ShaperMethods;
use text::util::{fixed_to_float, float_to_fixed, is_bidi_control};
const NO_GLYPH: i32 = -1;
const LIGA: u32 = ot_tag!('l', 'i', 'g', 'a');
pub struct ShapedGlyphData {
count: usize,
glyph_infos: *mut hb_glyph_info_t,
pos_infos: *mut hb_glyph_position_t,
}
pub struct ShapedGlyphEntry {
codepoint: GlyphId,
advance: Au,
offset: Option<Point2D<Au>>,
}
impl ShapedGlyphData {
pub fn new(buffer: *mut hb_buffer_t) -> ShapedGlyphData {
unsafe {
let mut glyph_count = 0;
let glyph_infos = hb_buffer_get_glyph_infos(buffer, &mut glyph_count);
assert!(!glyph_infos.is_null());
let mut pos_count = 0;
let pos_infos = hb_buffer_get_glyph_positions(buffer, &mut pos_count);
assert!(!pos_infos.is_null());
assert_eq!(glyph_count, pos_count);
ShapedGlyphData {
count: glyph_count as usize,
glyph_infos: glyph_infos,
pos_infos: pos_infos,
}
}
}
#[inline(always)]
fn byte_offset_of_glyph(&self, i: usize) -> u32 {
assert!(i < self.count);
unsafe {
let glyph_info_i = self.glyph_infos.offset(i as isize);
(*glyph_info_i).cluster
}
}
pub fn len(&self) -> usize {
self.count
}
/// Returns shaped glyph data for one glyph, and updates the y-position of the pen.
pub fn entry_for_glyph(&self, i: usize, y_pos: &mut Au) -> ShapedGlyphEntry {
assert!(i < self.count);
unsafe {
let glyph_info_i = self.glyph_infos.offset(i as isize);
let pos_info_i = self.pos_infos.offset(i as isize);
let x_offset = Shaper::fixed_to_float((*pos_info_i).x_offset);
let y_offset = Shaper::fixed_to_float((*pos_info_i).y_offset);
let x_advance = Shaper::fixed_to_float((*pos_info_i).x_advance);
let y_advance = Shaper::fixed_to_float((*pos_info_i).y_advance);
let x_offset = Au::from_f64_px(x_offset);
let y_offset = Au::from_f64_px(y_offset);
let x_advance = Au::from_f64_px(x_advance);
let y_advance = Au::from_f64_px(y_advance);
let offset = if x_offset == Au(0) && y_offset == Au(0) && y_advance == Au(0) {
None
} else {
                // adjust the pen position.
if y_advance > Au(0) {
*y_pos = *y_pos - y_advance;
}
Some(Point2D::new(x_offset, *y_pos - y_offset))
};
ShapedGlyphEntry {
codepoint: (*glyph_info_i).codepoint as GlyphId,
advance: x_advance,
offset: offset,
}
}
}
}
#[derive(Debug)]
pub struct Shaper {
hb_face: *mut hb_face_t,
hb_font: *mut hb_font_t,
font: *const Font,
}
impl Drop for Shaper {
fn drop(&mut self) {
unsafe {
assert!(!self.hb_face.is_null());
hb_face_destroy(self.hb_face);
assert!(!self.hb_font.is_null());
hb_font_destroy(self.hb_font);
}
}
}
impl Shaper {
pub fn new(font: *const Font) -> Shaper {
unsafe {
let hb_face: *mut hb_face_t =
hb_face_create_for_tables(Some(font_table_func),
font as *const c_void as *mut c_void,
None);
let hb_font: *mut hb_font_t = hb_font_create(hb_face);
            // Set points-per-em. If zero, performs no hinting in that direction.
let pt_size = (*font).actual_pt_size.to_f64_px();
hb_font_set_ppem(hb_font, pt_size as c_uint, pt_size as c_uint);
// Set scaling. Note that this takes 16.16 fixed point.
hb_font_set_scale(hb_font,
Shaper::float_to_fixed(pt_size) as c_int,
Shaper::float_to_fixed(pt_size) as c_int);
// configure static function callbacks.
hb_font_set_funcs(hb_font, HB_FONT_FUNCS.0, font as *mut Font as *mut c_void, None);
Shaper {
hb_face: hb_face,
hb_font: hb_font,
font: font,
}
}
}
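    // Worked example (illustrative, not from the original source): these
    // helpers use 16.16 fixed point, so float_to_fixed(1.5) == 1.5 * 65536
    // == 98304, and fixed_to_float(98304) == 1.5.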
fn float_to_fixed(f: f64) -> i32 {
float_to_fixed(16, f)
}
fn fixed_to_float(i: hb_position_t) -> f64 {
fixed_to_float(16, i)
}
}
impl ShaperMethods for Shaper {
/// Calculate the layout metrics associated with the given text when painted in a specific
/// font.
fn shape_text(&self, text: &str, options: &ShapingOptions, glyphs: &mut GlyphStore) {
unsafe {
let hb_buffer: *mut hb_buffer_t = hb_buffer_create();
hb_buffer_set_direction(hb_buffer, if options.flags.contains(ShapingFlags::RTL_FLAG) {
HB_DIRECTION_RTL
} else {
HB_DIRECTION_LTR
});
hb_buffer_set_script(hb_buffer, options.script.to_hb_script());
hb_buffer_add_utf8(hb_buffer,
text.as_ptr() as *const c_char,
text.len() as c_int,
0,
text.len() as c_int);
let mut features = Vec::new();
if options.flags.contains(ShapingFlags::IGNORE_LIGATURES_SHAPING_FLAG) {
features.push(hb_feature_t {
tag: LIGA,
value: 0,
start: 0,
end: hb_buffer_get_length(hb_buffer),
})
}
if options.flags.contains(ShapingFlags::DISABLE_KERNING_SHAPING_FLAG) {
features.push(hb_feature_t {
tag: KERN,
value: 0,
start: 0,
end: hb_buffer_get_length(hb_buffer),
})
}
hb_shape(self.hb_font, hb_buffer, features.as_mut_ptr(), features.len() as u32);
self.save_glyph_results(text, options, glyphs, hb_buffer);
hb_buffer_destroy(hb_buffer);
}
}
}
impl Shaper {
fn save_glyph_results(&self,
text: &str,
options: &ShapingOptions,
glyphs: &mut GlyphStore,
buffer: *mut hb_buffer_t) {
let glyph_data = ShapedGlyphData::new(buffer);
let glyph_count = glyph_data.len();
let byte_max = text.len();
debug!("Shaped text[byte count={}], got back {} glyph info records.",
byte_max,
glyph_count);
// make map of what chars have glyphs
let mut byte_to_glyph = vec![NO_GLYPH; byte_max];
debug!("(glyph idx) -> (text byte offset)");
for i in 0..glyph_data.len() {
let loc = glyph_data.byte_offset_of_glyph(i) as usize;
if loc < byte_max {
byte_to_glyph[loc] = i as i32;
} else {
debug!("ERROR: tried to set out of range byte_to_glyph: idx={}, glyph idx={}",
loc,
i);
}
debug!("{} -> {}", i, loc);
}
debug!("text: {:?}", text);
debug!("(char idx): char->(glyph index):");
for (i, ch) in text.char_indices() {
debug!("{}: {:?} --> {}", i, ch, byte_to_glyph[i]);
}
let mut glyph_span = 0..0;
let mut byte_range = 0..0;
let mut y_pos = Au(0);
        // Main loop over each glyph. Each iteration usually processes 1 glyph and 1+ chars.
        // In cases with complex glyph-character associations, 2+ glyphs and 1+ chars can be
        // processed.
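        // Illustrative example: shaping "fi" with a ligature-capable font can
        // yield one glyph covering two bytes, so byte_to_glyph == [0, NO_GLYPH]
        // and the loop below emits a single clump spanning both bytes.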
while glyph_span.start < glyph_count {
debug!("Processing glyph at idx={}", glyph_span.start);
glyph_span.end = glyph_span.start;
byte_range.end = glyph_data.byte_offset_of_glyph(glyph_span.start) as usize;
while byte_range.end < byte_max {
byte_range.end += 1;
// Extend the byte range to include any following byte without its own glyph.
while byte_range.end < byte_max && byte_to_glyph[byte_range.end] == NO_GLYPH {
byte_range.end += 1;
}
// Extend the glyph range to include all glyphs covered by bytes processed so far.
let mut max_glyph_idx = glyph_span.end;
for glyph_idx in &byte_to_glyph[byte_range.clone()] {
if *glyph_idx != NO_GLYPH {
max_glyph_idx = cmp::max(*glyph_idx as usize + 1, max_glyph_idx);
}
}
if max_glyph_idx > glyph_span.end {
glyph_span.end = max_glyph_idx;
debug!("Extended glyph span to {:?}", glyph_span);
}
// if there's just one glyph, then we don't need further checks.
if glyph_span.len() == 1 { break; }
// if no glyphs were found yet, extend the char byte range more.
if glyph_span.len() == 0 { continue; }
// If byte_range now includes all the byte offsets found in glyph_span, then we
// have found a contiguous "cluster" and can stop extending it.
let mut all_glyphs_are_within_cluster: bool = true;
for j in glyph_span.clone() {
let loc = glyph_data.byte_offset_of_glyph(j) as usize;
if !(byte_range.start <= loc && loc < byte_range.end) {
all_glyphs_are_within_cluster = false;
break
}
}
if all_glyphs_are_within_cluster {
break
}
// Otherwise, the bytes we have seen so far correspond to a non-contiguous set of
// glyphs. Keep extending byte_range until we fill in all the holes in the glyph
// span or reach the end of the text.
}
assert!(byte_range.len() > 0);
assert!(glyph_span.len() > 0);
// Now byte_range is the ligature clump formed by the glyphs in glyph_span.
// We will save these glyphs to the glyph store at the index of the first byte.
let byte_idx = ByteIndex(byte_range.start as isize);
if glyph_span.len() == 1 {
// Fast path: 1-to-1 mapping of byte offset to single glyph.
//
// TODO(Issue #214): cluster ranges need to be computed before
// shaping, and then consulted here.
// for now, just pretend that every character is a cluster start.
// (i.e., pretend there are no combining character sequences).
// 1-to-1 mapping of character to glyph also treated as ligature start.
//
// NB: When we acquire the ability to handle ligatures that cross word boundaries,
// we'll need to do something special to handle `word-spacing` properly.
let character = text[byte_range.clone()].chars().next().unwrap();
if is_bidi_control(character) {
// Don't add any glyphs for bidi control chars
} else if character == '\t' {
// Treat tabs in pre-formatted text as a fixed number of spaces.
//
// TODO: Proper tab stops.
const TAB_COLS: i32 = 8;
let (space_glyph_id, space_advance) = glyph_space_advance(self.font);
let advance = Au::from_f64_px(space_advance) * TAB_COLS;
let data = GlyphData::new(space_glyph_id,
advance,
Default::default(),
true,
true);
glyphs.add_glyph_for_byte_index(byte_idx, character, &data);
} else {
let shape = glyph_data.entry_for_glyph(glyph_span.start, &mut y_pos);
let advance = self.advance_for_shaped_glyph(shape.advance, character, options);
let data = GlyphData::new(shape.codepoint,
advance,
shape.offset,
true,
true);
glyphs.add_glyph_for_byte_index(byte_idx, character, &data);
}
} else {
// collect all glyphs to be assigned to the first character.
let mut datas = vec!();
for glyph_i in glyph_span.clone() {
let shape = glyph_data.entry_for_glyph(glyph_i, &mut y_pos);
datas.push(GlyphData::new(shape.codepoint,
shape.advance,
shape.offset,
true, // treat as cluster start
glyph_i > glyph_span.start));
// all but first are ligature continuations
}
// now add the detailed glyph entry.
glyphs.add_glyphs_for_byte_index(byte_idx, &datas);
}
glyph_span.start = glyph_span.end;
byte_range.start = byte_range.end;
}
// this must be called after adding all glyph data; it sorts the
// lookup table for finding detailed glyphs by associated char index.
glyphs.finalize_changes();
}
fn advance_for_shaped_glyph(&self, mut advance: Au, character: char, options: &ShapingOptions)
-> Au {
if let Some(letter_spacing) = options.letter_spacing {
advance = advance + letter_spacing;
};
// CSS 2.1 § 16.4 states that "word spacing affects each space (U+0020) and non-breaking
// space (U+00A0) left in the text after the white space processing rules have been
// applied. The effect of the property on other word-separator characters is undefined."
// We elect to only space the two required code points.
if character == ' ' || character == '\u{a0}' {
// https://drafts.csswg.org/css-text-3/#word-spacing-property
let (length, percent) = options.word_spacing;
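            // Worked example (illustrative, ignoring Au's internal fixed-point
            // units): advance = 100, length = 2, percent = 0.05
            // -> 100 + 2 + (100 * 0.05) = 107.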
advance = (advance + length) + Au::new((advance.0 as f32 * percent.into_inner()) as i32);
}
advance
}
}
/// Callbacks from Harfbuzz when font map and glyph advance lookup needed.
struct FontFuncs(*mut hb_font_funcs_t);
unsafe impl Sync for FontFuncs {}
lazy_static! {
static ref HB_FONT_FUNCS: FontFuncs = unsafe {
let hb_funcs = hb_font_funcs_create();
hb_font_funcs_set_nominal_glyph_func(hb_funcs, Some(glyph_func), ptr::null_mut(), None);
hb_font_funcs_set_glyph_h_advance_func(
hb_funcs, Some(glyph_h_advance_func), ptr::null_mut(), None);
hb_font_funcs_set_glyph_h_kerning_func(
hb_funcs, Some(glyph_h_kerning_func), ptr::null_mut(), None);
FontFuncs(hb_funcs)
};
}
extern fn glyph_func(_: *mut hb_font_t,
font_data: *mut c_void,
unicode: hb_codepoint_t,
glyph: *mut hb_codepoint_t,
_: *mut c_void)
-> hb_bool_t {
let font: *const Font = font_data as *const Font;
assert!(!font.is_null());
unsafe {
match (*font).glyph_index(char::from_u32(unicode).unwrap()) {
Some(g) => {
*glyph = g as hb_codepoint_t;
true as hb_bool_t
}
None => false as hb_bool_t
}
}
}
extern fn glyph_h_advance_func(_: *mut hb_font_t,
font_data: *mut c_void,
glyph: hb_codepoint_t,
_: *mut c_void)
-> hb_position_t {
let font: *mut Font = font_data as *mut Font;
assert!(!font.is_null());
unsafe {
let advance = (*font).glyph_h_advance(glyph as GlyphId);
Shaper::float_to_fixed(advance)
}
}
fn glyph_space_advance(font: *const Font) -> (hb_codepoint_t, f64) {
let space_unicode = ' ';
let space_glyph: hb_codepoint_t;
match unsafe { (*font).glyph_index(space_unicode) } {
Some(g) => {
space_glyph = g as hb_codepoint_t;
}
None => panic!("No space info")
}
let space_advance = unsafe { (*font).glyph_h_advance(space_glyph as GlyphId) };
(space_glyph, space_advance)
}
extern fn glyph_h_kerning_func(_: *mut hb_font_t,
font_data: *mut c_void,
first_glyph: hb_codepoint_t,
second_glyph: hb_codepoint_t,
_: *mut c_void)
-> hb_position_t {
let font: *mut Font = font_data as *mut Font;
assert!(!font.is_null());
unsafe {
let advance = (*font).glyph_h_kerning(first_glyph as GlyphId, second_glyph as GlyphId);
Shaper::float_to_fixed(advance)
}
}
// Callback to get a font table out of a font.
extern fn font_table_func(_: *mut hb_face_t,
tag: hb_tag_t,
user_data: *mut c_void)
-> *mut hb_blob_t {
unsafe {<|fim▁hole|> // NB: These asserts have security implications.
let font = user_data as *const Font;
assert!(!font.is_null());
// TODO(Issue #197): reuse font table data, which will change the unsound trickery here.
match (*font).table_for_tag(tag as FontTableTag) {
None => ptr::null_mut(),
Some(font_table) => {
// `Box::into_raw` intentionally leaks the FontTable so we don't destroy the buffer
// while HarfBuzz is using it. When HarfBuzz is done with the buffer, it will pass
// this raw pointer back to `destroy_blob_func` which will deallocate the Box.
let font_table_ptr = Box::into_raw(Box::new(font_table));
let buf = (*font_table_ptr).buffer();
// HarfBuzz calls `destroy_blob_func` when the buffer is no longer needed.
let blob = hb_blob_create(buf.as_ptr() as *const c_char,
buf.len() as c_uint,
HB_MEMORY_MODE_READONLY,
font_table_ptr as *mut c_void,
Some(destroy_blob_func));
assert!(!blob.is_null());
blob
}
}
}
}
extern fn destroy_blob_func(font_table_ptr: *mut c_void) {
unsafe {
drop(Box::from_raw(font_table_ptr as *mut FontTable));
}
}<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># coding=utf-8
# Copyright (C) Duncan Macleod (2015)
#
# This file is part of the GW DetChar python package.
#
# GW DetChar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GW DetChar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GW DetChar. If not, see <http://www.gnu.org/licenses/>.
"""Methods and utilties for performing Omega pipline scans
See Chatterji 2005 [thesis] for details on the Q-pipeline.
"""
<|fim▁hole|>__credits__ = 'Alex Urban <[email protected]>'
# -- imports ------------------------------------------------------------------
# import pyomega utils
from .core import *<|fim▁end|> | __author__ = 'Duncan Macleod <[email protected]>' |
<|file_name|>name.rs<|end_file_name|><|fim▁begin|>//! Types for the *m.room.name* event.
use ruma_events_macros::StateEventContent;
use serde::{Deserialize, Serialize};
use crate::{InvalidInput, StateEvent};
/// The room name is a human-friendly string designed to be displayed to the end-user.
pub type NameEvent = StateEvent<NameEventContent>;
/// The payload for `NameEvent`.
#[derive(Clone, Debug, Deserialize, Serialize, StateEventContent)]
#[ruma_event(type = "m.room.name")]
pub struct NameEventContent {
/// The name of the room. This MUST NOT exceed 255 bytes.
#[serde(default, deserialize_with = "room_name")]
pub(crate) name: Option<String>,
}
impl NameEventContent {
/// Create a new `NameEventContent` with the given name.
///
/// # Errors
///
/// `InvalidInput` will be returned if the name is more than 255 bytes.
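    ///
    /// # Example
    ///
    /// ```ignore
    /// // Illustrative sketch (not from the original source): a short name is
    /// // accepted, while a 256-byte name is rejected.
    /// assert!(NameEventContent::new("Lobby".to_string()).is_ok());
    /// assert!(NameEventContent::new("X".repeat(256)).is_err());
    /// ```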
pub fn new(name: String) -> Result<Self, InvalidInput> {
match name.len() {
0 => Ok(Self { name: None }),
1..=255 => Ok(Self { name: Some(name) }),
_ => Err(InvalidInput(
"a room name cannot be more than 255 bytes".to_string(),
)),
}
}
/// The name of the room, if any.
pub fn name(&self) -> Option<&str> {
self.name.as_deref()
}
}
fn room_name<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
where
D: serde::de::Deserializer<'de>,
{
use serde::de::Error;
    // this handles the absent-field, explicit-null, and empty-string cases
match Option::<String>::deserialize(deserializer)? {
Some(name) => match name.len() {
0 => Ok(None),
1..=255 => Ok(Some(name)),
_ => Err(D::Error::custom(
"a room name cannot be more than 255 bytes",
)),
},
None => Ok(None),
}
}
#[cfg(test)]
mod tests {
use std::{
convert::TryFrom,
iter::FromIterator,
time::{Duration, UNIX_EPOCH},
};
use js_int::Int;
use matches::assert_matches;
use ruma_identifiers::{EventId, RoomId, UserId};
use serde_json::{from_value as from_json_value, json, to_value as to_json_value};
use crate::{EventJson, StateEvent, UnsignedData};
use super::NameEventContent;
#[test]
fn serialization_with_optional_fields_as_none() {
let name_event = StateEvent {
content: NameEventContent {
name: Some("The room name".to_string()),
},
event_id: EventId::try_from("$h29iv0s8:example.com").unwrap(),
origin_server_ts: UNIX_EPOCH + Duration::from_millis(1),
prev_content: None,
room_id: RoomId::try_from("!n8f893n9:example.com").unwrap(),
sender: UserId::try_from("@carl:example.com").unwrap(),
state_key: "".to_string(),
unsigned: UnsignedData::default(),
};
let actual = to_json_value(&name_event).unwrap();
let expected = json!({
"content": {
"name": "The room name"
},
"event_id": "$h29iv0s8:example.com",
"origin_server_ts": 1,
"room_id": "!n8f893n9:example.com",
"sender": "@carl:example.com",
"state_key": "",
"type": "m.room.name"
});
assert_eq!(actual, expected);
}
#[test]
fn serialization_with_all_fields() {
let name_event = StateEvent {
content: NameEventContent {
name: Some("The room name".to_string()),
},
event_id: EventId::try_from("$h29iv0s8:example.com").unwrap(),
origin_server_ts: UNIX_EPOCH + Duration::from_millis(1),
prev_content: Some(NameEventContent {
name: Some("The old name".to_string()),
}),
room_id: RoomId::try_from("!n8f893n9:example.com").unwrap(),
sender: UserId::try_from("@carl:example.com").unwrap(),
state_key: "".to_string(),
unsigned: UnsignedData {
age: Some(Int::from(100)),
..UnsignedData::default()
},
};
let actual = to_json_value(&name_event).unwrap();
let expected = json!({
"content": {
"name": "The room name"
},
"event_id": "$h29iv0s8:example.com",
"origin_server_ts": 1,
"prev_content": { "name": "The old name" },
"room_id": "!n8f893n9:example.com",
"sender": "@carl:example.com",
"state_key": "",
"type": "m.room.name",
"unsigned": {
"age": 100
}
});
assert_eq!(actual, expected);
}
#[test]
fn absent_field_as_none() {
let json_data = json!({
"content": {},
"event_id": "$h29iv0s8:example.com",
"origin_server_ts": 1,
"room_id": "!n8f893n9:example.com",
"sender": "@carl:example.com",
"state_key": "",
"type": "m.room.name"
});
assert_eq!(
from_json_value::<EventJson<StateEvent<NameEventContent>>>(json_data)
.unwrap()
.deserialize()
.unwrap()
.content
.name,
None<|fim▁hole|> }
#[test]
fn name_fails_validation_when_too_long() {
// "XXXX .." 256 times
let long_string: String = String::from_iter(std::iter::repeat('X').take(256));
assert_eq!(long_string.len(), 256);
let long_content_json = json!({ "name": &long_string });
let from_raw: EventJson<NameEventContent> = from_json_value(long_content_json).unwrap();
let result = from_raw.deserialize();
assert!(result.is_err(), "Result should be invalid: {:?}", result);
}
#[test]
fn json_with_empty_name_creates_content_as_none() {
let long_content_json = json!({ "name": "" });
let from_raw: EventJson<NameEventContent> = from_json_value(long_content_json).unwrap();
assert_matches!(
from_raw.deserialize().unwrap(),
NameEventContent { name: None }
);
}
#[test]
fn new_with_empty_name_creates_content_as_none() {
assert_matches!(
NameEventContent::new(String::new()).unwrap(),
NameEventContent { name: None }
);
}
#[test]
fn null_field_as_none() {
let json_data = json!({
"content": {
"name": null
},
"event_id": "$h29iv0s8:example.com",
"origin_server_ts": 1,
"room_id": "!n8f893n9:example.com",
"sender": "@carl:example.com",
"state_key": "",
"type": "m.room.name"
});
assert_eq!(
from_json_value::<EventJson<StateEvent<NameEventContent>>>(json_data)
.unwrap()
.deserialize()
.unwrap()
.content
.name,
None
);
}
#[test]
fn empty_string_as_none() {
let json_data = json!({
"content": {
"name": ""
},
"event_id": "$h29iv0s8:example.com",
"origin_server_ts": 1,
"room_id": "!n8f893n9:example.com",
"sender": "@carl:example.com",
"state_key": "",
"type": "m.room.name"
});
assert_eq!(
from_json_value::<EventJson<StateEvent<NameEventContent>>>(json_data)
.unwrap()
.deserialize()
.unwrap()
.content
.name,
None
);
}
#[test]
fn nonempty_field_as_some() {
let name = Some("The room name".to_string());
let json_data = json!({
"content": {
"name": "The room name"
},
"event_id": "$h29iv0s8:example.com",
"origin_server_ts": 1,
"room_id": "!n8f893n9:example.com",
"sender": "@carl:example.com",
"state_key": "",
"type": "m.room.name"
});
assert_eq!(
from_json_value::<EventJson<StateEvent<NameEventContent>>>(json_data)
.unwrap()
.deserialize()
.unwrap()
.content
.name,
name
);
}
}<|fim▁end|> | ); |
<|file_name|>app.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
# Copyright © 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import keystonemiddleware.audit as audit_middleware
from oslo_config import cfg
import oslo_middleware.cors as cors_middleware
import pecan
from ironic.api import config
from ironic.api.controllers import base
from ironic.api import hooks
from ironic.api import middleware
from ironic.api.middleware import auth_token
from ironic.common import exception
from ironic.conf import CONF
class IronicCORS(cors_middleware.CORS):
"""Ironic-specific CORS class
We're adding the Ironic-specific version headers to the list of simple
headers in order that a request bearing those headers might be accepted by
the Ironic REST API.
"""
simple_headers = cors_middleware.CORS.simple_headers + [
'X-Auth-Token',
base.Version.max_string,
base.Version.min_string,
base.Version.string
]
def get_pecan_config():
# Set up the pecan configuration
filename = config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(pecan_config=None, extra_hooks=None):
app_hooks = [hooks.ConfigHook(),
hooks.DBHook(),
hooks.ContextHook(pecan_config.app.acl_public_routes),
hooks.RPCHook(),
hooks.NoExceptionTracebackHook(),
hooks.PublicUrlHook()]
if extra_hooks:
app_hooks.extend(extra_hooks)
if not pecan_config:
pecan_config = get_pecan_config()
pecan.configuration.set_config(dict(pecan_config), overwrite=True)
app = pecan.make_app(
pecan_config.app.root,
debug=CONF.pecan_debug,
static_root=pecan_config.app.static_root if CONF.pecan_debug else None,
force_canonical=getattr(pecan_config.app, 'force_canonical', True),
hooks=app_hooks,
wrap_app=middleware.ParsableErrorMiddleware,
)
if CONF.audit.enabled:
try:
app = audit_middleware.AuditMiddleware(
app,
audit_map_file=CONF.audit.audit_map_file,
ignore_req_list=CONF.audit.ignore_req_list
)
except (EnvironmentError, OSError,
audit_middleware.PycadfAuditApiConfigError) as e:
raise exception.InputFileError(
file_name=CONF.audit.audit_map_file,
reason=e
)
if CONF.auth_strategy == "keystone":
app = auth_token.AuthTokenMiddleware(
app, dict(cfg.CONF),
public_api_routes=pecan_config.app.acl_public_routes)
# Create a CORS wrapper, and attach ironic-specific defaults that must be
# included in all CORS responses.
app = IronicCORS(app, CONF)
cors_middleware.set_defaults(
allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'],
expose_headers=[base.Version.max_string, base.Version.min_string,
base.Version.string]<|fim▁hole|>
class VersionSelectorApplication(object):
def __init__(self):
pc = get_pecan_config()
self.v1 = setup_app(pecan_config=pc)
def __call__(self, environ, start_response):
return self.v1(environ, start_response)<|fim▁end|> | )
return app
|
<|file_name|>oc_adm_registry.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/registry -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_adm_registry
short_description: Module to manage openshift registry
description:
- Manage openshift registry programmatically.
options:
state:
description:
- The desired action when managing openshift registry
- present - update or create the registry
- absent - tear down the registry service and deploymentconfig
- list - returns the current representiation of a registry
required: false
default: False
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- The name of the registry
required: false
default: None
aliases: []
namespace:
description:
- The selector when filtering on node labels
required: false
default: None
aliases: []
images:
description:
- The image to base this registry on - ${component} will be replaced with --type
    required: false
    default: 'openshift3/ose-${component}:${version}'
aliases: []
latest_images:
description:
- If true, attempt to use the latest image for the registry instead of the latest release.
required: false
default: False
aliases: []
labels:
description:
- A set of labels to uniquely identify the registry and its components.
required: false
default: None
aliases: []
enforce_quota:
description:
- If set, the registry will refuse to write blobs if they exceed quota limits
required: False
default: False
aliases: []
mount_host:
description:
- If set, the registry volume will be created as a host-mount at this path.
required: False
default: False
aliases: []
ports:
description:
- A comma delimited list of ports or port pairs to expose on the registry pod. The default is set for 5000.
required: False
default: [5000]
aliases: []
replicas:
description:
- The replication factor of the registry; commonly 2 when high availability is desired.
required: False
default: 1
aliases: []
selector:
description:
- Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes.
required: False
default: None
aliases: []
service_account:
description:
- Name of the service account to use to run the registry pod.
required: False
default: 'registry'
aliases: []
tls_certificate:
description:
- An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS
required: false
default: None
aliases: []
tls_key:
description:
- An optional path to a PEM encoded private key for serving over TLS
required: false
default: None
aliases: []
volume_mounts:
description:
- The volume mounts for the registry.
required: false
default: None
aliases: []
daemonset:
description:
- Use a daemonset instead of a deployment config.
required: false
default: False
aliases: []
edits:
description:
- A list of modifications to make on the deploymentconfig
required: false
default: None
aliases: []
env_vars:
description:
- A dictionary of modifications to make on the deploymentconfig. e.g. FOO: BAR
required: false
default: None
aliases: []
force:
description:
- Force a registry update.
required: false
default: False
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create a secure registry
oc_adm_registry:
name: docker-registry
service_account: registry
replicas: 2
namespace: default
selector: type=infra
images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}"
env_vars:
REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml
REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
REGISTRY_HTTP_SECRET: supersecret
volume_mounts:
- path: /etc/secrets
name: dockercerts
type: secret
secret_name: registry-secret
- path: /etc/registryconfig
name: dockersecrets
type: secret
secret_name: docker-registry-config
edits:
- key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.strategy.rollingParams
value:
intervalSeconds: 1
maxSurge: 50%
maxUnavailable: 50%
timeoutSeconds: 600
updatePeriodSeconds: 1
action: put
- key: spec.template.spec.containers[0].resources.limits.memory
value: 2G
action: update
- key: spec.template.spec.containers[0].resources.requests.memory
value: 1G
action: update
register: registryout
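
# A minimal sketch (untested; values are placeholders): tearing the registry
# back down uses the same module with state: absent, per the options above.
- name: remove the docker registry
  oc_adm_registry:
    state: absent
    name: docker-registry
    namespace: default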
'''
# -*- -*- -*- End included fragment: doc/registry -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
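    # Illustrative note: with the default sep '.', the remaining separators
    # ('#', '|', ':') are joined and substituted into the {} placeholder above,
    # so those characters stay legal inside key segments while '.' splits them.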
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
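        # Illustrative example: with the default separator, parse_key('a.b[1]')
        # returns [('', 'a'), ('', 'b'), ('1', '')] - dict keys land in the
        # second capture group, list indexes in the first.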
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary at the location given by key notation
            a.b.c, creating intermediate dictionaries as needed.
            d = {'a': {'b': 'c'}}
            key = a#b, item = 'x'  ->  d == {'a': {'b': 'x'}}
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b<|fim▁hole|> if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
        # Special case: '' would turn into None after YAML-loading it, so skip it
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
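        # Illustrative edits list (shape inferred from the handling below):
        #   [{'key': 'spec.replicas', 'value': 2, 'action': 'update'},
        #    {'key': 'metadata.labels.app', 'value': 'registry'}]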
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                    'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
            # we encapsulate it in a list and process it
            # Key, Value passed to the module: converted to an edits list
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
    return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
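        # Illustrative example: params={'NAME': 'registry'} produces
        # ['process', 'mytemplate', '-v', 'NAME=registry'] before execution.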
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
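    # Illustrative usage of openshift_cmd (hypothetical values): a call such as
    #   self.openshift_cmd(['get', 'pods', '-o', 'json'], output=True)
    # shells out to "oc -n <namespace> get pods -o json" and returns a dict
    # with 'returncode', 'cmd' and, on success, the parsed JSON under 'results'.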
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
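    # Illustrative example (hypothetical values): check_def_equal recurses
    # through dicts and lists while ignoring 'metadata'/'status', so
    #   Utils.check_def_equal({'spec': {'replicas': 1}},
    #                         {'spec': {'replicas': 1}, 'status': {'x': 1}})
    # returns True even though the API result carries extra generated keys.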
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
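    # Illustrative example (hypothetical values): with config_options such as
    #   {'labels':   {'value': {'app': 'registry'}, 'include': True},
    #    'replicas': {'value': 3, 'include': True}}
    # stringify(ascommalist='labels') yields
    #   ['--labels=app=registry', '--replicas=3']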
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
''' Class to model an openshift DeploymentConfig'''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
name: default_dc
namespace: default
spec:
replicas: 0
selector:
default_dc: default_dc
strategy:
resources: {}
rollingParams:
intervalSeconds: 1
maxSurge: 0
maxUnavailable: 25%
timeoutSeconds: 600
updatePercent: -25
updatePeriodSeconds: 1
type: Rolling
template:
metadata:
spec:
containers:
- env:
- name: default
value: default
image: default
imagePullPolicy: IfNotPresent
name: default_dc
ports:
- containerPort: 8000
hostPort: 8000
protocol: TCP
name: default_port
resources: {}
terminationMessagePath: /dev/termination-log
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
type: compute
restartPolicy: Always
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
triggers:
- type: ConfigChange
'''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
container_path = "spec.template.spec.containers"
volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
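    # Illustrative example: the dotted strings above are Yedit lookup paths,
    # e.g. DeploymentConfig(content).get(DeploymentConfig.env_path) returns
    # the env list of the first container in the pod template.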
def __init__(self, content=None):
''' Constructor for deploymentconfig '''
if not content:
content = DeploymentConfig.default_deployment_config
super(DeploymentConfig, self).__init__(content=content)
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
env = self.get_env_vars()
if env:
env.append({'name': key, 'value': value})
rval = True
else:
result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
rval = result[0]
return rval
def exists_env_value(self, key, value):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key and result['value'] == value:
return True
return False
def exists_env_key(self, key):
        ''' return whether a key exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key:
return True
return False
def get_env_var(self, key):
        '''return an environment variable by name '''
results = self.get(DeploymentConfig.env_path) or []
if not results:
return None
for env_var in results:
if env_var['name'] == key:
return env_var
return None
def get_env_vars(self):
        '''return the list of environment variables '''
return self.get(DeploymentConfig.env_path) or []
    def delete_env_var(self, keys):
        '''delete a list of keys '''
        if not isinstance(keys, list):
            keys = [keys]
        env_vars_array = self.get_env_vars()
        modified = False
        for key in keys:
            # reset idx for each key so a hit from a previous key is not reused
            idx = None
            for env_idx, env_var in enumerate(env_vars_array):
                if env_var['name'] == key:
                    idx = env_idx
                    break
            # compare against None explicitly; index 0 is a valid match
            if idx is not None:
                modified = True
                del env_vars_array[idx]
        return modified
def update_env_var(self, key, value):
'''place an env in the env var list'''
env_vars_array = self.get_env_vars()
idx = None
for env_idx, env_var in enumerate(env_vars_array):
if env_var['name'] == key:
idx = env_idx
break
        if idx is not None:
env_vars_array[idx]['value'] = value
else:
self.add_env_value(key, value)
return True
def exists_volume_mount(self, volume_mount):
''' return whether a volume mount exists '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts:
return False
volume_mount_found = False
for exist_volume_mount in exist_volume_mounts:
if exist_volume_mount['name'] == volume_mount['name']:
volume_mount_found = True
break
return volume_mount_found
def exists_volume(self, volume):
''' return whether a volume exists '''
exist_volumes = self.get_volumes()
volume_found = False
for exist_volume in exist_volumes:
if exist_volume['name'] == volume['name']:
volume_found = True
break
return volume_found
def find_volume_by_name(self, volume, mounts=False):
''' return the index of a volume '''
volumes = []
if mounts:
volumes = self.get_volume_mounts()
else:
volumes = self.get_volumes()
for exist_volume in volumes:
if exist_volume['name'] == volume['name']:
return exist_volume
return None
def get_replicas(self):
''' return replicas setting '''
return self.get(DeploymentConfig.replicas_path)
def get_volume_mounts(self):
'''return volume mount information '''
return self.get_volumes(mounts=True)
def get_volumes(self, mounts=False):
'''return volume mount information '''
if mounts:
return self.get(DeploymentConfig.volume_mounts_path) or []
return self.get(DeploymentConfig.volumes_path) or []
def delete_volume_by_name(self, volume):
'''delete a volume '''
modified = False
exist_volume_mounts = self.get_volume_mounts()
exist_volumes = self.get_volumes()
del_idx = None
for idx, exist_volume in enumerate(exist_volumes):
if 'name' in exist_volume and exist_volume['name'] == volume['name']:
del_idx = idx
break
        if del_idx is not None:
del exist_volumes[del_idx]
modified = True
del_idx = None
for idx, exist_volume_mount in enumerate(exist_volume_mounts):
if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
del_idx = idx
break
        if del_idx is not None:
            del exist_volume_mounts[del_idx]
modified = True
return modified
def add_volume_mount(self, volume_mount):
''' add a volume or volume mount to the proper location '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts and volume_mount:
self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
else:
exist_volume_mounts.append(volume_mount)
def add_volume(self, volume):
''' add a volume or volume mount to the proper location '''
exist_volumes = self.get_volumes()
if not volume:
return
if not exist_volumes:
self.put(DeploymentConfig.volumes_path, [volume])
else:
exist_volumes.append(volume)
def update_replicas(self, replicas):
''' update replicas value '''
self.put(DeploymentConfig.replicas_path, replicas)
def update_volume(self, volume):
        '''add or update a volume in the volumes list'''
exist_volumes = self.get_volumes()
if not volume:
return False
# update the volume
update_idx = None
for idx, exist_vol in enumerate(exist_volumes):
if exist_vol['name'] == volume['name']:
update_idx = idx
break
        if update_idx is not None:
exist_volumes[update_idx] = volume
else:
self.add_volume(volume)
return True
def update_volume_mount(self, volume_mount):
        '''add or update a volume mount'''
modified = False
exist_volume_mounts = self.get_volume_mounts()
if not volume_mount:
return False
# update the volume mount
for exist_vol_mount in exist_volume_mounts:
if exist_vol_mount['name'] == volume_mount['name']:
if 'mountPath' in exist_vol_mount and \
str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
exist_vol_mount['mountPath'] = volume_mount['mountPath']
modified = True
break
if not modified:
self.add_volume_mount(volume_mount)
modified = True
return modified
def needs_update_volume(self, volume, volume_mount):
''' verify a volume update is needed '''
exist_volume = self.find_volume_by_name(volume)
exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
results = []
results.append(exist_volume['name'] == volume['name'])
if 'secret' in volume:
results.append('secret' in exist_volume)
results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
results.append(exist_volume_mount['name'] == volume_mount['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'emptyDir' in volume:
results.append(exist_volume_mount['name'] == volume['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'persistentVolumeClaim' in volume:
pvc = 'persistentVolumeClaim'
results.append(pvc in exist_volume)
if results[-1]:
results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
if 'claimSize' in volume[pvc]:
results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
        elif 'hostPath' in volume:
results.append('hostPath' in exist_volume)
results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
return not all(results)
def needs_update_replicas(self, replicas):
''' verify whether a replica update is needed '''
current_reps = self.get(DeploymentConfig.replicas_path)
        return current_reps != replicas
# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecretConfig(object):
''' Handle secret options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig,
secrets=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
self.secrets = secrets
self.data = {}
self.create_dict()
def create_dict(self):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['data'] = {}
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
''' Class to wrap the oc command line tools '''
secret_path = "data"
kind = 'secret'
def __init__(self, content):
'''secret constructor'''
super(Secret, self).__init__(content=content)
self._secrets = None
@property
def secrets(self):
'''secret property getter'''
if self._secrets is None:
self._secrets = self.get_secrets()
return self._secrets
    @secrets.setter
    def secrets(self, data):
        '''secret property setter'''
        self._secrets = data
def get_secrets(self):
''' returns all of the defined secrets '''
return self.get(Secret.secret_path) or {}
def add_secret(self, key, value):
''' add a secret '''
if self.secrets:
self.secrets[key] = value
else:
self.put(Secret.secret_path, {key: value})
return True
def delete_secret(self, key):
''' delete secret'''
try:
del self.secrets[key]
except KeyError as _:
return False
return True
def find_secret(self, key):
''' find secret'''
rval = None
try:
rval = self.secrets[key]
except KeyError as _:
return None
return {'key': key, 'value': rval}
def update_secret(self, key, value):
''' update a secret'''
if key in self.secrets:
self.secrets[key] = value
else:
self.add_secret(key, value)
return True
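    # Illustrative example (hypothetical values): given content
    #   {'data': {'tls.crt': '<base64>'}}
    # Secret(content).find_secret('tls.crt') returns
    #   {'key': 'tls.crt', 'value': '<base64>'}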
# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class ServiceConfig(object):
''' Handle service options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
ports,
selector=None,
labels=None,
cluster_ip=None,
portal_ip=None,
session_affinity=None,
service_type=None,
external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
self.ports = ports
self.selector = selector
self.labels = labels
self.cluster_ip = cluster_ip
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
self.external_ips = external_ips
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiates a service dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Service'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
self.data['metadata']['labels'] = {}
for lab, lab_value in self.labels.items():
self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
self.data['spec']['ports'] = self.ports
else:
self.data['spec']['ports'] = []
if self.selector:
self.data['spec']['selector'] = self.selector
self.data['spec']['sessionAffinity'] = self.session_affinity or 'None'
if self.cluster_ip:
self.data['spec']['clusterIP'] = self.cluster_ip
if self.portal_ip:
self.data['spec']['portalIP'] = self.portal_ip
if self.service_type:
self.data['spec']['type'] = self.service_type
if self.external_ips:
self.data['spec']['externalIPs'] = self.external_ips
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
super(Service, self).__init__(content=content)
def get_ports(self):
''' get a list of ports '''
return self.get(Service.port_path) or []
def get_selector(self):
''' get the service selector'''
return self.get(Service.selector_path) or {}
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get_ports()
if not ports:
self.put(Service.port_path, inc_ports)
else:
ports.extend(inc_ports)
return True
def find_ports(self, inc_port):
''' find a specific port '''
for port in self.get_ports():
if port['port'] == inc_port['port']:
return port
return None
def delete_ports(self, inc_ports):
''' remove a port from a service '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get(Service.port_path) or []
if not ports:
return True
removed = False
for inc_port in inc_ports:
port = self.find_ports(inc_port)
if port:
ports.remove(port)
removed = True
return removed
def add_cluster_ip(self, sip):
'''add cluster ip'''
self.put(Service.cluster_ip, sip)
def add_portal_ip(self, pip):
        '''add portal ip'''
self.put(Service.portal_ip, pip)
def get_external_ips(self):
''' get a list of external_ips '''
return self.get(Service.external_ips) or []
def add_external_ips(self, inc_external_ips):
''' add an external_ip to the external_ips list '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get_external_ips()
if not external_ips:
self.put(Service.external_ips, inc_external_ips)
else:
external_ips.extend(inc_external_ips)
return True
def find_external_ips(self, inc_external_ip):
''' find a specific external IP '''
val = None
try:
idx = self.get_external_ips().index(inc_external_ip)
val = self.get_external_ips()[idx]
except ValueError:
pass
return val
def delete_external_ips(self, inc_external_ips):
''' remove an external IP from a service '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get(Service.external_ips) or []
if not external_ips:
return True
removed = False
for inc_external_ip in inc_external_ips:
external_ip = self.find_external_ips(inc_external_ip)
if external_ip:
external_ips.remove(external_ip)
removed = True
return removed
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
class Volume(object):
''' Class to represent an openshift volume object'''
volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
"dc": "spec.template.spec.containers[0].volumeMounts",
"rc": "spec.template.spec.containers[0].volumeMounts",
}
volumes_path = {"pod": "spec.volumes",
"dc": "spec.template.spec.volumes",
"rc": "spec.template.spec.volumes",
}
@staticmethod
def create_volume_structure(volume_info):
''' return a properly structured volume '''
volume_mount = None
volume = {'name': volume_info['name']}
volume_type = volume_info['type'].lower()
if volume_type == 'secret':
volume['secret'] = {}
volume[volume_info['type']] = {'secretName': volume_info['secret_name']}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'emptydir':
volume['emptyDir'] = {}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
volume['persistentVolumeClaim'] = {}
volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
elif volume_type == 'hostpath':
volume['hostPath'] = {}
volume['hostPath']['path'] = volume_info['path']
elif volume_type == 'configmap':
volume['configMap'] = {}
volume['configMap']['name'] = volume_info['configmap_name']
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
return (volume, volume_mount)
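    # Illustrative example (hypothetical values): a volume description such as
    #   {'name': 'registry-certs', 'type': 'secret',
    #    'secret_name': 'registry-certificates', 'path': '/etc/secrets'}
    # yields the pair
    #   ({'name': 'registry-certs', 'secret': {'secretName': 'registry-certificates'}},
    #    {'mountPath': '/etc/secrets', 'name': 'registry-certs'})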
# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCVersion(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
debug):
''' Constructor for OCVersion '''
super(OCVersion, self).__init__(None, config)
self.debug = debug
def get(self):
'''get and return version information '''
results = {}
version_results = self._version()
if version_results['returncode'] == 0:
filtered_vers = Utils.filter_versions(version_results['results'])
custom_vers = Utils.add_custom_versions(filtered_vers)
results['returncode'] = version_results['returncode']
results.update(filtered_vers)
results.update(custom_vers)
return results
raise OpenShiftCLIError('Problem detecting openshift version.')
@staticmethod
def run_ansible(params):
'''run the idempotent ansible code'''
oc_version = OCVersion(params['kubeconfig'], params['debug'])
if params['state'] == 'list':
#pylint: disable=protected-access
result = oc_version.get()
return {'state': params['state'],
'results': result,
'changed': False}
# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_adm_registry.py -*- -*- -*-
class RegistryException(Exception):
''' Registry Exception Class '''
pass
class RegistryConfig(OpenShiftCLIConfig):
''' RegistryConfig is a DTO for the registry. '''
def __init__(self, rname, namespace, kubeconfig, registry_options):
super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options)
class Registry(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
volume_mount_path = 'spec.template.spec.containers[0].volumeMounts'
volume_path = 'spec.template.spec.volumes'
env_path = 'spec.template.spec.containers[0].env'
def __init__(self,
registry_config,
verbose=False):
''' Constructor for Registry
        a registry consists of two or more parts, at a minimum:
           - dc/docker-registry
           - svc/docker-registry
Parameters:
:registry_config:
:verbose:
'''
super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose)
self.version = OCVersion(registry_config.kubeconfig, verbose)
self.svc_ip = None
self.portal_ip = None
self.config = registry_config
self.verbose = verbose
self.registry_parts = [{'kind': 'dc', 'name': self.config.name},
{'kind': 'svc', 'name': self.config.name},
]
self.__prepared_registry = None
self.volume_mounts = []
self.volumes = []
if self.config.config_options['volume_mounts']['value']:
for volume in self.config.config_options['volume_mounts']['value']:
volume_info = {'secret_name': volume.get('secret_name', None),
'name': volume.get('name', None),
'type': volume.get('type', None),
'path': volume.get('path', None),
'claimName': volume.get('claim_name', None),
'claimSize': volume.get('claim_size', None),
}
vol, vol_mount = Volume.create_volume_structure(volume_info)
self.volumes.append(vol)
self.volume_mounts.append(vol_mount)
self.dconfig = None
self.svc = None
@property
def deploymentconfig(self):
''' deploymentconfig property '''
return self.dconfig
@deploymentconfig.setter
def deploymentconfig(self, config):
''' setter for deploymentconfig property '''
self.dconfig = config
@property
def service(self):
''' service property '''
return self.svc
@service.setter
def service(self, config):
''' setter for service property '''
self.svc = config
@property
def prepared_registry(self):
''' prepared_registry property '''
if not self.__prepared_registry:
results = self.prepare_registry()
if not results or ('returncode' in results and results['returncode'] != 0):
raise RegistryException('Could not perform registry preparation. {}'.format(results))
self.__prepared_registry = results
return self.__prepared_registry
@prepared_registry.setter
def prepared_registry(self, data):
''' setter method for prepared_registry attribute '''
self.__prepared_registry = data
def get(self):
''' return the self.registry_parts '''
self.deploymentconfig = None
self.service = None
rval = 0
for part in self.registry_parts:
result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
self.service = Service(result['results'][0])
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service}
def exists(self):
'''does the object exist?'''
if self.deploymentconfig and self.service:
return True
return False
def delete(self, complete=True):
        '''delete the registry parts; when complete is False the svc is kept '''
parts = []
for part in self.registry_parts:
if not complete and part['kind'] == 'svc':
continue
parts.append(self._delete(part['kind'], part['name']))
# Clean up returned results
rval = 0
for part in parts:
# pylint: disable=invalid-sequence-index
if 'returncode' in part and part['returncode'] != 0:
rval = part['returncode']
return {'returncode': rval, 'results': parts}
def prepare_registry(self):
''' prepare a registry for instantiation '''
options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
# probably need to parse this
# pylint thinks results is a string
# pylint: disable=no-member
if results['returncode'] != 0 and 'items' not in results['results']:
raise RegistryException('Could not perform registry preparation. {}'.format(results))
service = None
deploymentconfig = None
# pylint: disable=invalid-sequence-index
for res in results['results']['items']:
if res['kind'] == 'DeploymentConfig':
deploymentconfig = DeploymentConfig(res)
elif res['kind'] == 'Service':
service = Service(res)
# Verify we got a service and a deploymentconfig
if not service or not deploymentconfig:
return results
# results will need to get parsed here and modifications added
deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig))
# modify service ip
if self.svc_ip:
service.put('spec.clusterIP', self.svc_ip)
if self.portal_ip:
service.put('spec.portalIP', self.portal_ip)
# the dry-run doesn't apply the selector correctly
if self.service:
service.put('spec.selector', self.service.get_selector())
# need to create the service and the deploymentconfig
service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)
return {"service": service,
"service_file": service_file,
"service_update": False,
"deployment": deploymentconfig,
"deployment_file": deployment_file,
"deployment_update": False}
def create(self):
'''Create a registry'''
results = []
self.needs_update()
# if the object is none, then we need to create it
# if the object needs an update, then we should call replace
# Handle the deploymentconfig
if self.deploymentconfig is None:
results.append(self._create(self.prepared_registry['deployment_file']))
elif self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
# Handle the service
if self.service is None:
results.append(self._create(self.prepared_registry['service_file']))
elif self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
# pylint: disable=invalid-sequence-index
if 'returncode' in result and result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def update(self):
'''run update for the registry. This performs a replace if required'''
# Store the current service IP
if self.service:
svcip = self.service.get('spec.clusterIP')
if svcip:
self.svc_ip = svcip
portip = self.service.get('spec.portalIP')
if portip:
self.portal_ip = portip
results = []
if self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
if self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def add_modifications(self, deploymentconfig):
''' update a deployment config with changes '''
# The environment variable for REGISTRY_HTTP_SECRET is autogenerated
# We should set the generated deploymentconfig to the in memory version
# the following modifications will overwrite if needed
if self.deploymentconfig:
result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
if result:
deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])
# Currently we know that our deployment of a registry requires a few extra modifications
# Modification 1
# we need specific environment variables to be set
for key, value in self.config.config_options['env_vars'].get('value', {}).items():
if not deploymentconfig.exists_env_key(key):
deploymentconfig.add_env_value(key, value)
else:
deploymentconfig.update_env_var(key, value)
# Modification 2
# we need specific volume variables to be set
for volume in self.volumes:
deploymentconfig.update_volume(volume)
for vol_mount in self.volume_mounts:
deploymentconfig.update_volume_mount(vol_mount)
# Modification 3
# Edits
edit_results = []
for edit in self.config.config_options['edits'].get('value', []):
if edit['action'] == 'put':
edit_results.append(deploymentconfig.put(edit['key'],
edit['value']))
if edit['action'] == 'update':
edit_results.append(deploymentconfig.update(edit['key'],
edit['value'],
edit.get('index', None),
edit.get('curr_value', None)))
if edit['action'] == 'append':
edit_results.append(deploymentconfig.append(edit['key'],
edit['value']))
if edit_results and not any([res[0] for res in edit_results]):
return None
return deploymentconfig.yaml_dict
def needs_update(self):
''' check to see if we need to update '''
exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
if self.service is None or \
not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
self.service.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['service_update'] = True
exclude_list = ['dnsPolicy',
'terminationGracePeriodSeconds',
'restartPolicy', 'timeoutSeconds',
'livenessProbe', 'readinessProbe',
'terminationMessagePath',
'securityContext',
'imagePullPolicy',
                        'protocol', # ports.protocol: TCP
'type', # strategy: {'type': 'rolling'}
'defaultMode', # added on secrets
'activeDeadlineSeconds', # added in 1.5 for timeouts
]
if self.deploymentconfig is None or \
not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
self.deploymentconfig.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['deployment_update'] = True
return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False
# In the future, we would like to break out each ansible state into a function.
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
registry_options = {'images': {'value': params['images'], 'include': True},
'latest_images': {'value': params['latest_images'], 'include': True},
'labels': {'value': params['labels'], 'include': True},
'ports': {'value': ','.join(params['ports']), 'include': True},
'replicas': {'value': params['replicas'], 'include': True},
'selector': {'value': params['selector'], 'include': True},
'service_account': {'value': params['service_account'], 'include': True},
'mount_host': {'value': params['mount_host'], 'include': True},
'env_vars': {'value': params['env_vars'], 'include': False},
'volume_mounts': {'value': params['volume_mounts'], 'include': False},
'edits': {'value': params['edits'], 'include': False},
'tls_key': {'value': params['tls_key'], 'include': True},
'tls_certificate': {'value': params['tls_certificate'], 'include': True},
}
# Do not always pass the daemonset and enforce-quota parameters because they are not understood
# by old versions of oc.
# Default value is false. So, it's safe to not pass an explicit false value to oc versions which
# understand these parameters.
if params['daemonset']:
registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
if params['enforce_quota']:
registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
registry_options)
ocregistry = Registry(rconfig, params['debug'])
api_rval = ocregistry.get()
state = params['state']
########
# get
########
if state == 'list':
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
if not ocregistry.exists():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
# Unsure as to why this is angry with the return type.
# pylint: disable=redefined-variable-type
api_rval = ocregistry.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
if state == 'present':
########
# Create
########
if not ocregistry.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
api_rval = ocregistry.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if not params['force'] and not ocregistry.needs_update():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
api_rval = ocregistry.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed. %s' % state}
# -*- -*- -*- End included fragment: class/oc_adm_registry.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_adm_registry.py -*- -*- -*-
def main():
'''
ansible oc module for registry
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str',
choices=['present', 'absent']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
labels=dict(default=None, type='dict'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
service_account=dict(default='registry', type='str'),
mount_host=dict(default=None, type='str'),
volume_mounts=dict(default=None, type='list'),
env_vars=dict(default={}, type='dict'),
edits=dict(default=[], type='list'),
enforce_quota=dict(default=False, type='bool'),
force=dict(default=False, type='bool'),
daemonset=dict(default=False, type='bool'),
tls_key=dict(default=None, type='str'),
tls_certificate=dict(default=None, type='str'),
),
supports_check_mode=True,
)
results = Registry.run_ansible(module.params, module.check_mode)
if 'failed' in results:
module.fail_json(**results)
module.exit_json(**results)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_adm_registry.py -*- -*- -*-<|fim▁end|> | return c
''' |
<|file_name|>gzip.go<|end_file_name|><|fim▁begin|>// Package compression defines a response compressing Handler.
// It compresses the body of the http response sent back to a client.
package compression
import (
"compress/gzip"
"io"
"net/http"
"strings"
"sync"
"github.com/atdiar/xhttp"
)
// Gzipper defines the structure of the response compressing Handler.
type Gzipper struct {
pool *sync.Pool // useful here to recycle gzip buffers
skip map[string]bool
next xhttp.Handler
}
// NewHandler returns a response compressing Handler.
func NewHandler() Gzipper {
g := Gzipper{}
g.skip = map[string]bool{
"GET": false,
"POST": false,
"PUT": false,
"PATCH": false,
"DELETE": false,
"HEAD": false,
"OPTIONS": false,
}
g.pool = &sync.Pool{New: func() interface{} { return gzip.NewWriter(nil) }}
return g
}
// Skip is used to disable gzip compression for a given http method.
func (g Gzipper) Skip(method string) Gzipper {
if _, ok := g.skip[strings.ToUpper(method)]; !ok {
panic(method + " is not a valid method")
}
g.skip[method] = true
return g
}
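// Usage sketch (illustrative): handlers are chained with Link, e.g.
//
//	var app xhttp.Handler // some application handler (assumed to exist)
//	chain := NewHandler().Skip("HEAD").Link(app)
//
// chain then serves gzip-compressed responses to clients that accept them,
// while skipped methods and non-gzip clients fall through uncompressed.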
// compressingWriter wraps an http.ResponseWriter: writes are routed through
// a pooled gzip.Writer so the response body is compressed before being sent.
type compressingWriter struct {
io.WriteCloser
http.ResponseWriter
p *sync.Pool
}
func newcompressingWriter(w http.ResponseWriter, p *sync.Pool) compressingWriter {
w1 := p.Get()
w2 := w1.(*gzip.Writer)
w2.Reset(w)
return compressingWriter{w2, w, p}
}
// Write is using the gzip writer Write method.
func (cw compressingWriter) Write(b []byte) (int, error) {
if cw.ResponseWriter.Header().Get("Content-Type") == "" {
cw.ResponseWriter.Header().Set("Content-Type", http.DetectContentType(b))
cw.ResponseWriter.Header().Del("Content-Length")
}
return cw.WriteCloser.Write(b)
}
// Close flushes the compressed bytestring to the underlying ResponseWriter.
// Then it releases the gzip.Writer, putting it back into the Pool.
func (cw compressingWriter) Close() error {
z := cw.WriteCloser.(*gzip.Writer)
err := z.Flush()
cw.p.Put(z)
return err
}
func (cw compressingWriter) Wrappee() http.ResponseWriter { return cw.ResponseWriter }<|fim▁hole|>
// ServeHTTP handles a http.Request by gzipping the http response body and
// setting the right http Headers.
func (g Gzipper) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if mustSkip, exist := g.skip[strings.ToUpper(req.Method)]; exist && mustSkip {
if g.next != nil {
g.next.ServeHTTP(w, req)
}
return
}
	w.Header().Add("Vary", "Accept-Encoding")
	if !strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
		if g.next != nil {
			g.next.ServeHTTP(w, req)
		}
		return
	}
	// We only create the compressingWriter once we know the client accepts
	// gzip, so pooled gzip.Writers are not leaked on the pass-through path.
	wc := newcompressingWriter(w, g.pool)
	wc.Header().Set("Content-Encoding", "gzip")
// All the conditions are present : we shall compress the data before writing
// it out.
if g.next != nil {
g.next.ServeHTTP(wc, req)
}
err := wc.Close()
if err != nil {
panic(err)
}
}
// Link registers a next request Handler to be called by ServeHTTP method.
// It returns the result of the linking.
func (g Gzipper) Link(h xhttp.Handler) xhttp.HandlerLinker {
g.next = h
return g
}<|fim▁end|> | |
<|file_name|>main.ts<|end_file_name|><|fim▁begin|>import { LoggerError } from './errors';
import { getConfiguration, setConfiguration } from './configuration';
import { Writer, Level, Record } from './models';
import { getLogger } from './logger';
export {
LoggerError,
getConfiguration,<|fim▁hole|> setConfiguration,
Writer,
Level,
Record,
getLogger
};
export default function () {
}<|fim▁end|> | |
<|file_name|>lang.js<|end_file_name|><|fim▁begin|>var
utils = require('enyo/utils'),
kind = require('enyo/kind');
describe('language', function () {
describe('usage', function () {
describe('Callee', function () {
var dn = '';
var fn = function() {
dn = arguments.callee.displayName;
};
it('should have proper callee', function () {
fn.displayName = "fn";
fn();
expect(dn).to.equal('fn')
});
});
describe('Class', function () {
var AClass;
before(function () {
AClass = kind({
name: "AClass"
});
});
after(function () {
AClass = null;
});
it('should be a function', function () {
expect(AClass).to.be.a('function')
});
});
describe('isString', function () {
var iframe;
before(function () {
var iframeDoc;
// Create alternate window context to write vars from
				iframe = document.createElement("iframe");
				document.body.appendChild(iframe);
iframeDoc = iframe.contentDocument || iframe.contentWindow.document;
iframeDoc.write("<script>parent.iString = new String('hello');</script>");
iframeDoc.close();
});
after(function () {
document.body.removeChild(iframe);
iframe = null;
});
it('should determine strings properly', function () {
expect(utils.isString("string")).to.be.true;
});
// This will fail:<|fim▁hole|> // - typeof (b/c it is a string instance)
// https://github.com/enyojs/enyo/issues/2
/* global iString */
it('should determine strings written from other window contexts correctly', function () {
expect(utils.isString(iString)).to.be.true;
});
});
describe('indexOf', function () {
it('should have proper index', function () {
var index = utils.indexOf("foo", [null, null, null, null,"foo"]);
expect(index).to.equal(4)
});
});
describe('indexOf greater than array length', function () {
it('should equal -1', function () {
var index = utils.indexOf("foo", [null, null, null, null,"foo"], 10);
expect(index).to.equal(-1)
});
});
describe('AsyncMethod', function () {
var timesCalled;
before(function () {
timesCalled = 0;
});
it('should be called twice', function (done) {
utils.asyncMethod(function () { timesCalled++; });
utils.asyncMethod(this, function (i) { timesCalled += i; }, 1);
setTimeout(function() {
expect(timesCalled).to.equal(2)
done();
}, 25);
});
});
describe('isObject', function () {
it('should be true that an object is an object', function () {
expect(utils.isObject({})).to.be.true
});
it('should not be true that undefined is an object', function () {
expect(utils.isObject(undefined)).to.be.false
});
it('should not be true that null is an object', function () {
expect(utils.isObject(null)).to.be.false
});
it('should not be true that an array is an object', function () {
expect(utils.isObject([1,2,3])).to.be.false
});
it('should not be true that a number is an object', function () {
expect(utils.isObject(42)).to.be.false
});
it('should not be true that a string is an object', function () {
expect(utils.isObject("forty-two")).to.be.false
});
});
describe('isArray', function () {
it('should not be true that an object is an array', function () {
expect(utils.isArray({})).to.be.false
});
it('should not be true that undefined is an array', function () {
expect(utils.isArray(undefined)).to.be.false
});
it('should not be true that null is an array', function () {
expect(utils.isArray(null)).to.be.false
});
it('should be true that an array is an array', function () {
expect(utils.isArray([1,2,3])).to.be.true
});
it('should not be true that a number is an array', function () {
expect(utils.isArray(42)).to.be.false
});
it('should not be true that a string is an array', function () {
expect(utils.isArray("forty-two")).to.be.false
});
});
});
});<|fim▁end|> | // - instanceof from another context |
<|file_name|>generate_md_functions.py<|end_file_name|><|fim▁begin|># encoding: UTF-8
__author__ = 'CHENXY'
from string import join
from sgit_struct import structDict
def processCallBack(line):
orignalLine = line
    line = line.replace(' virtual void ', '')  # strip the useless leading content
    line = line.replace('{};\n', '')  # strip the useless trailing content
content = line.split('(')
    cbName = content[0]  # callback function name
    cbArgs = content[1]  # callback function arguments
if cbArgs[-1] == ' ':
cbArgs = cbArgs.replace(') ', '')
else:
cbArgs = cbArgs.replace(')', '')
    cbArgsList = cbArgs.split(', ')  # turn the arguments into a list
cbArgsTypeList = []
cbArgsValueList = []
    for arg in cbArgsList:  # start processing the arguments
content = arg.split(' ')
if len(content) > 1:
if 'struct' not in content:
                cbArgsTypeList.append(content[0])  # argument type list
                cbArgsValueList.append(content[1])  # argument value list
else:
print content
cbArgsTypeList.append(content[1]) # 参数类型列表
cbArgsValueList.append(content[2]+content[3]) # 参数数据列表
createTask(cbName, cbArgsTypeList, cbArgsValueList, orignalLine)
createProcess(cbName, cbArgsTypeList, cbArgsValueList)
    # generate the process section of the .h file
process_line = 'void process' + cbName[2:] + '(Task task);\n'
fheaderprocess.write(process_line)
fheaderprocess.write('\n')
    # generate the "on" callback section of the .h file
if 'OnRspError' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict error) {};\n'
elif 'OnRspQry' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last) {};\n'
elif 'OnRsp' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last) {};\n'
elif 'OnRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data) {};\n'
elif 'OnErrRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error) {};\n'
else:
on_line = ''
fheaderon.write(on_line)
fheaderon.write('\n')
    # generate the wrapper section
createWrap(cbName)
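# Illustrative example (hypothetical header line): an input such as
#   virtual void OnRspError(CSgitFtdcRspInfoField *pRspInfo, int nRequestID, bool bIsLast) {};
# parses into cbName = 'OnRspError',
# cbArgsTypeList = ['CSgitFtdcRspInfoField', 'int', 'bool'] and
# cbArgsValueList = ['*pRspInfo', 'nRequestID', 'bIsLast'], which drive the
# generated task/process/switch/wrapper snippets.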
#----------------------------------------------------------------------
def createWrap(cbName):
"""在Python封装段代码中进行处理"""
    # generate the "on" callback section of the .h file
if 'OnRspError' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict error)\n'
override_line = '("on' + cbName[2:] + '")(error);\n'
elif 'OnRsp' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last)\n'
override_line = '("on' + cbName[2:] + '")(data, error, id, last);\n'
elif 'OnRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data)\n'
override_line = '("on' + cbName[2:] + '")(data);\n'
elif 'OnErrRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error)\n'
override_line = '("on' + cbName[2:] + '")(data, error);\n'
else:
on_line = ''
    if on_line != '':
fwrap.write(on_line)
fwrap.write('{\n')
fwrap.write('\ttry\n')
fwrap.write('\t{\n')
fwrap.write('\t\tthis->get_override'+override_line)
fwrap.write('\t}\n')
fwrap.write('\tcatch (error_already_set const &)\n')
fwrap.write('\t{\n')
fwrap.write('\t\tPyErr_Print();\n')
fwrap.write('\t}\n')
fwrap.write('};\n')
fwrap.write('\n')
def createTask(cbName, cbArgsTypeList, cbArgsValueList, orignalLine):
    # build a Task object from the callback data and push it onto the queue
funcline = orignalLine.replace(' virtual void ', 'void ' + apiName + '::')
funcline = funcline.replace('{};', '')
funcline = funcline.replace(' {}', '')
ftask.write(funcline)
ftask.write('{\n')
ftask.write("\tTask task = Task();\n")
ftask.write("\ttask.task_name = " + cbName.upper() + ";\n")
    # define the constant
global define_count
fdefine.write("#define " + cbName.upper() + ' ' + str(define_count) + '\n')
define_count = define_count + 1
    # switch-statement section
fswitch.write("case " + cbName.upper() + ':\n')
fswitch.write("{\n")
fswitch.write("\tthis->" + cbName.replace('On', 'process') + '(task);\n')
fswitch.write("\tbreak;\n")
fswitch.write("}\n")
fswitch.write("\n")
for i, type_ in enumerate(cbArgsTypeList):
if type_ == 'int':
ftask.write("\ttask.task_id = " + cbArgsValueList[i] + ";\n")
elif type_ == 'bool':
ftask.write("\ttask.task_last = " + cbArgsValueList[i] + ";\n")
elif 'CSgitFtdcRspInfoField' in type_:
ftask.write("\n")
ftask.write("\tif (pRspInfo)\n")
ftask.write("\t{\n")
ftask.write("\t\ttask.task_error = " + cbArgsValueList[i] + ";\n")
ftask.write("\t}\n")
ftask.write("\telse\n")
ftask.write("\t{\n")
ftask.write("\t\tCSgitFtdcRspInfoField empty_error = CSgitFtdcRspInfoField();\n")
ftask.write("\t\tmemset(&empty_error, 0, sizeof(empty_error));\n")
ftask.write("\t\ttask.task_error = empty_error;\n")
ftask.write("\t}\n")
else:
ftask.write("\n")
ftask.write("\tif (" + cbArgsValueList[i][1:] + ")\n")
ftask.write("\t{\n")
ftask.write("\t\ttask.task_data = " + cbArgsValueList[i] + ";\n")
ftask.write("\t}\n")
ftask.write("\telse\n")
ftask.write("\t{\n")
ftask.write("\t\t" + type_ + " empty_data = " + type_ + "();\n")
ftask.write("\t\tmemset(&empty_data, 0, sizeof(empty_data));\n")
ftask.write("\t\ttask.task_data = empty_data;\n")
ftask.write("\t}\n")
ftask.write("\tthis->task_queue.push(task);\n")
ftask.write("};\n")
ftask.write("\n")
def createProcess(cbName, cbArgsTypeList, cbArgsValueList):
    # pop the task from the queue and convert it into a python dict
fprocess.write("void " + apiName + '::' + cbName.replace('On', 'process') + '(Task task)' + "\n")
fprocess.write("{\n")
fprocess.write("\tPyLock lock;\n")
onArgsList = []
for i, type_ in enumerate(cbArgsTypeList):
if 'CSgitFtdcRspInfoField' in type_:
fprocess.write("\t"+ type_ + ' task_error = any_cast<' + type_ + '>(task.task_error);\n')
fprocess.write("\t"+ "dict error;\n")
struct = structDict[type_]<|fim▁hole|>
onArgsList.append('error')
elif type_ in structDict:
fprocess.write("\t"+ type_ + ' task_data = any_cast<' + type_ + '>(task.task_data);\n')
fprocess.write("\t"+ "dict data;\n")
struct = structDict[type_]
for key in struct.keys():
fprocess.write("\t"+ 'data["' + key + '"] = task_data.' + key + ';\n')
fprocess.write("\n")
onArgsList.append('data')
elif type_ == 'bool':
onArgsList.append('task.task_last')
elif type_ == 'int':
onArgsList.append('task.task_id')
onArgs = join(onArgsList, ', ')
fprocess.write('\tthis->' + cbName.replace('On', 'on') + '(' + onArgs +');\n')
fprocess.write("};\n")
fprocess.write("\n")
def processFunction(line):
    line = line.replace(' virtual int ', '')  # strip the useless leading content
    line = line.replace(') = 0;\n', '')  # strip the useless trailing content
content = line.split('(')
    fcName = content[0]  # function name
    fcArgs = content[1]  # function arguments
fcArgs = fcArgs.replace(')', '')
    fcArgsList = fcArgs.split(',')  # turn the arguments into a list
fcArgsTypeList = []
fcArgsValueList = []
    for arg in fcArgsList:  # start processing the arguments
content = arg.split(' ')
if len(content) >= 2:
            fcArgsTypeList.append(content[0])  # argument type list
            fcArgsValueList.append(content[1])  # argument value list
print line
print fcArgs
print fcArgsList
print fcArgsTypeList
if len(fcArgsTypeList)>0 and fcArgsTypeList[0] in structDict:
createFunction(fcName, fcArgsTypeList, fcArgsValueList)
    # generate the request-function section of the .h file
if 'Req' in fcName:
req_line = 'int req' + fcName[3:] + '(dict req, int nRequestID);\n'
fheaderfunction.write(req_line)
fheaderfunction.write('\n')
def createFunction(fcName, fcArgsTypeList, fcArgsValueList):
type_ = fcArgsTypeList[0]
struct = structDict[type_]
ffunction.write('int ' + apiName + '::req' + fcName[3:] + '(dict req, int nRequestID)\n')
ffunction.write('{\n')
ffunction.write('\t' + type_ +' myreq = ' + type_ + '();\n')
ffunction.write('\tmemset(&myreq, 0, sizeof(myreq));\n')
for key, value in struct.items():
if value == 'string':
line = '\tgetString(req, "' + key + '", myreq.' + key + ');\n'
elif value == 'char':
line = '\tgetChar(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'int':
line = '\tgetInt(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'long':
line = '\tgetLong(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'short':
line = '\tgetShort(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'double':
line = '\tgetDouble(req, "' + key + '", &myreq.' + key + ');\n'
ffunction.write(line)
ffunction.write('\tint i = this->api->' + fcName + '(&myreq, nRequestID);\n')
ffunction.write('\treturn i;\n')
ffunction.write('};\n')
ffunction.write('\n')
#########################################################
apiName = 'MdApi'
fcpp = open('SgitFtdcMdApi.h', 'r')
ftask = open('sgit_md_task.cpp', 'w')
fprocess = open('sgit_md_process.cpp', 'w')
ffunction = open('sgit_md_function.cpp', 'w')
fdefine = open('sgit_md_define.cpp', 'w')
fswitch = open('sgit_md_switch.cpp', 'w')
fheaderprocess = open('sgit_md_header_process.h', 'w')
fheaderon = open('sgit_md_header_on.h', 'w')
fheaderfunction = open('sgit_md_header_function.h', 'w')
fwrap = open('sgit_md_wrap.cpp', 'w')
define_count = 1
for line in fcpp:
if " virtual void On" in line:
print 'callback'
processCallBack(line)
elif " virtual int" in line:
print 'function'
processFunction(line)
fcpp.close()
ftask.close()
fprocess.close()
ffunction.close()
fswitch.close()
fdefine.close()
fheaderprocess.close()
fheaderon.close()
fheaderfunction.close()
fwrap.close()<|fim▁end|> | for key in struct.keys():
fprocess.write("\t"+ 'error["' + key + '"] = task_error.' + key + ';\n')
fprocess.write("\n") |
<|file_name|>included_urls2.py<|end_file_name|><|fim▁begin|>"""
These URL patterns are included in two different ways in the main urls.py, with
<|fim▁hole|>"""
from django.conf.urls import url
from .views import empty_view
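# A rough sketch of the two resolution paths (hypothetical values; the actual
# URLs also carry whatever prefix the include in the main urls.py adds):
#    reverse('part', kwargs={'value': 'x'})   -> .../part/x/
#    reverse('part2')                         -> .../part2/
#    reverse('part2', kwargs={'value': 'x'})  -> .../part2/x/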
urlpatterns = [
url(r'^part/(?P<value>\w+)/$', empty_view, name="part"),
url(r'^part2/(?:(?P<value>\w+)/)?$', empty_view, name="part2"),
]<|fim▁end|> | an extra argument present in one case. Thus, there are two different ways for
each name to resolve and Django must distinguish the possibilities based on the
argument list.
|
<|file_name|>sms_manager.js<|end_file_name|><|fim▁begin|>// Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
// License: GNU General Public License v3. See license.txt
erpnext.SMSManager = function SMSManager(doc) {
var me = this;
this.setup = function() {
var default_msg = {
'Lead' : '',
'Opportunity' : 'Your enquiry has been logged into the system. Ref No: ' + doc.name,
'Quotation' : 'Quotation ' + doc.name + ' has been sent via email. Thanks!',
'Sales Order' : 'Sales Order ' + doc.name + ' has been created against '
+ (doc.quotation_no ? ('Quote No:' + doc.quotation_no) : '')
+ (doc.po_no ? (' for your PO: ' + doc.po_no) : ''),
			'Delivery Note' : 'Items have been delivered against delivery note: ' + doc.name
+ (doc.po_no ? (' for your PO: ' + doc.po_no) : ''),
'Sales Invoice': 'Invoice ' + doc.name + ' has been sent via email '
+ (doc.po_no ? (' for your PO: ' + doc.po_no) : ''),
'Material Request' : 'Material Request ' + doc.name + ' has been raised in the system',
'Purchase Order' : 'Purchase Order ' + doc.name + ' has been sent via email',
			'Purchase Receipt' : 'Items have been received against purchase receipt: ' + doc.name
}
if (in_list(['Sales Order', 'Delivery Note', 'Sales Invoice'], doc.doctype))
this.show(doc.contact_person, 'Customer', doc.customer, '', default_msg[doc.doctype]);
else if (doc.doctype === 'Quotation')
this.show(doc.contact_person, 'Customer', doc.party_name, '', default_msg[doc.doctype]);
else if (in_list(['Purchase Order', 'Purchase Receipt'], doc.doctype))
this.show(doc.contact_person, 'Supplier', doc.supplier, '', default_msg[doc.doctype]);
else if (doc.doctype == 'Lead')
this.show('', '', '', doc.mobile_no, default_msg[doc.doctype]);
else if (doc.doctype == 'Opportunity')<|fim▁hole|> this.show('', '', '', doc.contact_no, default_msg[doc.doctype]);
else if (doc.doctype == 'Material Request')
this.show('', '', '', '', default_msg[doc.doctype]);
};
this.get_contact_number = function(contact, ref_doctype, ref_name) {
frappe.call({
method: "frappe.core.doctype.sms_settings.sms_settings.get_contact_number",
args: {
contact_name: contact,
ref_doctype: ref_doctype,
ref_name: ref_name
},
callback: function(r) {
if(r.exc) { frappe.msgprint(r.exc); return; }
me.number = r.message;
me.show_dialog();
}
});
};
this.show = function(contact, ref_doctype, ref_name, mobile_nos, message) {
this.message = message;
if (mobile_nos) {
me.number = mobile_nos;
me.show_dialog();
} else if (contact){
this.get_contact_number(contact, ref_doctype, ref_name)
} else {
me.show_dialog();
}
}
this.show_dialog = function() {
if(!me.dialog)
me.make_dialog();
me.dialog.set_values({
'message': me.message,
'number': me.number
})
me.dialog.show();
}
this.make_dialog = function() {
var d = new frappe.ui.Dialog({
title: 'Send SMS',
width: 400,
fields: [
{fieldname:'number', fieldtype:'Data', label:'Mobile Number', reqd:1},
{fieldname:'message', fieldtype:'Text', label:'Message', reqd:1},
{fieldname:'send', fieldtype:'Button', label:'Send'}
]
})
d.fields_dict.send.input.onclick = function() {
var btn = d.fields_dict.send.input;
var v = me.dialog.get_values();
if(v) {
$(btn).set_working();
frappe.call({
method: "frappe.core.doctype.sms_settings.sms_settings.send_sms",
args: {
receiver_list: [v.number],
msg: v.message
},
callback: function(r) {
$(btn).done_working();
if(r.exc) {frappe.msgprint(r.exc); return; }
me.dialog.hide();
}
});
}
}
this.dialog = d;
}
this.setup();
}<|fim▁end|> | |
<|file_name|>momentum.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Momentum for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.optimizer_v2 import optimizer_v2
from tensorflow.python.training import training_ops
class MomentumOptimizer(optimizer_v2.OptimizerV2):
"""Optimizer that implements the Momentum algorithm.
Computes (if `use_nesterov = False`):
```
accumulation = momentum * accumulation + gradient
variable -= learning_rate * accumulation
```
Note that in the dense version of this algorithm, `accumulation` is updated
and applied regardless of a gradient's value, whereas the sparse version (when
the gradient is an `IndexedSlices`, typically because of `tf.gather` or an
embedding) only updates variable slices and corresponding `accumulation` terms
when that part of the variable was used in the forward pass.
"""
def __init__(self, learning_rate, momentum,
use_locking=False, name="Momentum", use_nesterov=False):
"""Construct a new Momentum optimizer.
Some of the args below are hyperparameters, where a hyperparameter is
defined as a scalar Tensor, a regular Python value or a callable (which
will be evaluated when `apply_gradients` is called) returning a scalar
Tensor or a Python value.
Args:
learning_rate: A float hyperparameter. The learning rate.
momentum: A float hyperparameter. The momentum.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Momentum".
use_nesterov: If `True` use Nesterov Momentum.
See [Sutskever et al., 2013](
http://jmlr.org/proceedings/papers/v28/sutskever13.pdf).
This implementation always computes gradients at the value of the
variable(s) passed to the optimizer. Using Nesterov Momentum makes the
variable(s) track the values called `theta_t + mu*v_t` in the paper.
@compatibility(eager)
When eager execution is enabled, learning_rate and momentum can each be a
callable that takes no arguments and returns the actual value to use. This
can be useful for changing these values across different invocations of
optimizer functions.
@end_compatibility
"""
super(MomentumOptimizer, self).__init__(use_locking, name)
self._set_hyper("learning_rate", learning_rate)
self._set_hyper("momentum", momentum)
self._use_nesterov = use_nesterov
def _create_vars(self, var_list, state):
for v in var_list:
state.zeros_slot(v, "momentum")
def _apply_dense(self, grad, var, state):
mom = state.get_slot(var, "momentum")
return training_ops.apply_momentum(
var,
mom,
state.get_hyper("learning_rate", var.dtype.base_dtype),
grad,
state.get_hyper("momentum", var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov).op
def _resource_apply_dense(self, grad, var, state):
mom = state.get_slot(var, "momentum")
return training_ops.resource_apply_momentum(
var.handle,
mom.handle,
state.get_hyper("learning_rate", var.dtype.base_dtype),
grad,
state.get_hyper("momentum", var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov)
def _apply_sparse(self, grad, var, state):
mom = state.get_slot(var, "momentum")
return training_ops.sparse_apply_momentum(
var,
mom,
state.get_hyper("learning_rate", var.dtype.base_dtype),<|fim▁hole|> state.get_hyper("momentum", var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov).op
def _resource_apply_sparse(self, grad, var, indices, state):
mom = state.get_slot(var, "momentum")
return training_ops.resource_sparse_apply_momentum(
var.handle,
mom.handle,
state.get_hyper("learning_rate", var.dtype.base_dtype),
grad,
indices,
state.get_hyper("momentum", var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov)<|fim▁end|> | grad.values,
grad.indices, |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>"""
Django settings for kboard project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fbk#a_$7&@566onvmd1xfxyszz)npb+d5gq#y9q(n0wg_k)v0x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'accounts.Account'
# Application definition
INSTALLED_APPS = [
'accounts',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'board',
'django_summernote',
'djangobower',
'pipeline',
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',<|fim▁hole|> 'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kboard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'core.context_processors.navbar'
],
},
},
]
WSGI_APPLICATION = 'kboard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, '../database/db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'kboard',
'USER': 'root',
'PASSWORD': 'root'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Bower settings
BOWER_INSTALLED_APPS = [
'jquery#3.1.1',
'bootstrap#3.3.7'
]
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, '../')
# Summernote settings
SUMMERNOTE_CONFIG = {}
# pipeline settings
PIPELINE = {
    'COMPILERS': [
        'libsasscompiler.LibSassCompiler',
    ],
'JAVASCRIPT': {
'main': {
'source_filenames': [
'js/*.js'
],
'output_filename': 'js/vendor.js'
},
},
'STYLESHEETS': {
'main': {
'source_filenames': [
'style/*.scss'
],
'output_filename': 'style/main.css'
},
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, '../static')
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
os.path.join(BOWER_COMPONENTS_ROOT, 'bower_components'),
]
MEDIA_URL = '/file/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'file')
# Registration
# https://django-registration.readthedocs.io/en/2.1.2/index.html
ACCOUNT_ACTIVATION_DAYS = 7
# Email Activation
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = os.environ.get('KBOARD_EMAIL')
EMAIL_HOST_PASSWORD = os.environ.get('KBOARD_PASSWORD')
# When Login success, go to main page.
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"<|fim▁end|> | |
<|file_name|>decode.rs<|end_file_name|><|fim▁begin|>//! Decodes a Bitterlemon-encoded byte stream into its original bit stream.
use std::iter::Iterator;
use std::result;
/// Decodes a Bitterlemon byte stream into an iterator of `bool`s.
/// `input` can be any iterator that yields `u8` values.
///
/// # Errors
///
/// Unlike encoding, decoding has a chance of failure. The exposed iterator
/// will return a [`Result`] object to handle the possibility of an invalid
/// input stream. The `Ok` value in this case is of type `bool`.
///
/// [`Result`]: type.Result.html
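///
/// # Example
///
/// A minimal sketch; the byte value is inferred from this module's own tests
/// rather than any external specification:
///
/// ```ignore
/// // 0xc2 encodes a run of two set bits
/// let bits: Vec<_> = decode(vec![0xc2u8].into_iter()).collect();
/// assert_eq!(bits, vec![Ok(true), Ok(true)]);
/// ```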
pub fn decode<S>(input: S) -> Decoder<S>
where S : Iterator<Item=u8> {
Decoder::<S> {
source: input,
state: DecodeState::Pending,
}
}
/// Manages the state for decoding a Bitterlemon byte stream.
///
/// To perform a decoding, see [`decode`](#fn.decode).
pub struct Decoder<S> {
state: DecodeState,
source: S,
}
/// Describes errors that can occur when decoding a Bitterlemon byte stream.
#[derive(Debug, PartialEq, Eq)]
pub enum Error {
/// Input had too few bytes to cleanly decode. The associated values are:
///
/// * number of bits lost due to truncated input;
/// * number of bytes still expected from the input.
TruncatedInput(u8, u8),
}
/// Decode operations yield this on each iteration.
pub type Result = result::Result<bool, Error>;
impl<S> Iterator for Decoder<S>
where S : Iterator<Item=u8> {
type Item = Result;
fn next(&mut self) -> Option<Self::Item> {
// pull from source if needs be
if self.must_pull() {
let next = self.source.next();
self.next_with_pulled(next)
} else {
self.next_from_existing()
}
}
}
impl<S> Decoder<S>
where S : Iterator<Item=u8> {
fn must_pull(&self) -> bool {
match self.state {
DecodeState::Pending => true,
DecodeState::Done => false,
DecodeState::Run(remaining, _) => remaining == 0,
DecodeState::Frame(remaining, _, stage_size) => remaining == 0 || stage_size == 0,
}
}
fn next_with_pulled(&mut self, next: Option<u8>) -> Option<<Self as Iterator>::Item> {
// handle None from source
let next = match next {
Some(x) => x,
None => match self.state {
DecodeState::Pending => {
self.state = DecodeState::Done;
return None;
}, // source was empty
DecodeState::Done => { return None; }, // always return None here
DecodeState::Run(_, _) => {
unreachable!("next_with_pulled called with more run bits to flush: {:?}", self.state);
},
DecodeState::Frame(remaining, _, stage_size) => {
debug_assert!(stage_size == 0);
debug_assert!(remaining > 0);
// missing bytes to complete the frame
let error_specifics = Error::TruncatedInput(remaining, (remaining + 7) >> 3);
return Some(Err(error_specifics));
}
}
};
// handle mid-frame
if match self.state {
DecodeState::Frame(ref mut remaining, ref mut stage, ref mut stage_size)
if *remaining > 0 => {
debug_assert!(*stage_size == 0); // shouldn't have pulled otherwise
*stage = next;
*stage_size = 8;
// now fall through to real iteration logic
true
},
_ => false
} {
return self.next_from_existing();
}
let got = match next {
n if n < 0x80 => {
// frame beginning
let frame_size = byte_to_frame_size(n);
self.state = DecodeState::Frame(frame_size, 0, 0);
None
},
n => {
// new run
let frame_size = byte_to_run_size(n);
let mode = n >= 0xc0;
self.state = if frame_size > 1 {
// don't bother moving to run state if only one bit in this run
// also, leaving this method in state Run(0, _) is a logic error
DecodeState::Run(frame_size - 1, mode)
}
else {
DecodeState::Pending
};
Some(Ok(mode))
}
};
got.or_else(|| {
let next = self.source.next();
self.next_with_pulled(next)
})
}
fn next_from_existing(&mut self) -> Option<<Self as Iterator>::Item> {
let (to_return, next_state) = match self.state {
DecodeState::Pending => unreachable!(),
DecodeState::Done => { return None; },
DecodeState::Run(ref mut remaining, ref run_mode) => {
*remaining -= 1;
(*run_mode, if *remaining == 0 {Some(DecodeState::Pending)} else {None})
},
DecodeState::Frame(ref mut remaining, ref mut stage, ref mut stage_size) => {
let got_bit = (*stage & 0x80) != 0;
*stage = (*stage & 0x7f) << 1;
*stage_size -= 1;
*remaining -= 1;
(got_bit, if *remaining == 0 {Some(DecodeState::Pending)} else {None})
}
};
if let Some(next_state) = next_state {
self.state = next_state;
}
Some(Ok(to_return))
}
}
fn byte_to_run_size(byte: u8) -> u8 {
let byte = byte & 0x3f;
if byte == 0 { 0x40 } else { byte }
}
fn byte_to_frame_size(byte: u8) -> u8 {
if byte == 0 { 0x80 } else { byte }
}
#[derive(Debug)]
enum DecodeState {
Pending, // should pull
Run(u8, bool), // run count, is_set
Frame(u8, u8, u8), // frame bit count, stage contents, stage size
Done, // iteration complete
}
#[cfg(test)]
mod test_decoder {
macro_rules! decoder {
( $($e:expr),* ) => {
{
let v = vec![$( $e, )*];
super::decode(v.into_iter())
}
}
}
#[test]
fn empty_input() {
let mut iter = decoder![];
assert_eq!(iter.next(), None);
assert_eq!(iter.next(), None);
}
fn single_run_impl(top_bits: u8, mode: bool) {
for i in 0..0x3fu8 {
let run_size = super::byte_to_run_size(i);
let mut iter = decoder![i+top_bits];
for _ in 0..run_size {
assert_eq!(iter.next(), Some(Ok(mode)));
}
assert_eq!(iter.next(), None);
}
}
#[test]
fn single_run_clear() {
single_run_impl(0x80, false)
}
#[test]
fn single_run_set() {
single_run_impl(0xc0, true)
}
#[test]
fn single_byte_frame() {
let case = |byte_in: u8, bool_out: bool| {
let mut iter = decoder![0x01, byte_in];
assert_eq!(iter.next(), Some(Ok(bool_out)));
};
case(0xff, true);
case(0x00, false);
case(0x80, true);
case(0x7f, false);
}
#[test]
fn full_byte_frame() {
let mut iter = decoder![0x08, 0x55];
let mut expected = false;
for _ in 0..8 {
assert_eq!(iter.next(), Some(Ok(expected)));
expected = !expected;
}
assert_eq!(iter.next(), None);
}
#[test]
fn two_byte_frame() {
let mut iter = decoder![0x0f, 0x55, 0x55];
let mut expected = false;
for _ in 0..15 {
assert_eq!(iter.next(), Some(Ok(expected)));
expected = !expected;
}
assert_eq!(iter.next(), None);
}
#[test]
fn alternate_runs_frames() {
let case = |bytes: &[u8], count: usize, first_output: bool| {
let mut iter = super::decode(bytes.iter().map(|&b| b));
let mut expected = first_output;
for _ in 0..count {
assert_eq!(iter.next(), Some(Ok(expected)));
expected = !expected;
}
assert_eq!(iter.next(), None);
};
case(&[0xc1, 0x10, 0x55, 0x55, 0x81], 18, true);
case(&[0x81, 0x10, 0xaa, 0xaa, 0xc1], 18, false);
case(&[0x08, 0xaa, 0xc1, 0x08, 0x55], 17, true);
case(&[0x08, 0x55, 0x81, 0x08, 0xaa], 17, false);
}
#[test]
fn error_on_frame_cutoff() {
let case = |bytes: &[u8], bits_lost: u8, bytes_missing: u8| {
let mut iter = super::decode(bytes.iter().map(|&b| b));
let ok_count = (bytes.len() - 1) * 8;
for _ in 0..ok_count {
assert!(match iter.next() {
Some(Ok(_)) => true,
_ => false
});
}
let error = iter.next();
assert!(error.is_some());
let error = error.unwrap();
assert!(error.is_err());
match error.unwrap_err() {
super::Error::TruncatedInput(pl, bm) => {
assert_eq!(pl, bits_lost);
assert_eq!(bm, bytes_missing);<|fim▁hole|> }
};
};
case(&[0x01], 1, 1);
case(&[0x02], 2, 1);
case(&[0x00], 0x80, 0x10);
case(&[0x09, 0xff], 1, 1);
case(&[0x7f, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14], 7, 1);
}
#[test]
fn next_none_idempotence() {
let src = &[0xc1u8];
let mut iter = super::decode(src.iter().map(|&b| b));
assert!(iter.next().is_some());
for _ in 0..20 {
assert_eq!(iter.next(), None);
}
}
}<|fim▁end|> | |
<|file_name|>bitcoin_sr.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.1" language="sr">
<context>
<name>AboutDialog</name>
<message>
<source>About Peershares</source>
<translation type="vanished">О Peershares-у</translation>
</message>
<message>
<location filename="../forms/aboutdialog.ui" line="14"/>
<source>About ybshares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/aboutdialog.ui" line="53"/>
<source><b>ybshares</b> : version</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/aboutdialog.ui" line="91"/>
<source>Copyright © 2013-2014 ybshares Developers</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/aboutdialog.ui" line="104"/>
<source>Copyright © 2012-2013 The Bitcoin Developers
Copyright © 2009-2012 Bitcoin Developers
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file license.txt or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="14"/>
<source>Address Book</source>
<translation>Адресар</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="20"/>
<source>These are your ybshares addresses for receiving shares. You may want to give a different one to each sender so you can keep track of who is transferring shares to you.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="33"/>
<source>Double-click to edit address or label</source>
<translation>Кликните два пута да промените адресу и/или етикету</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="57"/>
<source>Create a new address</source>
<translation>Прави нову адресу</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="60"/>
<source>&New Address...</source>
<translation>&Нова адреса...</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="71"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Копира изабрану адресу на системски клипборд</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="74"/>
<source>&Copy to Clipboard</source>
<translation>Ис&копирај на клипборд</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="85"/>
<source>Show &QR Code</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="96"/>
<source>Sign a message to prove you own this ybshares address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="99"/>
<source>&Sign Message</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="110"/>
<source>Delete the currently selected address from the list. Only sending addresses can be deleted.</source>
<translation>Брише изабрану адресу. Могуће је брисати само адресе са којих се шаље.</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="113"/>
<source>&Delete</source>
<translation>&Избриши</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="65"/>
<source>Copy address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="66"/>
<source>Copy label</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="67"/>
<source>Edit</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="68"/>
<source>Delete</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="285"/>
<source>Export Address Book Data</source>
<translation>Извоз података из адресара</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="286"/>
<source>Comma separated file (*.csv)</source>
<translation>Зарезом одвојене вредности (*.csv)</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="299"/>
<source>Error exporting</source>
<translation>Грешка током извоза</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="299"/>
<source>Could not write to file %1.</source>
<translation>Није могуће писати у фајл %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="81"/>
<source>Label</source>
<translation>Етикета</translation>
</message>
<message>
<location filename="../addresstablemodel.cpp" line="81"/>
<source>Address</source>
<translation>Адреса</translation>
</message>
<message>
<location filename="../addresstablemodel.cpp" line="81"/>
<source>Dividend address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../addresstablemodel.cpp" line="117"/>
<source>(no label)</source>
<translation>(без етикете)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="26"/>
<source>Dialog</source>
<translation>Дијалог</translation>
</message>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="94"/>
<source>TextLabel</source>
<translation>TextLabel</translation>
</message>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="47"/>
<source>Enter passphrase</source>
<translation>Унесите лозинку</translation>
</message>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="61"/>
<source>New passphrase</source>
<translation>Нова лозинка</translation>
</message>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="75"/>
<source>Repeat new passphrase</source>
<translation>Поновите нову лозинку</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="34"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="35"/>
<source>Encrypt wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="38"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="43"/>
<source>Unlock wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="46"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="51"/>
<source>Decrypt wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="54"/>
<source>Change passphrase</source>
<translation>Промена лозинке</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="55"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="101"/>
<source>Confirm wallet encryption</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="102"/>
<source>WARNING: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR ybshares</b>!
Are you sure you wish to encrypt your wallet?</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="112"/>
<source>ybshares will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your ybshares from being stolen by malware infecting your computer.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="111"/>
<location filename="../askpassphrasedialog.cpp" line="160"/>
<source>Wallet encrypted</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="117"/>
<location filename="../askpassphrasedialog.cpp" line="124"/>
<location filename="../askpassphrasedialog.cpp" line="166"/>
<location filename="../askpassphrasedialog.cpp" line="172"/>
<source>Wallet encryption failed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="118"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="136"/>
<source>Wallet unlock failed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="137"/>
<location filename="../askpassphrasedialog.cpp" line="148"/>
<location filename="../askpassphrasedialog.cpp" line="167"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="147"/>
<source>Wallet decryption failed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="161"/>
<source>Wallet passphrase was succesfully changed.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="208"/>
<location filename="../askpassphrasedialog.cpp" line="232"/>
<source>Warning: The Caps Lock key is on.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="125"/>
<location filename="../askpassphrasedialog.cpp" line="173"/>
<source>The supplied passphrases do not match.</source>
<translation>Лозинке које сте унели се не подударају.</translation>
</message>
</context>
<context>
<name>BalanceScannerThread</name>
<message>
<location filename="../distributedivdialog.cpp" line="68"/>
<source>Scanning thread did not terminate properly</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../distributedivdialog.cpp" line="96"/>
<source>Please Wait</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../distributedivdialog.cpp" line="97"/>
<source>Scanning local blockchain</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<source>Peershares Portfolio</source>
<translation type="obsolete">Peershares новчаник</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="186"/>
<source>&Overview</source>
<translation type="unfinished">&Општи преглед</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="187"/>
<source>Show general overview of holdings</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="192"/>
<source>&Transactions</source>
<translation type="unfinished">&Трансакције</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="193"/>
<source>Browse transaction history</source>
<translation type="unfinished">Претражите историјат трансакција</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="198"/>
<source>&Address Book</source>
<translation type="unfinished">&Адресар</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="199"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished">Уредите запамћене адресе и њихове етикете</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="204"/>
<source>&Receive shares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="205"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished">Прегледајте листу адреса на којима прихватате уплате</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="210"/>
<source>&Send shares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="216"/>
<source>Sign &message</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="217"/>
<source>Prove you control an address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="236"/>
<source>E&xit</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="237"/>
<source>Quit application</source>
<translation type="unfinished">Напустите програм</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="240"/>
<source>&About %1</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Show information about Peershares</source>
<translation type="obsolete">Прегледајте информације о Peershares-у</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="243"/>
<source>About &Qt</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="244"/>
<source>Show information about Qt</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="246"/>
<source>&Options...</source>
<translation type="unfinished">П&оставке...</translation>
</message>
<message>
<source>Modify configuration options for Peershares</source>
<translation type="obsolete">Изаберите могућности Peershares-а</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="251"/>
<source>&Export...</source>
<translation type="unfinished">&Извоз...</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="252"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="253"/>
<source>&Encrypt Portfolio</source>
<translation type="unfinished">&Шифровање новчаника</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="254"/>
<source>Encrypt or decrypt portfolio</source>
<translation type="unfinished">Шифровање и дешифровање новчаника</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="256"/>
<source>&Unlock Wallet for Minting Only</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="257"/>
<source>Unlock wallet only for minting. Sending coins will still require the passphrase.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="259"/>
<source>&Backup Wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="260"/>
<source>Backup portfolio to another location</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="261"/>
<source>&Change Passphrase</source>
<translation type="unfinished">Промени &лозинку</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="262"/>
<source>Change the passphrase used for portfolio encryption</source>
<translation type="unfinished">Мењање лозинке којом се шифрује новчаник</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="263"/>
<source>&Debug window</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="264"/>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="267"/>
<source>&Distribute dividends</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="268"/>
<source>Distribute dividends to share holders</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="294"/>
<source>&File</source>
<translation type="unfinished">&Фајл</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="303"/>
<source>S&hares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="307"/>
<source>&Settings</source>
<translation type="unfinished">&Подешавања</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="314"/>
<source>&Help</source>
<translation type="unfinished">П&омоћ</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="323"/>
<source>Tabs toolbar</source>
<translation type="unfinished">Трака са картицама</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="334"/>
<source>Actions toolbar</source>
<translation type="unfinished">Трака са алаткама</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="346"/>
<source>[testnet]</source>
<translation type="unfinished">[testnet]</translation>
</message>
<message>
<source>Peershares-qt</source>
<translation type="obsolete">Peershares-qt</translation>
</message>
<message numerus="yes">
<source>%n active connection(s) to Peershares network</source>
<translation type="obsolete">
<numerusform>%n активна веза са Peershares мрежом</numerusform>
<numerusform>%n активне везе са Peershares мрежом</numerusform>
<numerusform>%n активних веза са Peershares мрежом</numerusform>
</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="78"/>
<source>ybshares Portfolio</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="211"/>
<source>Send shares to a ybshares address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="241"/>
<source>Show information about ybshares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="247"/>
<source>Modify configuration options for ybshares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="249"/>
<source>Show/Hide &ybshares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="250"/>
<source>Show or hide the ybshares window</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="265"/>
<source>&Export Bitcoin keys</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="266"/>
<source>Export the bitcoin keys associated with the ybshares addresses to bitcoin via RPC</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="410"/>
<source>ybshares client</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="437"/>
<source>ybshares-qt</source>
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
<location filename="../bitcoingui.cpp" line="501"/>
<source>%n active connection(s) to ybshares network</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="525"/>
<source>Synchronizing with network...</source>
<translation type="unfinished">Синхронизација са мрежом у току...</translation>
</message>
<message numerus="yes">
<location filename="../bitcoingui.cpp" line="527"/>
<source>~%n block(s) remaining</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="538"/>
<source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="550"/>
<source>Downloaded %1 blocks of transaction history.</source>
<translation type="unfinished">Преузето је %1 блокова историјата трансакција.</translation>
</message>
<message numerus="yes">
<location filename="../bitcoingui.cpp" line="565"/>
<source>%n second(s) ago</source>
<translation type="unfinished">
<numerusform>пре %n секунд</numerusform>
<numerusform>пре %n секунде</numerusform>
<numerusform>пре %n секунди</numerusform>
</translation>
</message>
<message numerus="yes">
<location filename="../bitcoingui.cpp" line="569"/>
<source>%n minute(s) ago</source>
<translation type="unfinished">
<numerusform>пре %n минут</numerusform>
<numerusform>пре %n минута</numerusform>
<numerusform>пре %n минута</numerusform>
</translation>
</message>
<message numerus="yes">
<location filename="../bitcoingui.cpp" line="573"/>
<source>%n hour(s) ago</source>
<translation type="unfinished">
<numerusform>пре %n сат</numerusform>
<numerusform>пре %n сата</numerusform>
<numerusform>пре %n сати</numerusform>
</translation>
</message>
<message numerus="yes">
<location filename="../bitcoingui.cpp" line="577"/>
<source>%n day(s) ago</source>
<translation type="unfinished">
<numerusform>пре %n дан</numerusform>
<numerusform>пре %n дана</numerusform>
<numerusform>пре %n дана</numerusform>
</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="583"/>
<source>Up to date</source>
<translation type="unfinished">Ажурно</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="588"/>
<source>Catching up...</source>
<translation type="unfinished">Ажурирање у току...</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="596"/>
<source>Last received block was generated %1.</source>
<translation type="unfinished">Последњи примљени блок је направљен %1.</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="652"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="657"/>
<source>Sending...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="684"/>
<source>Sent transaction</source>
<translation type="unfinished">Послана трансакција</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="685"/>
<source>Incoming transaction</source>
<translation type="unfinished">Придошла трансакција</translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="686"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="813"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked for block minting only</b></source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="813"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="823"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="880"/>
<source>Backup Portfolio</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="880"/>
<source>Portfolio Data (*.dat)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="883"/>
<source>Backup Failed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="883"/>
<source>There was an error trying to save the portfolio data to the new location.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="914"/>
<location filename="../bitcoingui.cpp" line="922"/>
<source>Bitcoin keys export</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="915"/>
<source>%1 key(s) were exported to Bitcoin.
%2 key(s) were either already known or invalid.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoingui.cpp" line="923"/>
<source>Error: %1</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="128"/>
<source>A fatal error occured. YBshares can no longer continue safely and will quit.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>DisplayOptionsPage</name>
<message>
<location filename="../optionsdialog.cpp" line="274"/>
<source>&Unit to show amounts in: </source>
<translation>&Јединица за приказивање износа: </translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="278"/>
<source>Choose the default subdivision unit to show in the interface, and when sending coins</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="285"/>
<source>&Display addresses in transaction list</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="286"/>
<source>Whether to show YBshares addresses in the transaction list</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>DistributeDivDialog</name>
<message>
<location filename="../forms/distributedivdialog.ui" line="29"/>
<source>Distribute Dividends</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/distributedivdialog.ui" line="53"/>
<source>Calculate Dividends</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/distributedivdialog.ui" line="60"/>
<source>Number of bitcoins to distribute to shareholders:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/distributedivdialog.ui" line="89"/>
<source>Record date (local time):</source>
<translation type="unfinished"></translation><|fim▁hole|> <translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/distributedivdialog.ui" line="128"/>
<source>Shareholders on record on this date will receive dividends.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/distributedivdialog.ui" line="157"/>
<source>Export List ...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../distributedivdialog.cpp" line="107"/>
<source>YBshares Address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../distributedivdialog.cpp" line="107"/>
<source>Shares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../distributedivdialog.cpp" line="107"/>
<source>Bitcoin Address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../distributedivdialog.cpp" line="107"/>
<source>Dividend</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../distributedivdialog.cpp" line="266"/>
<source>Save As ...</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="14"/>
<source>Edit Address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/editaddressdialog.ui" line="25"/>
<source>&Label</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/editaddressdialog.ui" line="35"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/editaddressdialog.ui" line="42"/>
<source>&Address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/editaddressdialog.ui" line="52"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="20"/>
<source>New receiving address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="24"/>
<source>New sending address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="27"/>
<source>Edit receiving address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="31"/>
<source>Edit sending address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="91"/>
<source>The entered address "%1" is already in the address book.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="96"/>
<source>The entered address "%1" is not a valid bitcoin address.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="101"/>
<source>Could not unlock wallet.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="106"/>
<source>New key generation failed.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>MainOptionsPage</name>
<message>
<location filename="../optionsdialog.cpp" line="176"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="177"/>
<source>Show only a tray icon after minimizing the window</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="185"/>
<source>Map port using &UPnP</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="216"/>
<source>Mandatory network transaction fee per kB transferred. Most transactions are 1 kB and incur a 0.01 share fee. Note: transfer size may increase depending on the number of input transactions totaled to fund the output.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="222"/>
<source>Additional network &fee</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="233"/>
<source>Detach databases at shutdown</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="234"/>
<source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="180"/>
<source>M&inimize on close</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="171"/>
<source>&Start YBshares on system startup</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="172"/>
<source>Automatically start YBshares after the computer is turned on</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="181"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="186"/>
<source>Automatically open the YBshares client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="189"/>
<source>&Connect through SOCKS4 proxy:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="190"/>
<source>Connect to the YBshares network through a SOCKS4 proxy (e.g. when connecting through Tor)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="195"/>
<source>Proxy &IP: </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="201"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="204"/>
<source>&Port: </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="210"/>
<source>Port of the proxy (e.g. 1234)</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>MessagePage</name>
<message>
<location filename="../forms/messagepage.ui" line="14"/>
<source>Message</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/messagepage.ui" line="38"/>
<source>The address to sign the message with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/messagepage.ui" line="48"/>
<source>Choose adress from address book</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/messagepage.ui" line="58"/>
<source>Alt+A</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/messagepage.ui" line="71"/>
<source>Paste address from clipboard</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/messagepage.ui" line="81"/>
<source>Alt+P</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/messagepage.ui" line="93"/>
<source>Enter the message you want to sign here</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/messagepage.ui" line="105"/>
<source>Click "Sign Message" to get signature</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/messagepage.ui" line="131"/>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/messagepage.ui" line="120"/>
<source>&Sign Message</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/messagepage.ui" line="20"/>
<source>You can sign messages with your YBshares addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/messagepage.ui" line="117"/>
<source>Sign a message to prove you own this YBshares address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/messagepage.ui" line="134"/>
<source>&Copy to Clipboard</source>
<translation>Ис&копирај на клипборд</translation>
</message>
<message>
<location filename="../messagepage.cpp" line="74"/>
<location filename="../messagepage.cpp" line="89"/>
<location filename="../messagepage.cpp" line="101"/>
<source>Error signing</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../messagepage.cpp" line="74"/>
<source>%1 is not a valid address.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../messagepage.cpp" line="89"/>
<source>Private key for %1 is not available.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../messagepage.cpp" line="101"/>
<source>Sign failed</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../optionsdialog.cpp" line="80"/>
<source>Main</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="85"/>
<source>Display</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="105"/>
<source>Options</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="14"/>
<source>Form</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="40"/>
<source>Balance:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="54"/>
<source>Number of transactions:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="61"/>
<source>0</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="68"/>
<source>Unconfirmed:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="82"/>
<source>Stake:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="102"/>
<source>Wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/overviewpage.ui" line="138"/>
<source><b>Recent transactions</b></source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="104"/>
<source>Your current balance</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="109"/>
<source>Your current stake</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="114"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="117"/>
<source>Total number of transactions in wallet</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="14"/>
<source>Dialog</source>
<translation>Дијалог</translation>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="32"/>
<source>QR Code</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="55"/>
<source>Request Payment</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="70"/>
<source>Amount:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="105"/>
<source>shares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="121"/>
<source>Label:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="144"/>
<source>Message:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/qrcodedialog.ui" line="186"/>
<source>&Save As...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="46"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="64"/>
        <source>Resulting URI too long; try to reduce the text for the label / message.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="121"/>
<source>Save Image...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="121"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="14"/>
<source>ybshares Debugging Window</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="24"/>
<source>Information</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="33"/>
<source>Client name</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="40"/>
<location filename="../forms/rpcconsole.ui" line="60"/>
<location filename="../forms/rpcconsole.ui" line="106"/>
<location filename="../forms/rpcconsole.ui" line="156"/>
<location filename="../forms/rpcconsole.ui" line="176"/>
<location filename="../forms/rpcconsole.ui" line="196"/>
<location filename="../forms/rpcconsole.ui" line="229"/>
<location filename="../rpcconsole.cpp" line="338"/>
<source>N/A</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="53"/>
<source>Client version</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="79"/>
<source>Version</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="92"/>
<source>Network</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="99"/>
<source>Number of connections</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="119"/>
<source>On testnet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="142"/>
<source>Block chain</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="149"/>
<source>Current number of blocks</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="169"/>
<source>Estimated total blocks</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="189"/>
<source>Last block time</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="222"/>
<source>Build date</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="237"/>
<source>Console</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="270"/>
<source>></source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/rpcconsole.ui" line="286"/>
<source>Clear console</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="306"/>
<source>Welcome to the ybshares RPC console.<br>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.<br>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="14"/>
<location filename="../sendcoinsdialog.cpp" line="122"/>
<location filename="../sendcoinsdialog.cpp" line="127"/>
<location filename="../sendcoinsdialog.cpp" line="132"/>
<location filename="../sendcoinsdialog.cpp" line="137"/>
<location filename="../sendcoinsdialog.cpp" line="143"/>
<location filename="../sendcoinsdialog.cpp" line="148"/>
<location filename="../sendcoinsdialog.cpp" line="153"/>
<source>Send Shares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="64"/>
<source>Send to multiple recipients at once</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="67"/>
<source>&Add recipient...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="84"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="87"/>
<source>Clear all</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="106"/>
<source>Balance:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="113"/>
<source>123.456 BTC</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="144"/>
<source>Confirm the send action</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="147"/>
<source>&Send</source>
<translation>&Пошаљи</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="94"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="100"/>
<source>Are you sure you want to send %1?</source>
<translation>Да ли сте сигурни да желите да пошаљете %1?</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="100"/>
<source> and </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="123"/>
        <source>The recipient address is not valid, please recheck.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="99"/>
<source>Confirm send shares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="128"/>
<source>The amount to pay must be at least one cent (0.01).</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="133"/>
<source>Amount exceeds your balance</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="138"/>
<source>Total exceeds your balance when the %1 transaction fee is included</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="144"/>
        <source>Duplicate address found; you can only send to each address once per send operation</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="149"/>
<source>Error: Transaction creation failed </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="154"/>
<source>Error: The transaction was rejected. This might happen if some of the shares in your portfolio were already spent, such as if you used a copy of wallet.dat and shares were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="14"/>
<source>Form</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="29"/>
<source>A&mount:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="66"/>
<location filename="../sendcoinsentry.cpp" line="26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="75"/>
<source>&Label:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="42"/>
<source>Transfer &to:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="93"/>
<source>The address to send the payment to</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="103"/>
<source>Choose address from address book</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="113"/>
<source>Alt+A</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="120"/>
<source>Paste address from clipboard</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="130"/>
<source>Alt+P</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/sendcoinsentry.ui" line="137"/>
<source>Remove this recipient</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="25"/>
<source>Enter a ybshares address</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="20"/>
<source>Open for %1 blocks</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="22"/>
<source>Open until %1</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="28"/>
<source>%1/offline?</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="30"/>
<source>%1/unconfirmed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="32"/>
<source>%1 confirmations</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="50"/>
<source><b>Status:</b> </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="55"/>
<source>, has not been successfully broadcast yet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="57"/>
<source>, broadcast through %1 node</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="59"/>
<source>, broadcast through %1 nodes</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="63"/>
<source><b>Date:</b> </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="70"/>
<source><b>Source:</b> Generated<br></source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="76"/>
<location filename="../transactiondesc.cpp" line="93"/>
<source><b>From:</b> </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="93"/>
<source>unknown</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="94"/>
<location filename="../transactiondesc.cpp" line="117"/>
<location filename="../transactiondesc.cpp" line="176"/>
<source><b>To:</b> </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="97"/>
<source> (yours, label: </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="99"/>
<source> (yours)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="134"/>
<location filename="../transactiondesc.cpp" line="148"/>
<location filename="../transactiondesc.cpp" line="193"/>
<location filename="../transactiondesc.cpp" line="210"/>
<source><b>Credit:</b> </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="136"/>
<source>(%1 matures in %2 more blocks)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="140"/>
<source>(not accepted)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="184"/>
<location filename="../transactiondesc.cpp" line="192"/>
<location filename="../transactiondesc.cpp" line="207"/>
<source><b>Debit:</b> </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="198"/>
<source><b>Transaction fee:</b> </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="214"/>
<source><b>Net amount:</b> </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="220"/>
<source>Message:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="222"/>
<source>Comment:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="224"/>
<source>Transaction ID:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="227"/>
<source>Generated coins must wait 520 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, it will change to "not accepted" and not be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiondesc.cpp" line="229"/>
<source>Staked coins must wait 520 blocks before they can return to balance and be spent. When you generated this proof-of-stake block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, it will change to "not accepted" and not be a valid stake. This may occasionally happen if another node generates a proof-of-stake block within a few seconds of yours.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="14"/>
<source>Transaction details</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/transactiondescdialog.ui" line="20"/>
<source>This pane shows a detailed description of the transaction</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="214"/>
<source>Date</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="214"/>
<source>Type</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="214"/>
<source>Address</source>
<translation>Адреса</translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="214"/>
<source>Amount</source>
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
<location filename="../transactiontablemodel.cpp" line="275"/>
<source>Open for %n block(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="278"/>
<source>Open until %1</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="281"/>
<source>Offline (%1 confirmations)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="284"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="287"/>
<source>Confirmed (%1 confirmations)</source>
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
<location filename="../transactiontablemodel.cpp" line="295"/>
<source>Mined balance will be available in %n more blocks</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="301"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="304"/>
<source>Generated but not accepted</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="347"/>
<source>Received with</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="349"/>
<source>Received from</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="352"/>
<source>Sent to</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="354"/>
<source>Payment to yourself</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="356"/>
<source>Mined</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="358"/>
<source>Mint by stake</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="396"/>
<source>(n/a)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="595"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="597"/>
<source>Date and time that the transaction was received.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="599"/>
<source>Type of transaction.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="601"/>
<source>Destination address of transaction.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactiontablemodel.cpp" line="603"/>
<source>Amount removed from or added to balance.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="55"/>
<location filename="../transactionview.cpp" line="71"/>
<source>All</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="56"/>
<source>Today</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="57"/>
<source>This week</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="58"/>
<source>This month</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="59"/>
<source>Last month</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="60"/>
<source>This year</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="61"/>
<source>Range...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="72"/>
<source>Received with</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="74"/>
<source>Sent to</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="76"/>
<source>To yourself</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="77"/>
<source>Mined</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="78"/>
<source>Mint by stake</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="79"/>
<source>Other</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="85"/>
<source>Enter address or label to search</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="91"/>
<source>Min amount</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="125"/>
<source>Copy address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="126"/>
<source>Copy label</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="127"/>
<source>Copy amount</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="128"/>
<source>Edit label</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="129"/>
<source>Show details...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="269"/>
<source>Export Transaction Data</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="270"/>
<source>Comma separated file (*.csv)</source>
<translation>Зарезом одвојене вредности (*.csv)</translation>
</message>
<message>
<location filename="../transactionview.cpp" line="278"/>
<source>Confirmed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="279"/>
<source>Date</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="280"/>
<source>Type</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="281"/>
<source>Label</source>
<translation>Етикета</translation>
</message>
<message>
<location filename="../transactionview.cpp" line="282"/>
<source>Address</source>
<translation>Адреса</translation>
</message>
<message>
<location filename="../transactionview.cpp" line="283"/>
<source>Amount</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="284"/>
<source>ID</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="288"/>
<source>Error exporting</source>
<translation>Грешка током извоза</translation>
</message>
<message>
<location filename="../transactionview.cpp" line="288"/>
<source>Could not write to file %1.</source>
<translation>Није могуће писати у фајл %1.</translation>
</message>
<message>
<location filename="../transactionview.cpp" line="383"/>
<source>Range:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../transactionview.cpp" line="391"/>
<source>to</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="147"/>
<source>Sending...</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="11"/>
<source>Warning: Disk space is low </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="13"/>
<source>Usage:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="15"/>
<source>List commands</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="16"/>
<source>Get help for a command</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="17"/>
<source>Options:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="8"/>
<source>Unable to bind to port %d on this computer. ybshares is probably already running.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="12"/>
<source>ybshares version</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="14"/>
<source>Send command to -server or ybsharesd</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="18"/>
<source>Specify configuration file (default: ybshares.conf)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="19"/>
<source>Specify pid file (default: ybsharesd.pid)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="20"/>
<source>Generate coins</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="21"/>
<source>Don't generate coins</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="22"/>
<source>Start minimized</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="23"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="24"/>
<source>Specify data directory</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="25"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="26"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="27"/>
<source>Specify connection timeout (in milliseconds)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="28"/>
<source>Connect through socks4 proxy</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="29"/>
<source>Allow DNS lookups for addnode and connect</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="30"/>
<source>Listen for connections on <port> (default: 9901 or testnet: 9903)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="31"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="32"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="33"/>
<source>Connect only to the specified node</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="34"/>
<source>Find peers using internet relay chat (default: 0)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="35"/>
<source>Accept connections from outside (default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="36"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="37"/>
<source>Find peers using DNS lookup (default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="38"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="39"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="42"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 10000)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="43"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 10000)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="44"/>
<source>Use Universal Plug and Play to map the listening port (default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="45"/>
<source>Use Universal Plug and Play to map the listening port (default: 0)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="46"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="47"/>
<source>Accept command line and JSON-RPC commands</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="48"/>
<source>Run in the background as a daemon and accept commands</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="49"/>
<source>Use the test network</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="50"/>
<source>Output extra debugging information</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="51"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="52"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="53"/>
<source>Send trace/debug info to debugger</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="54"/>
<source>Username for JSON-RPC connections</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="55"/>
<source>Password for JSON-RPC connections</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="56"/>
<source>Listen for JSON-RPC connections on <port> (default: 9902)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="57"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="58"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="59"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="62"/>
<source>Upgrade wallet to latest format</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="63"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="64"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="65"/>
<source>How many blocks to check at startup (default: 2500, 0 = all)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="66"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="67"/>
<source>
SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="70"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="71"/>
<source>Server certificate file (default: server.cert)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="72"/>
<source>Server private key (default: server.pem)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="73"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="76"/>
<source>This help message</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="77"/>
<source>Usage</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="78"/>
<source>Cannot obtain a lock on data directory %s. ybshares is probably already running.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="81"/>
<source>ybshares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="88"/>
<source>Error loading wallet.dat: Wallet requires newer version of ybshares</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="89"/>
<source>Wallet needed to be rewritten: restart ybshares to complete</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="103"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=ybsharesrpc
rpcpassword=%s
(you do not need to remember this password)
If the file does not exist, create it with owner-readable-only file permissions.
</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="119"/>
<source>Warning: Please check that your computer's date and time are correct. If your clock is wrong ybshares will not work properly.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="82"/>
<source>Loading addresses...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="83"/>
<source>Error loading addr.dat</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="84"/>
<source>Loading block index...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="85"/>
<source>Error loading blkindex.dat</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="86"/>
<source>Loading wallet...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="87"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="90"/>
<source>Error loading wallet.dat</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="91"/>
<source>Cannot downgrade wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="92"/>
<source>Cannot initialize keypool</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="93"/>
<source>Cannot write default address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="94"/>
<source>Rescanning...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="95"/>
<source>Done loading</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="96"/>
<source>Invalid -proxy address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="97"/>
<source>Invalid amount for -paytxfee=<amount></source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="98"/>
<source>Warning: -paytxfee is set very high. This is the transaction fee you will pay if you send a transaction.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="101"/>
<source>Error: CreateThread(StartNode) failed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="102"/>
<source>To use the %s option</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="112"/>
<source>Error</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="113"/>
        <source>An error occurred while setting up the RPC port %i for listening: %s</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="114"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="122"/>
<source>Error: Wallet locked, unable to create transaction </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="123"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="126"/>
<source>Error: Transaction creation failed </source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="127"/>
<source>Sending...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="128"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="132"/>
<source>Invalid amount</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoinstrings.cpp" line="133"/>
<source>Insufficient funds</source>
<translation type="unfinished"></translation>
</message>
</context>
</TS><|fim▁end|> | </message>
<message>
<location filename="../forms/distributedivdialog.ui" line="105"/>
<source>Get List of Shareholders</source> |
<|file_name|>tasks.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import
from celery import shared_task
import os.path
import logging
import csv
from django.core.exceptions import ObjectDoesNotExist
from .RandomAuthorSet import RandomAuthorSet<|fim▁hole|>
logger = logging.getLogger(__name__)
AUTHOR_SET_FILENAME = 'authors.csv'
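# Each evaluation directory gets this CSV: written by evaluation_create_author_set() and read back by evaluation_run().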
@shared_task
def evaluation_create_author_set(name, setsize, num_min_publications, database='mag'):
'''
Task to create a random author set.
:param name: Evaluation name
    :param setsize: Size of the set, i.e. the number of authors
:param num_min_publications: Minimum number of an author's publications
:param database: Database name
'''
    evaluation_dir = create_dir(os.path.join(config.EVALUATION_DIR, name))
author_set = RandomAuthorSet(database=database)
logger.info('{} -- create random author set of size {}'.format(name, setsize))
author_set.create(setsize=setsize, num_min_publications=num_min_publications)
logger.info('{} -- create random author set done'.format(name))
    filename_author_set = author_set.store(os.path.join(evaluation_dir, AUTHOR_SET_FILENAME))
    #for author_info in author_set.get():
# author_id = author_info['author_id']
# pass
return filename_author_set
@shared_task
def evaluation_run(name, strategies):
'''
Evaluation run task.
:param name: Evaluation name
:param strategies: List of strategies
'''
evaluation_dir = os.path.join(config.EVALUATION_DIR, name)
with open(os.path.join(evaluation_dir, AUTHOR_SET_FILENAME)) as author_set_file:
reader = csv.DictReader(author_set_file)
for row in reader:
if len(row) == 3:
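                # A complete row carries the three author-set CSV columns: author_id, num_citations, num_publications.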
try:
strategies_result = evaluation_citations(author_id=row['author_id'], evaluation_name=name, strategies=strategies)
for strategy_result in strategies_result:
__store_evaluation_result(path=evaluation_dir,
filename=strategy_result['strategy_name'],
row=[row['author_id'],
row['num_citations'],
row['num_publications'],
strategy_result['num_inspected_publications'],
strategy_result['num_citations']])
                except EmptyPublicationSetException:
                    continue
                except ObjectDoesNotExist as e:
                    raise e
return True
@shared_task
def evaluation_citations(author_id, strategies=None, evaluation_name='default'):
'''
    Task that evaluates the given citation-finding strategies for a single author.
:param author_id: Author ID
:param strategies: List of strategies
:param evaluation_name: Evaluation name
    :raise ObjectDoesNotExist:
:raise MultipleObjectsReturned:
:raise EmptyPublicationSetException:
'''
result = []
try:
citationfinder = CitationFinder(database='mag', evaluation=True)
author_id, length_publication_set = citationfinder.publication_set.set_by_author(id=int(author_id))
logger.info('{} author: set {} publications'.format(author_id, length_publication_set))
citationfinder.load_stored_citations()
for strategy in strategies:
strategy_name = citationfinder.run(strategy)
logger.info('{}: finished strategy "{}"'.format(author_id, strategy_name))
num_inspected_publications, num_citations = citationfinder.store_evaluation(path=create_dir(os.path.join(config.EVALUATION_DIR, evaluation_name, strategy_name)),
filename=author_id)
result.append({'strategy_name': strategy_name,
'num_inspected_publications': num_inspected_publications,
'num_citations': num_citations})
return result
    except ObjectDoesNotExist as e:
        raise e
    except EmptyPublicationSetException as e:
        raise e
def __store_evaluation_result(path, filename, row):
'''
Store evaluation result.
:param path: Path
:param filename: Name of the file
:param row: Row to append to the file
'''
filename = os.path.join(path, 'meta_{}.csv'.format(filename))
file_exists = os.path.isfile(filename)
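    # Check existence before opening in append mode so the CSV header is written exactly once across repeated appends.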
try:
with open(filename, 'a+') as csvfile:
writer = csv.writer(csvfile)
if not file_exists:
writer.writerow(['author_id', 'author_num_citations', 'author_num_publications', 'num_inspected_publications', 'num_citations'])
writer.writerow(row)
return filename
    except IOError as e:
raise e<|fim▁end|> | from ..CitationFinder import CitationFinder, EmptyPublicationSetException
from scholarly_citation_finder import config
from scholarly_citation_finder.lib.file import create_dir |
<|file_name|>entries.py<|end_file_name|><|fim▁begin|># Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Log entries within the Google Stackdriver Logging API."""
import collections
import json
import re
from google.protobuf.any_pb2 import Any
from google.protobuf.json_format import MessageToDict
from google.protobuf.json_format import Parse
from google.cloud.logging.resource import Resource
from google.cloud._helpers import _name_from_project_path
from google.cloud._helpers import _rfc3339_nanos_to_datetime
from google.cloud._helpers import _datetime_to_rfc3339
_GLOBAL_RESOURCE = Resource(type='global', labels={})
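# Default monitored resource for entries that do not specify one (see the 'resource' default in _LOG_ENTRY_FIELDS).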
_LOGGER_TEMPLATE = re.compile(r"""
    projects/             # static prefix
    (?P<project>[^/]+)    # project id: any run of non-slash characters
    /logs/                # static midfix
    (?P<name>[^/]+)       # logger name: any run of non-slash characters
""", re.VERBOSE)
def logger_name_from_path(path):
"""Validate a logger URI path and get the logger name.
:type path: str
:param path: URI path for a logger API request.
:rtype: str
:returns: Logger name parsed from ``path``.
    :raises: :class:`ValueError` if the ``path`` is ill-formed.
"""
return _name_from_project_path(path, None, _LOGGER_TEMPLATE)
def _int_or_none(value):
"""Helper: return an integer or ``None``."""
if value is not None:
value = int(value)
return value
_LOG_ENTRY_FIELDS = ( # (name, default)
('log_name', None),
('labels', None),
('insert_id', None),
('severity', None),
('http_request', None),
('timestamp', None),
('resource', _GLOBAL_RESOURCE),
('trace', None),
('span_id', None),
('trace_sampled', None),
('source_location', None),
('operation', None),
('logger', None),
('payload', None),
)
_LogEntryTuple = collections.namedtuple(
'LogEntry', (field for field, _ in _LOG_ENTRY_FIELDS))
_LogEntryTuple.__new__.__defaults__ = tuple(
default for _, default in _LOG_ENTRY_FIELDS)
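# Supplying a default for every field makes all LogEntry constructor arguments optional.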
_LOG_ENTRY_PARAM_DOCSTRING = """\
:type log_name: str
:param log_name: the name of the logger used to post the entry.
:type labels: dict
:param labels: (optional) mapping of labels for the entry
:type insert_id: text
:param insert_id: (optional) the ID used to identify an entry uniquely.
:type severity: str
:param severity: (optional) severity of event being logged.
:type http_request: dict
:param http_request: (optional) info about HTTP request associated with
the entry.
:type timestamp: :class:`datetime.datetime`
:param timestamp: (optional) timestamp for the entry
:type resource: :class:`~google.cloud.logging.resource.Resource`
:param resource: (Optional) Monitored resource of the entry
:type trace: str
:param trace: (optional) traceid to apply to the entry.
:type span_id: str
:param span_id: (optional) span_id within the trace for the log entry.
Specify the trace parameter if span_id is set.
:type trace_sampled: bool
:param trace_sampled: (optional) the sampling decision of the trace
associated with the log entry.
:type source_location: dict
:param source_location: (optional) location in source code from which
the entry was emitted.
:type operation: dict
:param operation: (optional) additional information about a potentially
long-running operation associated with the log entry.
:type logger: :class:`google.cloud.logging.logger.Logger`
:param logger: the logger used to write the entry.
"""
_LOG_ENTRY_SEE_ALSO_DOCSTRING = """\
See:
https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
"""
class LogEntry(_LogEntryTuple):
__doc__ = """
Log entry.
""" + _LOG_ENTRY_PARAM_DOCSTRING + _LOG_ENTRY_SEE_ALSO_DOCSTRING
received_timestamp = None
@classmethod
def _extract_payload(cls, resource):
"""Helper for :meth:`from_api_repr`"""
return None
@classmethod
def from_api_repr(cls, resource, client, loggers=None):
"""Factory: construct an entry given its API representation
:type resource: dict
:param resource: text entry resource representation returned from
the API
:type client: :class:`google.cloud.logging.client.Client`
:param client: Client which holds credentials and project
configuration.
:type loggers: dict
:param loggers:
(Optional) A mapping of logger fullnames -> loggers. If not
passed, the entry will have a newly-created logger.
:rtype: :class:`google.cloud.logging.entries.LogEntry`
:returns: Log entry parsed from ``resource``.
"""
if loggers is None:
loggers = {}
logger_fullname = resource['logName']
logger = loggers.get(logger_fullname)
if logger is None:
logger_name = logger_name_from_path(logger_fullname)
logger = loggers[logger_fullname] = client.logger(logger_name)
payload = cls._extract_payload(resource)
insert_id = resource.get('insertId')
timestamp = resource.get('timestamp')
if timestamp is not None:
timestamp = _rfc3339_nanos_to_datetime(timestamp)
labels = resource.get('labels')
severity = resource.get('severity')
http_request = resource.get('httpRequest')
trace = resource.get('trace')
span_id = resource.get('spanId')
trace_sampled = resource.get('traceSampled')
source_location = resource.get('sourceLocation')
if source_location is not None:
line = source_location.pop('line', None)
source_location['line'] = _int_or_none(line)
operation = resource.get('operation')
monitored_resource_dict = resource.get('resource')
monitored_resource = None
if monitored_resource_dict is not None:
monitored_resource = Resource._from_dict(monitored_resource_dict)
inst = cls(
log_name=logger_fullname,
insert_id=insert_id,
timestamp=timestamp,
labels=labels,
severity=severity,
http_request=http_request,<|fim▁hole|> trace=trace,
span_id=span_id,
trace_sampled=trace_sampled,
source_location=source_location,
operation=operation,
logger=logger,
payload=payload,
)
received = resource.get('receiveTimestamp')
if received is not None:
inst.received_timestamp = _rfc3339_nanos_to_datetime(received)
return inst
def to_api_repr(self):
"""API repr (JSON format) for entry.
"""
info = {}
if self.log_name is not None:
info['logName'] = self.log_name
if self.resource is not None:
info['resource'] = self.resource._to_dict()
if self.labels is not None:
info['labels'] = self.labels
if self.insert_id is not None:
info['insertId'] = self.insert_id
if self.severity is not None:
info['severity'] = self.severity
if self.http_request is not None:
info['httpRequest'] = self.http_request
if self.timestamp is not None:
info['timestamp'] = _datetime_to_rfc3339(self.timestamp)
if self.trace is not None:
info['trace'] = self.trace
if self.span_id is not None:
info['spanId'] = self.span_id
if self.trace_sampled is not None:
info['traceSampled'] = self.trace_sampled
if self.source_location is not None:
source_location = self.source_location.copy()
source_location['line'] = str(source_location.pop('line', 0))
info['sourceLocation'] = source_location
if self.operation is not None:
info['operation'] = self.operation
return info
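# Illustrative to_api_repr() output for a minimal entry with only log_name and
# severity set (plus the default global resource); keys beyond these are omitted:
#   {'logName': 'projects/p/logs/l',
#    'resource': {'type': 'global', 'labels': {}},
#    'severity': 'INFO'}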
class TextEntry(LogEntry):
__doc__ = """
Log entry with text payload.
""" + _LOG_ENTRY_PARAM_DOCSTRING + """
:type payload: str | unicode
:param payload: payload for the log entry.
""" + _LOG_ENTRY_SEE_ALSO_DOCSTRING
@classmethod
def _extract_payload(cls, resource):
"""Helper for :meth:`from_api_repr`"""
return resource['textPayload']
def to_api_repr(self):
"""API repr (JSON format) for entry.
"""
info = super(TextEntry, self).to_api_repr()
info['textPayload'] = self.payload
return info
class StructEntry(LogEntry):
__doc__ = """
Log entry with JSON payload.
""" + _LOG_ENTRY_PARAM_DOCSTRING + """
:type payload: dict
:param payload: payload for the log entry.
""" + _LOG_ENTRY_SEE_ALSO_DOCSTRING
@classmethod
def _extract_payload(cls, resource):
"""Helper for :meth:`from_api_repr`"""
return resource['jsonPayload']
def to_api_repr(self):
"""API repr (JSON format) for entry.
"""
info = super(StructEntry, self).to_api_repr()
info['jsonPayload'] = self.payload
return info
class ProtobufEntry(LogEntry):
__doc__ = """
Log entry with protobuf message payload.
""" + _LOG_ENTRY_PARAM_DOCSTRING + """
:type payload: protobuf message
:param payload: payload for the log entry.
""" + _LOG_ENTRY_SEE_ALSO_DOCSTRING
@classmethod
def _extract_payload(cls, resource):
"""Helper for :meth:`from_api_repr`"""
return resource['protoPayload']
@property
def payload_pb(self):
if isinstance(self.payload, Any):
return self.payload
@property
def payload_json(self):
if not isinstance(self.payload, Any):
return self.payload
def to_api_repr(self):
"""API repr (JSON format) for entry.
"""
info = super(ProtobufEntry, self).to_api_repr()
info['protoPayload'] = MessageToDict(self.payload)
return info
def parse_message(self, message):
"""Parse payload into a protobuf message.
Mutates the passed-in ``message`` in place.
:type message: Protobuf message
:param message: the message to be logged
"""
# NOTE: This assumes that ``payload`` is already a deserialized
# ``Any`` field and ``message`` has come from an imported
# ``pb2`` module with the relevant protobuf message type.
Parse(json.dumps(self.payload), message)<|fim▁end|> | resource=monitored_resource, |
<|file_name|>combineTarget.js<|end_file_name|><|fim▁begin|>var stream = require('stream');
var util = require('util');
function combineTarget(){
stream.Writable.call(this);
this.file = '';
}
util.inherits(combineTarget,stream.Writable);
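// Usage sketch (stream names are illustrative): collect a readable stream's
// output in memory, then read it back from `target.file`:
//   var target = new combineTarget();
//   source.pipe(target).on('finish', function () { console.log(target.file); });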
combineTarget.prototype._write = function(chunk,encoding,callback){
this.file += chunk.toString();<|fim▁hole|>
module.exports = combineTarget;<|fim▁end|> | callback();
}; |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _toast = require('./toast.container');
Object.defineProperty(exports, 'default', {
enumerable: true,
get: function get() {
return _interopRequireDefault(_toast).default;
}
});
var _redux = require('./redux');
Object.keys(_redux).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function get() {
return _redux[key];
}
});
});<|fim▁hole|> get: function get() {
return _interopRequireDefault(_redux).default;
}
});
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }<|fim▁end|> | Object.defineProperty(exports, 'reducer', {
enumerable: true, |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>#### FORMS
from flask import current_app
from flask.ext.wtf import Form
from flask.ext.security.forms import RegisterForm, LoginForm, RegisterFormMixin
from wtforms import (SelectField, StringField, SubmitField, TextAreaField,
    HiddenField, FileField, RadioField, IntegerField, ValidationError,
PasswordField)
from wtforms.fields.html5 import URLField
from wtforms.validators import Length, DataRequired, AnyOf, Regexp, NumberRange, Optional, Email, URL
from flask.ext.wtf.file import FileAllowed, FileField
from werkzeug.local import LocalProxy
from zxcvbn import password_strength
_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
def good_enough_password(form, field):
if password_strength(field.data)['score'] < 4:
msg = 'Get a better password'
raise ValidationError(msg)
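# Note on the check above: zxcvbn scores passwords from 0 to 4, so only the top
# score of 4 passes; e.g. a short dictionary word scores low and is rejected.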
def unique_user_username(form, field):
if _datastore.find_user(username=field.data) is not None:
msg = '{0} is already associated with an account.'.format(field.data)
raise ValidationError(msg)
def unique_user_email(form, field):
if _datastore.get_user(field.data) is not None:
        msg = '{} is already associated with an account'.format(field.data)
raise ValidationError(msg)
class ExtendedRegisterForm(RegisterForm):
username=StringField('Username', [DataRequired(),
Regexp(r'^\w+$', message="Only alphanumeric characters"),
Length(min=4, max=20),
unique_user_username])
class RegisterForm(Form, RegisterFormMixin):
email=StringField('Email', [DataRequired(), Email(), unique_user_email])
username=StringField('Username', [DataRequired(),
Regexp(r'^\w+$', message="Only alphanumeric characters"),
Length(min=4, max=20),
unique_user_username])
password=PasswordField('Password', [DataRequired(), good_enough_password])
class ChangePasswordForm(Form):
password=PasswordField('New password', [DataRequired(),
good_enough_password])
submit=SubmitField('Update')
class ExtendedLoginForm(LoginForm):
email=StringField('Login', [DataRequired()])
class OpenIssueForm(Form):
severity=SelectField('Severity', choices=[('Critical', 'Critical'),
('Medium', 'Medium'),
('Low', 'Low'),
('Future', 'Future')])
type=SelectField('Type', choices=[('Plumbing', 'Plumbing'),
('Electrical', 'Electrical'),
('Heating/Air Conditioning', 'Heating/Air Conditioning'),
('Cleaning', 'Cleaning'),
('Other', 'Other')])
photos=FileField('Photo', validators=[FileAllowed(['jpg', 'jpeg', 'png'], 'Images only!')])
description=TextAreaField('Description', [DataRequired()])
submit=SubmitField('Open')
class CloseIssueForm(Form):
reason=TextAreaField('Reason', [DataRequired()])
submit=SubmitField('Close')
class AddLandlordForm(Form):
location=SelectField('Location', coerce=int)
submit=SubmitField('Add')
class EndLandlordForm(Form):
end=HiddenField(default='True', validators=[AnyOf('True')])
submit=SubmitField('End')
class ConfirmTenantForm(Form):
confirm=SubmitField('Confirm', default='True')
disallow=SubmitField('Disallow', default='False')
<|fim▁hole|> submit=SubmitField('Invite')
class CommentForm(Form):
comment=TextAreaField('Comment', [DataRequired()])
submit=SubmitField('Add Comment')
class AddPropertyForm(Form):
unit=IntegerField('Unit:', [Optional(), NumberRange(min=1)])
address=StringField('Address:', [DataRequired()])
city=StringField('City:', [DataRequired()])
state=StringField('State:', [DataRequired()])
description=TextAreaField('Description:', [DataRequired()])
submit=SubmitField('Add Property')
class AddProviderForm(Form):
name=StringField('Name:', [DataRequired()])
area=SelectField('Area:', choices=[('Plumbing', 'Plumbing'),
('Electrical', 'Electrical'),
('Heating/Air Conditioning', 'Heating/Air Conditioning'),
('Cleaning', 'Cleaning'),
('Other', 'Other')])
email=StringField('Email:', [Email(), DataRequired()])
phone=StringField('Phone #:', [Optional(), Length(min=10)])
website=StringField('Website:', [Optional(), URL()])
submit=SubmitField('Add Provider')
class SelectProviderForm(Form):
provider=SelectField('Provider:', choices=[])
submit=SubmitField('Select Provider')
class ConnectProviderForm(Form):
action=SubmitField('Connect')
class ModifyPropertyForm(Form):
description=TextAreaField('Description:', [DataRequired()])
submit=SubmitField('Modify Property')
class AddPhoneNumber(Form):
phone=StringField('Phone #:', [DataRequired(), Length(min=10)])
country=SelectField('Country', choices=[('1', 'US'), ('02', 'UK')])
submit=SubmitField('Update number')
class ChangeNotifyForm(Form):
method=SelectField('Method', choices=[('Email', 'Email'),
('None', 'None')])
submit=SubmitField('Confirm')
class ResendNotifyForm(Form):
resend=SubmitField('Resend email', default='True')
class ImportYelpURLForm(Form):
url=URLField('Yelp URL')
submit=SubmitField('Import')
class SelectYelpProviderForm(Form):
id_=HiddenField()
submit=SubmitField('Save')
class ConfirmYelpChoiceForm(Form):
provider=HiddenField()
confirm=SubmitField('Confirm')
__all__=['AddLandlordForm', 'AddPhoneNumber', 'AddPropertyForm',
'AddProviderForm', 'AddTenantForm', 'ChangeNotifyForm',
'CloseIssueForm', 'CommentForm', 'ConfirmTenantForm',
'ConnectProviderForm', 'EndLandlordForm', 'ExtendedLoginForm',
'ExtendedRegisterForm', 'ModifyPropertyForm', 'OpenIssueForm',
'ResendNotifyForm', 'SelectProviderForm']<|fim▁end|> | class AddTenantForm(Form):
user=StringField('User', [DataRequired()])
apt=SelectField('Property', coerce=int) |
<|file_name|>Occurrence.py<|end_file_name|><|fim▁begin|># -*- coding: utf8 -*-
###########################################################################
# This is the package latexparser
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# copyright (c) Laurent Claessens, 2010,2012-2016
# email: [email protected]
import codecs
from latexparser.InputPaths import InputPaths
class Occurrence(object):
"""
self.as_written : the code as it appears in the file, including \MyMacro, including the backslash.
self.position : the position at which this occurrence appears.
Example, if we look at the LatexCode
Hello word, \MyMacro{first}
and then \MyMacro{second}
the first occurrence of \MyMacro has position=12
"""
def __init__(self,name,arguments,as_written="",position=0):
self.arguments = arguments
self.number_of_arguments = len(arguments)
self.name = name
self.as_written = as_written
self.arguments_list = arguments
self.position = position
def configuration(self):
r"""
Return the way the arguments are separated in as_written.
Example, if we have
\MyMacro<space>{A}<tab>{B}
{C},
we return the list
["<space>","tab","\n"]
The following has to be true:
self.as_written == self.name+self.configuration()[0]+self.arguments_list[0]+etc.
"""
l=[]
a = self.as_written.split(self.name)[1]
for arg in self.arguments_list:
split = a.split("{"+arg+"}")
separator=split[0]
try:
a=split[1]
except IndexError:
print(self.as_written)
raise
l.append(separator)
return l
def change_argument(self,num,func):
r"""
        Apply the function <func> to the <num>-th argument of self (arguments are numbered from 1). Then return a new object.
"""
n=num-1 # Internally, the arguments are numbered from 0.
arguments=self.arguments_list
configuration=self.configuration()
arguments[n]=func(arguments[n])
new_text=self.name
if len(arguments) != len(configuration):
print("Error : length of the configuration list has to be the same as the number of arguments")
raise ValueError
for i in range(len(arguments)):
new_text=new_text+configuration[i]+"{"+arguments[i]+"}"<|fim▁hole|> return Occurrence(self.name,arguments,new_text,self.position)
def analyse(self):
return globals()["Occurrence_"+self.name[1:]](self) # We have to remove the initial "\" in the name of the macro.
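    # Dispatch sketch: an occurrence named '\cite' is routed to the
    # Occurrence_cite class defined below (the leading backslash is stripped).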
def __getitem__(self,a):
return self.arguments[a]
def __str__(self):
return self.as_written
class Occurrence_newlabel(object):
r"""
takes an occurrence of \newlabel and creates an object which contains the information.
In the self.section_name we remove "\relax" from the string.
"""
def __init__(self,occurrence):
self.occurrence = occurrence
self.arguments = self.occurrence.arguments
if len(self.arguments) == 0 :
self.name = "Non interesting; probably the definition"
self.listoche = [None,None,None,None,None]
self.value,self.page,self.section_name,self.fourth,self.fifth=(None,None,None,None,None)
else :
self.name = self.arguments[0][0]
self.listoche = [a[0] for a in SearchArguments(self.arguments[1][0],5)[0]]
self.value = self.listoche[0]
self.page = self.listoche[1]
self.section_name = self.listoche[2].replace(r"\relax","")
self.fourth = self.listoche[3] # I don't know the role of the fourth argument of \newlabel
self.fifth = self.listoche[4] # I don't know the role of the fifth argument of \newlabel
class Occurrence_addInputPath(object):
def __init__(self,Occurrence):
self.directory=Occurrence[0]
class Occurrence_cite(object):
def __init__(self,occurrence):
self.label = occurrence[0]
def entry(self,codeBibtex):
return codeBibtex[self.label]
class Occurrence_newcommand(object):
def __init__(self,occurrence):
self.occurrence = occurrence
self.number_of_arguments = 0
if self.occurrence[1][1] == "[]":
self.number_of_arguments = self.occurrence[1][0]
self.name = self.occurrence[0][0]#[0]
self.definition = self.occurrence[-1][0]
class Occurrence_label(object):
def __init__(self,occurrence):
self.occurrence=occurrence
self.label=self.occurrence.arguments[0]
class Occurrence_ref(object):
def __init__(self,occurrence):
self.occurrence=occurrence
self.label=self.occurrence.arguments[0]
class Occurrence_eqref(object):
def __init__(self,occurrence):
self.occurrence=occurrence
self.label=self.occurrence.arguments[0]
class Occurrence_input(Occurrence):
def __init__(self,occurrence):
Occurrence.__init__(self,occurrence.name,occurrence.arguments,as_written=occurrence.as_written,position=occurrence.position)
self.occurrence = occurrence
self.filename = self.occurrence[0]
self.input_paths=InputPaths()
self._file_content=None # Make file_content "lazy"
def file_content(self,input_paths=None):
r"""
return the content of the file corresponding to this occurrence of
\input.
This is not recursive.
- 'input_path' is the list of paths in which we can search for files.
See the macro `\addInputPath` in the file
https://github.com/LaurentClaessens/mazhe/blob/master/configuration.tex
"""
import os.path
# Memoize
if self._file_content is not None :
return self._file_content
# At least, we are searching in the current directory :
if input_paths is None :
raise # Just to know who should do something like that
# Creating the filename
filename=self.filename
strict_filename = filename
if "." not in filename:
strict_filename=filename+".tex"
# Searching for the correct file in the subdirectories
fn=input_paths.get_file(strict_filename)
try:
# Without [:-1] I got an artificial empty line at the end.
text = "".join( codecs.open(fn,"r",encoding="utf8") )[:-1]
except IOError :
print("Warning : file %s not found."%strict_filename)
raise
self._file_content=text
return self._file_content<|fim▁end|> | |
<|file_name|>new-box-syntax.rs<|end_file_name|><|fim▁begin|>/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */
// Tests that the new `box` syntax works with unique pointers and GC pointers.
use std::gc::Gc;
use std::owned::HEAP;
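// Note: the `box(HEAP)`/`box(GC)` placement syntax and the `~int`/`Gc<int>`
// pointer types are pre-1.0 Rust; this test only builds on a historical toolchain.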
struct Structure {
x: int,
y: int,
}
pub fn main() {
let x: ~int = box(HEAP) 2;
let y: ~int = box 2;
let z: Gc<int> = box(GC) 2;
let a: Gc<Structure> = box(GC) Structure {
x: 10,
y: 20,
};<|fim▁hole|><|fim▁end|> | let b: ~int = box()(1 + 2);
let c = box()(3 + 4);
let d = box(GC)(5 + 6);
} |
<|file_name|>PMDGWrapper.cpp<|end_file_name|><|fim▁begin|>#include "PMDGWrapper.h"
static enum DATA_REQUEST_ID {
DATA_REQUEST,
CONTROL_REQUEST,
AIR_PATH_REQUEST
};
static enum EVENT_ID {
EVENT_SIM_START, // used to track the loaded aircraft
};
HANDLE hSimConnect = NULL;
byte bQuit=false;
PMDG_NGX_Data sPmdgData;
int iPmdgUpdated=0;
PMDG_NGX_Control sControl;
void(__stdcall * airCraftLoadedCallback)(char *airPath);
HANDLE PollForDataThread;
bool InitSimConnect(bool restart)
{
int hr=SimConnect_Open(&hSimConnect, "PMDGWrapper", NULL, 0, 0, 0);
if (!SUCCEEDED(hr))
{
hSimConnect=NULL;
return false;
}
// 1) Set up data connection
SetupDataConnection();
// 2) Set up control connection
SetupControlConnection();
// 3) Request current aircraft .air file path
//hr = SimConnect_RequestSystemState(hSimConnect, AIR_PATH_REQUEST, "AircraftLoaded");
// also request notifications on sim start and aircraft change
if (!restart)
hr = SimConnect_SubscribeToSystemEvent(hSimConnect, EVENT_SIM_START, "SimStart");
// process messages
if (PollForDataThread==NULL)
PollForDataThread = CreateThread(NULL, 0, PollForData, 0, 0, NULL);
// get the first data
Sleep(50);
return true;
}
void SetupDataConnection()
{
// Associate an ID with the PMDG data area name
int hr = SimConnect_MapClientDataNameToID (hSimConnect, PMDG_NGX_DATA_NAME, PMDG_NGX_DATA_ID);
// Define the data area structure - this is a required step
int size=sizeof(PMDG_NGX_Data);
hr = SimConnect_AddToClientDataDefinition (hSimConnect, PMDG_NGX_DATA_DEFINITION, 0, sizeof(PMDG_NGX_Data), 0, 0);
// Sign up for notification of data change.
// SIMCONNECT_CLIENT_DATA_REQUEST_FLAG_CHANGED flag asks for the data to be sent only when some of the data is changed.
hr = SimConnect_RequestClientData(hSimConnect, PMDG_NGX_DATA_ID, DATA_REQUEST, PMDG_NGX_DATA_DEFINITION,
SIMCONNECT_CLIENT_DATA_PERIOD_ON_SET, SIMCONNECT_CLIENT_DATA_REQUEST_FLAG_CHANGED, 0, 0, 0);
}
void SetupControlConnection()
{
// First method: control data area
sControl.Event = 0;
sControl.Parameter = 0;
// Associate an ID with the PMDG control area name
int hr = SimConnect_MapClientDataNameToID (hSimConnect, PMDG_NGX_CONTROL_NAME, PMDG_NGX_CONTROL_ID);
// Define the control area structure - this is a required step
hr = SimConnect_AddToClientDataDefinition (hSimConnect, PMDG_NGX_CONTROL_DEFINITION, 0, sizeof(PMDG_NGX_Control), 0, 0);
// Sign up for notification of control change.
hr = SimConnect_RequestClientData(hSimConnect, PMDG_NGX_CONTROL_ID, CONTROL_REQUEST, PMDG_NGX_CONTROL_DEFINITION,
SIMCONNECT_CLIENT_DATA_PERIOD_ON_SET, SIMCONNECT_CLIENT_DATA_REQUEST_FLAG_CHANGED, 0, 0, 0);
}
void CloseSimConnect()
{
SimConnect_Close(hSimConnect);
hSimConnect=NULL;
}
DWORD WINAPI PollForData(LPVOID lpParam)
{
while( bQuit == 0 )
{
// receive and process the NGX data
SimConnect_CallDispatch(hSimConnect, MyDispatchProc, NULL);
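        // NOTE: the reconnect block below is dead code (its condition is
        // literally false); swap in a real connection-health check to enable it.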
if (false)
{
CloseSimConnect();
InitSimConnect(true);
}
Sleep(100);
}
return 0;
}
void CALLBACK MyDispatchProc(SIMCONNECT_RECV* pData, DWORD cbData, void *pContext)
{
switch(pData->dwID)
{
case SIMCONNECT_RECV_ID_EXCEPTION:
break;
case SIMCONNECT_RECV_ID_OPEN:
break;
case SIMCONNECT_RECV_ID_CLIENT_DATA: // Receive and process the NGX data block
{
SIMCONNECT_RECV_CLIENT_DATA *pObjData = (SIMCONNECT_RECV_CLIENT_DATA*)pData;
switch(pObjData->dwRequestID)
{
case DATA_REQUEST:
{
if (iPmdgUpdated%100 == 0)
printf("Receive and process the NGX data block count=%d\n", iPmdgUpdated);
PMDG_NGX_Data *pS = (PMDG_NGX_Data*)&pObjData->dwData;
sPmdgData = *pS;
iPmdgUpdated++;
break;
}
case CONTROL_REQUEST:
{
printf("Receive and process the NGX control block event=%d\n", ((PMDG_NGX_Control*)&pObjData->dwData)->Event);
// keep the present state of Control area to know if the server had received and reset the command
PMDG_NGX_Control *pS = (PMDG_NGX_Control*)&pObjData->dwData;
//printf("Received control: %d %d\n", pS->Event, pS->Parameter);
sControl = *pS;
break;
}
}
break;
}
case SIMCONNECT_RECV_ID_EVENT:
{
SIMCONNECT_RECV_EVENT *evt = (SIMCONNECT_RECV_EVENT*)pData;
switch (evt->uEventID)
{
case EVENT_SIM_START: // Track aircraft changes
{
HRESULT hr = SimConnect_RequestSystemState(hSimConnect, AIR_PATH_REQUEST, "AircraftLoaded");
break;
}
}
break;
}
case SIMCONNECT_RECV_ID_SYSTEM_STATE: // Track aircraft changes
{
SIMCONNECT_RECV_SYSTEM_STATE *evt = (SIMCONNECT_RECV_SYSTEM_STATE*)pData;
if (evt->dwRequestID == AIR_PATH_REQUEST)
{
if (airCraftLoadedCallback!=NULL)
airCraftLoadedCallback(evt->szString);
if (strstr(evt->szString, "PMDG 737") != NULL)
{
//SetupDataConnection();
//SetupControlConnection();
}
}
break;
}
case SIMCONNECT_RECV_ID_QUIT:
{
break;
}
default:
printf("Received:%d\n",pData->dwID);
break;
}
fflush(stdout);
}
__declspec(dllexport) void SetACLoadedCallback(void(__stdcall * callback)(char *airPath))
{
airCraftLoadedCallback=callback;
HRESULT hr = SimConnect_RequestSystemState(hSimConnect, AIR_PATH_REQUEST, "AircraftLoaded");
}
__declspec(dllexport) int GetPMDGDataStructureLength()
{
int size=sizeof(PMDG_NGX_Data);
return size;
}
<|fim▁hole|>__declspec(dllexport) void* GetPMDGData()
{
return &sPmdgData;
}
void RaiseMPDGEvent(char *eventName, int parameter)
{
int eventID = offsetof(PMDG_NGX_Data, COMM_ServiceInterphoneSw);
}
__declspec(dllexport) int RaisePMDGEvent(int pmdgEvent, int parameter)
{
if (hSimConnect==NULL)
return -1;
// wait for the previous command to finish
while (sControl.Event != 0)
Sleep(2);
sControl.Event = pmdgEvent;
sControl.Parameter = parameter;
int hr=SimConnect_SetClientData (hSimConnect, PMDG_NGX_CONTROL_ID, PMDG_NGX_CONTROL_DEFINITION, 0, 0, sizeof(PMDG_NGX_Control), &sControl);
return hr>0 ? 1 : 0;
}<|fim▁end|> | |
<|file_name|>test_swver.py<|end_file_name|><|fim▁begin|>__author__ = "Nitin Kumar, Rick Sherman"
__credits__ = "Jeremy Schulman"
import unittest
from nose.plugins.attrib import attr
from mock import patch
import os
from jnpr.junos import Device
from jnpr.junos.facts.swver import facts_software_version as software_version, version_info
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
@attr('unit')
class TestVersionInfo(unittest.TestCase):
def test_version_info_after_type_len_else(self):
self.assertIsNone(version_info('12.1X46-D10').build)
def test_version_info_constructor_else_exception(self):
self.assertEqual(version_info('11.4R7').build, '7')
def test_version_info_repr(self):
self.assertEqual(repr(version_info('11.4R7.5')),
'junos.version_info(major=(11, 4), '
'type=R, minor=7, build=5)')
def test_version_info_lt(self):
self.assertLess(version_info('13.3-20131120'), (14, 1))
def test_version_info_lt_eq(self):<|fim▁hole|> def test_version_info_gt(self):
self.assertGreater(version_info('13.3-20131120'), (12, 1))
def test_version_info_gt_eq(self):
self.assertGreaterEqual(version_info('13.3-20131120'), (12, 1))
def test_version_info_eq(self):
self.assertEqual(version_info('13.3-20131120'), (13, 3))
def test_version_info_not_eq(self):
self.assertNotEqual(version_info('13.3-20131120'), (15, 3))
@attr('unit')
class TestSrxCluster(unittest.TestCase):
@patch('ncclient.manager.connect')
def setUp(self, mock_connect):
mock_connect.side_effect = self._mock_manager
self.dev = Device(host='1.1.1.1', user='rick', password='password123',
gather_facts=False)
self.dev.open()
self.facts = {}
@patch('jnpr.junos.Device.execute')
def test_swver(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.facts['master'] = 'RE0'
software_version(self.dev, self.facts)
self.assertEqual(self.facts['version'], '12.3R6.6')
@patch('jnpr.junos.Device.execute')
def test_swver_hostname_none(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.facts['master'] = 'RE5'
self.facts['version_RE5'] = '15.3R6.6'
software_version(self.dev, self.facts)
self.assertEqual(self.facts['version'], '15.3R6.6')
# --> JLS, there should always be a facts['master'] assigned.
# @patch('jnpr.junos.Device.execute')
# def test_swver_master_none(self, mock_execute):
# mock_execute.side_effect = self._mock_manager
# self.facts['master'] = None
# software_version(self.dev, self.facts)
# self.assertEqual(self.facts['version'], '12.3R6.6')
@patch('jnpr.junos.Device.execute')
@patch('jnpr.junos.facts.swver.re.findall')
def test_swver_exception_handling(self, mock_re_findall, mock_execute):
mock_execute.side_effect = self._mock_manager
mock_re_findall.side_effect = IndexError
self.facts['master'] = 'RE0'
software_version(self.dev, self.facts)
self.assertEqual(self.facts['version'], '0.0I0.0')
def _read_file(self, fname):
from ncclient.xml_ import NCElement
fpath = os.path.join(os.path.dirname(__file__),
'rpc-reply', fname)
foo = open(fpath).read()
rpc_reply = NCElement(foo, self.dev._conn.
_device_handler.transform_reply())\
._NCElement__doc[0]
return rpc_reply
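    # Note: _mock_manager below plays two roles in these tests: invoked with
    # kwargs it stands in for ncclient's connect(), and invoked with args it
    # fakes Device.execute() by replaying the canned reply named after the
    # RPC tag (see _read_file above).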
def _mock_manager(self, *args, **kwargs):
if kwargs:
device_params = kwargs['device_params']
device_handler = make_device_handler(device_params)
session = SSHSession(device_handler)
return Manager(session, device_handler)
if args:
return self._read_file(args[0].tag + '.xml')<|fim▁end|> | self.assertLessEqual(version_info('13.3-20131120'), (14, 1))
|
<|file_name|>perceval_gerrit_me.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python3
from datetime import datetime, timedelta
from perceval.backends.core.gerrit import Gerrit
# hostname of the Gerrit instance
hostname = 'gerrit.opnfv.org'
# user for sshing to the Gerrit instance
user = 'd.arroyome'
# retrieve only reviews changed since one day ago
from_date = datetime.now() - timedelta(days=1)
# create a Gerrit object, pointing to hostname, using user for ssh access<|fim▁hole|>for review in repo.fetch(from_date=from_date):
print(review['data']['number'])<|fim▁end|> | repo = Gerrit(hostname=hostname, user=user)
# fetch all reviews as an iterator, and iterate it printing each review id |
<|file_name|>preprocessing.py<|end_file_name|><|fim▁begin|># coding: utf-8
# # Preprocessing Notebook
#
# ### Author: James Foster, [email protected]
#
# #### Install gomill: http://mjw.woodcraft.me.uk/gomill/doc/0.7.4/install.html
# In[1]:
import numpy as np
import pandas as pd
from gomill import sgf
from gomill import ascii_boards
from gomill import sgf_moves
from IPython.core.debugger import Tracer
# In[2]:
def sgf_filename_to_game(game_filename):
"""
Read in sgf game file and convert to gomill Game object
"""
with open(game_filename, 'r') as myfile:
game_string=myfile.read() #.replace('\n', '')
g = sgf.Sgf_game.from_string(game_string)
return g
# In[3]:
def game_to_string(game):
"""
Print info about Game object
"""
print g.get_winner()
print g.get_size()
print g.get_root().get_raw('BR')
print
for node in g.get_main_sequence():
print node
# In[4]:
def show_sgf_file(sgf_game, move_number=None):
"""
Show the position from an SGF file. If a move number is specified, the position
before that move is shown (this is to match the behaviour of GTP loadsgf).
"""
try:
board, plays = sgf_moves.get_setup_and_moves(sgf_game)
except ValueError, e:
raise StandardError(str(e))
if move_number is not None:
move_number = max(0, move_number-1)
plays = plays[:move_number]
for colour, move in plays:
if move is None:
continue
row, col = move
try:
board.play(row, col, colour)
except ValueError:
raise StandardError("illegal move in sgf file")
print ascii_boards.render_board(board)
print
# In[5]:
def game_to_board(game, move_number=None):
"""
Convert gomill Game object to Board object. If move number is
specified, the position before that move is shown (this is to
match the behaviour of GTP loadsgf).
"""
if move_number<1:
raise ValueError('Game undefined for move_number < 1')
try:
board, plays = sgf_moves.get_setup_and_moves(game)
except ValueError, e:
raise StandardError(str(e))
if move_number is not None:
move_number = max(0, move_number-1)
if move_number==0: # Special case for first move of the game
turn, _ = plays[0]
plays = plays[:move_number]
swap_dict = {'w':'b', 'b':'w'}
for colour, move in plays:
if move is None:
continue
row, col = move
try:
board.play(row, col, colour)
except ValueError:
raise StandardError("illegal move in sgf file")
turn = swap_dict[colour]
if move_number is None or move_number > len(plays): # Game is over, it's neither player's turn
turn = None
return (board, turn)
# In[6]:
def game_move_to_board(game, move_number):
"""
Convert gomill Game object to Board object that includes only the specified move.
The position before the specified move is shown (this is to match the behaviour of GTP loadsgf).
"""
try:
board, plays = sgf_moves.get_setup_and_moves(game)
except ValueError, e:
raise StandardError(str(e))
if move_number is not None:
move_number = max(0, move_number-2)
play = plays[move_number]
colour, move = play # Unpack tuple
row, col = move # Unpack tuple
try:
board.play(row, col, colour)
except ValueError:
raise StandardError("illegal move in sgf file")
turn = colour
return (board, turn)
# In[7]:
def board_to_array(board, dimension=1, turn=None, white=-1, black=1):
"""
Convert gomill Board object to numpy 1D array (default) or 2D matrix.
If turn is None, use default values for white and black stones (default is white=-1, black=1).
Else, convert stones to perspective of player whose turn it is: 1 is my stone, -1 is your stone.
"""
size = board.side
if dimension==1:
array = np.zeros(size*size, dtype=np.int8) # Initialize numpy 1D array of zeros
elif dimension==2:
array = np.zeros((size,size), dtype=np.int8) # Initialize numpy 2D array of zeros
else:
raise ValueError('Invalid number of dimensions specified: ', dimension)
points = board.board_points
for row, col in points:
colour = board.board[row][col]
        if turn: # Alternate perspectives according to whose turn it is
if colour:
value = (colour==turn)*2-1 # value is 1 for player whose turn it is, -1 for other player
else: # Point was played but was captured, is now empty?
value = 0
else: # turn is none, don't alternate perspectives according to turn
if colour=='w':
value = white
elif colour=='b':
value = black
else: # Point was played but was captured, is now empty?
value = 0
row = size-row-1 # Convert Board row index (which starts at bottom of board) into matrix row index (which starts at top)
if dimension==1:
array[row*size+col] = value
elif dimension==2:
array[row,col] = value
else:
raise ValueError('Invalid number of dimensions specified: ', dimension)
return array
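# Illustrative example: for a 9x9 board with the default encoding (white=-1,
# black=1), board_to_array(board, dimension=1) returns a length-81 int8 vector
# in row-major order, top row of the board first.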
# In[8]:
# Test Representation conversions
def test_representation():
game_filename = './Game_Files/9x9/Go_Seigen/1968-08-00.sgf'
g = sgf_filename_to_game(game_filename)
move = 4
print show_sgf_file(g,move)
b, turn = game_to_board(g, move)
print ascii_boards.render_board(b)
matrix = board_to_array(b, dimension=2, turn=turn)
print
print matrix
matrix = board_to_array(b, dimension=2, turn=None)
print
print matrix
print
print board_to_array(b, dimension=1, turn=turn)
print board_to_array(b, dimension=1, turn=None)
print matrix.flatten(order='C')
print board_to_array(b, dimension=1)
assert (matrix.flatten(order='C') == board_to_array(b,1)).all()
#test_representation()
# In[9]:
def test_game_move_to_board():
game_file = './Game_Files/9x9/Go_Seigen/1968-08-00.sgf'
g = sgf_filename_to_game(game_file)
move = 5
b, turn = game_to_board(g, move)
print ascii_boards.render_board(b)
matrix = board_to_array(b, dimension=2)
print
print matrix
b, turn = game_to_board(g, move+1)
print ascii_boards.render_board(b)
matrix = board_to_array(b, dimension=2)
print
print matrix
print
b, turn = game_move_to_board(g, move+1)
print ascii_boards.render_board(b)
print
matrix = board_to_array(b, dimension=2, turn=turn)
print matrix
print
vector = board_to_array(b, dimension=1, turn=turn)
print vector
print turn
#test_game_move_to_board()
# In[10]:
import os
from fnmatch import fnmatch
def directory_to_data_files(root_dir, output_filename, size, print_progress=False):
"""
Load and convert all .sgf files from a root directory into text file of data vectors
"""
pattern = "*.sgf"
sgf_files = []
for path, subdirs, files in os.walk(root_dir):
for name in files:
if fnmatch(name, pattern):
sgf_files.append(os.path.join(path, name))
n_files = len(sgf_files)
# Open data file for writing
vectors = open(output_filename, 'wb')
# Convert sgf files to numerical array data files
for i, sgf_file in enumerate(sgf_files):
try:
game = sgf_filename_to_game(sgf_file)
if print_progress:
print str(i+1)+"/"+str(n_files), 'Processing file:', sgf_file
except ValueError as ve:
print 'Exception:',str(ve)+'.','File "'+sgf_file+'"', 'is likely malformed.'
for move in range(1,len(game.get_main_sequence())):
try:
# Create current move vector
board, turn = game_to_board(game, move)
vector = board_to_array(board, dimension=1, turn=turn)
# Create next move vector
next_move_board, turn = game_move_to_board(game, move+1) # Get board containing only the move after the current move
next_move_vector = board_to_array(next_move_board, dimension=1, turn=turn)
            # Create winner label: 1 means the current player won, -1 means the other player won
winner = np.int8((game.get_winner()==turn)*2-1)
if len(vector)!=size or len(next_move_vector)!=size:
msg = 'Board size is '+str(len(vector))+'. Expected size is '+str(size)
raise SizeException(msg)
# Write data arrays to files
np.savetxt(vectors, winner[None], fmt='%i', newline=';')
np.savetxt(vectors, vector[None], fmt='%i', newline=';')
np.savetxt(vectors, next_move_vector[None], fmt='%i')
except TypeError as te:
print 'Exception:',str(te)+'.','File "'+sgf_file+'"', 'is likely malformed.'
except ValueError as ve:
print 'Exception:',str(ve)+'.','File "'+sgf_file+'"', 'is likely malformed.'
except IndexError as ie:
print 'Exception:',str(ie)+'.','File "'+sgf_file+'"', 'is likely malformed.'
except Exception as e:
print 'Exception:',str(e)+'.','File "'+sgf_file+'"', 'is likely malformed.'
vectors.close()
class SizeException(Exception):
pass
# In[11]:
def parse_line(line):
"""
Parse line string into winner, vector, and next_move_vector
"""
#line = line.rstrip() # Remove '\n' at end of line
line = line.split(';') # Split line into winner, vector, and next_move_vector
winner = np.int8(line[0])
vector = np.fromstring(line[1], dtype='int8', sep=' ')
next_move_vector = np.fromstring(line[2], dtype='int8', sep=' ')
return winner, vector, next_move_vector
<|fim▁hole|># In[12]:
from random_sampler3 import random_sampler
def sample_data(data_filename, k):
"""
Randomly sample k lines from file, parse them.
Return lists of winners, vectors, and next_move_vectors
"""
lines = random_sampler(filename=data_filename, k=k)
state_tuples = map(parse_line, lines) # Apply parse_lines() to each sampled line
state_lists = map(list, zip(*state_tuples)) # Unzip list of state_tuples into aligned-index list of winners, vectors, next_move_vectors
winners = state_lists[0]
vectors = state_lists[1]
next_move_vectors = state_lists[2]
return winners, vectors, next_move_vectors
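# Usage sketch (filename and k are illustrative):
#   winners, vectors, next_moves = sample_data('./Data/data_9x9.txt', k=1000)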
# In[40]:
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
# In[41]:
# Main method for running from command line
if __name__ == "__main__":
print 'main method executed'
# Convert and save data to file
root_dir = './Game_Files/9x9'
output_filename = './Data/data_9x9.txt'
#directory_to_data_files(root_dir, output_filename, size=81, print_progress=True)
# Load data from file
data_filename = './Data/data_9x9.txt'
max_examples = file_len(data_filename)
k = max_examples # Number of training examples to randomly sample from data file (note: repeated sampling could give repeat examples)
k = min(k, max_examples) # Don't try to sample more examples than rows in the data file
winners, vectors, next_move_vectors = sample_data(data_filename, k=k)
X = np.array(vectors) # Convert list of vectors into 2D array X
Y = np.array(next_move_vectors) # Convert list of next_move_vectors into 2D array Y
winners = np.array(winners) # Convert list of winners into 1D array winners
# In[ ]:<|fim▁end|> | |
<|file_name|>resource_op_lifting.cc<|end_file_name|><|fim▁begin|>/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This pass lifts resource variable operations outside of device computation.
#include <cstdint>
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h" // TF:llvm-project
#include "mlir/IR/Attributes.h" // TF:llvm-project
#include "mlir/IR/Block.h" // TF:llvm-project
#include "mlir/IR/BlockAndValueMapping.h" // TF:llvm-project
#include "mlir/IR/Builders.h" // TF:llvm-project
#include "mlir/IR/Diagnostics.h" // TF:llvm-project
#include "mlir/IR/Function.h" // TF:llvm-project
#include "mlir/IR/Module.h" // TF:llvm-project
#include "mlir/IR/Operation.h" // TF:llvm-project
#include "mlir/IR/StandardTypes.h" // TF:llvm-project
#include "mlir/IR/SymbolTable.h" // TF:llvm-project
#include "mlir/IR/TypeUtilities.h" // TF:llvm-project
#include "mlir/IR/Types.h" // TF:llvm-project
#include "mlir/IR/Value.h" // TF:llvm-project
#include "mlir/IR/Visitors.h" // TF:llvm-project
#include "mlir/Pass/Pass.h" // TF:llvm-project
#include "mlir/Support/LLVM.h" // TF:llvm-project
#include "mlir/Support/LogicalResult.h" // TF:llvm-project
#include "mlir/Transforms/RegionUtils.h" // TF:llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/mangling_util.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
namespace mlir {
namespace {
// This pass lifts resource variable operations outside of device computation.
// This is useful because a lot of accelerator devices can not interact with
// resource variables directly.
//
// Here is a simple example in TensorFlow where a device doubles the value of a
// TensorFlow resource variable and returns new value:
//
// %resource_handle = "tf.VarHandleOp"()
// %1 = "tf_device.launch"() ( {
// %init_value = "tf.ReadVariableOp"(%resource_handle)
// "tf.AssignAddVariableOp"(%resource_handle, %init_value)
// %new_value = "tf.ReadVariableOp"(%resource_handle)
// tf_device.return %new_value
// })
//
// After this pass, the computation would become:
//
// %resource_handle = "tf.VarHandleOp"()
// %init_value = "tf.ReadVariableOp"(%resource_handle)
// %1:2 = "tf_device.launch"() ( {
// %new_value = "tf.AddV2"(%init_value, %init_value)
// tf_device.return %new_value, %new_value
// })
// "tf.AssignVariableOp"(%resource_handle, %1#1)
//
// You can see that there are a few main changes applied:
// 1) All the resource variable reads and writes are now outside of
// tf_device.launch op.
// 2) Instead of taking resource handles as input, this device computation now
//    takes snapshotted values of those resources.
// 3) Some resource load operations are eliminated with store-load forwarding.
// 4) Updated values to resource are appended to `tf_device.return` and used by
// external resource store operations so that resources are still updated
// after the computation.
//
// If the launch body contains functional control flow, the pass first lifts the
// loads/stores in the body/cond/branch functions to the launch body, then
// performs the above lifting. E.g.,
//
// func @launch_with_loop() -> () {
// %0 = "tf.VarHandleOp"() ...
// "tf_device.launch"() ( {
// %1 = "tf.While"(%0) {body = @while_body, cond = @while_cond}
// tf_device.return
// })
// return
// }
// func @while_body(%arg0: tensor<*x!tf.resource<tensor<f32>>>) {
// %constant = "tf.Const"() ...
// "tf.AssignVariableOp"(%arg0, %constant)
// return %arg0
// }
// func @while_cond(%arg0: tensor<*x!tf.resource<tensor<f32>>>) {
// %read = "tf.ReadVariableOp"(%arg0)
// return %read
// }
//
// will be be transformed to:
//
// func @launch_with_loop() {
// %0 = "tf.VarHandleOp"() ...
// %1 = "tf.ReadVariableOp"(%0)
// %2 = "tf_device.launch"() ( {
// %3 = "tf.While"(%1) {body = @while_body, cond = @while_cond}
// tf_device.return %3 : tensor<f32>
// }) : () -> tensor<f32>
// "tf.AssignVariableOp"(%0, %2)
// return
// }
// func @while_body(%arg0: tensor<f32>) {
// %0 = "tf.Const"() ...
// return %0 : tensor<f32>
// }
// func @while_cond(%arg0: tensor<f32>) {
// return %arg0
// }
//
struct ResourceOpLiftingPass : public ModulePass<ResourceOpLiftingPass> {
void runOnModule() override;
};
// Removes identity nodes in the block. The device computation does not need
// such nodes to carry information.
void RemoveIdentity(Block* block) {
for (auto& op : llvm::make_early_inc_range(*block)) {
if (llvm::isa<TF::IdentityOp>(&op) || llvm::isa<TF::IdentityNOp>(&op)) {
op.replaceAllUsesWith(op.getOperands());
op.erase();
}
}
}
// Performs store-load forwarding. This effectively removes
// 1) Any resource loads after a store to that same resource is done
// 2) Any resource stores except the last one.
// TODO(ycao): Store-load forwarding implemented here is only correct when
// computation is purely sequential (no concurrency). Need to support concurrent
// computation as well.
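// Illustrative effect on a straight-line block:
//   "tf.AssignVariableOp"(%r, %v0)  // erased: superseded by the later store
//   %x = "tf.ReadVariableOp"(%r)    // erased: uses of %x become uses of %v0
//   "tf.AssignVariableOp"(%r, %v1)  // kept as the final store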
void ForwardStoreToLoad(Block* block) {
// resource_handle_to_last_store_op keeps track of the most recent (last)
// store to each resource. Non-existent entry indicates that a resource has
// not been stored to yet.
llvm::SmallDenseMap<Value, TF::AssignVariableOp>
resource_handle_to_last_store_op;
// Only iterate through ops directly in the block as we can't handle ops
// nested deeper in regions.
for (Operation& op : llvm::make_early_inc_range(*block)) {
if (auto read_variable_op = dyn_cast<TF::ReadVariableOp>(&op)) {
Value resource = read_variable_op.resource();
auto last_store = resource_handle_to_last_store_op[resource];
if (!last_store) continue;
// Use stored value in last_store to replace all uses of current resource
// load's result, then erase this resource load.
read_variable_op.value().replaceAllUsesWith(last_store.value());
read_variable_op.erase();
continue;
}
if (auto assign_variable_op = dyn_cast<TF::AssignVariableOp>(&op)) {
Value resource = assign_variable_op.resource();
auto last_store = resource_handle_to_last_store_op[resource];
// Previous store ops to same resource can be erased.
if (last_store) last_store.erase();
resource_handle_to_last_store_op[resource] = assign_variable_op;
}
}
}
// Moves resource load operations with the provided `move_load` function. This
// assumes load-store forwarding has been performed on this block, so that
// each load of a given resource reads its initial value. A `skip_load` function
// is used to indicate whether a load should be skipped. If there are multiple
// loads on the same resource, only the first one will be moved, and the later
// ones will be removed and replaced with the first one.
void HoistResourceLoads(
Block* block, llvm::function_ref<bool(TF::ReadVariableOp)> skip_load,
llvm::function_ref<void(TF::ReadVariableOp)> move_load) {
llvm::SmallDenseMap<Value, TF::ReadVariableOp> resource_to_read_ops;
// Only iterate through ops directly in the body as we can't handle
// ops nested deeper in regions.
for (Operation& op : llvm::make_early_inc_range(*block)) {
auto read_variable_op = dyn_cast<TF::ReadVariableOp>(&op);
if (!read_variable_op) continue;
if (skip_load(read_variable_op)) continue;
Value resource = read_variable_op.resource();
auto p = resource_to_read_ops.insert({resource, read_variable_op});
if (p.second) {
move_load(read_variable_op);
continue;
}
// Getting here means a load operation of this resource has been hoisted out
// before. Use hoisted load result to replace all uses of current op result
// and erase op.
op.replaceAllUsesWith(p.first->second);
op.erase();
}
}
// If there are any stores to resource defined outside of the block then the
// stored values must be returned so that new values can be used by sunk
// resource stores.
// Returns true if any resource variable stored values are appended, otherwise
// false.
bool AppendResourceStoreValueToReturn(Block* body) {
bool has_resource_store = false;
auto old_return = body->getTerminator();
llvm::SmallVector<Value, 4> new_return_operands(old_return->getOperands());
// Only iterate through ops directly in the body as we can't handle ops nested
// deeper in regions.
for (auto assign_variable_op : body->getOps<TF::AssignVariableOp>()) {
Value resource = assign_variable_op.resource();
if (!resource) continue;
// Skip resources created inside of the body.
if (resource.getParentRegion() == body->getParent()) continue;
// TODO(ycao): Prevent same value from being returned multiple times.
// TODO(ycao): Do not return resource store value if it is defined outside
// of launch_op.
new_return_operands.push_back(assign_variable_op.value());
has_resource_store = true;
}
// If no resource stores are found, no need to update return op.
if (!has_resource_store) return false;
OpBuilder builder(old_return);
builder.create<tf_device::ReturnOp>(old_return->getLoc(),
new_return_operands);
old_return->erase();
return true;
}
// Moves resource store operations to after launch_op. This assumes load-store
// forwarding has been performed on this launch_op such that there is at most
// one resource store operation carrying its final value.
tf_device::LaunchOp SinkResourceStores(tf_device::LaunchOp launch_op,
OpBuilder* builder) {
// Update ReturnOp inside launch_op's body to output final values of updated
// external resources.
if (!AppendResourceStoreValueToReturn(&launch_op.GetBody())) return launch_op;
auto new_return_op = launch_op.GetBody().getTerminator();
llvm::SmallVector<Type, 4> new_launch_return_types(
new_return_op->getOperandTypes());
builder->setInsertionPoint(launch_op);
auto new_launch_op = builder->create<tf_device::LaunchOp>(
launch_op.getLoc(), new_launch_return_types,
/*operands=*/llvm::SmallVector<Value, 4>(), launch_op.getAttrs());
new_launch_op.body().takeBody(launch_op.body());
// Replace uses of old launch_op results with those of new_launch_op.
for (auto p : llvm::zip(launch_op.getResults(), new_launch_op.getResults())) {
std::get<0>(p).replaceAllUsesWith(std::get<1>(p));
}
// Create a mapping from operands of new_return_op operands to new_launch_op
// results.
BlockAndValueMapping mapper;
for (auto p :
llvm::zip(new_return_op->getOperands(), new_launch_op.getResults())) {
mapper.map(std::get<0>(p), std::get<1>(p));
}
// Clone all resource store ops and map their operands to values returned from
// new_launch_op.
for (Operation& op : llvm::make_early_inc_range(new_launch_op.GetBody())) {
if (dyn_cast<TF::AssignVariableOp>(&op)) {
builder->clone(op, mapper);
op.erase();
}
}
launch_op.erase();
return new_launch_op;
}
// Hoists resource variable loads and sinks stores from launch_op.
LogicalResult HoistResourceOpsFromLaunchOp(tf_device::LaunchOp launch_op) {
ModuleOp m = launch_op.getParentOfType<ModuleOp>();
OpBuilder builder(m);
// Remove identity nodes to avoid aliasing.
RemoveIdentity(&launch_op.GetBody());
// Perform store-load forwarding. So that each resource is only loaded with
// its initial value and is only stored with its final value.
ForwardStoreToLoad(&launch_op.GetBody());
// Move loads of external resources, if any, to before launch_op.
// (Skipping resources created inside of launch_op.)
HoistResourceLoads(
&launch_op.GetBody(),
/*skip_load=*/
[&](TF::ReadVariableOp read) {
return read.resource().getParentRegion() == &launch_op.body();
},
/*move_load=*/
[&](TF::ReadVariableOp read) {
read.getOperation()->moveBefore(launch_op);
});
// Move stores of external resources, if any, to after launch_op.
auto new_launch_op = SinkResourceStores(launch_op, &builder);
llvm::SetVector<Value> captured_values;
getUsedValuesDefinedAbove(new_launch_op.body(), new_launch_op.body(),
captured_values);
for (Value v : captured_values) {
auto tensor_type = v.getType().dyn_cast<TensorType>();
if (!tensor_type) continue;
if (!tensor_type.getElementType().isa<TF::ResourceType>()) continue;
return new_launch_op.emitOpError()
<< "has remaining resource inputs that can not be lifted";
}
return success();
}
// Holds information about a function's use of a resource argument.
struct ResourceArgUseInfo {
Type data_type;
bool updated;
bool used;
};
// Finds the ResourceArgUseInfo for each resource argument. Forwarding to the
// output (i.e., the argument is an operand of the return op) is not considered
// as a use. This doesn't support nesting of ops, so before calling this, nested
// ops/functions need to be already resource-lifted.
LogicalResult FindResourceArgUseInfo(
FuncOp func_op, llvm::SmallDenseMap<int64_t, ResourceArgUseInfo>* result) {
auto return_op = func_op.front().getTerminator();
for (auto arg : func_op.getArguments()) {
if (!getElementTypeOrSelf(arg.getType()).isa<TF::ResourceType>()) continue;
ResourceArgUseInfo info;
info.used = false;
info.updated = false;
bool do_not_touch = false;
for (auto user : arg.getUsers()) {
if (user == return_op) continue;
if (auto read = llvm::dyn_cast<TF::ReadVariableOp>(user)) {
info.used = true;
info.data_type = read.getType();
continue;
}
if (auto assign = llvm::dyn_cast<TF::AssignVariableOp>(user)) {
info.used = true;
info.updated = true;
info.data_type = assign.value().getType();
continue;
}
if (llvm::isa<TF::StackPushV2Op>(user) ||
llvm::isa<TF::StackPopV2Op>(user)) {
// Stacks will be handled by a separate pass.
do_not_touch = true;
break;
}
user->emitOpError("found unsupported operations on resource.");
return failure();
}
if (!do_not_touch) (*result)[arg.getArgNumber()] = info;
}
return success();
}
// Merges two sets of resource arg use infos. An argument is considered used in
// the merged result as long as either set marks it as used. This is used to
// merge results from functions that have aliasing inputs, e.g., a while loop's
// body and condition. The sets of keys of the two maps must be the same.
llvm::SmallDenseMap<int64_t, ResourceArgUseInfo> MergeArgResourceUseInfo(
const llvm::SmallDenseMap<int64_t, ResourceArgUseInfo>& infos0,
const llvm::SmallDenseMap<int64_t, ResourceArgUseInfo>& infos1) {
llvm::SmallDenseMap<int64_t, ResourceArgUseInfo> result;
for (const auto& entry : infos0) {
auto info1_it = infos1.find(entry.getFirst());
// If the entry is missing in any input, we should not touch this entry.
if (info1_it == infos1.end()) continue;
auto& info = result[entry.getFirst()];
info = entry.getSecond();
if (info.updated) continue;
if (info1_it->getSecond().used) {
info.used = true;
info.updated = info1_it->getSecond().updated;
info.data_type = info1_it->getSecond().data_type;
}
}
return result;
}
// Removes the unused resource arguments, and the return values that forward the
// removed arguments. If old_to_new_arg_indices is provided, it will store the
// new argument index that corresponds to each original index (-1 means it is
// removed). If remaining_resource_data_types is provided, it will store the
// data types of the remaining resource arguments, where the indices are after
// removing unused ones.
void RemoveUnusedResourceArgumentsAndForwardedRetvals(
const llvm::SmallDenseMap<int64_t, ResourceArgUseInfo>& infos,
FuncOp func_op,
llvm::SmallVector<int64_t, 4>* old_to_new_arg_indices = nullptr,
llvm::SmallDenseMap<int64_t, Type>* remaining_resource_data_types =
nullptr) {
// Remove return values forwarded from unused arguments.
auto return_op = func_op.front().getTerminator();
auto old_return_vals = llvm::to_vector<8>(return_op->getOperands());
int64_t skipped_retvals = 0;
for (auto entry : llvm::enumerate(old_return_vals)) {
auto return_val = entry.value();
if (auto arg = return_val.dyn_cast<BlockArgument>()) {
auto it = infos.find(arg.getArgNumber());
if (it != infos.end() && !it->getSecond().used) {
return_op->eraseOperand(entry.index() - skipped_retvals++);
}
}
}
llvm::SmallVector<unsigned int, 4> indices_to_erase;
llvm::SmallVector<Type, 4> new_types;
int64_t skipped_args = 0;
for (auto arg : func_op.getArguments()) {
auto it = infos.find(arg.getArgNumber());
if (it != infos.end() && !it->getSecond().used) {
indices_to_erase.push_back(arg.getArgNumber());
skipped_args++;
if (old_to_new_arg_indices != nullptr) {
old_to_new_arg_indices->push_back(-1);
}
} else {
new_types.push_back(arg.getType());
if (old_to_new_arg_indices != nullptr) {
old_to_new_arg_indices->push_back(arg.getArgNumber() - skipped_args);
}
if (it != infos.end() && remaining_resource_data_types != nullptr) {
(*remaining_resource_data_types)[arg.getArgNumber() - skipped_args] =
it->second.data_type;
}
}
}
func_op.eraseArguments(indices_to_erase);
func_op.setType(FunctionType::get(
new_types, llvm::to_vector<4>(return_op->getOperandTypes()),
func_op.getContext()));
}
// Lifts reads/writes of resource arguments from func_op and changes its
// signature. resource_data_types is the (index, data type) pair for each
// resource argument. handle_updated_arg_value is a caller-provided function
// that handles the updated value for a resource argument.
void LiftArgRetResourcesForFunction(
FuncOp func_op,
const llvm::SmallDenseMap<int64_t, Type>& resource_data_types,
llvm::function_ref<void(int64_t, Value)> handle_updated_arg_value) {
ForwardStoreToLoad(&func_op.front());
// Maps a resource argument to the first read.
llvm::SmallDenseMap<Value, TF::ReadVariableOp, 4> resource_arg_read;
// Maps a resource argument to the last write.
llvm::SmallDenseMap<Value, TF::AssignVariableOp, 4> resource_arg_write;
// Use HoistResourceLoads() to CSE the loads; the `move_load` callback only
// records the remaining load for each resource argument in resource_arg_read.
HoistResourceLoads(
&func_op.front(),
/*skip_load=*/
[&](TF::ReadVariableOp read) {
return !read.resource().isa<BlockArgument>();
},
/*move_load=*/
[&](TF::ReadVariableOp read) {
resource_arg_read[read.resource()] = read;
});
// Record the stores in resource_arg_write.
for (auto& op : llvm::make_early_inc_range(func_op.front())) {
auto write = llvm::dyn_cast<TF::AssignVariableOp>(&op);
if (!write) continue;
auto arg = write.resource().dyn_cast<BlockArgument>();
if (!arg) continue;
// After ForwardStoreToLoad(), there should be just one store for each
// resource.
resource_arg_write[arg] = write;
}
// Now change the input types to non-resource and remove the internal loads.
auto new_types = llvm::to_vector<8>(func_op.getType().getInputs());
for (auto& entry : resource_data_types) {
auto arg = func_op.getArgument(entry.getFirst());
auto read_it = resource_arg_read.find(arg);
auto write_it = resource_arg_write.find(arg);
arg.setType(entry.getSecond());
new_types[arg.getArgNumber()] = entry.getSecond();
if (read_it != resource_arg_read.end()) {
read_it->getSecond().replaceAllUsesWith(arg);
read_it->getSecond().erase();
}
if (write_it != resource_arg_write.end()) {
handle_updated_arg_value(arg.getArgNumber(),
write_it->getSecond().value());
write_it->getSecond().erase();
}
}
func_op.setType(FunctionType::get(
new_types,
llvm::to_vector<4>(func_op.front().getTerminator()->getOperandTypes()),
func_op.getContext()));
}
// Returns a vector filtered from range where the unused elements (specified by
// resource_arg_uses) are removed.
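// For example, filtering operands [a, b, c] with use info {0: used, 1: unused,
// 2: used} yields [a, c]; indices absent from resource_arg_uses are kept.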
template <typename T, typename Range>
llvm::SmallVector<T, 4> FilterRange(
Range range,
const llvm::SmallDenseMap<int64_t, ResourceArgUseInfo>& resource_arg_uses) {
llvm::SmallVector<T, 4> filtered;
for (auto entry : llvm::enumerate(range)) {
auto it = resource_arg_uses.find(entry.index());
if (it == resource_arg_uses.end() || it->getSecond().used)
filtered.push_back(entry.value());
}
return filtered;
}
// Changes the types of the control flow op (e.g., while, if) and adds loads and
// stores around it. arg_data_type_and_updated_output_index maps an operand (to
// be changed) index to its data type and the updated value index in the output
// (-1 means not updated.)
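// As a sketch (tf.SomeCFOp stands in for the while/if/call op being rewritten),
//   %0 = "tf.SomeCFOp"(%resource)
// becomes
//   %v = "tf.ReadVariableOp"(%resource)
//   %0 = "tf.SomeCFOp"(%v)
//   "tf.AssignVariableOp"(%resource, %0#j)   // only if updated_index j >= 0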
void AddLoadsStoresOutsideControlFlowOp(
Operation* caller,
const llvm::SmallDenseMap<int64_t, std::pair<Type, int64_t>>&
arg_data_type_and_updated_output_index) {
OpBuilder builder(caller);
auto new_operands = llvm::to_vector<8>(caller->getOperands());
llvm::SmallVector<int64_t, 8> changed_indices;
// Find the operands to change, and create the loads.
for (auto& entry : arg_data_type_and_updated_output_index) {
int64_t index = entry.getFirst();
Type new_type = entry.getSecond().first;
int64_t updated_index = entry.getSecond().second;
auto operand = caller->getOperand(index);
builder.setInsertionPoint(caller);
new_operands[index] = builder.create<TF::ReadVariableOp>(
caller->getLoc(), ArrayRef<Type>{new_type}, ArrayRef<Value>{operand},
ArrayRef<NamedAttribute>{});
caller->setOperand(index, new_operands[index]);
if (updated_index < 0) continue;
builder.setInsertionPointAfter(caller);
builder.create<TF::AssignVariableOp>(
caller->getLoc(), ArrayRef<Type>{},
ArrayRef<Value>{operand, caller->getResult(updated_index)},
ArrayRef<NamedAttribute>{});
}
}
// Lifts loads/stores from while loop's body and cond functions.
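// The steps are: (1) verify that resource inputs and outputs alias each other
// in the body and that the condition never writes resources, (2) compute and
// merge per-argument use info for body and cond, (3) drop unused resource
// arguments and the retvals that forward them, (4) lift reads/writes inside
// body and cond to value arguments/results, and (5) recreate the while op with
// loads inserted before it and stores after it.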
LogicalResult HandleWhileLoop(TF::WhileOp while_op, FuncOp body, FuncOp cond) {
// Remove identity nodes to avoid aliasing.
RemoveIdentity(&body.front());
RemoveIdentity(&cond.front());
auto return_op = body.front().getTerminator();
// Sanity check: body resource input/output should alias each other.
for (auto arg : body.getArguments()) {
if (!getElementTypeOrSelf(arg.getType()).isa<TF::ResourceType>()) continue;
if (return_op->getOperand(arg.getArgNumber()) != arg) {
return return_op->emitOpError(
"resource used in while loop is only supported when the ")
<< "resource input and output alias each other in the loop body.";
}
}
// FindResourceArgUseInfo() checks for supported resource ops (read and
// assign), but the loop condition has the additional requirement that it
// cannot write resources.
if (cond.walk([&](TF::AssignVariableOp assign) {
assign.emitOpError("found resource write in loop condition.");
return WalkResult::interrupt();
})
.wasInterrupted()) {
return failure();
}
llvm::SmallDenseMap<int64_t, ResourceArgUseInfo> body_use_info;
llvm::SmallDenseMap<int64_t, ResourceArgUseInfo> cond_use_info;
if (failed(FindResourceArgUseInfo(body, &body_use_info)) ||
failed(FindResourceArgUseInfo(cond, &cond_use_info))) {
return failure();
}
// A resource is considered used as long as it is used in either body or cond.
auto resource_arg_uses =
MergeArgResourceUseInfo(body_use_info, cond_use_info);
if (resource_arg_uses.empty()) return success();
for (const auto& entry : resource_arg_uses) {
// Replace output resource uses with the input, so that we can later freely
// change the output type.
while_op.getResult(entry.getFirst())
.replaceAllUsesWith(while_op.getOperand(entry.getFirst()));
}
// Remove unused resources in functions.
llvm::SmallVector<int64_t, 4> old_to_new_indices;
llvm::SmallDenseMap<int64_t, Type> remaining_resource_data_types;
RemoveUnusedResourceArgumentsAndForwardedRetvals(
resource_arg_uses, body, &old_to_new_indices,
&remaining_resource_data_types);
RemoveUnusedResourceArgumentsAndForwardedRetvals(resource_arg_uses, cond);
LiftArgRetResourcesForFunction(
body, remaining_resource_data_types,
[&](int64_t index, Value value) { return_op->setOperand(index, value); });
LiftArgRetResourcesForFunction(cond, remaining_resource_data_types,
[&](int64_t index, Value value) {
// We already checked that cond should not
// have variable writes.
assert(false && "Should not happen");
});
// Recreate the while op.
OpBuilder builder(while_op);
auto new_output_shapes = FilterRange<Attribute, ArrayRef<Attribute>>(
while_op.output_shapes().getValue(), resource_arg_uses);
// Now use the filtered original operands, which will be replaced by
// AddLoadsStoresOutsideControlFlowOp().
auto new_while = builder.create<TF::WhileOp>(
while_op.getLoc(), body.getType().getResults(),
FilterRange<Value, OperandRange>(while_op.getOperands(),
resource_arg_uses),
while_op.getAttrs());
// Prepare for AddLoadsStoresOutsideControlFlowOp() and update
// new_output_shapes.
llvm::SmallDenseMap<int64_t, std::pair<Type, int64_t>>
arg_data_type_and_updated_output_index;
for (const auto& entry : remaining_resource_data_types) {
int64_t update_index = return_op->getOperand(entry.getFirst()) ==
body.getArgument(entry.getFirst())
? -1
: entry.getFirst();
arg_data_type_and_updated_output_index[entry.getFirst()] = {
entry.getSecond(), update_index};
if (!new_output_shapes.empty()) {
tensorflow::TensorShapeProto shape_proto;
tensorflow::ConvertTypeToTensorShape(entry.getSecond())
.AsProto(&shape_proto);
new_output_shapes[entry.getFirst()] = builder.getStringAttr(
tensorflow::mangling_util::MangleShape(shape_proto));
}
}
AddLoadsStoresOutsideControlFlowOp(new_while,
arg_data_type_and_updated_output_index);
new_while.setAttr("output_shapes", builder.getArrayAttr(new_output_shapes));
// Replace uses.
for (int64_t i = 0; i < old_to_new_indices.size(); ++i) {
if (old_to_new_indices[i] >= 0) {
while_op.getResult(i).replaceAllUsesWith(
new_while.getResult(old_to_new_indices[i]));
}
}
while_op.erase();
return success();
}
// Lifts loads/stores from an IfOp's branches.
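// Similar to the while-loop case: each resource output must alias the same
// input argument in both branches (the condition operand stays first), inputs
// updated in either branch get a value output appended to both branches, and
// loads/stores are placed around the recreated tf.If.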
LogicalResult HandleIfOp(TF::IfOp if_op, FuncOp then_branch,
FuncOp else_branch) {
// Remove identity nodes to avoid aliasing.
RemoveIdentity(&then_branch.front());
RemoveIdentity(&else_branch.front());
// Sanity check: branch return of resources should be aliases of inputs. If
// so, replace the output uses with the input so that we can remove these
// outputs.
for (auto entry : llvm::enumerate(
llvm::zip(then_branch.front().getTerminator()->getOperands(),
else_branch.front().getTerminator()->getOperands()))) {
auto then_retval = std::get<0>(entry.value());
auto else_retval = std::get<1>(entry.value());
assert(then_retval.getType() == else_retval.getType());
if (!getElementTypeOrSelf(then_retval.getType()).isa<TF::ResourceType>()) {
continue;
}
auto then_aliasing_arg = then_retval.dyn_cast<BlockArgument>();
auto else_aliasing_arg = else_retval.dyn_cast<BlockArgument>();
if (!then_aliasing_arg || !else_aliasing_arg ||
then_aliasing_arg.getArgNumber() != else_aliasing_arg.getArgNumber()) {
return if_op.emitOpError("unsupported tf.IfOp output: ")
<< "resource does not alias a single input.";
}
if_op.getResult(entry.index())
.replaceAllUsesWith(
if_op.getOperand(then_aliasing_arg.getArgNumber() + 1));
}
// Erase the resource outputs from the branches.
int64_t non_resource_results = 0;
llvm::SmallVector<int64_t, 4> old_to_new_output_indices;
llvm::SmallVector<Attribute, 4> new_output_shapes;
bool output_removed = false;
for (auto result : if_op.getResults()) {
if (!getElementTypeOrSelf(result.getType()).isa<TF::ResourceType>()) {
old_to_new_output_indices.push_back(non_resource_results++);
if (!if_op.output_shapes().getValue().empty()) {
new_output_shapes.push_back(
if_op.output_shapes().getValue()[result.getResultNumber()]);
}
continue;
}
old_to_new_output_indices.push_back(-1);
then_branch.front().getTerminator()->eraseOperand(non_resource_results);
else_branch.front().getTerminator()->eraseOperand(non_resource_results);
output_removed = true;
}
llvm::SmallDenseMap<int64_t, ResourceArgUseInfo> then_use_info;
llvm::SmallDenseMap<int64_t, ResourceArgUseInfo> else_use_info;
if (failed(FindResourceArgUseInfo(then_branch, &then_use_info)) ||
failed(FindResourceArgUseInfo(else_branch, &else_use_info))) {
return failure();
}
// A resource is considered used as long as it is used in either branch.
auto resource_arg_uses =
MergeArgResourceUseInfo(then_use_info, else_use_info);
if (resource_arg_uses.empty() && !output_removed) return success();
// Remove unused resources in functions.
llvm::SmallDenseMap<int64_t, Type> remaining_resource_data_types;
RemoveUnusedResourceArgumentsAndForwardedRetvals(
resource_arg_uses, then_branch, /*old_to_new_arg_indices=*/nullptr,
&remaining_resource_data_types);
RemoveUnusedResourceArgumentsAndForwardedRetvals(resource_arg_uses,
else_branch);
// Forward resource inputs updated in any branch to the outputs of both
// branches. First prepare the mapping from arg to new update output.
llvm::SmallDenseMap<int64_t, int64_t> resource_arg_to_new_output;
{
int64_t removed_args = 0;
for (const auto& entry : resource_arg_uses) {
if (!entry.getSecond().used) {
removed_args++;
continue;
}
if (!entry.getSecond().updated) continue;
int64_t new_output_index =
non_resource_results + resource_arg_to_new_output.size();
resource_arg_to_new_output[entry.getFirst() - removed_args] =
new_output_index;
}
}
// Append resource updates to the return ops: now they are just forwarded
// input resources, but will be replaced by the data value in
// LiftArgRetResourcesForFunction().
for (auto branch : {then_branch, else_branch}) {
auto new_retvals =
llvm::to_vector<4>(branch.front().getTerminator()->getOperands());
for (const auto& entry : resource_arg_to_new_output) {
new_retvals.push_back(branch.getArgument(entry.getFirst()));
}
auto old_return = branch.front().getTerminator();
OpBuilder builder(old_return);
auto new_return =
builder.create<ReturnOp>(old_return->getLoc(), new_retvals);
old_return->erase();
LiftArgRetResourcesForFunction(
branch, remaining_resource_data_types, [&](int64_t index, Value value) {
new_return.setOperand(resource_arg_to_new_output[index], value);
});
}
// Recreate the if op.
OpBuilder builder(if_op);
// Now use the filtered original operands, which will be replaced by
// AddLoadsStoresOutsideControlFlowOp().
auto new_operands =
FilterRange<Value, OperandRange>(if_op.input(), resource_arg_uses);
new_operands.insert(new_operands.begin(), if_op.cond());
auto new_if = builder.create<TF::IfOp>(if_op.getLoc(),
then_branch.getType().getResults(),
new_operands, if_op.getAttrs());
// Prepare for AddLoadsStoresOutsideControlFlowOp() and update
// new_output_shapes.
llvm::SmallDenseMap<int64_t, std::pair<Type, int64_t>>
arg_data_type_and_updated_output_index;
for (const auto& entry : remaining_resource_data_types) {
auto new_output_it = resource_arg_to_new_output.find(entry.getFirst());<|fim▁hole|> int64_t update_index = new_output_it == resource_arg_to_new_output.end()
? -1
: new_output_it->getSecond();
arg_data_type_and_updated_output_index[entry.getFirst() + 1] = {
entry.getSecond(), update_index};
if (!if_op.output_shapes().getValue().empty() && update_index >= 0) {
tensorflow::TensorShapeProto shape_proto;
tensorflow::ConvertTypeToTensorShape(entry.getSecond())
.AsProto(&shape_proto);
new_output_shapes.push_back(builder.getStringAttr(
tensorflow::mangling_util::MangleShape(shape_proto)));
}
}
AddLoadsStoresOutsideControlFlowOp(new_if,
arg_data_type_and_updated_output_index);
new_if.setAttr("output_shapes", builder.getArrayAttr(new_output_shapes));
// Replace uses.
for (int64_t i = 0; i < old_to_new_output_indices.size(); ++i) {
if (old_to_new_output_indices[i] >= 0) {
if_op.getResult(i).replaceAllUsesWith(
new_if.getResult(old_to_new_output_indices[i]));
}
}
if_op.erase();
return success();
}
// A resource-lifted function for (potentially multiple) PartitionedCallOps and
// information about the lifting changes.
struct PartitionedCallLiftingInfo {
// Function with resources lifted. Can be nullptr if nothing needs to change.
FuncOp lifted_callee;
// Mapping from old resource outputs to their aliasing inputs.
llvm::SmallDenseMap<int64_t, int64_t> old_outputs_aliasing_old_inputs;
// Mapping from old to new output indices in case any output is removed.
llvm::SmallVector<int64_t, 4> old_to_new_output_indices;
// ResourceArgUseInfo for each old resource argument.
llvm::SmallDenseMap<int64_t, ResourceArgUseInfo> use_info;
// Input for AddLoadsStoresOutsideControlFlowOp(), see its comment.
llvm::SmallDenseMap<int64_t, std::pair<Type, int64_t>>
arg_data_type_and_updated_output_index;
};
// Lifts loads/stores from a PartitionedCallOp's callee function. If anything
// needs to be changed, the original function will be preserved, and the lifting
// happens on a clone, which will be stored in `result`.
LogicalResult HandlePartitionedCallOpCallee(
FuncOp callee, PartitionedCallLiftingInfo* result) {
// Remove identity nodes to avoid aliasing.
RemoveIdentity(&callee.front());
// Sanity check: return of resources should be aliases of inputs. Such outputs
// will be removed later.
int64_t non_resource_results = 0;
for (auto entry :
llvm::enumerate(callee.front().getTerminator()->getOperands())) {
auto retval = entry.value();
if (!getElementTypeOrSelf(retval.getType()).isa<TF::ResourceType>()) {
result->old_to_new_output_indices.push_back(non_resource_results++);
continue;
}
auto aliasing_arg = retval.dyn_cast<BlockArgument>();
if (!aliasing_arg) {
return callee.emitOpError("unsupported function call: ")
<< "resource return value does not alias an input.";
}
result->old_outputs_aliasing_old_inputs[entry.index()] =
aliasing_arg.getArgNumber();
result->old_to_new_output_indices.push_back(-1);
}
if (failed(FindResourceArgUseInfo(callee, &result->use_info))) {
return failure();
}
if (result->use_info.empty()) {
result->lifted_callee = nullptr;
return success();
}
// Clone the callee before making changes.
SmallString<64> name_base = callee.getName();
auto module = callee.getParentOfType<ModuleOp>();
name_base += "_resource_lifted";
auto name = name_base;
callee = callee.clone();
callee.setName(name);
SymbolTable(module).insert(callee);
result->lifted_callee = callee;
// Remove unused resources in functions.
llvm::SmallDenseMap<int64_t, Type> remaining_resource_data_types;
RemoveUnusedResourceArgumentsAndForwardedRetvals(
result->use_info, callee, /*old_to_new_arg_indices=*/nullptr,
&remaining_resource_data_types);
for (const auto& entry : remaining_resource_data_types) {
result->arg_data_type_and_updated_output_index[entry.getFirst()] = {
entry.getSecond(), -1};
}
llvm::SmallVector<Value, 4> new_retvals;
for (auto val : callee.front().getTerminator()->getOperands()) {
// Remove resource type outputs.
if (getElementTypeOrSelf(val.getType()).isa<TF::ResourceType>()) continue;
new_retvals.push_back(val);
}
// Lift resources.
LiftArgRetResourcesForFunction(
callee, remaining_resource_data_types, [&](int64_t index, Value value) {
result->arg_data_type_and_updated_output_index[index].second =
new_retvals.size();
new_retvals.push_back(value);
});
auto old_return = callee.front().getTerminator();
// Replace old return with the new ones with update values.
OpBuilder builder(old_return);
auto new_return = builder.create<ReturnOp>(old_return->getLoc(), new_retvals);
old_return->erase();
callee.setType(FunctionType::get(
callee.getType().getInputs(),
llvm::to_vector<4>(new_return.getOperandTypes()), callee.getContext()));
return success();
}
// Updates a PartitionedCallOp/StatefulPartitionedCallOp according to the
// resource-lifted new callee function in lifting_info.
template <typename CallOpType>
void UpdatePartitionedCallOpWithNewCallee(
CallOpType call_op, const PartitionedCallLiftingInfo& lifting_info) {
if (lifting_info.lifted_callee == nullptr) return;
// Replace output resource uses with the aliasing input, so that we can remove
// this output.
for (const auto& entry : lifting_info.old_outputs_aliasing_old_inputs) {
call_op.getResult(entry.getFirst())
.replaceAllUsesWith(call_op.getOperand(entry.getSecond()));
}
// Recreate the call op.
OpBuilder builder(call_op);
// Now use the filtered original operands, which will be replaced by
// AddLoadsStoresOutsideControlFlowOp().
auto new_operands =
FilterRange<Value, OperandRange>(call_op.args(), lifting_info.use_info);
auto new_call = builder.create<CallOpType>(
call_op.getLoc(),
const_cast<FuncOp&>(lifting_info.lifted_callee).getType().getResults(),
new_operands, call_op.getAttrs());
new_call.setAttr(
"f", builder.getSymbolRefAttr(
const_cast<FuncOp&>(lifting_info.lifted_callee).getName()));
AddLoadsStoresOutsideControlFlowOp(
new_call, lifting_info.arg_data_type_and_updated_output_index);
// Replace uses.
for (int64_t i = 0; i < lifting_info.old_to_new_output_indices.size(); ++i) {
if (lifting_info.old_to_new_output_indices[i] >= 0) {
call_op.getResult(i).replaceAllUsesWith(
new_call.getResult(lifting_info.old_to_new_output_indices[i]));
}
}
call_op.erase();
}
LogicalResult HoistForFunctionalControlFlow(
Block*, ModuleOp, llvm::SmallDenseMap<FuncOp, PartitionedCallLiftingInfo>*);
// A templated routine for handling both PartitionedCallOp and
// StatefulPartitionedCallOp. If the callee is already lifted, it just updates
// the caller op itself; otherwise, it first recursively handles nested control
// flow, then performs lifting on the callee.
template <typename CallOpType>
LogicalResult HandlePartitionedCallOp(
CallOpType call_op, FuncOp callee, ModuleOp module,
llvm::SmallDenseMap<FuncOp, PartitionedCallLiftingInfo>* lifted_callees) {
auto emplace_res =
lifted_callees->try_emplace(callee, PartitionedCallLiftingInfo());
if (emplace_res.second) {
// Unseen callee. Perform resource lifting on it.
HoistForFunctionalControlFlow(&callee.front(), module, lifted_callees);
if (failed(HandlePartitionedCallOpCallee(
callee, &emplace_res.first->getSecond()))) {
return failure();
}
}
UpdatePartitionedCallOpWithNewCallee(call_op, emplace_res.first->getSecond());
return success();
}
// Hoists resource loads/stores from control flow ops in `block` outside the
// body/cond/branch/callee functions.
LogicalResult HoistForFunctionalControlFlow(
Block* block, ModuleOp module,
llvm::SmallDenseMap<FuncOp, PartitionedCallLiftingInfo>*
lifted_partitioned_call_callees) {
// Remove identity nodes to avoid aliasing.
RemoveIdentity(block);
for (Operation& op : llvm::make_early_inc_range(*block)) {
if (auto while_op = llvm::dyn_cast<TF::WhileOp>(&op)) {
auto body = llvm::cast<FuncOp>(module.lookupSymbol(while_op.body()));
auto cond = llvm::cast<FuncOp>(module.lookupSymbol(while_op.cond()));
// Recursively handle the nested control flow.
HoistForFunctionalControlFlow(&body.front(), module,
lifted_partitioned_call_callees);
HoistForFunctionalControlFlow(&cond.front(), module,
lifted_partitioned_call_callees);
if (failed(HandleWhileLoop(while_op, body, cond))) return failure();
} else if (auto if_op = llvm::dyn_cast<TF::IfOp>(&op)) {
auto then_branch =
llvm::cast<FuncOp>(module.lookupSymbol(if_op.then_branch()));
auto else_branch =
llvm::cast<FuncOp>(module.lookupSymbol(if_op.else_branch()));
// Recursively handle the nested control flow.
HoistForFunctionalControlFlow(&then_branch.front(), module,
lifted_partitioned_call_callees);
HoistForFunctionalControlFlow(&else_branch.front(), module,
lifted_partitioned_call_callees);
if (failed(HandleIfOp(if_op, then_branch, else_branch))) return failure();
} else if (auto call_op = llvm::dyn_cast<TF::PartitionedCallOp>(&op)) {
if (!call_op.f().isa<FlatSymbolRefAttr>()) {
return call_op.emitOpError(
"resource lifting does not support call with nested references.");
}
auto callee = llvm::cast<FuncOp>(
module.lookupSymbol(call_op.f().getRootReference()));
if (failed(HandlePartitionedCallOp(call_op, callee, module,
lifted_partitioned_call_callees))) {
// Nested control flow handling is done in HandlePartitionedCallOp().
return failure();
}
} else if (auto call_op =
llvm::dyn_cast<TF::StatefulPartitionedCallOp>(&op)) {
auto callee = llvm::cast<FuncOp>(module.lookupSymbol(call_op.f()));
if (failed(HandlePartitionedCallOp(call_op, callee, module,
lifted_partitioned_call_callees))) {
return failure();
}
}
}
// Remove unused local variables.
ForwardStoreToLoad(block);
llvm::SmallVector<TF::MlirLocalVarOp, 8> local_vars;
for (Operation& op : *block) {
if (auto local_var = llvm::dyn_cast<TF::MlirLocalVarOp>(&op)) {
local_vars.push_back(local_var);
}
}
for (auto local_var : local_vars) {
if (llvm::all_of(local_var.resource().getUsers(),
[](const Operation* user) {
return llvm::isa<TF::AssignVariableOp>(user);
})) {
for (auto user : local_var.resource().getUsers()) user->erase();
local_var.erase();
}
}
return success();
}
// Lifts resource operations from tf_device.launch ops nested in each function
// of the module to outside the launch bodies. Signals pass failure if there
// are remaining resource-type values that cannot be lifted.
void ResourceOpLiftingPass::runOnModule() {
llvm::SmallDenseMap<FuncOp, PartitionedCallLiftingInfo>
lifted_partitioned_call_callees;
auto result = getModule().walk([&](FuncOp func_op) {
return func_op.walk([&](tf_device::LaunchOp launch_op) {
if (failed(HoistForFunctionalControlFlow(
&launch_op.GetBody(), getModule(),
&lifted_partitioned_call_callees)) ||
failed(HoistResourceOpsFromLaunchOp(launch_op))) {
return WalkResult::interrupt();
}
return WalkResult::advance();
});
});
if (result.wasInterrupted()) {
signalPassFailure();
}
}
struct ResourceOpLiftingForMainFunctionPass
: public ModulePass<ResourceOpLiftingForMainFunctionPass> {
void runOnModule() override;
};
void ResourceOpLiftingForMainFunctionPass::runOnModule() {
ModuleOp module = getModule();
FuncOp main_func = module.lookupSymbol<FuncOp>("main");
if (!main_func) {
return;
}
if (failed(TF::ResourceLiftingForFunctionalControlFlow(main_func))) {
return signalPassFailure();
}
}
static PassRegistration<ResourceOpLiftingForMainFunctionPass>
lift_main_func_pass(
"tf-resource-op-lifting-for-main-function",
"Lifting resource operations out of control flow statements for the "
"main function");
static PassRegistration<ResourceOpLiftingPass> pass(
"tf-resource-op-lifting",
"Lifting resource operations out of device computation");
} // namespace
namespace TFDevice {
std::unique_ptr<OpPassBase<ModuleOp>> CreateResourceOpLiftingPass() {
return std::make_unique<ResourceOpLiftingPass>();
}
} // namespace TFDevice
namespace TF {
LogicalResult ResourceLiftingForFunctionalControlFlow(FuncOp function) {
// This routine should only be called when control flow operations are still
// represented with TF IfOp and WhileOp operations. In this case, there should
// be only one basic block in the MLIR representation.
if (!has_single_element(function.getBlocks())) {
return function.emitError()
<< "expect the function to have 1 block while it has "
<< function.getBlocks().size();
}
llvm::SmallDenseMap<FuncOp, PartitionedCallLiftingInfo>
lifted_partitioned_call_callees;
return HoistForFunctionalControlFlow(&function.front(),
cast<ModuleOp>(function.getParentOp()),
&lifted_partitioned_call_callees);
}
} // namespace TF
} // namespace mlir<|fim▁end|> | |
<|file_name|>lib.rs<|end_file_name|>
/// Solutions to the [Cryptopals Challenges](https://cryptopals.com)
///
/// Written whilst leading the *A*spiring *R*ustacean *S*ocial *E*ducation group within LinkedIn
pub mod encode;
pub mod encrypt;
pub mod stat;
pub mod transform;

pub mod xor_cipher {
    use super::stat::{Histogram, HistogramError};
    use super::transform::TryFixedXor;
    use std::f64;

    /// Bytewise XOR `ciphertext` with `test_byte`, and then measure the chi-square goodness of fit
    /// of the resulting output with `language`.
    pub fn score_byte_decode(
        test_byte: u8,
        ciphertext: &[u8],
        language: &Histogram<char>,
    ) -> Result<f64, HistogramError> {
        let bytes = ciphertext
            .try_fixed_xor(vec![test_byte; ciphertext.len()].as_slice())
            .unwrap();
        let b_len = bytes.len();
        match String::from_utf8(bytes) {
            Ok(s) => {
                if s.len() != b_len {
                    return Err(HistogramError::HistogramMismatch);
                }
                // if the resulting string contains a null byte, it's not printable and can be
                // discarded immediately.
                if s.contains(|c| c == '\0') {
                    return Ok(f64::MAX);
                }
                let s = s
                    .to_lowercase()
                    .chars()
                    .filter(|&c| c.is_alphabetic())
                    .collect::<String>();
                if s.is_empty() {
                    return Err(HistogramError::HistogramMismatch);
                }
                let mut byte_distr: Histogram<char> = s.chars().into();
                byte_distr.normalize();
                match byte_distr.chisq(language) {
                    Ok(raw_score) => {
                        let pct_non_alpha = (b_len - s.len()) as f64 / b_len as f64;
                        Ok(raw_score * pct_non_alpha)
                    }
                    Err(e) => Err(e),
                }
            }
            Err(_) => Err(HistogramError::HistogramMismatch),
        }
    }
}
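// Usage sketch (hypothetical inputs; `Histogram`'s `From<Chars>`, `normalize`,
// and `chisq` come from the `stat` module above):
//
//     let mut english: Histogram<char> = "sample english text".chars().into();
//     english.normalize();
//     let score = xor_cipher::score_byte_decode(b'x', &ciphertext, &english)?;
//     // Lower chi-square scores indicate a more English-like decode.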
<|file_name|>__main__.py<|end_file_name|>
import sys

from cool_commits import info, find

if __name__ == '__main__':
    command = sys.argv[1]
    if command == 'find':
        print(*find(sys.argv[2]))
    elif command == 'info':
        for info_text in info(sys.argv[2]):
            print('='*60)
            print(info_text)
            print('='*60)
    else:
        raise IOError('Invalid command, supporting only `find` and `info`.')
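# Usage sketch (hypothetical arguments; `find` and `info` each take the single
# string passed through from sys.argv[2]):
#   python -m cool_commits find <pattern>
#   python -m cool_commits info <commit-hash>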
<|file_name|>start.js<|end_file_name|>
'use strict';

const gulp = require('gulp');
const sequence = require('run-sequence');

gulp.task('start', cb => {
  sequence('configure', 'build', 'install', cb);
});
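// Running `gulp start` chains the `configure`, `build`, and `install` tasks in
// order via run-sequence; those tasks are assumed to be registered elsewhere in
// this gulpfile.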
<|file_name|>GenCall.cpp<|end_file_name|>
//===--- GenCall.cpp - Swift IR Generation for Function Calls -------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file implements IR generation for function signature lowering
// in Swift. This includes creating the IR type, collecting IR attributes,
// performing calls, and supporting prologue and epilogue emission.
//
//===----------------------------------------------------------------------===//
#include "GenCall.h"
#include "Signature.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "clang/CodeGen/ModuleBuilder.h"
#include "swift/AST/GenericEnvironment.h"
#include "swift/SIL/SILType.h"
#include "swift/ABI/MetadataValues.h"
#include "swift/Runtime/Config.h"
#include "llvm/IR/CallSite.h"
#include "llvm/Support/Compiler.h"
#include "CallEmission.h"
#include "Explosion.h"
#include "GenObjC.h"
#include "GenPoly.h"
#include "GenProto.h"
#include "GenType.h"
#include "IRGenFunction.h"
#include "IRGenModule.h"
#include "LoadableTypeInfo.h"
#include "NativeConventionSchema.h"
using namespace swift;
using namespace irgen;
static Size getYieldOnceCoroutineBufferSize(IRGenModule &IGM) {
return NumWords_YieldOnceBuffer * IGM.getPointerSize();
}
static Alignment getYieldOnceCoroutineBufferAlignment(IRGenModule &IGM) {
return IGM.getPointerAlignment();
}
static Size getYieldManyCoroutineBufferSize(IRGenModule &IGM) {
return NumWords_YieldManyBuffer * IGM.getPointerSize();
}
static Alignment getYieldManyCoroutineBufferAlignment(IRGenModule &IGM) {
return IGM.getPointerAlignment();
}
static Size getCoroutineContextSize(IRGenModule &IGM,
CanSILFunctionType fnType) {
switch (fnType->getCoroutineKind()) {
case SILCoroutineKind::None:
llvm_unreachable("expand a coroutine");
case SILCoroutineKind::YieldOnce:
return getYieldOnceCoroutineBufferSize(IGM);
case SILCoroutineKind::YieldMany:
return getYieldManyCoroutineBufferSize(IGM);
}
llvm_unreachable("bad kind");
}
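// Returns the LLVM result type for this explosion schema: void when empty, the
// single scalar type when there is one element, and a struct of the scalar
// types otherwise (e.g., a schema of {i64, i8*} yields the struct { i64, i8* }).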
llvm::Type *ExplosionSchema::getScalarResultType(IRGenModule &IGM) const {
if (size() == 0) {
return IGM.VoidTy;
} else if (size() == 1) {
return begin()->getScalarType();
} else {
SmallVector<llvm::Type*, 16> elts;
for (auto &elt : *this) elts.push_back(elt.getScalarType());
return llvm::StructType::get(IGM.getLLVMContext(), elts);
}
}
static void addDereferenceableAttributeToBuilder(IRGenModule &IGM,
llvm::AttrBuilder &b,
const TypeInfo &ti) {
// The addresses of empty values are undefined, so we can't safely mark them
// dereferenceable.
if (ti.isKnownEmpty(ResilienceExpansion::Maximal))
return;
// If we know the type to have a fixed nonempty size, then the pointer is
// dereferenceable to at least that size.
// TODO: Would be nice to have a "getMinimumKnownSize" on TypeInfo for
// dynamic-layout aggregates.
if (auto fixedTI = dyn_cast<FixedTypeInfo>(&ti)) {
b.addAttribute(
llvm::Attribute::getWithDereferenceableBytes(IGM.LLVMContext,
fixedTI->getFixedSize().getValue()));
}
}
static void addIndirectValueParameterAttributes(IRGenModule &IGM,
llvm::AttributeList &attrs,
const TypeInfo &ti,
unsigned argIndex) {
llvm::AttrBuilder b;
// Value parameter pointers can't alias or be captured.
b.addAttribute(llvm::Attribute::NoAlias);
b.addAttribute(llvm::Attribute::NoCapture);
// The parameter must reference dereferenceable memory of the type.
addDereferenceableAttributeToBuilder(IGM, b, ti);
attrs = attrs.addAttributes(IGM.LLVMContext,
argIndex + llvm::AttributeList::FirstArgIndex, b);
}
static void addInoutParameterAttributes(IRGenModule &IGM,
llvm::AttributeList &attrs,
const TypeInfo &ti, unsigned argIndex,
bool aliasable) {
llvm::AttrBuilder b;
// Aliasing inouts is unspecified, but we still want aliasing to be memory-
// safe, so we can't mark inouts as noalias at the LLVM level.
// They still can't be captured without doing unsafe stuff, though.
b.addAttribute(llvm::Attribute::NoCapture);
// The inout must reference dereferenceable memory of the type.
addDereferenceableAttributeToBuilder(IGM, b, ti);
attrs = attrs.addAttributes(IGM.LLVMContext,
argIndex + llvm::AttributeList::FirstArgIndex, b);
}
static llvm::CallingConv::ID getFreestandingConvention(IRGenModule &IGM) {
// TODO: use a custom CC that returns three scalars efficiently
return IGM.SwiftCC;
}
/// Expand the requirements of the given abstract calling convention
/// into a "physical" calling convention.
llvm::CallingConv::ID irgen::expandCallingConv(IRGenModule &IGM,
SILFunctionTypeRepresentation convention) {
switch (convention) {
case SILFunctionTypeRepresentation::CFunctionPointer:
case SILFunctionTypeRepresentation::ObjCMethod:
case SILFunctionTypeRepresentation::Block:
return llvm::CallingConv::C;
case SILFunctionTypeRepresentation::Method:
case SILFunctionTypeRepresentation::WitnessMethod:
case SILFunctionTypeRepresentation::Closure:
case SILFunctionTypeRepresentation::Thin:
case SILFunctionTypeRepresentation::Thick:
return getFreestandingConvention(IGM);
}
llvm_unreachable("bad calling convention!");
}
static void addIndirectResultAttributes(IRGenModule &IGM,
llvm::AttributeList &attrs,
unsigned paramIndex, bool allowSRet) {
llvm::AttrBuilder b;
b.addAttribute(llvm::Attribute::NoAlias);
b.addAttribute(llvm::Attribute::NoCapture);
if (allowSRet)
b.addAttribute(llvm::Attribute::StructRet);
attrs = attrs.addAttributes(IGM.LLVMContext,
paramIndex + llvm::AttributeList::FirstArgIndex,
b);
}
void IRGenModule::addSwiftSelfAttributes(llvm::AttributeList &attrs,
unsigned argIndex) {
llvm::AttrBuilder b;
b.addAttribute(llvm::Attribute::SwiftSelf);
attrs = attrs.addAttributes(this->LLVMContext,
argIndex + llvm::AttributeList::FirstArgIndex, b);
}
void IRGenModule::addSwiftErrorAttributes(llvm::AttributeList &attrs,
unsigned argIndex) {
llvm::AttrBuilder b;
// Don't add the swifterror attribute on ABIs that don't pass it in a register.
// We create a shadow stack location of the swifterror parameter for the
// debugger on such platforms and so we can't mark the parameter with a
// swifterror attribute.
if (IsSwiftErrorInRegister)
b.addAttribute(llvm::Attribute::SwiftError);
// The error result should not be aliased, captured, or pointed at invalid
// addresses regardless.
b.addAttribute(llvm::Attribute::NoAlias);
b.addAttribute(llvm::Attribute::NoCapture);
b.addDereferenceableAttr(getPointerSize().getValue());
auto attrIndex = argIndex + llvm::AttributeList::FirstArgIndex;
attrs = attrs.addAttributes(this->LLVMContext, attrIndex, b);
}
void irgen::addByvalArgumentAttributes(IRGenModule &IGM,
llvm::AttributeList &attrs,
unsigned argIndex, Alignment align) {
llvm::AttrBuilder b;
b.addAttribute(llvm::Attribute::ByVal);
b.addAttribute(llvm::Attribute::getWithAlignment(IGM.LLVMContext,
align.getValue()));
attrs = attrs.addAttributes(IGM.LLVMContext,
argIndex + llvm::AttributeList::FirstArgIndex, b);
}
void irgen::addExtendAttribute(IRGenModule &IGM, llvm::AttributeList &attrs,
unsigned index, bool signExtend) {
llvm::AttrBuilder b;
if (signExtend)
b.addAttribute(llvm::Attribute::SExt);
else
b.addAttribute(llvm::Attribute::ZExt);
attrs = attrs.addAttributes(IGM.LLVMContext, index, b);
}
namespace swift {
namespace irgen {
namespace {
class SignatureExpansion {
IRGenModule &IGM;
CanSILFunctionType FnType;
public:
SmallVector<llvm::Type*, 8> ParamIRTypes;
llvm::Type *ResultIRType = nullptr;
llvm::AttributeList Attrs;
ForeignFunctionInfo ForeignInfo;
CoroutineInfo CoroInfo;
bool CanUseSRet = true;
bool CanUseError = true;
bool CanUseSelf = true;
SignatureExpansion(IRGenModule &IGM, CanSILFunctionType fnType)
: IGM(IGM), FnType(fnType) {}
/// Expand the components of the primary entrypoint of the function type.
void expandFunctionType();
/// Expand the components of the continuation entrypoint of the
/// function type.
void expandCoroutineContinuationType();
Signature getSignature();
private:
void expand(SILParameterInfo param);
llvm::Type *addIndirectResult();
SILFunctionConventions getSILFuncConventions() const {
return SILFunctionConventions(FnType, IGM.getSILModule());
}
unsigned getCurParamIndex() {
return ParamIRTypes.size();
}
bool claimSRet() {
bool result = CanUseSRet;
CanUseSRet = false;
return result;
}
bool claimSelf() {
auto Ret = CanUseSelf;
assert(CanUseSelf && "Multiple self parameters?!");
CanUseSelf = false;
return Ret;
}
bool claimError() {
auto Ret = CanUseError;
assert(CanUseError && "Multiple error parameters?!");
CanUseError = false;
return Ret;
}
/// Add a pointer to the given type as the next parameter.
void addPointerParameter(llvm::Type *storageType) {
ParamIRTypes.push_back(storageType->getPointerTo());
}
void addCoroutineContextParameter();
void expandResult();
llvm::Type *expandDirectResult();
void expandParameters();
void expandExternalSignatureTypes();
void expandCoroutineResult(bool forContinuation);
void expandCoroutineContinuationParameters();
};
} // end anonymous namespace
} // end namespace irgen
} // end namespace swift
llvm::Type *SignatureExpansion::addIndirectResult() {
auto resultType = getSILFuncConventions().getSILResultType();
const TypeInfo &resultTI = IGM.getTypeInfo(resultType);
addIndirectResultAttributes(IGM, Attrs, ParamIRTypes.size(), claimSRet());
addPointerParameter(resultTI.getStorageType());
return IGM.VoidTy;
}
/// Expand all of the direct and indirect result types.
void SignatureExpansion::expandResult() {
if (FnType->isCoroutine()) {
// This should be easy enough to support if we need to: use the
// same algorithm but add the direct results to the results as if
// they were unioned in.
return expandCoroutineResult(/*for continuation*/ false);
}
auto fnConv = getSILFuncConventions();
// Disable the use of sret if we have multiple indirect results.
if (fnConv.getNumIndirectSILResults() > 1)
CanUseSRet = false;
// Expand the direct result.
ResultIRType = expandDirectResult();
// Expand the indirect results.
for (auto indirectResultType : fnConv.getIndirectSILResultTypes()) {
addIndirectResultAttributes(IGM, Attrs, ParamIRTypes.size(), claimSRet());
addPointerParameter(IGM.getStorageType(indirectResultType));
}
}
namespace {
class YieldSchema {
SILType YieldTy;
const TypeInfo &YieldTI;
Optional<NativeConventionSchema> NativeSchema;
bool IsIndirect;
public:
YieldSchema(IRGenModule &IGM, SILFunctionConventions fnConv,
SILYieldInfo yield)
: YieldTy(fnConv.getSILType(yield)),
YieldTI(IGM.getTypeInfo(YieldTy)) {
if (isFormalIndirect()) {
IsIndirect = true;
} else {
NativeSchema.emplace(IGM, &YieldTI, /*result*/ true);
IsIndirect = NativeSchema->requiresIndirect();
}
}
SILType getSILType() const {
return YieldTy;
}
const TypeInfo &getTypeInfo() const {
return YieldTI;
}
/// Should the yielded value be yielded as a pointer?
bool isIndirect() const { return IsIndirect; }
/// Is the yielded value formally indirect?
bool isFormalIndirect() const { return YieldTy.isAddress(); }
llvm::PointerType *getIndirectPointerType() const {
assert(isIndirect());
return YieldTI.getStorageType()->getPointerTo();
}
const NativeConventionSchema &getDirectSchema() const {
assert(!isIndirect());
return *NativeSchema;
}
void enumerateDirectComponents(llvm::function_ref<void(llvm::Type*)> fn) {
getDirectSchema().enumerateComponents([&](clang::CharUnits begin,
clang::CharUnits end,
llvm::Type *componentTy) {
fn(componentTy);
});
}
};
}
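// Expands the result type of a coroutine's ramp function (and, for yield-many
// coroutines, of its continuations): the continuation pointer comes first,
// followed by the direct yield components; e.g., directly yielding two i64
// values produces { i8*, i64, i64 }. Components the ABI cannot return directly
// are instead reached through a trailing pointer to an overflow struct.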
void SignatureExpansion::expandCoroutineResult(bool forContinuation) {
assert(FnType->getNumResults() == 0 &&
"having both normal and yield results is currently unsupported");
// The return type may be different for the ramp function vs. the
// continuations.
if (forContinuation) {
switch (FnType->getCoroutineKind()) {
case SILCoroutineKind::None:
llvm_unreachable("should have been filtered out before here");
// Yield-once coroutines just return void from the continuation.
case SILCoroutineKind::YieldOnce:
ResultIRType = IGM.VoidTy;
return;
// Yield-many coroutines yield the same types from the continuation
// as they do from the ramp function.
case SILCoroutineKind::YieldMany:
break;
}
}
SmallVector<llvm::Type*, 8> components;
// The continuation pointer.
components.push_back(IGM.Int8PtrTy);
auto fnConv = getSILFuncConventions();
for (auto yield : FnType->getYields()) {
YieldSchema schema(IGM, fnConv, yield);
// If the individual value must be yielded indirectly, add a pointer.
if (schema.isIndirect()) {
components.push_back(schema.getIndirectPointerType());
continue;
}
// Otherwise, collect all the component types.
schema.enumerateDirectComponents([&](llvm::Type *type) {
components.push_back(type);
});
}
// Find the maximal sequence of the component types that we can
// convince the ABI to pass directly.
// When counting components, ignore the continuation pointer.
unsigned numDirectComponents = components.size() - 1;
SmallVector<llvm::Type*, 8> overflowTypes;
while (clang::CodeGen::swiftcall::
shouldPassIndirectly(IGM.ClangCodeGen->CGM(), components,
/*asReturnValue*/ true)) {
// If we added a pointer to the end of components, remove it.
if (!overflowTypes.empty()) components.pop_back();
// Remove the last component and add it as an overflow type.
overflowTypes.push_back(components.pop_back_val());
numDirectComponents--;
// Add a pointer to the end of components.
components.push_back(IGM.Int8PtrTy);
}
// We'd better have been able to pass at least two pointers.
assert(components.size() >= 2 || overflowTypes.empty());
CoroInfo.NumDirectYieldComponents = numDirectComponents;
// Replace the pointer type we added to components with the real
// pointer-to-overflow type.
if (!overflowTypes.empty()) {
std::reverse(overflowTypes.begin(), overflowTypes.end());
// TODO: should we use some sort of real layout here instead of
// trusting LLVM's?
components.back() =
llvm::StructType::get(IGM.getLLVMContext(), overflowTypes)
->getPointerTo();
}
ResultIRType = components.size() == 1
? components.front()
: llvm::StructType::get(IGM.getLLVMContext(), components);
}
void SignatureExpansion::expandCoroutineContinuationParameters() {
// The coroutine context.
addCoroutineContextParameter();
// Whether this is an unwind resumption.
ParamIRTypes.push_back(IGM.Int1Ty);
}
void SignatureExpansion::addCoroutineContextParameter() {
// Flag that the context is dereferenceable and unaliased.
auto contextSize = getCoroutineContextSize(IGM, FnType);
Attrs = Attrs.addDereferenceableParamAttr(IGM.getLLVMContext(),
getCurParamIndex(),
contextSize.getValue());
Attrs = Attrs.addParamAttribute(IGM.getLLVMContext(),
getCurParamIndex(),
llvm::Attribute::NoAlias);
ParamIRTypes.push_back(IGM.Int8PtrTy);
}
NativeConventionSchema::NativeConventionSchema(IRGenModule &IGM,
const TypeInfo *ti,
bool IsResult)
: Lowering(IGM.ClangCodeGen->CGM()) {
if (auto *loadable = dyn_cast<LoadableTypeInfo>(ti)) {
// Lower the type according to the Swift ABI.
loadable->addToAggLowering(IGM, Lowering, Size(0));
Lowering.finish();
// Should we pass indirectly according to the ABI?
RequiresIndirect = Lowering.shouldPassIndirectly(IsResult);
} else {
Lowering.finish();
RequiresIndirect = true;
}
}
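// Returns the expanded LLVM type for this schema: void when empty, the single
// component type when there is exactly one component, and a struct of the
// component types otherwise.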
llvm::Type *NativeConventionSchema::getExpandedType(IRGenModule &IGM) const {
if (empty())
return IGM.VoidTy;
SmallVector<llvm::Type *, 8> elts;
Lowering.enumerateComponents([&](clang::CharUnits offset,
clang::CharUnits end,
llvm::Type *type) { elts.push_back(type); });
if (elts.size() == 1)
return elts[0];
auto &ctx = IGM.getLLVMContext();
return llvm::StructType::get(ctx, elts, /*packed*/ false);
}
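// Computes the pair of coercion struct types used when moving values of this
// schema through memory: the first struct carries the non-overlapped
// components (plus the integer portions of overlapped ones) with explicit
// padding, and the second carries the remaining non-integer overlapped
// components. For example, a lowering of (i32 at [0,4), float at [4,8)) has no
// overlap, so the result is ({ i32, float }, {}).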
std::pair<llvm::StructType *, llvm::StructType *>
NativeConventionSchema::getCoercionTypes(
IRGenModule &IGM, SmallVectorImpl<unsigned> &expandedTyIndicesMap) const {
auto &ctx = IGM.getLLVMContext();
if (empty()) {
auto type = llvm::StructType::get(ctx);
return {type, type};
}
clang::CharUnits lastEnd = clang::CharUnits::Zero();
llvm::SmallSet<unsigned, 8> overlappedWithSuccessor;
unsigned idx = 0;
// Mark overlapping ranges.
Lowering.enumerateComponents(
[&](clang::CharUnits offset, clang::CharUnits end, llvm::Type *type) {
if (offset < lastEnd) {
overlappedWithSuccessor.insert(idx);
}
lastEnd = end;
++idx;
});
// Create the coercion struct with only the integer portion of overlapped
// components and non-overlapped components.
idx = 0;
lastEnd = clang::CharUnits::Zero();
SmallVector<llvm::Type *, 8> elts;
bool packed = false;
Lowering.enumerateComponents(
[&](clang::CharUnits begin, clang::CharUnits end, llvm::Type *type) {
bool overlapped = overlappedWithSuccessor.count(idx) ||
(idx && overlappedWithSuccessor.count(idx - 1));
++idx;
if (overlapped && !isa<llvm::IntegerType>(type)) {
// keep the old lastEnd for padding.
return;
}
// Add padding (which may include padding for overlapped non-integer
// components).
if (begin != lastEnd) {
auto paddingSize = begin - lastEnd;
assert(!paddingSize.isNegative());
auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
paddingSize.getQuantity());
elts.push_back(padding);
}
if (!packed &&
!begin.isMultipleOf(clang::CharUnits::fromQuantity(
IGM.DataLayout.getABITypeAlignment(type))))
packed = true;
elts.push_back(type);
expandedTyIndicesMap.push_back(idx - 1);
lastEnd = begin + clang::CharUnits::fromQuantity(
IGM.DataLayout.getTypeAllocSize(type));
assert(end <= lastEnd);
});
auto *coercionType = llvm::StructType::get(ctx, elts, packed);
if (overlappedWithSuccessor.empty())
return {coercionType, llvm::StructType::get(ctx)};
// Create the coercion struct with only the non-integer overlapped
// components.
idx = 0;
lastEnd = clang::CharUnits::Zero();
elts.clear();
packed = false;
Lowering.enumerateComponents(
[&](clang::CharUnits begin, clang::CharUnits end, llvm::Type *type) {
bool overlapped = overlappedWithSuccessor.count(idx) ||
(idx && overlappedWithSuccessor.count(idx - 1));
++idx;
if (!overlapped || (overlapped && isa<llvm::IntegerType>(type))) {
// Ignore and keep the old lastEnd for padding.
return;
}
// Add padding.
if (begin != lastEnd) {
auto paddingSize = begin - lastEnd;
assert(!paddingSize.isNegative());
auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
paddingSize.getQuantity());
elts.push_back(padding);
}
if (!packed &&
!begin.isMultipleOf(clang::CharUnits::fromQuantity(
IGM.DataLayout.getABITypeAlignment(type))))
packed = true;
elts.push_back(type);
expandedTyIndicesMap.push_back(idx - 1);
lastEnd = begin + clang::CharUnits::fromQuantity(
IGM.DataLayout.getTypeAllocSize(type));
assert(end <= lastEnd);
});
auto *overlappedCoercionType = llvm::StructType::get(ctx, elts, packed);
return {coercionType, overlappedCoercionType};
}
// TODO: Direct to Indirect result conversion could be handled in a SIL
// AddressLowering pass.
llvm::Type *SignatureExpansion::expandDirectResult() {
// Handle the direct result type, checking for supposedly scalar
// result types that we actually want to return indirectly.
auto resultType = getSILFuncConventions().getSILResultType();
// Fast-path the empty tuple type.
if (auto tuple = resultType.getAs<TupleType>())
if (tuple->getNumElements() == 0)
return IGM.VoidTy;
switch (FnType->getLanguage()) {
case SILFunctionLanguage::C:
llvm_unreachable("Expanding C/ObjC parameters in the wrong place!");
break;
case SILFunctionLanguage::Swift: {
auto &ti = IGM.getTypeInfo(resultType);
auto &native = ti.nativeReturnValueSchema(IGM);
if (native.requiresIndirect())
return addIndirectResult();
// Disable the use of sret if we have a non-trivial direct result.
if (!native.empty()) CanUseSRet = false;
return native.getExpandedType(IGM);
}
}
llvm_unreachable("Not a valid SILFunctionLanguage.");
}
static const clang::FieldDecl *
getLargestUnionField(const clang::RecordDecl *record,
const clang::ASTContext &ctx) {
const clang::FieldDecl *largestField = nullptr;
clang::CharUnits unionSize = clang::CharUnits::Zero();
for (auto field : record->fields()) {
assert(!field->isBitField());
clang::CharUnits fieldSize = ctx.getTypeSizeInChars(field->getType());
if (unionSize < fieldSize) {
unionSize = fieldSize;
largestField = field;
}
}
assert(largestField && "empty union?");
return largestField;
}
namespace {
/// A CRTP class for working with Clang's ABIArgInfo::Expand
/// argument type expansions.
template <class Impl, class... Args> struct ClangExpand {
IRGenModule &IGM;
const clang::ASTContext &Ctx;
ClangExpand(IRGenModule &IGM) : IGM(IGM), Ctx(IGM.getClangASTContext()) {}
Impl &asImpl() { return *static_cast<Impl*>(this); }
void visit(clang::CanQualType type, Args... args) {
switch (type->getTypeClass()) {
#define TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) \
case clang::Type::Class:
#define DEPENDENT_TYPE(Class, Base) \
case clang::Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
case clang::Type::Class:
#include "clang/AST/TypeNodes.def"
llvm_unreachable("canonical or dependent type in ABI lowering");
// These shouldn't occur in expandable struct types.
case clang::Type::IncompleteArray:
case clang::Type::VariableArray:
llvm_unreachable("variable-sized or incomplete array in ABI lowering");
// We should only ever get ObjC pointers, not underlying objects.
case clang::Type::ObjCInterface:
case clang::Type::ObjCObject:
llvm_unreachable("ObjC object type in ABI lowering");
// We should only ever get function pointers.
case clang::Type::FunctionProto:
case clang::Type::FunctionNoProto:
llvm_unreachable("non-pointer function type in ABI lowering");
// We currently never import C++ code, and we should be able to
// kill Expand before we do.
case clang::Type::LValueReference:
case clang::Type::RValueReference:
case clang::Type::MemberPointer:
case clang::Type::Auto:
case clang::Type::DeducedTemplateSpecialization:
llvm_unreachable("C++ type in ABI lowering?");
case clang::Type::Pipe:
llvm_unreachable("OpenCL type in ABI lowering?");
case clang::Type::ConstantArray: {
auto array = Ctx.getAsConstantArrayType(type);
auto elt = Ctx.getCanonicalType(array->getElementType());
auto &&context = asImpl().beginArrayElements(elt);
uint64_t n = array->getSize().getZExtValue();
for (uint64_t i = 0; i != n; ++i) {
asImpl().visitArrayElement(elt, i, context, args...);
}
return;
}
case clang::Type::Record: {
auto record = cast<clang::RecordType>(type)->getDecl();
if (record->isUnion()) {
auto largest = getLargestUnionField(record, Ctx);
asImpl().visitUnionField(record, largest, args...);
} else {
auto &&context = asImpl().beginStructFields(record);
for (auto field : record->fields()) {
asImpl().visitStructField(record, field, context, args...);
}
}
return;
}
case clang::Type::Complex: {
auto elt = type.castAs<clang::ComplexType>().getElementType();
asImpl().visitComplexElement(elt, 0, args...);
asImpl().visitComplexElement(elt, 1, args...);
return;
}
// Just handle these types as opaque integers.
case clang::Type::Enum:
case clang::Type::Atomic:
asImpl().visitScalar(convertTypeAsInteger(type), args...);
return;
case clang::Type::Builtin:
asImpl().visitScalar(
convertBuiltinType(type.castAs<clang::BuiltinType>()),
args...);
return;
case clang::Type::Vector:
case clang::Type::ExtVector:
asImpl().visitScalar(
convertVectorType(type.castAs<clang::VectorType>()),
args...);
return;
case clang::Type::Pointer:
case clang::Type::BlockPointer:
case clang::Type::ObjCObjectPointer:
asImpl().visitScalar(IGM.Int8PtrTy, args...);
return;
}
llvm_unreachable("bad type kind");
}
Size getSizeOfType(clang::QualType type) {
auto clangSize = Ctx.getTypeSizeInChars(type);
return Size(clangSize.getQuantity());
}
private:
llvm::Type *convertVectorType(clang::CanQual<clang::VectorType> type) {
auto eltTy =
convertBuiltinType(type->getElementType().castAs<clang::BuiltinType>());
return llvm::VectorType::get(eltTy, type->getNumElements());
}
llvm::Type *convertBuiltinType(clang::CanQual<clang::BuiltinType> type) {
switch (type.getTypePtr()->getKind()) {
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
case clang::BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
case clang::BuiltinType::Dependent:
llvm_unreachable("placeholder type in ABI lowering");
// We should never see these unadorned.
case clang::BuiltinType::ObjCId:
case clang::BuiltinType::ObjCClass:
case clang::BuiltinType::ObjCSel:
llvm_unreachable("bare Objective-C object type in ABI lowering");
// This should never be the type of an argument or field.
case clang::BuiltinType::Void:
llvm_unreachable("bare void type in ABI lowering");
// We should never see the OpenCL builtin types at all.
case clang::BuiltinType::OCLImage1dRO:
case clang::BuiltinType::OCLImage1dRW:
case clang::BuiltinType::OCLImage1dWO:
case clang::BuiltinType::OCLImage1dArrayRO:
case clang::BuiltinType::OCLImage1dArrayRW:
case clang::BuiltinType::OCLImage1dArrayWO:
case clang::BuiltinType::OCLImage1dBufferRO:
case clang::BuiltinType::OCLImage1dBufferRW:
case clang::BuiltinType::OCLImage1dBufferWO:
case clang::BuiltinType::OCLImage2dRO:
case clang::BuiltinType::OCLImage2dRW:
case clang::BuiltinType::OCLImage2dWO:
case clang::BuiltinType::OCLImage2dArrayRO:
case clang::BuiltinType::OCLImage2dArrayRW:
case clang::BuiltinType::OCLImage2dArrayWO:
case clang::BuiltinType::OCLImage2dDepthRO:
case clang::BuiltinType::OCLImage2dDepthRW:
case clang::BuiltinType::OCLImage2dDepthWO:
case clang::BuiltinType::OCLImage2dArrayDepthRO:
case clang::BuiltinType::OCLImage2dArrayDepthRW:
case clang::BuiltinType::OCLImage2dArrayDepthWO:
case clang::BuiltinType::OCLImage2dMSAARO:
case clang::BuiltinType::OCLImage2dMSAARW:
case clang::BuiltinType::OCLImage2dMSAAWO:
case clang::BuiltinType::OCLImage2dArrayMSAARO:
case clang::BuiltinType::OCLImage2dArrayMSAARW:
case clang::BuiltinType::OCLImage2dArrayMSAAWO:
case clang::BuiltinType::OCLImage2dMSAADepthRO:
case clang::BuiltinType::OCLImage2dMSAADepthRW:
case clang::BuiltinType::OCLImage2dMSAADepthWO:
case clang::BuiltinType::OCLImage2dArrayMSAADepthRO:
case clang::BuiltinType::OCLImage2dArrayMSAADepthRW:
case clang::BuiltinType::OCLImage2dArrayMSAADepthWO:
case clang::BuiltinType::OCLImage3dRO:
case clang::BuiltinType::OCLImage3dRW:
case clang::BuiltinType::OCLImage3dWO:
case clang::BuiltinType::OCLSampler:
case clang::BuiltinType::OCLEvent:
case clang::BuiltinType::OCLClkEvent:
case clang::BuiltinType::OCLQueue:
case clang::BuiltinType::OCLReserveID:
llvm_unreachable("OpenCL type in ABI lowering");
// Handle all the integer types as opaque values.
#define BUILTIN_TYPE(Id, SingletonId)
#define SIGNED_TYPE(Id, SingletonId) \
case clang::BuiltinType::Id:
#define UNSIGNED_TYPE(Id, SingletonId) \
case clang::BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
return convertTypeAsInteger(type);
// Lower all the floating-point values by their semantics.
case clang::BuiltinType::Half:
return convertFloatingType(Ctx.getTargetInfo().getHalfFormat());
case clang::BuiltinType::Float:
return convertFloatingType(Ctx.getTargetInfo().getFloatFormat());
case clang::BuiltinType::Double:
return convertFloatingType(Ctx.getTargetInfo().getDoubleFormat());
case clang::BuiltinType::LongDouble:
return convertFloatingType(Ctx.getTargetInfo().getLongDoubleFormat());
case clang::BuiltinType::Float16:
llvm_unreachable("When upstream support is added for Float16 in "
"clang::TargetInfo, use the implementation here");
case clang::BuiltinType::Float128:
return convertFloatingType(Ctx.getTargetInfo().getFloat128Format());
// nullptr_t -> void*
case clang::BuiltinType::NullPtr:
return IGM.Int8PtrTy;
}
llvm_unreachable("bad builtin type");
}
llvm::Type *convertFloatingType(const llvm::fltSemantics &format) {
if (&format == &llvm::APFloat::IEEEhalf())
return llvm::Type::getHalfTy(IGM.getLLVMContext());
if (&format == &llvm::APFloat::IEEEsingle())
return llvm::Type::getFloatTy(IGM.getLLVMContext());
if (&format == &llvm::APFloat::IEEEdouble())
return llvm::Type::getDoubleTy(IGM.getLLVMContext());
if (&format == &llvm::APFloat::IEEEquad())
return llvm::Type::getFP128Ty(IGM.getLLVMContext());
if (&format == &llvm::APFloat::PPCDoubleDouble())
return llvm::Type::getPPC_FP128Ty(IGM.getLLVMContext());
if (&format == &llvm::APFloat::x87DoubleExtended())
return llvm::Type::getX86_FP80Ty(IGM.getLLVMContext());
llvm_unreachable("bad float format");
}
llvm::Type *convertTypeAsInteger(clang::QualType type) {
auto size = getSizeOfType(type);
return llvm::IntegerType::get(IGM.getLLVMContext(),
size.getValueInBits());
}
};
/// A CRTP specialization of ClangExpand which projects down to
/// various aggregate elements of an address.
///
/// Subclasses should only have to define visitScalar.
template <class Impl>
class ClangExpandProjection : public ClangExpand<Impl, Address> {
using super = ClangExpand<Impl, Address>;
using super::asImpl;
using super::IGM;
using super::Ctx;
using super::getSizeOfType;
protected:
IRGenFunction &IGF;
ClangExpandProjection(IRGenFunction &IGF)
: super(IGF.IGM), IGF(IGF) {}
public:
void visit(clang::CanQualType type, Address addr) {
assert(addr.getType() == IGM.Int8PtrTy);
super::visit(type, addr);
}
Size beginArrayElements(clang::CanQualType element) {
return getSizeOfType(element);
}
void visitArrayElement(clang::CanQualType element, unsigned i,
Size elementSize, Address arrayAddr) {
asImpl().visit(element, createGEPAtOffset(arrayAddr, elementSize * i));
}
void visitComplexElement(clang::CanQualType element, unsigned i,
Address complexAddr) {
Address addr = complexAddr;
if (i) { addr = createGEPAtOffset(complexAddr, getSizeOfType(element)); }
asImpl().visit(element, addr);
}
void visitUnionField(const clang::RecordDecl *record,
const clang::FieldDecl *field,
Address structAddr) {
asImpl().visit(Ctx.getCanonicalType(field->getType()), structAddr);
}
const clang::ASTRecordLayout &
beginStructFields(const clang::RecordDecl *record) {
return Ctx.getASTRecordLayout(record);
}
void visitStructField(const clang::RecordDecl *record,
const clang::FieldDecl *field,
const clang::ASTRecordLayout &layout,
Address structAddr) {
auto fieldIndex = field->getFieldIndex();
assert(!field->isBitField());
auto fieldOffset = Size(layout.getFieldOffset(fieldIndex) / 8);
asImpl().visit(Ctx.getCanonicalType(field->getType()),
createGEPAtOffset(structAddr, fieldOffset));
}
private:
Address createGEPAtOffset(Address addr, Size offset) {
if (offset.isZero()) {
return addr;
} else {
return IGF.Builder.CreateConstByteArrayGEP(addr, offset);
}
}
};
/// A class for collecting the types of a Clang ABIArgInfo::Expand
/// argument expansion.
struct ClangExpandTypeCollector : ClangExpand<ClangExpandTypeCollector> {
SmallVectorImpl<llvm::Type*> &Types;
ClangExpandTypeCollector(IRGenModule &IGM,
SmallVectorImpl<llvm::Type*> &types)
: ClangExpand(IGM), Types(types) {}
bool beginArrayElements(clang::CanQualType element) { return true; }
void visitArrayElement(clang::CanQualType element, unsigned i, bool _) {
visit(element);
}
void visitComplexElement(clang::CanQualType element, unsigned i) {
visit(element);
}
void visitUnionField(const clang::RecordDecl *record,
const clang::FieldDecl *field) {
visit(Ctx.getCanonicalType(field->getType()));
}
bool beginStructFields(const clang::RecordDecl *record) { return true; }
void visitStructField(const clang::RecordDecl *record,
const clang::FieldDecl *field,
bool _) {
visit(Ctx.getCanonicalType(field->getType()));
}
void visitScalar(llvm::Type *type) {
Types.push_back(type);
}
};
} // end anonymous namespace
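/// Does the Clang ABIArgInfo::Expand expansion of the given type produce
/// exactly the scalar types of the given explosion schema?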
static bool doesClangExpansionMatchSchema(IRGenModule &IGM,
clang::CanQualType type,
const ExplosionSchema &schema) {
assert(!schema.containsAggregate());
SmallVector<llvm::Type *, 4> expansion;
ClangExpandTypeCollector(IGM, expansion).visit(type);
if (expansion.size() != schema.size())
return false;
for (size_t i = 0, e = schema.size(); i != e; ++i) {
if (schema[i].getScalarType() != expansion[i])
return false;
}
return true;
}
/// Expand the result and parameter types to the appropriate LLVM IR
/// types for C and Objective-C signatures.
void SignatureExpansion::expandExternalSignatureTypes() {
assert(FnType->getLanguage() == SILFunctionLanguage::C);
// Convert the SIL result type to a Clang type.
auto clangResultTy = IGM.getClangType(FnType->getFormalCSemanticResult());
// Now convert the parameters to Clang types.
auto params = FnType->getParameters();
SmallVector<clang::CanQualType,4> paramTys;
auto const &clangCtx = IGM.getClangASTContext();
switch (FnType->getRepresentation()) {
case SILFunctionTypeRepresentation::ObjCMethod: {
// ObjC methods take their 'self' argument first, followed by an
// implicit _cmd argument.
auto &self = params.back();
auto clangTy = IGM.getClangType(self);
paramTys.push_back(clangTy);
paramTys.push_back(clangCtx.VoidPtrTy);
params = params.drop_back();
break;
}
case SILFunctionTypeRepresentation::Block:
// Blocks take their context argument first.
paramTys.push_back(clangCtx.VoidPtrTy);
break;
case SILFunctionTypeRepresentation::CFunctionPointer:
// No implicit arguments.
break;
case SILFunctionTypeRepresentation::Thin:
case SILFunctionTypeRepresentation::Thick:
case SILFunctionTypeRepresentation::Method:
case SILFunctionTypeRepresentation::WitnessMethod:
case SILFunctionTypeRepresentation::Closure:
llvm_unreachable("not a C representation");
}
// Given an index within the clang parameters list, what do we need
// to subtract from it to get to the corresponding index within the
// Swift parameters list?
size_t clangToSwiftParamOffset = paramTys.size();
// Convert each parameter to a Clang type.
for (auto param : params) {
auto clangTy = IGM.getClangType(param);
paramTys.push_back(clangTy);
}
// Generate function info for this signature.
auto extInfo = clang::FunctionType::ExtInfo();
auto &FI = clang::CodeGen::arrangeFreeFunctionCall(IGM.ClangCodeGen->CGM(),
clangResultTy, paramTys, extInfo,
clang::CodeGen::RequiredArgs::All);
ForeignInfo.ClangInfo = &FI;
assert(FI.arg_size() == paramTys.size() &&
"Expected one ArgInfo for each parameter type!");
auto &returnInfo = FI.getReturnInfo();
// Does the result need an extension attribute?
if (returnInfo.isExtend()) {
bool signExt = clangResultTy->hasSignedIntegerRepresentation();
assert((signExt || clangResultTy->hasUnsignedIntegerRepresentation()) &&
"Invalid attempt to add extension attribute to argument!");
addExtendAttribute(IGM, Attrs, llvm::AttributeList::ReturnIndex, signExt);
}
// If we return indirectly, that is the first parameter type.
if (returnInfo.isIndirect()) {
addIndirectResult();
}
size_t firstParamToLowerNormally = 0;
// Use a special IR type for passing block pointers.
if (FnType->getRepresentation() == SILFunctionTypeRepresentation::Block) {
assert(FI.arg_begin()[0].info.isDirect() &&
"block pointer not passed directly?");
ParamIRTypes.push_back(IGM.ObjCBlockPtrTy);
firstParamToLowerNormally = 1;
}
for (auto i : indices(paramTys).slice(firstParamToLowerNormally)) {
auto &AI = FI.arg_begin()[i].info;
// Add a padding argument if required.
if (auto *padType = AI.getPaddingType())
ParamIRTypes.push_back(padType);
switch (AI.getKind()) {
case clang::CodeGen::ABIArgInfo::Extend: {
bool signExt = paramTys[i]->hasSignedIntegerRepresentation();
assert((signExt || paramTys[i]->hasUnsignedIntegerRepresentation()) &&
"Invalid attempt to add extension attribute to argument!");
addExtendAttribute(IGM, Attrs, getCurParamIndex() +
llvm::AttributeList::FirstArgIndex, signExt);
LLVM_FALLTHROUGH;
}
case clang::CodeGen::ABIArgInfo::Direct: {
switch (FI.getExtParameterInfo(i).getABI()) {
case clang::ParameterABI::Ordinary:
break;
case clang::ParameterABI::SwiftContext:
IGM.addSwiftSelfAttributes(Attrs, getCurParamIndex());
break;
case clang::ParameterABI::SwiftErrorResult:
IGM.addSwiftErrorAttributes(Attrs, getCurParamIndex());
break;
case clang::ParameterABI::SwiftIndirectResult:
addIndirectResultAttributes(IGM, Attrs, getCurParamIndex(), claimSRet());
break;
}
// If the coercion type is a struct which can be flattened, we need to
// expand it.
auto *coercedTy = AI.getCoerceToType();
if (AI.isDirect() && AI.getCanBeFlattened() &&
isa<llvm::StructType>(coercedTy)) {
const auto *ST = cast<llvm::StructType>(coercedTy);
for (unsigned EI : range(ST->getNumElements()))
ParamIRTypes.push_back(ST->getElementType(EI));
} else {
ParamIRTypes.push_back(coercedTy);
}
break;
}
case clang::CodeGen::ABIArgInfo::CoerceAndExpand: {
auto types = AI.getCoerceAndExpandTypeSequence();
ParamIRTypes.append(types.begin(), types.end());
break;
}
case clang::CodeGen::ABIArgInfo::Indirect: {
assert(i >= clangToSwiftParamOffset &&
"Unexpected index for indirect byval argument");
auto ¶m = params[i - clangToSwiftParamOffset];
auto paramTy = getSILFuncConventions().getSILType(param);
auto ¶mTI = cast<FixedTypeInfo>(IGM.getTypeInfo(paramTy));
if (AI.getIndirectByVal())
addByvalArgumentAttributes(
IGM, Attrs, getCurParamIndex(),
Alignment(AI.getIndirectAlign().getQuantity()));
addPointerParameter(paramTI.getStorageType());
break;
}
case clang::CodeGen::ABIArgInfo::Expand:
ClangExpandTypeCollector(IGM, ParamIRTypes).visit(paramTys[i]);
break;
case clang::CodeGen::ABIArgInfo::Ignore:
break;
case clang::CodeGen::ABIArgInfo::InAlloca:
llvm_unreachable("Need to handle InAlloca during signature expansion");
}
}
if (returnInfo.isIndirect() || returnInfo.isIgnore()) {
ResultIRType = IGM.VoidTy;
} else {
ResultIRType = returnInfo.getCoerceToType();
}
}
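/// View a struct type as the array of its element types, or any other
/// scalar type as a one-element array.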
static ArrayRef<llvm::Type *> expandScalarOrStructTypeToArray(llvm::Type *&ty) {
ArrayRef<llvm::Type*> expandedTys;
if (auto expansionTy = dyn_cast<llvm::StructType>(ty)) {
// Is there any good reason this isn't public API of llvm::StructType?
expandedTys = makeArrayRef(expansionTy->element_begin(),
expansionTy->getNumElements());
} else {
expandedTys = ty;
}
return expandedTys;
}
void SignatureExpansion::expand(SILParameterInfo param) {
auto paramSILType = getSILFuncConventions().getSILType(param);
auto &ti = IGM.getTypeInfo(paramSILType);
switch (auto conv = param.getConvention()) {
case ParameterConvention::Indirect_In:
case ParameterConvention::Indirect_In_Constant:
case ParameterConvention::Indirect_In_Guaranteed:
addIndirectValueParameterAttributes(IGM, Attrs, ti, ParamIRTypes.size());
addPointerParameter(
IGM.getStorageType(getSILFuncConventions().getSILType(param)));
return;
case ParameterConvention::Indirect_Inout:
case ParameterConvention::Indirect_InoutAliasable:
addInoutParameterAttributes(IGM, Attrs, ti, ParamIRTypes.size(),
conv == ParameterConvention::Indirect_InoutAliasable);
addPointerParameter(
IGM.getStorageType(getSILFuncConventions().getSILType(param)));
return;
case ParameterConvention::Direct_Owned:
case ParameterConvention::Direct_Unowned:
case ParameterConvention::Direct_Guaranteed:
switch (FnType->getLanguage()) {
case SILFunctionLanguage::C: {
llvm_unreachable("Unexpected C/ObjC method in parameter expansion!");
return;
}
case SILFunctionLanguage::Swift: {
auto &nativeSchema = ti.nativeParameterValueSchema(IGM);
if (nativeSchema.requiresIndirect()) {
addIndirectValueParameterAttributes(IGM, Attrs, ti,
ParamIRTypes.size());
ParamIRTypes.push_back(ti.getStorageType()->getPointerTo());
return;
}
if (nativeSchema.empty()) {
assert(ti.getSchema().empty());
return;
}
auto expandedTy = nativeSchema.getExpandedType(IGM);
auto expandedTysArray = expandScalarOrStructTypeToArray(expandedTy);
for (auto *Ty : expandedTysArray)
ParamIRTypes.push_back(Ty);
return;
}
}
llvm_unreachable("bad abstract CC");
  }
  llvm_unreachable("bad parameter convention");
}
/// Does the given function type have a self parameter that should be
/// given the special treatment for self parameters?
///
/// It's important that this only return true for things that are
/// passed as a single pointer.
bool irgen::hasSelfContextParameter(CanSILFunctionType fnType) {
if (!fnType->hasSelfParam())
return false;
SILParameterInfo param = fnType->getSelfParameter();
// All the indirect conventions pass a single pointer.
if (param.isFormalIndirect()) {
return true;
}
// Direct conventions depend on the type.
CanType type = param.getType();
// Thick or @objc metatypes (but not existential metatypes).
if (auto metatype = dyn_cast<MetatypeType>(type)) {
return metatype->getRepresentation() != MetatypeRepresentation::Thin;
}
// Classes and class-bounded archetypes or ObjC existentials.
// No need to apply this to existentials.
// The direct check for SubstitutableType works because only
// class-bounded generic types can be passed directly.
if (type->mayHaveSuperclass() || isa<SubstitutableType>(type) ||
type->isObjCExistentialType()) {
return true;
}
return false;
}
/// Expand the abstract parameters of a SIL function type into the physical
/// parameters of an LLVM function type (results have already been expanded).
void SignatureExpansion::expandParameters() {
assert(FnType->getRepresentation() != SILFunctionTypeRepresentation::Block
&& "block with non-C calling conv?!");
// First, if this is a coroutine, add the coroutine-context parameter.
switch (FnType->getCoroutineKind()) {
case SILCoroutineKind::None:
break;
case SILCoroutineKind::YieldOnce:
case SILCoroutineKind::YieldMany:
addCoroutineContextParameter();
break;
}
// Next, the formal parameters. But 'self' is treated as the
// context if it has pointer representation.
auto params = FnType->getParameters();
bool hasSelfContext = false;
if (hasSelfContextParameter(FnType)) {
hasSelfContext = true;
params = params.drop_back();
}
for (auto param : params) {
expand(param);
}
// Next, the generic signature.
if (hasPolymorphicParameters(FnType))
expandPolymorphicSignature(IGM, FnType, ParamIRTypes);
// Context is next.
if (hasSelfContext) {
auto curLength = ParamIRTypes.size(); (void) curLength;
if (claimSelf())
IGM.addSwiftSelfAttributes(Attrs, curLength);
expand(FnType->getSelfParameter());
assert(ParamIRTypes.size() == curLength + 1 &&
"adding 'self' added unexpected number of parameters");
} else {
auto needsContext = [=]() -> bool {
switch (FnType->getRepresentation()) {
case SILFunctionType::Representation::Block:
llvm_unreachable("adding block parameter in Swift CC expansion?");
// Always leave space for a context argument if we have an error result.
case SILFunctionType::Representation::CFunctionPointer:
case SILFunctionType::Representation::Method:
case SILFunctionType::Representation::WitnessMethod:
case SILFunctionType::Representation::ObjCMethod:
case SILFunctionType::Representation::Thin:
case SILFunctionType::Representation::Closure:
return FnType->hasErrorResult();
case SILFunctionType::Representation::Thick:
return true;
}
llvm_unreachable("bad representation kind");
};
if (needsContext()) {
if (claimSelf())
IGM.addSwiftSelfAttributes(Attrs, ParamIRTypes.size());
ParamIRTypes.push_back(IGM.RefCountedPtrTy);
}
}
// Error results are last. We always pass them as a pointer to the
// formal error type; LLVM will magically turn this into a non-pointer
// if we set the right attribute.
if (FnType->hasErrorResult()) {
if (claimError())
IGM.addSwiftErrorAttributes(Attrs, ParamIRTypes.size());
llvm::Type *errorType = IGM.getStorageType(
getSILFuncConventions().getSILType(FnType->getErrorResult()));
ParamIRTypes.push_back(errorType->getPointerTo());
}
// Witness methods have some extra parameter types.
if (FnType->getRepresentation() ==
SILFunctionTypeRepresentation::WitnessMethod) {
expandTrailingWitnessSignature(IGM, FnType, ParamIRTypes);
}
}
/// Expand the result and parameter types of a SIL function into the
/// physical parameter types of an LLVM function and return the result
/// type.
void SignatureExpansion::expandFunctionType() {
switch (FnType->getLanguage()) {
case SILFunctionLanguage::Swift: {
expandResult();
expandParameters();
return;
}
case SILFunctionLanguage::C:
expandExternalSignatureTypes();
return;
}
llvm_unreachable("bad abstract calling convention");
}
void SignatureExpansion::expandCoroutineContinuationType() {
expandCoroutineResult(/*for continuation*/ true);
expandCoroutineContinuationParameters();
}
Signature SignatureExpansion::getSignature() {
// Create the appropriate LLVM type.
llvm::FunctionType *llvmType =
llvm::FunctionType::get(ResultIRType, ParamIRTypes, /*variadic*/ false);
assert((ForeignInfo.ClangInfo != nullptr) ==
(FnType->getLanguage() == SILFunctionLanguage::C) &&
"C function type without C function info");
auto callingConv = expandCallingConv(IGM, FnType->getRepresentation());
Signature result;
result.Type = llvmType;
result.CallingConv = callingConv;
result.Attributes = Attrs;
using ExtraData = Signature::ExtraData;
if (FnType->getLanguage() == SILFunctionLanguage::C) {
result.ExtraDataKind = ExtraData::kindForMember<ForeignFunctionInfo>();
result.ExtraDataStorage.emplace<ForeignFunctionInfo>(result.ExtraDataKind,
ForeignInfo);
} else if (FnType->isCoroutine()) {
result.ExtraDataKind = ExtraData::kindForMember<CoroutineInfo>();
result.ExtraDataStorage.emplace<CoroutineInfo>(result.ExtraDataKind,
CoroInfo);
} else {
result.ExtraDataKind = ExtraData::kindForMember<void>();
}
return result;
}
Signature Signature::getUncached(IRGenModule &IGM,
CanSILFunctionType formalType) {
GenericContextScope scope(IGM, formalType->getGenericSignature());
SignatureExpansion expansion(IGM, formalType);
expansion.expandFunctionType();
return expansion.getSignature();
}
Signature Signature::forCoroutineContinuation(IRGenModule &IGM,
CanSILFunctionType fnType) {
assert(fnType->isCoroutine());
SignatureExpansion expansion(IGM, fnType);
expansion.expandCoroutineContinuationType();
return expansion.getSignature();
}
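/// Extract the direct scalar results of a call into an explosion, coercing
/// from the ABI return type back to the body type if the two differ.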
void irgen::extractScalarResults(IRGenFunction &IGF, llvm::Type *bodyType,
llvm::Value *call, Explosion &out) {
assert(!bodyType->isVoidTy() && "Unexpected void result type!");
auto *returned = call;
auto *callType = call->getType();
// If the type of the result of the call differs from the type used
// elsewhere in the caller due to ABI type coercion, we need to
// coerce the result back from the ABI type before extracting the
// elements.
if (bodyType != callType)
returned = IGF.coerceValue(returned, bodyType, IGF.IGM.DataLayout);
if (auto *structType = dyn_cast<llvm::StructType>(bodyType))
for (unsigned i = 0, e = structType->getNumElements(); i != e; ++i)
out.add(IGF.Builder.CreateExtractValue(returned, i));
else
out.add(returned);
}
/// Emit the unsubstituted result of this call into the given explosion.
/// The unsubstituted result must be naturally returned directly.
void CallEmission::emitToUnmappedExplosion(Explosion &out) {
assert(LastArgWritten == 0 && "emitting unnaturally to explosion");
auto call = emitCallSite();
// Bail out immediately on a void result.
llvm::Value *result = call.getInstruction();
if (result->getType()->isVoidTy())
return;
SILFunctionConventions fnConv(getCallee().getOrigFunctionType(),
IGF.getSILModule());
// If the result was returned autoreleased, implicitly insert the reclaim.
// This is only allowed on a single direct result.
if (fnConv.getNumDirectSILResults() == 1
&& (fnConv.getDirectSILResults().begin()->getConvention()
== ResultConvention::Autoreleased)) {
result = emitObjCRetainAutoreleasedReturnValue(IGF, result);
}
// Get the natural IR type in the body of the function that makes
// the call. This may be different than the IR type returned by the
// call itself due to ABI type coercion.
auto resultType = fnConv.getSILResultType();
auto &nativeSchema = IGF.IGM.getTypeInfo(resultType).nativeReturnValueSchema(IGF.IGM);
// For ABI reasons the result type of the call might not actually match the
// expected result type.
auto expectedNativeResultType = nativeSchema.getExpandedType(IGF.IGM);
if (result->getType() != expectedNativeResultType) {
// This should only be needed when we call C functions.
assert(getCallee().getOrigFunctionType()->getLanguage() ==
SILFunctionLanguage::C);
result =
IGF.coerceValue(result, expectedNativeResultType, IGF.IGM.DataLayout);
}
// Gather the values.
Explosion nativeExplosion;
extractScalarResults(IGF, result->getType(), result, nativeExplosion);
out = nativeSchema.mapFromNative(IGF.IGM, IGF, nativeExplosion, resultType);
}
/// Emit the unsubstituted result of this call to the given address.
/// The unsubstituted result must be naturally returned indirectly.
void CallEmission::emitToUnmappedMemory(Address result) {
assert(LastArgWritten == 1 && "emitting unnaturally to indirect result");
Args[0] = result.getAddress();
SILFunctionConventions FnConv(CurCallee.getSubstFunctionType(),
IGF.getSILModule());
addIndirectResultAttributes(IGF.IGM, CurCallee.getMutableAttributes(),
0, FnConv.getNumIndirectSILResults() <= 1);
#ifndef NDEBUG
LastArgWritten = 0; // appease an assert
#endif
emitCallSite();
}
/// The private routine to ultimately emit a call or invoke instruction.
llvm::CallSite CallEmission::emitCallSite() {
assert(LastArgWritten == 0);
assert(!EmittedCall);
EmittedCall = true;
// Make the call and clear the arguments array.
const auto &fn = getCallee().getFunctionPointer();
auto fnTy = fn.getFunctionType();
// Coerce argument types for those cases where the IR type required
// by the ABI differs from the type used within the function body.
assert(fnTy->getNumParams() == Args.size());
for (int i = 0, e = fnTy->getNumParams(); i != e; ++i) {
auto *paramTy = fnTy->getParamType(i);
auto *argTy = Args[i]->getType();
if (paramTy != argTy)
Args[i] = IGF.coerceValue(Args[i], paramTy, IGF.IGM.DataLayout);
}
// TODO: exceptions!
auto call = IGF.Builder.CreateCall(fn, Args);
// Make coroutines calls opaque to LLVM analysis.
if (IsCoroutine) {
// Go back and insert some instructions right before the call.
// It's easier to do this than to mess around with copying and
// modifying the FunctionPointer above.
IGF.Builder.SetInsertPoint(call);
// Insert a call to @llvm.coro.prepare.retcon, then bitcast to the right
// function type.
auto origCallee = call->getCalledValue();
llvm::Value *opaqueCallee = origCallee;
opaqueCallee =
IGF.Builder.CreateBitCast(opaqueCallee, IGF.IGM.Int8PtrTy);
opaqueCallee =
IGF.Builder.CreateIntrinsicCall(llvm::Intrinsic::ID::coro_prepare_retcon,
{ opaqueCallee });
opaqueCallee =
IGF.Builder.CreateBitCast(opaqueCallee, origCallee->getType());
call->setCalledFunction(opaqueCallee);
// Reset the insert point to after the call.
IGF.Builder.SetInsertPoint(call->getParent());
}
Args.clear();
// Destroy any temporaries we needed.
// We don't do this for coroutines because we need to wait until the
// coroutine is complete.
if (!IsCoroutine) {
Temporaries.destroyAll(IGF);
// Clear the temporary set so that we can assert that there are no
// temporaries later.
Temporaries.clear();
}
// Return.
return call;
}
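/// Create a call to the given function pointer, copying its attributes and
/// calling convention onto the new call instruction.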
llvm::CallInst *IRBuilder::CreateCall(const FunctionPointer &fn,
ArrayRef<llvm::Value*> args) {
assert(!isTrapIntrinsic(fn.getPointer()) && "Use CreateNonMergeableTrap");
llvm::CallInst *call = IRBuilderBase::CreateCall(fn.getPointer(), args);
call->setAttributes(fn.getAttributes());
call->setCallingConv(fn.getCallingConv());
return call;
}
/// Emit the result of this call to memory.
void CallEmission::emitToMemory(Address addr,
const LoadableTypeInfo &indirectedResultTI,
bool isOutlined) {
assert(LastArgWritten <= 1);
// If the call is naturally to an explosion, emit it that way and
// then initialize the temporary.
if (LastArgWritten == 0) {
Explosion result;
emitToExplosion(result, isOutlined);
indirectedResultTI.initialize(IGF, result, addr, isOutlined);
return;
}
// Okay, we're naturally emitting to memory.
Address origAddr = addr;
auto origFnType = CurCallee.getOrigFunctionType();
auto substFnType = CurCallee.getSubstFunctionType();
// We're never being asked to do anything with *formal*
// indirect results here, just the possibility of a direct-in-SIL
// result that's actually being passed indirectly.
//
// TODO: SIL address lowering should be able to handle such cases earlier.
auto origResultType = origFnType->getDirectFormalResultsType().getASTType();
auto substResultType = substFnType->getDirectFormalResultsType().getASTType();
if (origResultType->hasTypeParameter())
origResultType = IGF.IGM.getGenericEnvironment()
->mapTypeIntoContext(origResultType)
->getCanonicalType();
if (origResultType != substResultType) {
auto origTy = IGF.IGM.getStoragePointerTypeForLowered(origResultType);
origAddr = IGF.Builder.CreateBitCast(origAddr, origTy);
}
emitToUnmappedMemory(origAddr);
}
static void emitCastToSubstSchema(IRGenFunction &IGF, Explosion &in,
const ExplosionSchema &schema,
Explosion &out) {
assert(in.size() == schema.size());
for (unsigned i = 0, e = schema.size(); i != e; ++i) {
llvm::Type *expectedType = schema.begin()[i].getScalarType();
llvm::Value *value = in.claimNext();
if (value->getType() != expectedType)
value = IGF.Builder.CreateBitCast(value, expectedType,
value->getName() + ".asSubstituted");
out.add(value);
}
}
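/// Emit a coroutine call and collect the continuation pointer and the
/// yielded values (both direct and indirect components) into the given
/// explosion.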
void CallEmission::emitYieldsToExplosion(Explosion &out) {
// Emit the call site.
auto call = emitCallSite();
// Pull the raw return values out.
Explosion rawReturnValues;
extractScalarResults(IGF, call->getType(), call.getInstruction(),
rawReturnValues);
auto coroInfo = getCallee().getSignature().getCoroutineInfo();
// Go ahead and forward the continuation pointer as an opaque pointer.
auto continuation = rawReturnValues.claimNext();
out.add(continuation);
// Collect the raw value components.
Explosion rawYieldComponents;
// Add all the direct yield components.
rawYieldComponents.add(
rawReturnValues.claim(coroInfo.NumDirectYieldComponents));
// Add all the indirect yield components.
assert(rawReturnValues.size() <= 1);
if (!rawReturnValues.empty()) {
// Extract the indirect yield buffer.
auto indirectPointer = rawReturnValues.claimNext();
auto indirectStructTy = cast<llvm::StructType>(
indirectPointer->getType()->getPointerElementType());
auto layout = IGF.IGM.DataLayout.getStructLayout(indirectStructTy);
Address indirectBuffer(indirectPointer, Alignment(layout->getAlignment()));
for (auto i : indices(indirectStructTy->elements())) {
// Skip padding.
if (indirectStructTy->getElementType(i)->isArrayTy())
continue;
auto eltAddr = IGF.Builder.CreateStructGEP(indirectBuffer, i, layout);
rawYieldComponents.add(IGF.Builder.CreateLoad(eltAddr));
}
}
auto substCoroType = getCallee().getSubstFunctionType();
SILFunctionConventions fnConv(substCoroType, IGF.getSILModule());
for (auto yield : fnConv.getYields()) {
YieldSchema schema(IGF.IGM, fnConv, yield);
// If the schema says it's indirect, then we expect a pointer.
if (schema.isIndirect()) {
auto pointer = IGF.Builder.CreateBitCast(rawYieldComponents.claimNext(),
schema.getIndirectPointerType());
// If it's formally indirect, then we should just add that pointer
// to the output.
if (schema.isFormalIndirect()) {
out.add(pointer);
continue;
}
// Otherwise, we need to load.
auto &yieldTI = cast<LoadableTypeInfo>(schema.getTypeInfo());
yieldTI.loadAsTake(IGF, yieldTI.getAddressForPointer(pointer), out);
continue;
}
// Otherwise, it's direct. Remap.
auto temp = schema.getDirectSchema().mapFromNative(IGF.IGM, IGF,
rawYieldComponents,
schema.getSILType());
auto &yieldTI = cast<LoadableTypeInfo>(schema.getTypeInfo());
emitCastToSubstSchema(IGF, temp, yieldTI.getSchema(), out);
}
}
/// Emit the result of this call to an explosion.
void CallEmission::emitToExplosion(Explosion &out, bool isOutlined) {
assert(LastArgWritten <= 1);
// For coroutine calls, we need to collect the yields, not the results;
// this looks very different.
if (IsCoroutine) {
assert(LastArgWritten == 0 && "coroutine with indirect result?");
emitYieldsToExplosion(out);
return;
}
SILFunctionConventions fnConv(getCallee().getSubstFunctionType(),
IGF.getSILModule());
SILType substResultType = fnConv.getSILResultType();
auto &substResultTI =
cast<LoadableTypeInfo>(IGF.getTypeInfo(substResultType));
// If the call is naturally to memory, emit it that way and then
// explode that temporary.
if (LastArgWritten == 1) {
StackAddress ctemp = substResultTI.allocateStack(IGF, substResultType,
"call.aggresult");
Address temp = ctemp.getAddress();
emitToMemory(temp, substResultTI, isOutlined);
// We can use a take.
substResultTI.loadAsTake(IGF, temp, out);
substResultTI.deallocateStack(IGF, ctemp, substResultType);
return;
}
// Okay, we're naturally emitting to an explosion.
Explosion temp;
emitToUnmappedExplosion(temp);
// We might need to bitcast the results.
emitCastToSubstSchema(IGF, temp, substResultTI.getSchema(), out);
}
CallEmission::CallEmission(CallEmission &&other)
: IGF(other.IGF),
Args(std::move(other.Args)),
CurCallee(std::move(other.CurCallee)),
LastArgWritten(other.LastArgWritten),
EmittedCall(other.EmittedCall) {
// Prevent other's destructor from asserting.
other.LastArgWritten = 0;
other.EmittedCall = true;
}
CallEmission::~CallEmission() {
assert(LastArgWritten == 0);
assert(EmittedCall);
assert(Temporaries.hasBeenCleared());
}
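/// Construct a callee, asserting (in debug builds) that the provided data
/// values are consistent with the function's representation.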
Callee::Callee(CalleeInfo &&info, const FunctionPointer &fn,
llvm::Value *firstData, llvm::Value *secondData)
: Info(std::move(info)), Fn(fn),
FirstData(firstData), SecondData(secondData) {
#ifndef NDEBUG
// We should have foreign info if it's a foreign call.
assert((Fn.getForeignInfo().ClangInfo != nullptr) ==
(Info.OrigFnType->getLanguage() == SILFunctionLanguage::C));
// We should have the right data values for the representation.
switch (Info.OrigFnType->getRepresentation()) {
case SILFunctionTypeRepresentation::ObjCMethod:
assert(FirstData && SecondData);
break;
case SILFunctionTypeRepresentation::Method:
case SILFunctionTypeRepresentation::WitnessMethod:
assert((FirstData != nullptr) == hasSelfContextParameter(Info.OrigFnType));
assert(!SecondData);
break;
case SILFunctionTypeRepresentation::Thick:
case SILFunctionTypeRepresentation::Block:
assert(FirstData && !SecondData);
break;
case SILFunctionTypeRepresentation::Thin:
case SILFunctionTypeRepresentation::Closure:
case SILFunctionTypeRepresentation::CFunctionPointer:
assert(!FirstData && !SecondData);
break;
}
#endif
}
llvm::Value *Callee::getSwiftContext() const {
switch (Info.OrigFnType->getRepresentation()) {
case SILFunctionTypeRepresentation::Block:
case SILFunctionTypeRepresentation::ObjCMethod:
case SILFunctionTypeRepresentation::CFunctionPointer:
case SILFunctionTypeRepresentation::Thin:
case SILFunctionTypeRepresentation::Closure:
return nullptr;
case SILFunctionTypeRepresentation::WitnessMethod:
case SILFunctionTypeRepresentation::Method:
// This may or may not be null.
return FirstData;
case SILFunctionTypeRepresentation::Thick:
assert(FirstData && "no context value set on callee");
return FirstData;
}
llvm_unreachable("bad representation");
}
llvm::Value *Callee::getBlockObject() const {
assert(Info.OrigFnType->getRepresentation() ==
SILFunctionTypeRepresentation::Block &&
"not a block");
assert(FirstData && "no block object set on callee");
return FirstData;
}
llvm::Value *Callee::getObjCMethodReceiver() const {
assert(Info.OrigFnType->getRepresentation() ==
SILFunctionTypeRepresentation::ObjCMethod &&
"not a method");
assert(FirstData && "no receiver set on callee");
return FirstData;
}
llvm::Value *Callee::getObjCMethodSelector() const {
assert(Info.OrigFnType->getRepresentation() ==
SILFunctionTypeRepresentation::ObjCMethod &&
"not a method");
assert(SecondData && "no selector set on callee");
return SecondData;
}
/// Set up this emitter afresh from the current callee specs.
void CallEmission::setFromCallee() {
IsCoroutine = CurCallee.getSubstFunctionType()->isCoroutine();
EmittedCall = false;
unsigned numArgs = CurCallee.getLLVMFunctionType()->getNumParams();
// Set up the args array.
assert(Args.empty());
Args.reserve(numArgs);
Args.set_size(numArgs);
LastArgWritten = numArgs;
auto fnType = CurCallee.getOrigFunctionType();
if (fnType->getRepresentation()
== SILFunctionTypeRepresentation::WitnessMethod) {
unsigned n = getTrailingWitnessSignatureLength(IGF.IGM, fnType);
while (n--) {
Args[--LastArgWritten] = nullptr;
}
}
llvm::Value *contextPtr = CurCallee.getSwiftContext();
// Add the error result if we have one.
if (fnType->hasErrorResult()) {
// The invariant is that this is always zero-initialized, so we
// don't need to do anything extra here.
SILFunctionConventions fnConv(fnType, IGF.getSILModule());
Address errorResultSlot = IGF.getErrorResultSlot(fnConv.getSILErrorType());
assert(LastArgWritten > 0);
Args[--LastArgWritten] = errorResultSlot.getAddress();
addAttribute(LastArgWritten + llvm::AttributeList::FirstArgIndex,
llvm::Attribute::NoCapture);
IGF.IGM.addSwiftErrorAttributes(CurCallee.getMutableAttributes(),
LastArgWritten);
// Fill in the context pointer if necessary.
if (!contextPtr) {
assert(!CurCallee.getOrigFunctionType()->getExtInfo().hasContext() &&
"Missing context?");
contextPtr = llvm::UndefValue::get(IGF.IGM.RefCountedPtrTy);
}
}
// Add the data pointer if we have one.
// (Note that we're emitting backwards, so this correctly goes
// *before* the error pointer.)
if (contextPtr) {
assert(LastArgWritten > 0);
Args[--LastArgWritten] = contextPtr;
IGF.IGM.addSwiftSelfAttributes(CurCallee.getMutableAttributes(),
LastArgWritten);
}
}
bool irgen::canCoerceToSchema(IRGenModule &IGM,
ArrayRef<llvm::Type*> expandedTys,
const ExplosionSchema &schema) {
// If the schemas don't even match in number, we have to go
// through memory.
if (expandedTys.size() != schema.size())
return false;
// If there's just one element, we can always coerce as a scalar.
if (expandedTys.size() == 1) return true;
// If there are multiple elements, the pairs of types need to
// match in size for the coercion to work.
for (size_t i = 0, e = expandedTys.size(); i != e; ++i) {
llvm::Type *inputTy = schema[i].getScalarType();
llvm::Type *outputTy = expandedTys[i];
if (inputTy != outputTy &&
IGM.DataLayout.getTypeSizeInBits(inputTy) !=
IGM.DataLayout.getTypeSizeInBits(outputTy))
return false;
}
// Okay, everything is fine.
return true;
}
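/// Return the type that the element at the given index should be converted
/// to when translating in the given direction.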
static llvm::Type *getOutputType(TranslationDirection direction, unsigned index,
const ExplosionSchema &nativeSchema,
ArrayRef<llvm::Type*> expandedForeignTys) {
assert(nativeSchema.size() == expandedForeignTys.size());
return (direction == TranslationDirection::ToForeign
? expandedForeignTys[index]
: nativeSchema[index].getScalarType());
}
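/// Translate a value between a Swift explosion and a Clang CoerceAndExpand
/// sequence, going through a temporary in memory if the scalars cannot be
/// coerced pairwise.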
static void emitCoerceAndExpand(IRGenFunction &IGF, Explosion &in,
Explosion &out, SILType paramTy,
const LoadableTypeInfo ¶mTI,
llvm::StructType *coercionTy,
ArrayRef<llvm::Type *> expandedTys,
TranslationDirection direction,
bool isOutlined) {
// If we can directly coerce the scalar values, avoid going through memory.
auto schema = paramTI.getSchema();
if (canCoerceToSchema(IGF.IGM, expandedTys, schema)) {
for (auto index : indices(expandedTys)) {
llvm::Value *arg = in.claimNext();
assert(arg->getType() ==
getOutputType(reverse(direction), index, schema, expandedTys));
auto outputTy = getOutputType(direction, index, schema, expandedTys);
if (arg->getType() != outputTy)
arg = IGF.coerceValue(arg, outputTy, IGF.IGM.DataLayout);
out.add(arg);
}
return;
}
// Otherwise, materialize to a temporary.
auto temporaryAlloc =
paramTI.allocateStack(IGF, paramTy, "coerce-and-expand.temp");
Address temporary = temporaryAlloc.getAddress();
auto coercionTyLayout = IGF.IGM.DataLayout.getStructLayout(coercionTy);
// Make the alloca at least as aligned as the coercion struct, just
// so that the element accesses we make don't end up under-aligned.
Alignment coercionTyAlignment = Alignment(coercionTyLayout->getAlignment());
auto alloca = cast<llvm::AllocaInst>(temporary.getAddress());
if (alloca->getAlignment() < coercionTyAlignment.getValue()) {
alloca->setAlignment(coercionTyAlignment.getValue());
temporary = Address(temporary.getAddress(), coercionTyAlignment);
}
// If we're translating *to* the foreign expansion, do an ordinary
// initialization from the input explosion.
if (direction == TranslationDirection::ToForeign) {
paramTI.initialize(IGF, in, temporary, isOutlined);
}
Address coercedTemporary =
IGF.Builder.CreateElementBitCast(temporary, coercionTy);
#ifndef NDEBUG
size_t expandedTyIndex = 0;
#endif
for (auto eltIndex : indices(coercionTy->elements())) {
auto eltTy = coercionTy->getElementType(eltIndex);
// Skip padding fields.
if (eltTy->isArrayTy()) continue;
assert(expandedTys[expandedTyIndex++] == eltTy);
// Project down to the field.
Address eltAddr =
IGF.Builder.CreateStructGEP(coercedTemporary, eltIndex, coercionTyLayout);
// If we're translating *to* the foreign expansion, pull the value out
// of the field and add it to the output.
if (direction == TranslationDirection::ToForeign) {
llvm::Value *value = IGF.Builder.CreateLoad(eltAddr);
out.add(value);
// Otherwise, claim the next value from the input and store that
// in the field.
} else {
llvm::Value *value = in.claimNext();
IGF.Builder.CreateStore(value, eltAddr);
}
}
assert(expandedTyIndex == expandedTys.size());
// If we're translating *from* the foreign expansion, do an ordinary
// load into the output explosion.
if (direction == TranslationDirection::ToNative) {
paramTI.loadAsTake(IGF, temporary, out);
}
paramTI.deallocateStack(IGF, temporaryAlloc, paramTy);
}
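/// Emit an argument under the foreign ABI's direct-passing treatment,
/// coercing through memory when the exploded Swift scalars can't be
/// pairwise-coerced to the expanded Clang elements.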
static void emitDirectExternalArgument(IRGenFunction &IGF, SILType argType,
const clang::CodeGen::ABIArgInfo &AI,
Explosion &in, Explosion &out,
bool isOutlined) {
bool IsDirectFlattened = AI.isDirect() && AI.getCanBeFlattened();
bool IsIndirect = !AI.isDirect();
// If we're supposed to pass directly as a struct type, that
// really means expanding out as multiple arguments.
llvm::Type *coercedTy = AI.getCoerceToType();
ArrayRef<llvm::Type *> expandedTys =
expandScalarOrStructTypeToArray(coercedTy);
auto &argTI = cast<LoadableTypeInfo>(IGF.getTypeInfo(argType));
auto inputSchema = argTI.getSchema();
// Check to see if we can pairwise coerce Swift's exploded scalars
// to Clang's expanded elements.
if ((IsDirectFlattened || IsIndirect) &&
canCoerceToSchema(IGF.IGM, expandedTys, inputSchema)) {
for (auto outputTy : expandedTys) {
llvm::Value *arg = in.claimNext();
if (arg->getType() != outputTy)
arg = IGF.coerceValue(arg, outputTy, IGF.IGM.DataLayout);
out.add(arg);
}
return;
}
// Otherwise, we need to coerce through memory.
Address temporary;
Size tempSize;
std::tie(temporary, tempSize) =
allocateForCoercion(IGF, argTI.getStorageType(), coercedTy, "coerced-arg");
IGF.Builder.CreateLifetimeStart(temporary, tempSize);
// Store to a temporary.
Address tempOfArgTy = IGF.Builder.CreateBitCast(
temporary, argTI.getStorageType()->getPointerTo());
argTI.initializeFromParams(IGF, in, tempOfArgTy, argType, isOutlined);
// Bitcast the temporary to the expected type.
Address coercedAddr =
IGF.Builder.CreateBitCast(temporary, coercedTy->getPointerTo());
if (IsDirectFlattened && isa<llvm::StructType>(coercedTy)) {
// Project out individual elements if necessary.
auto *ST = cast<llvm::StructType>(coercedTy);
const auto *layout = IGF.IGM.DataLayout.getStructLayout(ST);
for (unsigned EI : range(ST->getNumElements())) {
auto offset = Size(layout->getElementOffset(EI));
auto address = IGF.Builder.CreateStructGEP(coercedAddr, EI, offset);
out.add(IGF.Builder.CreateLoad(address));
}
} else {
// Otherwise, collect the single scalar.
out.add(IGF.Builder.CreateLoad(coercedAddr));
}
IGF.Builder.CreateLifetimeEnd(temporary, tempSize);
}
namespace {
/// Load a clang argument expansion from a buffer.
struct ClangExpandLoadEmitter :
ClangExpandProjection<ClangExpandLoadEmitter> {
Explosion &Out;
ClangExpandLoadEmitter(IRGenFunction &IGF, Explosion &out)
: ClangExpandProjection(IGF), Out(out) {}
void visitScalar(llvm::Type *scalarTy, Address addr) {
addr = IGF.Builder.CreateBitCast(addr, scalarTy->getPointerTo());
auto value = IGF.Builder.CreateLoad(addr);
Out.add(value);
}
};
/// Store a clang argument expansion into a buffer.
struct ClangExpandStoreEmitter :
ClangExpandProjection<ClangExpandStoreEmitter> {
Explosion &In;
ClangExpandStoreEmitter(IRGenFunction &IGF, Explosion &in)
: ClangExpandProjection(IGF), In(in) {}
void visitScalar(llvm::Type *scalarTy, Address addr) {
auto value = In.claimNext();
addr = IGF.Builder.CreateBitCast(addr, scalarTy->getPointerTo());
IGF.Builder.CreateStore(value, addr);
}
};
} // end anonymous namespace
/// Given a Swift value explosion in 'in', produce a Clang expansion
/// (according to ABIArgInfo::Expand) in 'out'.
static void
emitClangExpandedArgument(IRGenFunction &IGF, Explosion &in, Explosion &out,
clang::CanQualType clangType, SILType swiftType,
const LoadableTypeInfo &swiftTI, bool isOutlined) {
// If Clang's expansion schema matches Swift's, great.
auto swiftSchema = swiftTI.getSchema();
if (doesClangExpansionMatchSchema(IGF.IGM, clangType, swiftSchema)) {
return in.transferInto(out, swiftSchema.size());
}
// Otherwise, materialize to a temporary.
auto ctemp = swiftTI.allocateStack(IGF, swiftType, "clang-expand-arg.temp");
Address temp = ctemp.getAddress();
swiftTI.initialize(IGF, in, temp, isOutlined);
Address castTemp = IGF.Builder.CreateBitCast(temp, IGF.IGM.Int8PtrTy);
ClangExpandLoadEmitter(IGF, out).visit(clangType, castTemp);
swiftTI.deallocateStack(IGF, ctemp, swiftType);
}
/// Given a Clang-expanded (according to ABIArgInfo::Expand) parameter
/// in 'in', produce a Swift value explosion in 'out'.
void irgen::emitClangExpandedParameter(IRGenFunction &IGF,
Explosion &in, Explosion &out,
clang::CanQualType clangType,
SILType swiftType,
const LoadableTypeInfo &swiftTI) {
// If Clang's expansion schema matches Swift's, great.
auto swiftSchema = swiftTI.getSchema();
if (doesClangExpansionMatchSchema(IGF.IGM, clangType, swiftSchema)) {
return in.transferInto(out, swiftSchema.size());
}
// Otherwise, materialize to a temporary.
auto tempAlloc = swiftTI.allocateStack(IGF, swiftType,
"clang-expand-param.temp");
Address temp = tempAlloc.getAddress();
Address castTemp = IGF.Builder.CreateBitCast(temp, IGF.IGM.Int8PtrTy);
ClangExpandStoreEmitter(IGF, in).visit(clangType, castTemp);
// Then load out.
swiftTI.loadAsTake(IGF, temp, out);
swiftTI.deallocateStack(IGF, tempAlloc, swiftType);
}
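/// Convert the formal arguments of a foreign call from their Swift
/// explosion to the representation required by the Clang ABI info.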
static void externalizeArguments(IRGenFunction &IGF, const Callee &callee,
Explosion &in, Explosion &out,
TemporarySet &temporaries,
bool isOutlined) {
auto silConv = IGF.IGM.silConv;
auto fnType = callee.getOrigFunctionType();
auto params = fnType->getParameters();
assert(callee.getForeignInfo().ClangInfo);
auto &FI = *callee.getForeignInfo().ClangInfo;
// The index of the first "physical" parameter from paramTys/FI that
// corresponds to a logical parameter from params.
unsigned firstParam = 0;
// Handle the ObjC prefix.
if (callee.getRepresentation() == SILFunctionTypeRepresentation::ObjCMethod) {
// Ignore both the logical and the physical parameters associated
// with self and _cmd.
firstParam += 2;
params = params.drop_back();
// Or the block prefix.
} else if (fnType->getRepresentation()
== SILFunctionTypeRepresentation::Block) {
// Ignore the physical block-object parameter.
firstParam += 1;
}
for (unsigned i = firstParam, e = FI.arg_size(); i != e; ++i) {
auto clangParamTy = FI.arg_begin()[i].type;
auto &AI = FI.arg_begin()[i].info;
// We don't need to do anything to handle the Swift parameter-ABI
// attributes here because we shouldn't be trying to round-trip
// swiftcall function pointers through SIL as C functions anyway.
assert(FI.getExtParameterInfo(i).getABI() == clang::ParameterABI::Ordinary);
// Add a padding argument if required.
if (auto *padType = AI.getPaddingType())
out.add(llvm::UndefValue::get(padType));
SILType paramType = silConv.getSILType(params[i - firstParam]);
switch (AI.getKind()) {
case clang::CodeGen::ABIArgInfo::Extend: {
bool signExt = clangParamTy->hasSignedIntegerRepresentation();
assert((signExt || clangParamTy->hasUnsignedIntegerRepresentation()) &&
"Invalid attempt to add extension attribute to argument!");
(void) signExt;
LLVM_FALLTHROUGH;
}
case clang::CodeGen::ABIArgInfo::Direct: {
auto toTy = AI.getCoerceToType();
// Indirect parameters are bridged as Clang pointer types.
if (silConv.isSILIndirect(params[i - firstParam])) {
assert(paramType.isAddress() && "SIL type is not an address?");
auto addr = in.claimNext();
if (addr->getType() != toTy)
addr = IGF.coerceValue(addr, toTy, IGF.IGM.DataLayout);
out.add(addr);
break;
}
emitDirectExternalArgument(IGF, paramType, AI, in, out, isOutlined);
break;
}
case clang::CodeGen::ABIArgInfo::Indirect: {
auto &ti = cast<LoadableTypeInfo>(IGF.getTypeInfo(paramType));
auto temp = ti.allocateStack(IGF, paramType, "indirect-temporary");
temporaries.add({temp, paramType});
Address addr = temp.getAddress();
// Set at least the alignment the ABI expects.
if (AI.getIndirectByVal()) {
auto ABIAlign = AI.getIndirectAlign();
if (ABIAlign > addr.getAlignment()) {
auto *AS = cast<llvm::AllocaInst>(addr.getAddress());
AS->setAlignment(ABIAlign.getQuantity());
addr = Address(addr.getAddress(), Alignment(ABIAlign.getQuantity()));
}
}
ti.initialize(IGF, in, addr, isOutlined);
out.add(addr.getAddress());
break;
}
case clang::CodeGen::ABIArgInfo::CoerceAndExpand: {
auto ¶mTI = cast<LoadableTypeInfo>(IGF.getTypeInfo(paramType));
emitCoerceAndExpand(IGF, in, out, paramType, paramTI,
AI.getCoerceAndExpandType(),
AI.getCoerceAndExpandTypeSequence(),
TranslationDirection::ToForeign, isOutlined);
break;
}
case clang::CodeGen::ABIArgInfo::Expand:
emitClangExpandedArgument(
IGF, in, out, clangParamTy, paramType,
cast<LoadableTypeInfo>(IGF.getTypeInfo(paramType)), isOutlined);
break;
case clang::CodeGen::ABIArgInfo::Ignore:
break;
case clang::CodeGen::ABIArgInfo::InAlloca:
llvm_unreachable("Need to handle InAlloca when externalizing arguments");
break;
}
}
}
/// Returns whether allocas are needed.
bool irgen::addNativeArgument(IRGenFunction &IGF, Explosion &in,
SILParameterInfo origParamInfo, Explosion &out,
bool isOutlined) {
// Addresses consist of a single pointer argument.
if (IGF.IGM.silConv.isSILIndirect(origParamInfo)) {
out.add(in.claimNext());
return false;
}
auto paramType = IGF.IGM.silConv.getSILType(origParamInfo);
auto &ti = cast<LoadableTypeInfo>(IGF.getTypeInfo(paramType));
auto schema = ti.getSchema();
auto &nativeSchema = ti.nativeParameterValueSchema(IGF.IGM);
if (nativeSchema.requiresIndirect()) {
// Pass the argument indirectly.
auto buf = IGF.createAlloca(ti.getStorageType(),
ti.getFixedAlignment(), "");
ti.initialize(IGF, in, buf, isOutlined);
out.add(buf.getAddress());
return true;
} else {
if (schema.empty()) {
assert(nativeSchema.empty());
return false;
}
assert(!nativeSchema.empty());
// Pass the argument explosion directly, mapping into the native swift
// calling convention.
Explosion nonNativeParam;
ti.reexplode(IGF, in, nonNativeParam);
Explosion nativeParam = nativeSchema.mapIntoNative(
IGF.IGM, IGF, nonNativeParam, paramType, isOutlined);
nativeParam.transferInto(out, nativeParam.size());
return false;
}
}
/// Emit a direct parameter that was passed under a C-based CC.
static void emitDirectForeignParameter(IRGenFunction &IGF, Explosion &in,
const clang::CodeGen::ABIArgInfo &AI,
Explosion &out, SILType paramType,
const LoadableTypeInfo ¶mTI) {
// The ABI IR types for the entrypoint might differ from the
// Swift IR types for the body of the function.
llvm::Type *coercionTy = AI.getCoerceToType();
ArrayRef<llvm::Type*> expandedTys;
if (AI.isDirect() && AI.getCanBeFlattened() &&
isa<llvm::StructType>(coercionTy)) {
const auto *ST = cast<llvm::StructType>(coercionTy);
expandedTys = makeArrayRef(ST->element_begin(), ST->getNumElements());
} else if (coercionTy == paramTI.getStorageType()) {
// Fast-path a really common case. This check assumes that either
// the storage type of a type is an llvm::StructType or it has a
// single-element explosion.
out.add(in.claimNext());
return;
} else {
expandedTys = coercionTy;
}
auto outputSchema = paramTI.getSchema();
// Check to see if we can pairwise-coerce Swift's exploded scalars
// to Clang's expanded elements.
if (canCoerceToSchema(IGF.IGM, expandedTys, outputSchema)) {
for (auto &outputElt : outputSchema) {
llvm::Value *param = in.claimNext();
llvm::Type *outputTy = outputElt.getScalarType();
if (param->getType() != outputTy)
param = IGF.coerceValue(param, outputTy, IGF.IGM.DataLayout);
out.add(param);
}
return;
}
// Otherwise, we need to traffic through memory.
// Create a temporary.
Address temporary; Size tempSize;
std::tie(temporary, tempSize) = allocateForCoercion(IGF,
coercionTy,
paramTI.getStorageType(),
"");
IGF.Builder.CreateLifetimeStart(temporary, tempSize);
// Write the input parameters into the temporary:
Address coercedAddr =
IGF.Builder.CreateBitCast(temporary, coercionTy->getPointerTo());
// Break down a struct expansion if necessary.
if (auto expansionTy = dyn_cast<llvm::StructType>(coercionTy)) {
auto layout = IGF.IGM.DataLayout.getStructLayout(expansionTy);
for (unsigned i = 0, e = expansionTy->getNumElements(); i != e; ++i) {
auto fieldOffset = Size(layout->getElementOffset(i));
auto fieldAddr = IGF.Builder.CreateStructGEP(coercedAddr, i, fieldOffset);
IGF.Builder.CreateStore(in.claimNext(), fieldAddr);
}
// Otherwise, store the single scalar.
} else {
IGF.Builder.CreateStore(in.claimNext(), coercedAddr);
}
// Pull out the elements.
temporary = IGF.Builder.CreateBitCast(temporary,
paramTI.getStorageType()->getPointerTo());
paramTI.loadAsTake(IGF, temporary, out);
// Deallocate the temporary.
// `deallocateStack` emits the lifetime.end marker for us.
paramTI.deallocateStack(IGF, StackAddress(temporary), paramType);
}
void irgen::emitForeignParameter(IRGenFunction &IGF, Explosion ¶ms,
ForeignFunctionInfo foreignInfo,
unsigned foreignParamIndex, SILType paramTy,
const LoadableTypeInfo ¶mTI,
Explosion ¶mExplosion, bool isOutlined) {
assert(foreignInfo.ClangInfo);
auto &FI = *foreignInfo.ClangInfo;
auto clangArgTy = FI.arg_begin()[foreignParamIndex].type;
auto AI = FI.arg_begin()[foreignParamIndex].info;
// We don't need to do anything to handle the Swift parameter-ABI
// attributes here because we shouldn't be trying to round-trip
// swiftcall function pointers through SIL as C functions anyway.
assert(FI.getExtParameterInfo(foreignParamIndex).getABI()
== clang::ParameterABI::Ordinary);
// Drop padding arguments.
if (AI.getPaddingType())
params.claimNext();
switch (AI.getKind()) {
case clang::CodeGen::ABIArgInfo::Extend:
case clang::CodeGen::ABIArgInfo::Direct:
emitDirectForeignParameter(IGF, params, AI, paramExplosion, paramTy,
paramTI);
return;
case clang::CodeGen::ABIArgInfo::Indirect: {
Address address = paramTI.getAddressForPointer(params.claimNext());
paramTI.loadAsTake(IGF, address, paramExplosion);
return;
}
case clang::CodeGen::ABIArgInfo::Expand: {
emitClangExpandedParameter(IGF, params, paramExplosion, clangArgTy,
paramTy, paramTI);
return;
}
case clang::CodeGen::ABIArgInfo::CoerceAndExpand: {
auto ¶mTI = cast<LoadableTypeInfo>(IGF.getTypeInfo(paramTy));
emitCoerceAndExpand(IGF, params, paramExplosion, paramTy, paramTI,
AI.getCoerceAndExpandType(),
AI.getCoerceAndExpandTypeSequence(),
TranslationDirection::ToNative, isOutlined);
break;
}
case clang::CodeGen::ABIArgInfo::Ignore:
return;
case clang::CodeGen::ABIArgInfo::InAlloca:
llvm_unreachable("Need to handle InAlloca during signature expansion");
}
}
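/// Emit the common entry sequence for a retcon-style coroutine: declare the
/// coroutine id with the given intrinsic (using malloc/free as the
/// allocator) and call llvm.coro.begin.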
static void emitRetconCoroutineEntry(IRGenFunction &IGF,
CanSILFunctionType fnType,
Explosion &allParamValues,
llvm::Intrinsic::ID idIntrinsic,
Size bufferSize,
Alignment bufferAlignment) {
auto prototype =
IGF.IGM.getOpaquePtr(IGF.IGM.getAddrOfContinuationPrototype(fnType));
// Use malloc and free as our allocator.
auto allocFn = IGF.IGM.getOpaquePtr(IGF.IGM.getMallocFn());
auto deallocFn = IGF.IGM.getOpaquePtr(IGF.IGM.getFreeFn());
// Call the right 'llvm.coro.id.retcon' variant.
llvm::Value *buffer = allParamValues.claimNext();
llvm::Value *id = IGF.Builder.CreateIntrinsicCall(idIntrinsic, {
llvm::ConstantInt::get(IGF.IGM.Int32Ty, bufferSize.getValue()),
llvm::ConstantInt::get(IGF.IGM.Int32Ty, bufferAlignment.getValue()),
buffer,
prototype,
allocFn,
deallocFn
});
// Call 'llvm.coro.begin', just for consistency with the normal pattern.
// This serves as a handle that we can pass around to other intrinsics.
auto hdl = IGF.Builder.CreateIntrinsicCall(llvm::Intrinsic::ID::coro_begin, {
id,
llvm::ConstantPointerNull::get(IGF.IGM.Int8PtrTy)
});
// Set the coroutine handle; this also flags that this is a coroutine so
// that e.g. dynamic allocas use the right code generation.
IGF.setCoroutineHandle(hdl);
}
void irgen::emitYieldOnceCoroutineEntry(IRGenFunction &IGF,
CanSILFunctionType fnType,
Explosion &allParamValues) {
emitRetconCoroutineEntry(IGF, fnType, allParamValues,
llvm::Intrinsic::ID::coro_id_retcon_once,
getYieldOnceCoroutineBufferSize(IGF.IGM),
getYieldOnceCoroutineBufferAlignment(IGF.IGM));
}
void irgen::emitYieldManyCoroutineEntry(IRGenFunction &IGF,
CanSILFunctionType fnType,
Explosion &allParamValues) {
emitRetconCoroutineEntry(IGF, fnType, allParamValues,
llvm::Intrinsic::ID::coro_id_retcon,
getYieldManyCoroutineBufferSize(IGF.IGM),
getYieldManyCoroutineBufferAlignment(IGF.IGM));
}
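/// Allocate an opaque fixed-size buffer on the stack and begin its lifetime.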
static Address createOpaqueBufferAlloca(IRGenFunction &IGF,
Size size, Alignment align) {
auto ty = llvm::ArrayType::get(IGF.IGM.Int8Ty, size.getValue());
auto addr = IGF.createAlloca(ty, align);
addr = IGF.Builder.CreateStructGEP(addr, 0, Size(0));
IGF.Builder.CreateLifetimeStart(addr, size);
return addr;
}
Address irgen::emitAllocYieldOnceCoroutineBuffer(IRGenFunction &IGF) {
return createOpaqueBufferAlloca(IGF, getYieldOnceCoroutineBufferSize(IGF.IGM),
getYieldOnceCoroutineBufferAlignment(IGF.IGM));
}
Address irgen::emitAllocYieldManyCoroutineBuffer(IRGenFunction &IGF) {
return createOpaqueBufferAlloca(IGF, getYieldManyCoroutineBufferSize(IGF.IGM),
getYieldManyCoroutineBufferAlignment(IGF.IGM));
}
void irgen::emitDeallocYieldOnceCoroutineBuffer(IRGenFunction &IGF,
Address buffer) {
auto bufferSize = getYieldOnceCoroutineBufferSize(IGF.IGM);
IGF.Builder.CreateLifetimeEnd(buffer, bufferSize);
}
void irgen::emitDeallocYieldManyCoroutineBuffer(IRGenFunction &IGF,
Address buffer) {
auto bufferSize = getYieldManyCoroutineBufferSize(IGF.IGM);
IGF.Builder.CreateLifetimeEnd(buffer, bufferSize);
}
llvm::Value *irgen::emitYield(IRGenFunction &IGF,
CanSILFunctionType coroutineType,
Explosion &substValues) {
auto coroSignature = IGF.IGM.getSignature(coroutineType);
auto coroInfo = coroSignature.getCoroutineInfo();
// Translate the arguments to an unsubstituted form.
Explosion allComponents;
for (auto yield : coroutineType->getYields())
addNativeArgument(IGF, substValues, yield, allComponents, false);
// Figure out which arguments need to be yielded directly.
SmallVector<llvm::Value*, 8> yieldArgs;
// Add the direct yield components.
auto directComponents =
allComponents.claim(coroInfo.NumDirectYieldComponents);
yieldArgs.append(directComponents.begin(), directComponents.end());
// The rest need to go into an indirect buffer.
auto indirectComponents = allComponents.claimAll();
auto resultStructTy =
dyn_cast<llvm::StructType>(coroSignature.getType()->getReturnType());
assert((!resultStructTy
&& directComponents.empty()
&& indirectComponents.empty())
|| (resultStructTy
&& resultStructTy->getNumElements() ==
(1 + directComponents.size()
+ unsigned(!indirectComponents.empty()))));
// Fill in the indirect buffer if necessary.
Optional<Address> indirectBuffer;
Size indirectBufferSize;
if (!indirectComponents.empty()) {
auto bufferStructTy = cast<llvm::StructType>(
resultStructTy->getElementType(resultStructTy->getNumElements() - 1)
->getPointerElementType());
auto layout = IGF.IGM.DataLayout.getStructLayout(bufferStructTy);
indirectBuffer = IGF.createAlloca(bufferStructTy,
Alignment(layout->getAlignment()));
indirectBufferSize = Size(layout->getSizeInBytes());
IGF.Builder.CreateLifetimeStart(*indirectBuffer, indirectBufferSize);
for (size_t i : indices(bufferStructTy->elements())) {
// Skip padding elements.
if (bufferStructTy->getElementType(i)->isArrayTy())
continue;
assert(!indirectComponents.empty() &&
"insufficient number of indirect yield components");
auto addr = IGF.Builder.CreateStructGEP(*indirectBuffer, i, layout);
IGF.Builder.CreateStore(indirectComponents.front(), addr);
indirectComponents = indirectComponents.drop_front();
}
assert(indirectComponents.empty() && "too many indirect yield components");
// Remember to yield the indirect buffer.
yieldArgs.push_back(indirectBuffer->getAddress());
}
// Perform the yield.
auto isUnwind =
IGF.Builder.CreateIntrinsicCall(llvm::Intrinsic::ID::coro_suspend_retcon,
{ IGF.IGM.Int1Ty },
yieldArgs);
// We're done with the indirect buffer.
if (indirectBuffer) {
IGF.Builder.CreateLifetimeEnd(*indirectBuffer, indirectBufferSize);
}
return isUnwind;
}
/// Add a new set of arguments to the function.
void CallEmission::setArgs(Explosion &original, bool isOutlined,
WitnessMetadata *witnessMetadata) {
// Convert arguments to a representation appropriate to the calling
// convention.
Explosion adjusted;
auto origCalleeType = CurCallee.getOrigFunctionType();
SILFunctionConventions fnConv(origCalleeType, IGF.getSILModule());
// Pass along the indirect result pointers.
original.transferInto(adjusted, fnConv.getNumIndirectSILResults());
// Pass along the coroutine buffer.
switch (origCalleeType->getCoroutineKind()) {
case SILCoroutineKind::YieldMany:
case SILCoroutineKind::YieldOnce:
original.transferInto(adjusted, 1);
break;
case SILCoroutineKind::None:
break;
}
// Translate the formal arguments and handle any special arguments.
switch (getCallee().getRepresentation()) {
case SILFunctionTypeRepresentation::ObjCMethod:
adjusted.add(getCallee().getObjCMethodReceiver());
adjusted.add(getCallee().getObjCMethodSelector());
externalizeArguments(IGF, getCallee(), original, adjusted,
Temporaries, isOutlined);
break;
case SILFunctionTypeRepresentation::Block:
adjusted.add(getCallee().getBlockObject());
LLVM_FALLTHROUGH;
case SILFunctionTypeRepresentation::CFunctionPointer:
externalizeArguments(IGF, getCallee(), original, adjusted,
Temporaries, isOutlined);
break;
case SILFunctionTypeRepresentation::WitnessMethod:
assert(witnessMetadata);
assert(witnessMetadata->SelfMetadata->getType() ==
IGF.IGM.TypeMetadataPtrTy);
assert(witnessMetadata->SelfWitnessTable->getType() ==
IGF.IGM.WitnessTablePtrTy);
Args.rbegin()[1] = witnessMetadata->SelfMetadata;
Args.rbegin()[0] = witnessMetadata->SelfWitnessTable;
LLVM_FALLTHROUGH;
case SILFunctionTypeRepresentation::Closure:
case SILFunctionTypeRepresentation::Method:
case SILFunctionTypeRepresentation::Thin:
case SILFunctionTypeRepresentation::Thick: {
// Check for value arguments that need to be passed indirectly.
// But don't expect to see 'self' if it's been moved to the context
// position.
auto params = origCalleeType->getParameters();
if (hasSelfContextParameter(origCalleeType)) {
params = params.drop_back();
}
for (auto param : params) {
addNativeArgument(IGF, original, param, adjusted, isOutlined);
}
// Anything else, just pass along. This will include things like
// generic arguments.
adjusted.add(original.claimAll());
break;
}
}
// Add the given number of arguments.
assert(LastArgWritten >= adjusted.size());
size_t targetIndex = LastArgWritten - adjusted.size();
assert(targetIndex <= 1);
LastArgWritten = targetIndex;
auto argIterator = Args.begin() + targetIndex;
for (auto value : adjusted.claimAll()) {
*argIterator++ = value;
}
}
void CallEmission::addAttribute(unsigned index,
llvm::Attribute::AttrKind attr) {
auto &attrs = CurCallee.getMutableAttributes();
attrs = attrs.addAttribute(IGF.IGM.LLVMContext, index, attr);
}
/// Initialize an Explosion with the parameters of the current
/// function. All of the objects will be added unmanaged. This is
/// really only useful when writing prologue code.
Explosion IRGenFunction::collectParameters() {
Explosion params;
for (auto i = CurFn->arg_begin(), e = CurFn->arg_end(); i != e; ++i)
params.add(&*i);
return params;
}
/// Fetch the error result slot.
Address IRGenFunction::getErrorResultSlot(SILType errorType) {
if (!ErrorResultSlot) {
auto &errorTI = cast<FixedTypeInfo>(getTypeInfo(errorType));
IRBuilder builder(IGM.getLLVMContext(), IGM.DebugInfo != nullptr);
builder.SetInsertPoint(AllocaIP->getParent(), AllocaIP->getIterator());
// Create the alloca. We don't use allocateStack because we're
// not allocating this in stack order.
auto addr = createAlloca(errorTI.getStorageType(),
errorTI.getFixedAlignment(),
"swifterror");
// Only add the swifterror attribute on ABIs that pass it in a register.
// We create a shadow stack location of the swifterror parameter for the
// debugger on platforms that pass swifterror by reference and so we can't
// mark the parameter with a swifterror attribute for these.
if (IGM.IsSwiftErrorInRegister)
cast<llvm::AllocaInst>(addr.getAddress())->setSwiftError(true);
// Initialize at the alloca point.
auto nullError = llvm::ConstantPointerNull::get(
cast<llvm::PointerType>(errorTI.getStorageType()));
builder.CreateStore(nullError, addr);
ErrorResultSlot = addr.getAddress();
}
return Address(ErrorResultSlot, IGM.getPointerAlignment());
}
/// Fetch the error result slot received from the caller.
Address IRGenFunction::getCallerErrorResultSlot() {
assert(ErrorResultSlot && "no error result slot!");
assert(isa<llvm::Argument>(ErrorResultSlot) && "error result slot is local!");
return Address(ErrorResultSlot, IGM.getPointerAlignment());
}
/// Set the error result slot. This should only be done in the prologue.
void IRGenFunction::setErrorResultSlot(llvm::Value *address) {
assert(!ErrorResultSlot && "already have error result slot!");
assert(isa<llvm::PointerType>(address->getType()));
ErrorResultSlot = address;
}
/// Emit the basic block that 'return' should branch to and insert it into
/// the current function. This creates a second
/// insertion point that most blocks should be inserted before.
void IRGenFunction::emitBBForReturn() {
ReturnBB = createBasicBlock("return");
CurFn->getBasicBlockList().push_back(ReturnBB);
}
/// Emit the prologue for the function.
void IRGenFunction::emitPrologue() {
// Set up the IRBuilder.
llvm::BasicBlock *EntryBB = createBasicBlock("entry");
assert(CurFn->getBasicBlockList().empty() && "prologue already emitted?");
CurFn->getBasicBlockList().push_back(EntryBB);
Builder.SetInsertPoint(EntryBB);
// Set up the alloca insertion point.
AllocaIP = Builder.IRBuilderBase::CreateAlloca(IGM.Int1Ty,
/*array size*/ nullptr,
"alloca point");
}
/// Emit a branch to the return block and set the insert point there.
/// Returns true if the return block is reachable, false otherwise.
bool IRGenFunction::emitBranchToReturnBB() {
// If there are no edges to the return block, we never want to emit it.
if (ReturnBB->use_empty()) {
ReturnBB->eraseFromParent();
// Normally this means that we'll just insert the epilogue in the
// current block, but if the current IP is unreachable then so is
// the entire epilogue.
if (!Builder.hasValidIP())
return false;
// Otherwise, branch to it if the current IP is reachable.
} else if (Builder.hasValidIP()) {
Builder.CreateBr(ReturnBB);
Builder.SetInsertPoint(ReturnBB);
// Otherwise, if there is exactly one use of the return block, merge
// it into its predecessor.
} else if (ReturnBB->hasOneUse()) {
// return statements are never emitted as conditional branches.
llvm::BranchInst *Br = cast<llvm::BranchInst>(*ReturnBB->use_begin());
assert(Br->isUnconditional());
Builder.SetInsertPoint(Br->getParent());
Br->eraseFromParent();
ReturnBB->eraseFromParent();
// Otherwise, just move the IP to the return block.
} else {
Builder.SetInsertPoint(ReturnBB);
}
return true;
}
/// Emit the epilogue for the function.
void IRGenFunction::emitEpilogue() {
// Destroy the alloca insertion point.
AllocaIP->eraseFromParent();
}
std::pair<Address, Size>
irgen::allocateForCoercion(IRGenFunction &IGF,
llvm::Type *fromTy,
llvm::Type *toTy,
const llvm::Twine &basename) {
auto &DL = IGF.IGM.DataLayout;
auto fromSize = DL.getTypeSizeInBits(fromTy);
auto toSize = DL.getTypeSizeInBits(toTy);
auto bufferTy = fromSize >= toSize
? fromTy
: toTy;
auto alignment = std::max(DL.getABITypeAlignment(fromTy),
DL.getABITypeAlignment(toTy));
auto buffer = IGF.createAlloca(bufferTy, Alignment(alignment),
basename + ".coerced");
Size size(std::max(fromSize, toSize));
return {buffer, size};
}
llvm::Value* IRGenFunction::coerceValue(llvm::Value *value, llvm::Type *toTy,
const llvm::DataLayout &DL)
{
llvm::Type *fromTy = value->getType();
assert(fromTy != toTy && "Unexpected same types in type coercion!");
assert(!fromTy->isVoidTy()
&& "Unexpected void source type in type coercion!");
assert(!toTy->isVoidTy()
&& "Unexpected void destination type in type coercion!");
// Use the pointer/pointer and pointer/int casts if we can.
if (toTy->isPointerTy()) {
if (fromTy->isPointerTy())
return Builder.CreateBitCast(value, toTy);
if (fromTy == IGM.IntPtrTy)
return Builder.CreateIntToPtr(value, toTy);
} else if (fromTy->isPointerTy()) {
if (toTy == IGM.IntPtrTy) {
return Builder.CreatePtrToInt(value, toTy);
}
}
// Otherwise we need to store, bitcast, and load.
Address address; Size size;
std::tie(address, size) = allocateForCoercion(*this, fromTy, toTy,
value->getName() + ".coercion");
Builder.CreateLifetimeStart(address, size);
auto orig = Builder.CreateBitCast(address, fromTy->getPointerTo());
Builder.CreateStore(value, orig);
auto coerced = Builder.CreateBitCast(address, toTy->getPointerTo());
auto loaded = Builder.CreateLoad(coerced);
Builder.CreateLifetimeEnd(address, size);
return loaded;
}
void IRGenFunction::emitScalarReturn(llvm::Type *resultType,
Explosion &result) {
if (result.empty()) {
Builder.CreateRetVoid();
return;
}
auto *ABIType = CurFn->getReturnType();
if (result.size() == 1) {
auto *returned = result.claimNext();
if (ABIType != returned->getType())
returned = coerceValue(returned, ABIType, IGM.DataLayout);
Builder.CreateRet(returned);
return;
}
// Multiple return values are returned as a struct.
assert(cast<llvm::StructType>(resultType)->getNumElements() == result.size());
llvm::Value *resultAgg = llvm::UndefValue::get(resultType);
for (unsigned i = 0, e = result.size(); i != e; ++i) {
llvm::Value *elt = result.claimNext();
resultAgg = Builder.CreateInsertValue(resultAgg, elt, i);
}
if (ABIType != resultType)
resultAgg = coerceValue(resultAgg, ABIType, IGM.DataLayout);
Builder.CreateRet(resultAgg);
}
/// Adjust the alignment of the alloca pointed to by \p allocaAddr to the
/// required alignment of the struct \p type.
static void adjustAllocaAlignment(const llvm::DataLayout &DL,
Address allocaAddr, llvm::StructType *type) {
auto layout = DL.getStructLayout(type);
Alignment layoutAlignment = Alignment(layout->getAlignment());
auto alloca = cast<llvm::AllocaInst>(allocaAddr.getAddress());
if (alloca->getAlignment() < layoutAlignment.getValue()) {
alloca->setAlignment(layoutAlignment.getValue());
allocaAddr = Address(allocaAddr.getAddress(), layoutAlignment);
}
}
unsigned NativeConventionSchema::size() const {
if (empty())
return 0;
unsigned size = 0;
Lowering.enumerateComponents([&](clang::CharUnits offset,
clang::CharUnits end,
llvm::Type *type) { ++size; });
return size;
}
static bool canMatchByTruncation(IRGenModule &IGM,
ArrayRef<llvm::Type*> expandedTys,
const ExplosionSchema &schema) {
// If the schemas don't even match in number, we have to go
// through memory.
if (expandedTys.size() != schema.size() || expandedTys.empty())
return false;
if (expandedTys.size() == 1) return false;
// If there are multiple elements, the pairs of types need to
  // match in size up to the penultimate for the truncation to work.
size_t e = expandedTys.size();
for (size_t i = 0; i != e - 1; ++i) {
    // Each leading pair of types has to match exactly or at least in size.
llvm::Type *outputTy = schema[i].getScalarType();
llvm::Type *inputTy = expandedTys[i];
if (inputTy != outputTy &&
IGM.DataLayout.getTypeSizeInBits(inputTy) !=
IGM.DataLayout.getTypeSizeInBits(outputTy))
return false;
}
  // Check that we can truncate the last element.
  llvm::Type *outputTy = schema[e-1].getScalarType();
llvm::Type *inputTy = expandedTys[e-1];
return inputTy == outputTy || (IGM.DataLayout.getTypeSizeInBits(inputTy) ==
IGM.DataLayout.getTypeSizeInBits(outputTy)) ||
(IGM.DataLayout.getTypeSizeInBits(inputTy) >
IGM.DataLayout.getTypeSizeInBits(outputTy) &&
isa<llvm::IntegerType>(inputTy) && isa<llvm::IntegerType>(outputTy));
}
Explosion NativeConventionSchema::mapFromNative(IRGenModule &IGM,
IRGenFunction &IGF,
Explosion &native,
SILType type) const {
if (native.empty()) {
assert(empty() && "Empty explosion must match the native convention");
return Explosion();
}
assert(!empty());
auto *nativeTy = getExpandedType(IGM);
auto expandedTys = expandScalarOrStructTypeToArray(nativeTy);
auto &TI = IGM.getTypeInfo(type);
auto schema = TI.getSchema();
// The expected explosion type.
auto *explosionTy = schema.getScalarResultType(IGM);
// Check whether we can coerce the explosion to the expected type convention.
auto &DataLayout = IGM.DataLayout;
Explosion nonNativeExplosion;
if (canCoerceToSchema(IGM, expandedTys, schema)) {
if (native.size() == 1) {
auto *elt = native.claimNext();
if (explosionTy != elt->getType()) {
if (isa<llvm::IntegerType>(explosionTy) &&
isa<llvm::IntegerType>(elt->getType())) {
elt = IGF.Builder.CreateTrunc(elt, explosionTy);
} else {
elt = IGF.coerceValue(elt, explosionTy, DataLayout);
}
}
nonNativeExplosion.add(elt);
return nonNativeExplosion;
} else if (nativeTy == explosionTy) {
native.transferInto(nonNativeExplosion, native.size());
return nonNativeExplosion;
}
  // Otherwise, we have to go through memory if we can't match by truncation.
} else if (canMatchByTruncation(IGM, expandedTys, schema)) {
assert(expandedTys.size() == schema.size());
for (size_t i = 0, e = expandedTys.size(); i != e; ++i) {
auto *elt = native.claimNext();
auto *schemaTy = schema[i].getScalarType();
auto *nativeTy = elt->getType();
assert(nativeTy == expandedTys[i]);
if (schemaTy == nativeTy) {
// elt = elt
} else if (DataLayout.getTypeSizeInBits(schemaTy) ==
DataLayout.getTypeSizeInBits(nativeTy))
elt = IGF.coerceValue(elt, schemaTy, DataLayout);
else {
assert(DataLayout.getTypeSizeInBits(schemaTy) <
DataLayout.getTypeSizeInBits(nativeTy));
elt = IGF.Builder.CreateTrunc(elt, schemaTy);
}
nonNativeExplosion.add(elt);
}
return nonNativeExplosion;
}
// If not, go through memory.
auto &loadableTI = cast<LoadableTypeInfo>(TI);
// We can get two layouts if there are overlapping ranges in the legal type
// sequence.
llvm::StructType *coercionTy, *overlappedCoercionTy;
SmallVector<unsigned, 8> expandedTyIndicesMap;
std::tie(coercionTy, overlappedCoercionTy) =
getCoercionTypes(IGM, expandedTyIndicesMap);
// Get the larger layout out of those two.
auto coercionSize = DataLayout.getTypeSizeInBits(coercionTy);
auto overlappedCoercionSize =
DataLayout.getTypeSizeInBits(overlappedCoercionTy);
llvm::StructType *largerCoercion = coercionSize >= overlappedCoercionSize
? coercionTy
: overlappedCoercionTy;
// Allocate a temporary for the coercion.
Address temporary;
Size tempSize;
std::tie(temporary, tempSize) = allocateForCoercion(
IGF, largerCoercion, loadableTI.getStorageType(), "temp-coercion");
// Make sure we have sufficiently large alignment.
adjustAllocaAlignment(DataLayout, temporary, coercionTy);
adjustAllocaAlignment(DataLayout, temporary, overlappedCoercionTy);
auto &Builder = IGF.Builder;
Builder.CreateLifetimeStart(temporary, tempSize);
// Store the expanded type elements.
auto coercionAddr = Builder.CreateElementBitCast(temporary, coercionTy);
unsigned expandedMapIdx = 0;
auto eltsArray = native.claimAll();
SmallVector<llvm::Value *, 8> nativeElts(eltsArray.begin(), eltsArray.end());
auto storeToFn = [&](llvm::StructType *ty, Address structAddr) {
for (auto eltIndex : indices(ty->elements())) {
auto layout = DataLayout.getStructLayout(ty);
auto eltTy = ty->getElementType(eltIndex);
// Skip padding fields.
if (eltTy->isArrayTy())
continue;
Address eltAddr = Builder.CreateStructGEP(structAddr, eltIndex, layout);
auto index = expandedTyIndicesMap[expandedMapIdx];
assert(index < nativeElts.size() && nativeElts[index] != nullptr);
auto nativeElt = nativeElts[index];
Builder.CreateStore(nativeElt, eltAddr);
nativeElts[index] = nullptr;
++expandedMapIdx;
}
};
storeToFn(coercionTy, coercionAddr);
if (!overlappedCoercionTy->isEmptyTy()) {
auto overlappedCoercionAddr =
Builder.CreateElementBitCast(temporary, overlappedCoercionTy);
storeToFn(overlappedCoercionTy, overlappedCoercionAddr);
}
// Reload according to the types schema.
Address storageAddr = Builder.CreateBitCast(
temporary, loadableTI.getStorageType()->getPointerTo());
loadableTI.loadAsTake(IGF, storageAddr, nonNativeExplosion);
Builder.CreateLifetimeEnd(temporary, tempSize);
return nonNativeExplosion;
}
Explosion NativeConventionSchema::mapIntoNative(IRGenModule &IGM,
IRGenFunction &IGF,
Explosion &fromNonNative,
SILType type,
bool isOutlined) const {
if (fromNonNative.empty()) {
assert(empty() && "Empty explosion must match the native convention");
return Explosion();
}
assert(!requiresIndirect() && "Expected direct convention");
assert(!empty());
auto *nativeTy = getExpandedType(IGM);
auto expandedTys = expandScalarOrStructTypeToArray(nativeTy);
auto &TI = IGM.getTypeInfo(type);
auto schema = TI.getSchema();
auto *explosionTy = schema.getScalarResultType(IGM);
// Check whether we can coerce the explosion to the expected type convention.
auto &DataLayout = IGM.DataLayout;
Explosion nativeExplosion;
if (canCoerceToSchema(IGM, expandedTys, schema)) {
if (fromNonNative.size() == 1) {
auto *elt = fromNonNative.claimNext();
if (nativeTy != elt->getType()) {
if (isa<llvm::IntegerType>(nativeTy) &&
isa<llvm::IntegerType>(elt->getType()))
elt = IGF.Builder.CreateZExt(elt, nativeTy);
else
elt = IGF.coerceValue(elt, nativeTy, DataLayout);
}
nativeExplosion.add(elt);
return nativeExplosion;
} else if (nativeTy == explosionTy) {
fromNonNative.transferInto(nativeExplosion, fromNonNative.size());
return nativeExplosion;
}
// Otherwise, we have to go through memory if we can't match by truncation.
} else if (canMatchByTruncation(IGM, expandedTys, schema)) {
assert(expandedTys.size() == schema.size());
for (size_t i = 0, e = expandedTys.size(); i != e; ++i) {
auto *elt = fromNonNative.claimNext();
auto *schemaTy = elt->getType();
auto *nativeTy = expandedTys[i];
assert(schema[i].getScalarType() == schemaTy);
if (schemaTy == nativeTy) {
// elt = elt
} else if (DataLayout.getTypeSizeInBits(schemaTy) ==
DataLayout.getTypeSizeInBits(nativeTy))
elt = IGF.coerceValue(elt, nativeTy, DataLayout);
else {
assert(DataLayout.getTypeSizeInBits(schemaTy) <
DataLayout.getTypeSizeInBits(nativeTy));
elt = IGF.Builder.CreateZExt(elt, nativeTy);
}
nativeExplosion.add(elt);
}
return nativeExplosion;
}
// If not, go through memory.
auto &loadableTI = cast<LoadableTypeInfo>(TI);
// We can get two layouts if there are overlapping ranges in the legal type
// sequence.
llvm::StructType *coercionTy, *overlappedCoercionTy;
SmallVector<unsigned, 8> expandedTyIndicesMap;
std::tie(coercionTy, overlappedCoercionTy) =
getCoercionTypes(IGM, expandedTyIndicesMap);
// Get the larger layout out of those two.
auto coercionSize = DataLayout.getTypeSizeInBits(coercionTy);
auto overlappedCoercionSize =
DataLayout.getTypeSizeInBits(overlappedCoercionTy);
llvm::StructType *largerCoercion = coercionSize >= overlappedCoercionSize
? coercionTy
: overlappedCoercionTy;
// Allocate a temporary for the coercion.
Address temporary;
Size tempSize;
std::tie(temporary, tempSize) = allocateForCoercion(
IGF, largerCoercion, loadableTI.getStorageType(), "temp-coercion");
// Make sure we have sufficiently large alignment.
adjustAllocaAlignment(DataLayout, temporary, coercionTy);
adjustAllocaAlignment(DataLayout, temporary, overlappedCoercionTy);
auto &Builder = IGF.Builder;
Builder.CreateLifetimeStart(temporary, tempSize);
// Initialize the memory of the temporary.
Address storageAddr = Builder.CreateBitCast(
temporary, loadableTI.getStorageType()->getPointerTo());
loadableTI.initialize(IGF, fromNonNative, storageAddr, isOutlined);
// Load the expanded type elements from memory.
auto coercionAddr = Builder.CreateElementBitCast(temporary, coercionTy);
unsigned expandedMapIdx = 0;
SmallVector<llvm::Value *, 8> expandedElts(expandedTys.size(), nullptr);
auto loadFromFn = [&](llvm::StructType *ty, Address structAddr) {
for (auto eltIndex : indices(ty->elements())) {
auto layout = DataLayout.getStructLayout(ty);
auto eltTy = ty->getElementType(eltIndex);
// Skip padding fields.
if (eltTy->isArrayTy())
continue;
Address eltAddr = Builder.CreateStructGEP(structAddr, eltIndex, layout);
llvm::Value *elt = Builder.CreateLoad(eltAddr);
auto index = expandedTyIndicesMap[expandedMapIdx];
assert(expandedElts[index] == nullptr);
expandedElts[index] = elt;
++expandedMapIdx;
}
};
loadFromFn(coercionTy, coercionAddr);
if (!overlappedCoercionTy->isEmptyTy()) {
auto overlappedCoercionAddr =
Builder.CreateElementBitCast(temporary, overlappedCoercionTy);
loadFromFn(overlappedCoercionTy, overlappedCoercionAddr);
}
Builder.CreateLifetimeEnd(temporary, tempSize);
// Add the values to the explosion.
for (auto *val : expandedElts)
nativeExplosion.add(val);
assert(expandedTys.size() == nativeExplosion.size());
return nativeExplosion;
}
void IRGenFunction::emitScalarReturn(SILType resultType, Explosion &result,
bool isSwiftCCReturn, bool isOutlined) {
if (result.empty()) {
assert(IGM.getTypeInfo(resultType).nativeReturnValueSchema(IGM).empty() &&
"Empty explosion must match the native calling convention");
Builder.CreateRetVoid();
return;
}
// In the native case no coercion is needed.
if (isSwiftCCReturn) {
auto &nativeSchema =
IGM.getTypeInfo(resultType).nativeReturnValueSchema(IGM);
assert(!nativeSchema.requiresIndirect());
Explosion native =
nativeSchema.mapIntoNative(IGM, *this, result, resultType, isOutlined);
if (native.size() == 1) {
Builder.CreateRet(native.claimNext());
return;
}
llvm::Value *nativeAgg =
llvm::UndefValue::get(nativeSchema.getExpandedType(IGM));
for (unsigned i = 0, e = native.size(); i != e; ++i) {
llvm::Value *elt = native.claimNext();
nativeAgg = Builder.CreateInsertValue(nativeAgg, elt, i);
}
Builder.CreateRet(nativeAgg);
return;
}
// Otherwise we potentially need to coerce the type. We don't need to go
// through the mapping to the native calling convention.
auto *ABIType = CurFn->getReturnType();
if (result.size() == 1) {
auto *returned = result.claimNext();
if (ABIType != returned->getType())
returned = coerceValue(returned, ABIType, IGM.DataLayout);
Builder.CreateRet(returned);
return;
}
auto &resultTI = IGM.getTypeInfo(resultType);
auto schema = resultTI.getSchema();
auto *bodyType = schema.getScalarResultType(IGM);
// Multiple return values are returned as a struct.
assert(cast<llvm::StructType>(bodyType)->getNumElements() == result.size());
llvm::Value *resultAgg = llvm::UndefValue::get(bodyType);
for (unsigned i = 0, e = result.size(); i != e; ++i) {
llvm::Value *elt = result.claimNext();
resultAgg = Builder.CreateInsertValue(resultAgg, elt, i);
}
if (ABIType != bodyType)
resultAgg = coerceValue(resultAgg, ABIType, IGM.DataLayout);
Builder.CreateRet(resultAgg);
}
/// Modify the given variable to hold a pointer whose type is the
/// LLVM lowering of the given function type, and return the signature
/// for the type.
static Signature emitCastOfFunctionPointer(IRGenFunction &IGF,
llvm::Value *&fnPtr,
CanSILFunctionType fnType) {
// Figure out the function type.
auto sig = IGF.IGM.getSignature(fnType);
// Emit the cast.
fnPtr = IGF.Builder.CreateBitCast(fnPtr, sig.getType()->getPointerTo());
// Return the information.
return sig;
}
Callee irgen::getBlockPointerCallee(IRGenFunction &IGF,
llvm::Value *blockPtr,
CalleeInfo &&info) {
// Grab the block pointer and make it the first physical argument.
llvm::PointerType *blockPtrTy = IGF.IGM.ObjCBlockPtrTy;
auto castBlockPtr = IGF.Builder.CreateBitCast(blockPtr, blockPtrTy);
// Extract the invocation pointer for blocks.
auto blockStructTy = blockPtrTy->getElementType();
llvm::Value *invokeFnPtrPtr =
IGF.Builder.CreateStructGEP(blockStructTy, castBlockPtr, 3);
Address invokeFnPtrAddr(invokeFnPtrPtr, IGF.IGM.getPointerAlignment());
llvm::Value *invokeFnPtr = IGF.Builder.CreateLoad(invokeFnPtrAddr);
auto sig = emitCastOfFunctionPointer(IGF, invokeFnPtr, info.OrigFnType);
FunctionPointer fn(invokeFnPtr, sig);
return Callee(std::move(info), fn, blockPtr);
}
Callee irgen::getSwiftFunctionPointerCallee(
IRGenFunction &IGF, llvm::Value *fnPtr, llvm::Value *dataPtr,
CalleeInfo &&calleeInfo, bool castOpaqueToRefcountedContext) {
auto sig = emitCastOfFunctionPointer(IGF, fnPtr, calleeInfo.OrigFnType);
FunctionPointer fn(fnPtr, sig);
if (castOpaqueToRefcountedContext) {
assert(dataPtr && dataPtr->getType() == IGF.IGM.OpaquePtrTy &&
"Expecting trivial closure context");
dataPtr = IGF.Builder.CreateBitCast(dataPtr, IGF.IGM.RefCountedPtrTy);
}
return Callee(std::move(calleeInfo), fn, dataPtr);
}
Callee irgen::getCFunctionPointerCallee(IRGenFunction &IGF,
llvm::Value *fnPtr,
CalleeInfo &&calleeInfo) {
auto sig = emitCastOfFunctionPointer(IGF, fnPtr, calleeInfo.OrigFnType);
FunctionPointer fn(fnPtr, sig);
return Callee(std::move(calleeInfo), fn);
}
FunctionPointer
FunctionPointer::forDirect(IRGenModule &IGM, llvm::Constant *fnPtr,
CanSILFunctionType fnType) {
return forDirect(fnPtr, IGM.getSignature(fnType));
}
FunctionPointer
FunctionPointer::forExplosionValue(IRGenFunction &IGF, llvm::Value *fnPtr,
CanSILFunctionType fnType) {
// Bitcast out of an opaque pointer type.
assert(fnPtr->getType() == IGF.IGM.Int8PtrTy);
auto sig = emitCastOfFunctionPointer(IGF, fnPtr, fnType);
return FunctionPointer(fnPtr, sig);
}
llvm::Value *
FunctionPointer::getExplosionValue(IRGenFunction &IGF,
CanSILFunctionType fnType) const {
// Bitcast to an opaque pointer type.
llvm::Value *fnPtr =
IGF.Builder.CreateBitCast(getPointer(), IGF.IGM.Int8PtrTy);
return fnPtr;
}<|fim▁end|> | llvm_unreachable("bad parameter convention"); |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>extern crate num;
use num::traits::{PrimInt, Zero};
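/// Evaluates a polynomial at `x` by Horner's rule. Coefficients are given
/// lowest-order first, so `[-19, 7, -4, 6]` encodes 6x^3 - 4x^2 + 7x - 19,
/// and folding from the highest coefficient computes
/// ((6x - 4)x + 7)x - 19 with one multiply and one add per term.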
fn horner<T: PrimInt + Zero>(cs: &[T], x: T) -> T {
cs.iter()
.rev()
.fold(Zero::zero(), |acc: T, c| (acc * x) + (*c))
}
fn main() {
println!("{}", horner(&[-19i32, 7, -4, 6], 3i32)); // 128
}
#[cfg(test)]
mod tests {
use super::horner;
#[test]
fn test() {
assert_eq!(horner(&[-19i32, 7, -4, 6], 3i32), 128);
assert_eq!(horner(&[-1i32, 7, -4, 6], 0i32), -1);
assert_eq!(horner(&[-0i32, 3], 100i32), 300);
assert_eq!(horner(&[-20i32, 7, 1], 10i32), 150);
assert_eq!(horner(&[-19i32, 7, -4, 0], 5i32), -84);<|fim▁hole|><|fim▁end|> | }
} |
<|file_name|>routes.py<|end_file_name|><|fim▁begin|>from app import celery
from flask import current_app as app
from datetime import timedelta
from celery.decorators import periodic_task
from flask import jsonify, request, abort
import requests
import json
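# Heartbeat task: POST an empty JSON body to the local server once a second
# and print the reported view count when the call succeeds.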
@periodic_task(run_every=(timedelta(seconds=1)))
def ping():
print "ping!"
headers = {'content-type': 'application/json'}<|fim▁hole|> r = json.loads(response.text)
if r['success'] is True:
print r['server_views']<|fim▁end|> | response = requests.post("http://localhost:9000" + "/send_ping", headers=headers, data=json.dumps({})) |
<|file_name|>rect.rs<|end_file_name|><|fim▁begin|>use std::convert::From;
use graphics::math::{ self, Scalar };
use { Point, Size };
/// A rectangle.
#[derive(Clone, Copy, Debug)]
pub struct Rect {
/// The position of the top left corner of the rectangle.
pub pos: Point,
/// The width and height of the rectangle.
pub size: Size,
}
impl<P: Into<Point>, S: Into<Size>> From<(P, S)> for Rect {
/// Creates a rectangle from the position of its top left corner and its size.
fn from((pos, size): (P, S)) -> Rect {
let (pos, size): (Point, Size) = (pos.into(), size.into());
Rect { pos: pos, size: size }
}
}
impl From<Rect> for [Scalar; 4] {
fn from(rect: Rect) -> [Scalar; 4] {
[rect.pos.x, rect.pos.y, rect.size.w, rect.size.h]
}
}
impl From<[Scalar; 4]> for Rect {
/// Creates a rectangle from an array.
fn from(v: [Scalar; 4]) -> Rect {
Rect {
pos: Point { x: v[0], y: v[1] },
size: Size { w: v[2], h: v[3] },
}
}
}
impl From<(Scalar, Scalar, Scalar, Scalar)> for Rect {
fn from((x, y, w, h): (Scalar, Scalar, Scalar, Scalar)) -> Rect {
Rect {
pos: Point { x: x, y: y },
size: Size { w: w, h: h },
}
}
}
impl Rect {
/// Returns the position of the bottom side of the rectangle.
pub fn bottom(&self) -> Scalar {
self.pos.y + self.size.h
}
    /// Computes a rectangle with quadruple the surface area of self, centered
    /// at `self.pos` (the original top left corner).
pub fn centered(self) -> Rect {
Rect {
pos: Point {
x: self.pos.x - self.size.w,
y: self.pos.y - self.size.h,
},
size: self.size * 2.0,
}
}
/// Compute whether or not the point is inside the rectangle.
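    /// The comparisons are strict, so points on the perimeter are not
    /// considered inside: for a 2x2 rect at the origin, `(1.0, 1.0)` is
    /// contained but `(0.0, 1.0)` is not.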
#[inline(always)]
pub fn contains<T: Into<Point>>(&self, point: T) -> bool {
let point: Point = point.into();
self.left() < point.x && point.x < self.right() &&
self.top() < point.y && point.y < self.bottom()
}
/// Create a rectangle that circumscribes the given circle.
pub fn new_circle<T: Into<Point>>(center: T, radius: Scalar) -> Rect {
let center: Point = center.into();
Rect {
pos: Point {
x: center.x - radius,
y: center.y - radius,
},
size: Size {
w: 2.0 * radius,<|fim▁hole|> }
}
/// Create a square rectangle with sides of length len and top left corner at pos.
pub fn new_square<T: Into<Point>>(pos: T, len: Scalar) -> Rect {
let pos: Point = pos.into();
Rect {
pos: pos,
size: Size { w: len, h: len },
}
}
/// Returns the position of the left side of the rectangle.
pub fn left(&self) -> Scalar {
self.pos.x
}
    /// Computes the rectangle that remains after removing a margin of size `m` from every side of self.
#[inline(always)]
pub fn margin(self, m: Scalar) -> Rect {
math::margin_rectangle(self.into(), m).into()
}
/// Computes a rectangle translated (slid) in the direction of the vector a distance relative
/// to the size of the rectangle. For example, self.relative([1.0, 1.0]) returns a rectangle
/// one rectangle to the right and down from the original.
#[inline(always)]
pub fn relative<T: Into<Point>>(self, v: T) -> Rect {
let v: Point = v.into();
Rect {
pos: Point {
x: self.pos.x + self.size.w * v.x,
y: self.pos.y + self.size.h * v.y,
},
size: self.size,
}
}
/// Returns the position of the right side of the rectangle.
pub fn right(&self) -> Scalar {
self.pos.x + self.size.w
}
/// Computes a scaled rectangle with the same position as self.
pub fn scaled<T: Into<Size>>(self, v: T) -> Rect {
let v: Size = v.into();
Rect {
pos: self.pos,
size: self.size * v,
}
}
/// Returns the position of the top side of the rectangle.
pub fn top(&self) -> Scalar {
self.pos.y
}
}<|fim▁end|> | h: 2.0 * radius,
}, |
<|file_name|>GRIDJava2.rs<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | energymodels.GRIDJava2 |
<|file_name|>key_signature_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
from noisidev import unittest
from .key_signature import KeySignature
<|fim▁hole|>class KeySignatureTest(unittest.TestCase):
def test_equal(self):
self.assertEqual(KeySignature(name='C major'),
KeySignature(name='C major'))
self.assertNotEqual(KeySignature(name='C major'),
KeySignature(name='G major'))
def test_compare_with_bad_class(self):
with self.assertRaises(TypeError):
# pylint: disable=expression-not-assigned
KeySignature() == 'foo'
def test_preset_names(self):
self.assertEqual(KeySignature(name='G major').accidentals, ['F#'])
self.assertEqual(KeySignature(name='G minor').accidentals, ['Bb', 'Eb'])<|fim▁end|> | |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from distutils.core import setup
long_description = """
`termtool` helps you write subcommand-based command line tools in Python. It collects several Python libraries into a declarative syntax:
* `argparse`, the argument parsing module with subcommand support provided in the standard library in Python 2.7 and later.
* `prettytable <http://code.google.com/p/prettytable/>`_, an easy module for building tables of information.
* `progressbar <http://code.google.com/p/python-progressbar/>`_, a handy module for displaying progress bars.
* `logging`, the simple built-in module for logging messages.
"""
setup(
name='termtool',
version='1.1',
description='Declarative terminal tool programming',<|fim▁hole|> url='https://github.com/markpasc/termtool',
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: Unix',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
packages=[],
py_modules=['termtool'],
requires=['argparse', 'PrettyTable', 'progressbar'],
)<|fim▁end|> | author='Mark Paschal',
author_email='[email protected]', |
<|file_name|>testorm-2.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python2.6
# -*- coding: utf-8 -*-
<|fim▁hole|># it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Metagam is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metagam. If not, see <http://www.gnu.org/licenses/>.
import unittest
from concurrence import dispatch, Tasklet
import mg.test.testorm
from mg.core.memcached import Memcached
from mg.core.cass import CassandraPool
class TestORM_Storage2(mg.test.testorm.TestORM):
def setUp(self):
mg.test.testorm.TestORM.setUp(self)
self.db.storage = 2
self.db.app = "testapp"
def main():
mg.test.testorm.cleanup()
unittest.main()
if __name__ == "__main__":
dispatch(main)<|fim▁end|> | # This file is a part of Metagam project.
#
# Metagam is free software: you can redistribute it and/or modify |
<|file_name|>gtfs-update.js<|end_file_name|><|fim▁begin|>var http = require("http");
var path = require('path');
var fs = require('fs');
var mkdirp = require('mkdirp');
var pg = require('pg');
var exec = require('child_process').exec;
var sh = require("execSync");
var util = require('util');
var info = {"good_load":0,"bad_load":0,"no_data":0};
var password="transit";
var options = {
host: 'www.gtfs-data-exchange.com',
path: '/api/agencies'
};
http.get(options, function (http_res) {
//console.log(http_res);
var data = "";
http_res.on("data", function (chunk) {
data += chunk;
});
http_res.on("end", function () {
parseAgencies(JSON.parse(data).data);
});
})
.on('error', function(e) {
console.log(e);
console.log("Got error: " + e);
});
var parseAgencies = function(agencyList){
var validAgencyCount = 0;
var conString = "postgres://postgres:"+password+"@localhost:5432/gtfs";
var client = new pg.Client(conString);
client.connect(function(err) {
if(err) {
return console.error('Could not connect to database', err);
}
//console.log(result.rows[0].theTime);
//output: Tue Jan 15 2013 19:12:47 GMT-600 (CST)
agencyList.forEach(function(agency){
if(agency['is_official'] && agency['country'] == 'United States'){
//console.log( agency['dataexchange_id']);
            validAgencyCount++;
var options = {
host: 'www.gtfs-data-exchange.com',
path: '/api/agency?agency='+agency['dataexchange_id']
};
http.get(options, function (http_res) {
//console.log(http_res);
var data = "";
http_res.on("data", function (chunk) {
data += chunk;
});
http_res.on("end", function () {
mkdirp(path.resolve(__dirname,"../gtfs/")+"/"+agency['dataexchange_id'], function(err){
if (err) console.error(err)
//else console.log('created dir '+agency['dataexchange_id']);
});
if(agency["is_official"] && agency['country'] === 'United States'){
//console.log( "Agency id: " + agency['dataexchange_id'],"File URL: " + "")
}
parseAgent(JSON.parse(data).data,agency,client);
});
})
.on('error', function(e) {
console.log(e);
console.log("Got error: " + e);
});
}
})//end for each agency;
//client.end();
});
console.log("Num Agencies:"+validAgencyCount);
console.log("done");
}
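// Stream a file from the given URL to dest, invoking cb once the write
// stream has been flushed and closed.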
var download = function(url, dest, cb) {
var file = fs.createWriteStream(dest);
var request = http.get(url, function(response) {
response.pipe(file);
file.on('finish', function() {
file.close();
cb();
});
});
}
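// Shell out to gtfsdb-load (blocking, via execSync) to import a GTFS feed
// file into its own Postgres schema.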
var gtfsdbLoad = function(schemaName,destinationStream){
var result = sh.exec("gtfsdb-load --database_url postgresql://postgres:"+password+"@localhost/gtfs --schema="+schemaName+" --is_geospatial "+destinationStream);
console.log('return code ' + result.code);
console.log('stdout + stderr ' + result.stdout);
}
var createSchema = function(client,schemaName){
var query = 'CREATE SCHEMA "'+schemaName+'" ';
client.query(query, function(err, result) { if(err) { return console.error('error running query:',query, err); }})
}
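// POST the agency record to the local API's /agency/create/ endpoint.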
var writeAgency = function(agency){
var body = JSON.stringify(agency);
var post_options = {
hostname: "localhost",
port: 1337,
path: "/agency/create/",
method: "POST",
headers: {
"Content-Type": "application/json",
"Content-Length": body.length // Often this part is optional
}
}
var post_req = http.request(post_options, function(res) {
res.setEncoding('utf8');
res.on('data', function (chunk) {
console.log('Response: ' + chunk);
});
});
post_req.write(body);
post_req.end();
}
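// Check whether an agency with this dataexchange_id is already known to the
// local API; calls cb(true) if so, cb(false) otherwise.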
var inAPI = function(dataexchange_id,cb){
var options = {
host: 'localhost',
port: 1337,
path: '/agency/?dataexchange_id='+dataexchange_id
};
http.get(options, function (http_res) {
//console.log(http_res);
var data = "";
http_res.on("data", function (chunk) {
data += chunk;
});
http_res.on("end", function () {
output =JSON.parse(data)
if(output.length > 0){
cb(true);
}else{
cb(false);
}
});
})
.on('error', function(e) {
console.log(e);
console.log("Got error: " + e);
});
}
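// Sanity-check a previously loaded schema: if its routes table has usable
// geometry, register the agency through the API and count a good load;
// otherwise count it as a bad load.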
var testQuery = function(client,schemaName,agency,destinationStream){
var query = 'select ST_AsGeoJSON(geom) as geo,route_id from "'+schemaName+'".routes where geom is not null';
client.query(query, function(err, result) {
if(err) {
//return console.error('error running query:',query, err);
info.no_data++;
console.log(util.inspect(info,false,null));
//client.query('DROP SCHEMA "'+schemaName+'"');
return console.log(schemaName+":No Table");
}
if(result.rows && result.rows.length > 0){
//console.log('error check '+util.inspect(result,false,null)+' '+schemaName);
if(JSON.parse(result.rows[0].geo) !== null){
agency['current_datafile'] = schemaName;
agency.is_official = 1;
//console.log('Writing '+agency.dataexchange_id)
//console.log(util.inspect(agency,false,null));
//writeAgency(agency);
inAPI(agency.dataexchange_id,function(exists){
if(exists){
console.log(agency.dataexchange_id+" exists.")
}else{
console.log(agency.dataexchange_id+" doesn't exist.")
writeAgency(agency);
}
});
//console.log(schemaName+": "+JSON.parse(result.rows[0].geo).coordinates[0][0]);
info.good_load++;<|fim▁hole|> //console.log(schemaName+": No Geometry");
info.bad_load++;
//gtfsdbLoad(schemaName,destinationStream)
}
}else{
//client.query('DROP SCHEMA "'+schemaName+'"');
//console.log(schemaName+": No Rows");
info.bad_load++;
//gtfsdbLoad(schemaName,destinationStream)
}
//console.log(util.inspect(info,false,null));
})
}
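// Inspect only the agency's most recent datafile and verify the schema that
// was loaded for it.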
var parseAgent = function(agent,agency, client){
var i = 0;
var house = agency.dataexchange_id;
agent.datafiles.forEach(function(datafile){
if(i == 0){
var fileNameOrig = agent["datafiles"][0].file_url;
var nameSplit = fileNameOrig.substr(29);
var schemaName = fileNameOrig.substr(29).split(".")[0];
var destinationStream = path.resolve(__dirname,"../gtfs/" + house + "/" + nameSplit);
testQuery(client,schemaName,agency,destinationStream);
//createSchema(client,schemaName);
//gtfsdbLoad(schemaName,destinationStream)
//download(agent["datafiles"][0].file_url,destinationStream,function(){});
}
i++;
})
//console.log("agent")
return agent["datafiles"][0].file_url;
}<|fim▁end|> | }else{
//client.query('DROP SCHEMA "'+schemaName+'"'); |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>#![feature(const_fn)]
#![feature(alloc, allocator_api)]
#![no_std]
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(feature = "use_spin")]
extern crate spin;
extern crate alloc;
use alloc::alloc::{Alloc, AllocErr, Layout};
use core::alloc::{GlobalAlloc};
use core::mem;
#[cfg(feature = "use_spin")]
use core::ops::Deref;
use core::ptr::NonNull;
use hole::{Hole, HoleList};
#[cfg(feature = "use_spin")]
use spin::Mutex;
mod hole;
#[cfg(test)]
mod test;
/// A fixed size heap backed by a linked list of free memory blocks.
pub struct Heap {
bottom: usize,
size: usize,
holes: HoleList,
}
impl Heap {
/// Creates an empty heap. All allocate calls will return `None`.
pub const fn empty() -> Heap {
Heap {
bottom: 0,
size: 0,
holes: HoleList::empty(),
}
}
/// Initializes an empty heap
///
/// # Unsafety
///
/// This function must be called at most once and must only be used on an
/// empty heap.
pub unsafe fn init(&mut self, heap_bottom: usize, heap_size: usize) {
self.bottom = heap_bottom;
self.size = heap_size;
self.holes = HoleList::new(heap_bottom, heap_size);
}
/// Creates a new heap with the given `bottom` and `size`. The bottom address must be valid
/// and the memory in the `[heap_bottom, heap_bottom + heap_size)` range must not be used for
/// anything else. This function is unsafe because it can cause undefined behavior if the
/// given address is invalid.
pub unsafe fn new(heap_bottom: usize, heap_size: usize) -> Heap {
Heap {
bottom: heap_bottom,
size: heap_size,
holes: HoleList::new(heap_bottom, heap_size),
}
}
/// Allocates a chunk of the given size with the given alignment. Returns a pointer to the
/// beginning of that chunk if it was successful. Else it returns `None`.
/// This function scans the list of free memory blocks and uses the first block that is big
/// enough. The runtime is in O(n) where n is the number of free blocks, but it should be
/// reasonably fast for small allocations.
pub fn allocate_first_fit(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
let mut size = layout.size();
if size < HoleList::min_size() {
size = HoleList::min_size();
}
let size = align_up(size, mem::align_of::<Hole>());
let layout = Layout::from_size_align(size, layout.align()).unwrap();
self.holes.allocate_first_fit(layout)
}
/// Frees the given allocation. `ptr` must be a pointer returned
/// by a call to the `allocate_first_fit` function with identical size and alignment. Undefined
/// behavior may occur for invalid arguments, thus this function is unsafe.
///
/// This function walks the list of free memory blocks and inserts the freed block at the
/// correct place. If the freed block is adjacent to another free block, the blocks are merged
/// again. This operation is in `O(n)` since the list needs to be sorted by address.
pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) {
let mut size = layout.size();
if size < HoleList::min_size() {
size = HoleList::min_size();
}
let size = align_up(size, mem::align_of::<Hole>());
let layout = Layout::from_size_align(size, layout.align()).unwrap();
self.holes.deallocate(ptr, layout);
}
/// Returns the bottom address of the heap.
pub fn bottom(&self) -> usize {
self.bottom
}
/// Returns the size of the heap.
pub fn size(&self) -> usize {
self.size
}
/// Return the top address of the heap
pub fn top(&self) -> usize {
self.bottom + self.size
}
/// Extends the size of the heap by creating a new hole at the end
///
/// # Unsafety
///
/// The new extended area must be valid
pub unsafe fn extend(&mut self, by: usize) {
let top = self.top();
let layout = Layout::from_size_align(by, 1).unwrap();
self.holes
.deallocate(NonNull::new_unchecked(top as *mut u8), layout);
self.size += by;
}
}
unsafe impl Alloc for Heap {
unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
self.allocate_first_fit(layout)
}
unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
self.deallocate(ptr, layout)
}
}
#[cfg(feature = "use_spin")]
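/// A `Heap` wrapped in a spinlock so it can be shared between threads and,
/// through the `GlobalAlloc` impl below, installed as the
/// `#[global_allocator]` of a `no_std` binary.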
pub struct LockedHeap(Mutex<Heap>);
#[cfg(feature = "use_spin")]
impl LockedHeap {
/// Creates an empty heap. All allocate calls will return `None`.
pub const fn empty() -> LockedHeap {
LockedHeap(Mutex::new(Heap::empty()))
}
/// Creates a new heap with the given `bottom` and `size`. The bottom address must be valid
/// and the memory in the `[heap_bottom, heap_bottom + heap_size)` range must not be used for
/// anything else. This function is unsafe because it can cause undefined behavior if the
/// given address is invalid.
pub unsafe fn new(heap_bottom: usize, heap_size: usize) -> LockedHeap {
LockedHeap(Mutex::new(Heap {
bottom: heap_bottom,
size: heap_size,
holes: HoleList::new(heap_bottom, heap_size),
}))
}
}
#[cfg(feature = "use_spin")]
impl Deref for LockedHeap {
type Target = Mutex<Heap>;
fn deref(&self) -> &Mutex<Heap> {
&self.0
}
}
#[cfg(feature = "use_spin")]
unsafe impl GlobalAlloc for LockedHeap {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
self.0
.lock()
.allocate_first_fit(layout)
.ok()
.map_or(0 as *mut u8, |allocation| allocation.as_ptr())
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
self.0
.lock()
.deallocate(NonNull::new_unchecked(ptr), layout)
}
}
/// Align downwards. Returns the greatest x with alignment `align`
/// so that x <= addr. The alignment must be a power of 2.
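/// For example, `align_down(13, 8)` returns 8 and `align_down(16, 8)`
/// returns 16.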
pub fn align_down(addr: usize, align: usize) -> usize {
if align.is_power_of_two() {<|fim▁hole|> addr
} else {
panic!("`align` must be a power of 2");
}
}
/// Align upwards. Returns the smallest x with alignment `align`
/// so that x >= addr. The alignment must be a power of 2.
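/// For example, `align_up(13, 8)` returns 16, while already-aligned
/// addresses such as `align_up(16, 8)` are returned unchanged.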
pub fn align_up(addr: usize, align: usize) -> usize {
align_down(addr + align - 1, align)
}<|fim▁end|> | addr & !(align - 1)
} else if align == 0 { |
<|file_name|>help.go<|end_file_name|><|fim▁begin|>package repodelete
<|fim▁hole|>}
func GetArguments() string {
return ` repository pattern
Specifies the repositories that should be removed. You can use wildcards to specify multiple repositories.`
}<|fim▁end|> | var Usage = []string{"rt rdel <repository pattern>"}
func GetDescription() string {
return "Permanently delete repositories with all of their content from Artifactory." |
<|file_name|>package-info.java<|end_file_name|><|fim▁begin|>/*
* Copyright © 2014 - 2018 Leipzig University (Database Research Group)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0<|fim▁hole|> * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Contains implementations graph pattern matching on a single input graph.
*/
package org.gradoop.flink.model.impl.operators.matching.transactional.function;<|fim▁end|> | * |
<|file_name|>build.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>}<|fim▁end|> | // Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md>
fn main() {
println!("cargo:rustc-flags=-l windows.data.pdf"); |
<|file_name|>config.py<|end_file_name|><|fim▁begin|>import os
basedir = os.path.abspath(os.path.dirname(__file__))
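# Base configuration shared by every environment; the subclasses below
# override the database URL and logging setup per deployment target.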
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
MAIL_SERVER = os.environ.get('MAIL_SERVER', 'smtp.googlemail.com')
MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in \
['true', 'on', '1']
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
FLASKY_MAIL_SENDER = 'Flasky Admin <[email protected]>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
SSL_REDIRECT = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
FLASKY_POSTS_PER_PAGE = 20
FLASKY_FOLLOWERS_PER_PAGE = 50
FLASKY_COMMENTS_PER_PAGE = 30
FLASKY_SLOW_DB_QUERY_TIME = 0.5
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite://'
WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
@classmethod
def init_app(cls, app):
Config.init_app(app)
# email errors to the administrators
import logging
from logging.handlers import SMTPHandler
credentials = None
secure = None
if getattr(cls, 'MAIL_USERNAME', None) is not None:
credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
if getattr(cls, 'MAIL_USE_TLS', None):
secure = ()
mail_handler = SMTPHandler(
mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
fromaddr=cls.FLASKY_MAIL_SENDER,
toaddrs=[cls.FLASKY_ADMIN],
subject=cls.FLASKY_MAIL_SUBJECT_PREFIX + ' Application Error',
credentials=credentials,
secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
SSL_REDIRECT = True if os.environ.get('DYNO') else False
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# handle reverse proxy server headers
try:
from werkzeug.middleware.proxy_fix import ProxyFix
except ImportError:
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
class DockerConfig(ProductionConfig):
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
class UnixConfig(ProductionConfig):
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# log to syslog
import logging
from logging.handlers import SysLogHandler
syslog_handler = SysLogHandler()
syslog_handler.setLevel(logging.INFO)
app.logger.addHandler(syslog_handler)
config = {<|fim▁hole|> 'testing': TestingConfig,
'production': ProductionConfig,
'heroku': HerokuConfig,
'docker': DockerConfig,
'unix': UnixConfig,
'default': DevelopmentConfig
}<|fim▁end|> | 'development': DevelopmentConfig, |
<|file_name|>globals.py<|end_file_name|><|fim▁begin|># The Nexus software is licensed under the BSD 2-Clause license.
#<|fim▁hole|># http://opensource.org/licenses/bsd-license.php
import time
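# Rank and DeRank back the promote/demote commands. parts is the tokenised
# command (rank name, username, optional world), fromloc says whether it was
# issued in-game or from the console, and overriderank bypasses the
# permission checks where the caller is already trusted.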
def Rank(self, parts, fromloc, overriderank, server=None):
username = parts[2].lower()
year = time.strftime("%Y")
month = time.strftime("%m")
if username == "099":
        if not (int(year) > 2013 or (int(year) == 2013 and int(month) >= 4)):
return "099 may not be ranked until April 1st 2013."
if server:
factory = server
else:
factory = self.client.factory
if parts[1] == "builder":
if len(parts) > 3:
try:
world = factory.worlds[parts[3]]
except KeyError:
return ("Unknown world \"%s\"" %parts[3])
else:
if not server:
world = self.client.world
else:
return "You must provide a world"
if not server:
if not overriderank:
if not (world.isOp(self.client.username) or world.isOwner(self.client.username) or self.client.isModPlus()):
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not (world.isOp(parts[-1]) or world.isOwner(parts[-1]) or factory.isModPlus(parts[-1])):
return ("You are not a high enough rank!")
world.builders.add(username)
if username in factory.usernames:
user = factory.usernames[username]
if user.world == world:
user.sendBuilderUpdate()
return ("%s is now a Builder" % username)
elif parts[1] == "op":
if len(parts) > 3:
try:
world = factory.worlds[parts[3]]
except KeyError:
return ("Unknown world \"%s\"" %parts[3])
else:
if not server:
world = self.client.world
else:
return "You must provide a world"
if not server:
if not overriderank:
if not (world.isOwner(self.client.username) or self.client.isModPlus()):
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not (world.isOwner(parts[-1]) or factory.isModPlus(parts[-1])):
return ("You are not a high enough rank!")
world.ops.add(username)
return ("Opped %s" % username)
elif parts[1] == "worldowner":
if len(parts) > 3:
try:
world = factory.worlds[parts[3]]
except KeyError:
return ("Unknown world \"%s\"" %parts[3])
else:
if not server:
world = self.client.world
else:
return "You must provide a world"
if not server:
            if not (self.client.isWorldOwnerPlus() or overriderank):
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not (world.isOwner(parts[-1]) or factory.isModPlus(parts[-1])):
return ("You are not a high enough rank!")
        world.owner = username
return ("%s is now a World Owner." % username)
elif parts[1] == "member":
if not server:
if not self.client.isModPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isModPlus(parts[-1]):
return ("You are not a high enough rank!")
factory.members.add(username)
if username in factory.usernames:
factory.usernames[username].sendMemberUpdate()
return ("%s is now a Member." % username)
elif parts[1] == "globalbuilder":
if not server:
if not self.client.isModPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isModPlus(parts[-1]):
return ("You are not a high enough rank!")
factory.globalbuilders.add(username)
if username in factory.usernames:
factory.usernames[username].sendGlobalBuilderUpdate()
return ("%s is now a Global Builder." % username)
elif parts[1] == "mod":
if not server:
if not self.client.isDirectorPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isDirectorPlus(parts[-1]):
return ("You are not a high enough rank!")
factory.mods.add(username)
if username in factory.usernames:
factory.usernames[username].sendModUpdate()
return ("%s is now a Mod." % username)
elif parts[1] == "admin":
if not server:
if not self.client.isDirectorPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isDirectorPlus(parts[-1]):
return ("You are not a high enough rank!")
factory.admins.add(username)
if username in factory.usernames:
factory.usernames[username].sendAdminUpdate()
return ("%s is now an admin." % username)
elif parts[1] == "coder":
if not server:
if not self.client.isDirectorPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isDirectorPlus(parts[-1]):
return ("You are not a high enough rank!")
factory.coders.add(username)
if username in factory.usernames:
factory.usernames[username].sendCoderUpdate()
return ("%s is now a coder." % username)
elif parts[1] == "director":
if not server:
if not self.client.isHiddenPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isHiddenPlus(parts[-1]):
return ("You are not a high enough rank!")
factory.directors.add(username)
if username in factory.usernames:
factory.usernames[username].sendDirectorUpdate()
return ("%s is now an director." % username)
elif parts[1] == "hidden":
if not server:
if not self.client.isServerOwner():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isServerOwner(parts[-1]):
return ("You are not a high enough rank!")
factory.hidden.add(username)
if username in factory.usernames:
factory.usernames[username].sendHiddenUpdate()
return ("%s is now hidden." % username)
else:
return ("Unknown rank \"%s\""%parts[1])
def DeRank(self, parts, fromloc, overriderank, server=None):
username = parts[2].lower()
if server:
factory = server
else:
factory = self.client.factory
if parts[1] == "builder":
if len(parts) > 3:
try:
world = factory.worlds[parts[3]]
except KeyError:
return ("Unknown world \"%s\"" %parts[3])
else:
if not server:
world = self.client.world
else:
return "You must provide a world"
if not server:
if not overriderank:
if not (world.isOp(self.client.username) or world.isOwner(self.client.username) or self.client.isModPlus()):
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not (world.isOp(parts[-1]) or world.isOwner(parts[-1]) or factory.isModPlus(parts[-1])):
return ("You are not a high enough rank!")
try:
world.builders.remove(username)
except KeyError:
return ("%s is not a Builder." % username)
if username in factory.usernames:
user = factory.usernames[username]
if user.world == world:
user.sendBuilderUpdate()
return ("Removed %s as Builder" % username)
elif parts[1] == "op":
if len(parts) > 3:
try:
world = factory.worlds[parts[3]]
except KeyError:
return ("Unknown world \"%s\"" %parts[3])
else:
if not server:
world = self.client.world
else:
return "You must provide a world"
if not server:
if not overriderank:
if not (world.isOwner(self.client.username) or self.client.isModPlus()) and world != self.client.world:
return ("You are not a World Owner!")
else:
if fromloc != "console":
if not (world.isOwner(parts[-1]) or factory.isModPlus(parts[-1])):
return ("You are not a high enough rank!")
try:
world.ops.remove(username)
except KeyError:
return ("%s is not an op." % username)
if username in factory.usernames:
user = factory.usernames[username]
if user.world == world:
user.sendOpUpdate()
return ("Deopped %s" % username)
elif parts[1] == "worldowner":
if len(parts) > 3:
try:
world = factory.worlds[parts[3]]
except KeyError:
return ("Unknown world \"%s\"" %parts[3])
else:
if not server:
world = self.client.world
else:
return "You must provide a world"
if not server:
if not (world.isOwner(self.client.username) or self.client.isModPlus()) and world != self.client.world:
return ("You are not a World Owner!")
else:
if fromloc != "console":
if not (world.isOwner(parts[-1]) or factory.isModPlus(parts[-1])):
return ("You are not a high enough rank!")
            # Clear the owner on the world actually being targeted; the old
            # code always cleared the sender's current world, and its
            # KeyError guard could never fire on an attribute assignment.
            world.owner = ""
if username in factory.usernames:
user = factory.usernames[username]
if user.world == world:
user.sendOpUpdate()
return ("%s is no longer the World Owner." % username)
elif parts[1] == "member":
if not server:
if not self.client.isModPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isModPlus(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.members:
factory.members.remove(username)
else:
return ("No such member \"%s\"" % username.lower())
if username in factory.usernames:
factory.usernames[username].sendMemberUpdate()
return ("%s is no longer a Member." % username.lower())
elif parts[1] == "globalbuilder":
if not server:
if not self.client.isModPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isModPlus(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.globalbuilders:
factory.globalbuilders.remove(username)
else:
return ("No such global builder \"%s\"" % username.lower())
if username in factory.usernames:
factory.usernames[username].sendGlobalBuilderUpdate()
return ("%s is no longer a Member." % username.lower())
elif parts[1] == "mod":
if not server:
if not self.client.isDirectorPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isDirectorPlus(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.mods:
factory.mods.remove(username)
else:
return ("No such mod \"%s\"" % username.lower())
if username in factory.usernames:
factory.usernames[username].sendModUpdate()
return ("%s is no longer a Mod." % username.lower())
elif parts[1] == "admin":
if not server:
if not self.client.isDirectorPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isDirectorPlus(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.admins:
factory.admins.remove(username)
if username in factory.usernames:
factory.usernames[username].sendAdminUpdate()
return ("%s is no longer an admin." % username.lower())
else:
return ("No such admin \"%s\""% username.lower())
elif parts[1] == "coder":
if not server:
if not self.client.isDirectorPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isDirectorPlus(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.coders:
factory.coders.remove(username)
if username in factory.usernames:
factory.usernames[username].sendCoderUpdate()
return ("%s is no longer a coder." % username.lower())
else:
return ("No such admin \"%s\""% username.lower())
elif parts[1] == "director":
if not server:
if not self.client.isHiddenPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isHiddenPlus(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.directors:
factory.directors.remove(username)
if username in factory.usernames:
factory.usernames[username].sendDirectorUpdate()
return ("%s is no longer an director." % username.lower())
else:
return ("No such director \"%s\""% username.lower())
elif parts[1] == "hidden":
if not server:
if not self.client.isServerOwner():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isServerOwner(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.hidden:
factory.hidden.remove(username)
if username in factory.usernames:
factory.usernames[username].sendHiddenUpdate()
return ("%s is no longer hidden." % username.lower())
else:
return ("No such hidden \"%s\""% username.lower())
else:
return ("Unknown rank \"%s\""%parts[1])
def Spec(self, username, fromloc, overriderank, server=None):
if server:
factory = server
else:
factory = self.client.factory
        # Staff (directors, coders, admins, mods) can never be made spectators.
        if (username in factory.directors or username in factory.coders or
                username in factory.admins or username in factory.mods):
            return ("You cannot make staff a spec!")
factory.spectators.add(username)
if username in factory.usernames:
factory.usernames[username].sendSpectatorUpdate()
return ("%s is now a spec." % username)
def Staff(self, server=None):
Temp = []
if server:
factory = server
else:
factory = self.client.factory
if len(factory.directors):
Temp.append (["Directors:"] + list(factory.directors))
if len(factory.coders):
Temp.append (["Coders:"] + list(factory.coders))
if len(factory.admins):
Temp.append (["Admins:"] + list(factory.admins))
if len(factory.mods):
Temp.append (["Mods:"] + list(factory.mods))
return Temp
def Credits(self=None):
Temp = []
Temp.append ("Thanks to the following people for making Arc possible...")
Temp.append ("Mojang Specifications (Minecraft): Notch, dock, ez, ...")
Temp.append ("Creator: aera (Myne and The Archives)")
Temp.append ("Devs (Arc/The Archives): Adam01, gdude2002 (arbot), NotMeh, revenant,")
Temp.append ("Devs (iCraft): AndrewPH, destroyerx1, Dwarfy, erronjason, eugo (Knossus), goober, gothfox, ntfwc, Saanix, sk8rjwd, tehcid, Varriount, willempiee")
Temp.append ("Devs (blockBox): fizyplankton, tyteen4a03, UberFoX")
Temp.append ("Others: 099, 2k10, Akai, Antoligy, Aquaskys, aythrea, Bidoof_King, Bioniclegenius (Red_Link), blahblahbal, BlueProtoman, CDRom, fragmer, GLaDOS (Cortana), iMak, Kelraider, MAup, MystX, PyroPyro, Rils, Roadcrosser, Roujo, setveen, TheUndeadFish, TkTech, Uninspired")
return Temp
def makefile(filename):
import os
dir = os.path.dirname(filename)
try:
os.stat(dir)
    except OSError:
try:
os.mkdir(dir)
except OSError:
pass
if not os.path.exists(filename):
with open(filename, "w") as f:
f.write("")
del os
def makedatfile(filename):
import os
dir = os.path.dirname(filename)
try:
os.stat(dir)
    except OSError:
try:
os.mkdir(dir)
except OSError:
pass
if not os.path.exists(filename):
with open(filename, "w") as f:
f.write("(dp1\n.")
del os
    def checkos(self):
        # The old version assigned to a local named "os", shadowing the os
        # module, so os.uname() raised and every platform reported "Windows".
        import os
        try:
            if os.uname()[0] == "Darwin":
                osname = "Mac"
            else:
                osname = "Linux"
        except AttributeError:
            # os.uname() is unavailable on Windows.
            osname = "Windows"
        return osname<|fim▁end|>
# If you did not, you can find one at the following link.
# |
<|file_name|>content.js<|end_file_name|><|fim▁begin|>'use strict';
<|fim▁hole|> */
var hbs = require('express-hbs');
function content(options) {
return new hbs.handlebars.SafeString(this.html || '');
}
module.exports = content;
// downsize = Tag-safe truncation for HTML and XML. Works by word!<|fim▁end|> | /**
* Module dependencies |
<|file_name|>BlockStructure.java<|end_file_name|><|fim▁begin|>package fun.guruqu.portal.structures;
public class BlockStructure {
/**
* @param args<|fim▁hole|>
}<|fim▁end|> | */
public static void main(String[] args) {
} |
<|file_name|>migrations.js<|end_file_name|><|fim▁begin|>// Anytime you change the schema of one of the collection in a non-backward
// compatible way you have to write a migration in this file using the following
// API:
//
// Migrations.add(name, migrationCallback, optionalOrder);
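//
// For example, a hypothetical migration registered through this API (the
// migration name and field names are made up for illustration) looks like:
//
//   Migrations.add('example-rename-field', () => {
//     Boards.update({}, { $rename: { oldField: 'newField' } }, noValidateMulti);
//   });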
// Note that we have extra migrations defined in `sandstorm.js` that are
// exclusive to Sandstorm and shouldn’t be executed in the general case.
// XXX I guess if we had ES6 modules we could
// `import { isSandstorm } from sandstorm.js` and define the migration here as
// well, but for now I want to avoid defining too many globals.
// In the context of migration functions we don't want to validate database
// mutation queries against the current (ie, latest) collection schema. Doing
// that would work at the time we write the migration but would break in the
// future when we'll update again the concerned collection schema.
//
// To prevent this bug we always have to disable the schema validation and<|fim▁hole|>// argument transformations. We generally use the shorthandlers defined below.
const noValidate = {
validate: false,
filter: false,
autoConvert: false,
removeEmptyStrings: false,
getAutoValues: false,
};
const noValidateMulti = { ...noValidate, multi: true };
Migrations.add('board-background-color', () => {
const defaultColor = '#16A085';
Boards.update({
background: {
$exists: false,
},
}, {
$set: {
background: {
type: 'color',
color: defaultColor,
},
},
}, noValidateMulti);
});
Migrations.add('lowercase-board-permission', () => {
['Public', 'Private'].forEach((permission) => {
Boards.update(
{ permission },
{ $set: { permission: permission.toLowerCase() } },
noValidateMulti
);
});
});
// Security migration: see https://github.com/wekan/wekan/issues/99
Migrations.add('change-attachments-type-for-non-images', () => {
const newTypeForNonImage = 'application/octet-stream';
Attachments.find().forEach((file) => {
if (!file.isImage()) {
Attachments.update(file._id, {
$set: {
'original.type': newTypeForNonImage,
'copies.attachments.type': newTypeForNonImage,
},
}, noValidate);
}
});
});
Migrations.add('card-covers', () => {
Cards.find().forEach((card) => {
const cover = Attachments.findOne({ cardId: card._id, cover: true });
if (cover) {
Cards.update(card._id, {$set: {coverId: cover._id}}, noValidate);
}
});
Attachments.update({}, {$unset: {cover: ''}}, noValidateMulti);
});
Migrations.add('use-css-class-for-boards-colors', () => {
const associationTable = {
'#27AE60': 'nephritis',
'#C0392B': 'pomegranate',
'#2980B9': 'belize',
'#8E44AD': 'wisteria',
'#2C3E50': 'midnight',
'#E67E22': 'pumpkin',
};
Boards.find().forEach((board) => {
const oldBoardColor = board.background.color;
const newBoardColor = associationTable[oldBoardColor];
Boards.update(board._id, {
$set: { color: newBoardColor },
$unset: { background: '' },
}, noValidate);
});
});
Migrations.add('denormalize-star-number-per-board', () => {
Boards.find().forEach((board) => {
const nStars = Users.find({'profile.starredBoards': board._id}).count();
Boards.update(board._id, {$set: {stars: nStars}}, noValidate);
});
});
// We want to keep a trace of former members so we can efficiently publish their
// infos in the general board publication.
Migrations.add('add-member-isactive-field', () => {
Boards.find({}, {fields: {members: 1}}).forEach((board) => {
const allUsersWithSomeActivity = _.chain(
Activities.find({ boardId: board._id }, { fields:{ userId:1 }}).fetch())
.pluck('userId')
.uniq()
.value();
const currentUsers = _.pluck(board.members, 'userId');
const formerUsers = _.difference(allUsersWithSomeActivity, currentUsers);
const newMemberSet = [];
board.members.forEach((member) => {
member.isActive = true;
newMemberSet.push(member);
});
formerUsers.forEach((userId) => {
newMemberSet.push({
userId,
isAdmin: false,
isActive: false,
});
});
Boards.update(board._id, {$set: {members: newMemberSet}}, noValidate);
});
});<|fim▁end|> | |
<|file_name|>DifferentNamespacesTestCases.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 and Eclipse Distribution License v. 1.0
* which accompanies this distribution.
* The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* Contributors:
* Denise Smith - 2.4
******************************************************************************/
package org.eclipse.persistence.testing.jaxb.json.namespaces;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.xml.bind.PropertyException;
import org.eclipse.persistence.jaxb.JAXBContextProperties;
import org.eclipse.persistence.jaxb.MarshallerProperties;
import org.eclipse.persistence.jaxb.UnmarshallerProperties;
import org.eclipse.persistence.testing.jaxb.json.JSONMarshalUnmarshalTestCases;
public class DifferentNamespacesTestCases extends JSONMarshalUnmarshalTestCases{
private final static String JSON_RESOURCE = "org/eclipse/persistence/testing/jaxb/json/namespaces/person.json";
private final static String JSON_WRITE_RESOURCE = "org/eclipse/persistence/testing/jaxb/json/namespaces/person_different.json";
public DifferentNamespacesTestCases(String name) throws Exception {
super(name);
setControlJSON(JSON_RESOURCE);
setWriteControlJSON(JSON_WRITE_RESOURCE);
setClasses(new Class[]{Person.class});
}
protected Object getControlObject() {
Person p = new Person();
p.setId(10);
p.setFirstName("Jill");
p.setLastName("MacDonald");
List<String> middleNames = new ArrayList<String>();
middleNames.add("Jane");
middleNames.add("Janice");
p.setMiddleNames(middleNames);
Address addr = new Address();
addr.setStreet("The Street");
addr.setCity("Ottawa");
p.setAddress(addr);
<|fim▁hole|>
public void setUp() throws Exception{
super.setUp();
Map<String, String> marshalNamespaceMap = new HashMap<String, String>();
marshalNamespaceMap.put("namespace0", "aaa");
marshalNamespaceMap.put("namespace1", "bbb");
marshalNamespaceMap.put("namespace2", "ccc");
marshalNamespaceMap.put("namespace3", "ddd");
Map<String, String> unmarshalNamespaceMap = new HashMap<String, String>();
unmarshalNamespaceMap.put("namespace0", "ns0");
unmarshalNamespaceMap.put("namespace1", "ns1");
unmarshalNamespaceMap.put("namespace2", "ns2");
unmarshalNamespaceMap.put("namespace3", "ns3");
try{
jsonMarshaller.setProperty(MarshallerProperties.NAMESPACE_PREFIX_MAPPER, marshalNamespaceMap);
jsonUnmarshaller.setProperty(UnmarshallerProperties.JSON_NAMESPACE_PREFIX_MAPPER, unmarshalNamespaceMap);
}catch(PropertyException e){
e.printStackTrace();
fail("An error occurred setting properties during setup.");
}
}
public Map getProperties(){
Map props = new HashMap();
props.put(JAXBContextProperties.JSON_ATTRIBUTE_PREFIX, "@");
return props;
}
}<|fim▁end|> |
return p;
}
|
<|file_name|>ico_task.js<|end_file_name|><|fim▁begin|>/**
* @file Generate ico image files.
* @memberof module:ci/tasks
* @function icoTask
* @param grunt
* @param {object} config - Task configuration.
* @param {function} callback - Callback when done.<|fim▁hole|> */
"use strict";
var ico = require('../../lib/commands/ico');
module.exports = function (grunt, config, callback) {
  ico(config.src, config.dest, {}, function (err) {
    if (!err) {
      grunt.log.writeln('ICO file created: %s', config.dest);
    }
    // Invoke the callback unconditionally; previously it was only called on
    // success, so a failing task would never complete.
    callback(err);
  });
};<|fim▁end|> | * 
<|file_name|>persistence.py<|end_file_name|><|fim▁begin|># orm/persistence.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
import operator
from itertools import groupby
from .. import sql, util, exc as sa_exc, schema
from . import attributes, sync, exc as orm_exc, evaluator
from .base import _state_mapper, state_str, _attr_as_key
from ..sql import expression
from . import loading
def save_obj(base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
This is called within the context of a UOWTransaction during a
flush operation, given a list of states to be flushed. The
base mapper in an inheritance hierarchy handles the inserts/
updates for all descendant mappers.
"""
# if batch=false, call _save_obj separately for each object
if not single and not base_mapper.batch:
for state in _sort_states(states):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
states_to_insert, states_to_update = _organize_states_for_save(
base_mapper,
states,
uowtransaction)
cached_connections = _cached_connection_dict(base_mapper)
for table, mapper in base_mapper._sorted_tables.items():
insert = _collect_insert_commands(base_mapper, uowtransaction,
table, states_to_insert)
update = _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update)
if update:
_emit_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
if insert:
_emit_insert_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, insert)
_finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""Issue UPDATE statements on behalf of a relationship() which
specifies post_update.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_update = _organize_states_for_post_update(
base_mapper,
states, uowtransaction)
for table, mapper in base_mapper._sorted_tables.items():
update = _collect_post_update_commands(base_mapper, uowtransaction,
table, states_to_update,
post_update_cols)
if update:
_emit_post_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
"""Issue ``DELETE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
flush operation.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = _organize_states_for_delete(
base_mapper,
states,
uowtransaction)
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
mapper = table_to_mapper[table]
_emit_delete_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, delete)
for state, state_dict, mapper, has_identity, connection \
in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for INSERT or
UPDATE.
This includes splitting out into distinct lists for
each, calling before_insert/before_update, obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state,
and the identity flag.
"""
states_to_insert = []
states_to_update = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
has_identity = bool(state.key)
instance_key = state.key or mapper._identity_key_from_state(state)
row_switch = None
# call before_XXX extensions
if not has_identity:
mapper.dispatch.before_insert(mapper, connection, state)
else:
mapper.dispatch.before_update(mapper, connection, state)
if mapper._validate_polymorphic_identity:
mapper._validate_polymorphic_identity(mapper, state, dict_)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if not has_identity and \
instance_key in uowtransaction.session.identity_map:
instance = \
uowtransaction.session.identity_map[instance_key]
existing = attributes.instance_state(instance)
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
"New instance %s with identity key %s conflicts "
"with persistent instance %s" %
(state_str(state), instance_key,
state_str(existing)))
base_mapper._log_debug(
"detected row switch for identity %s. "
"will update %s, remove %s from "
"transaction", instance_key,
state_str(state), state_str(existing))
# remove the "delete" flag from the existing element
uowtransaction.remove_state_actions(existing)
row_switch = existing
if not has_identity and not row_switch:
states_to_insert.append(
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
else:
states_to_update.append(
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
return states_to_insert, states_to_update
def _organize_states_for_post_update(base_mapper, states,
uowtransaction):
"""Make an initial pass across a set of states for UPDATE
corresponding to post_update.
This includes obtaining key information for each state
including its dictionary, mapper, the connection to use for
the execution per state.
"""
return list(_connections_for_states(base_mapper, uowtransaction,
states))
def _organize_states_for_delete(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for DELETE.
This includes calling out before_delete and obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state.
"""
states_to_delete = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
mapper.dispatch.before_delete(mapper, connection, state)
states_to_delete.append((state, dict_, mapper,
bool(state.key), connection))
return states_to_delete
def _collect_insert_commands(base_mapper, uowtransaction, table,
states_to_insert):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
insert = []
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
value_params = {}
has_all_pks = True
has_all_defaults = True
for col in mapper._cols_by_table[table]:
if col is mapper.version_id_col and \
mapper.version_id_generator is not False:
val = mapper.version_id_generator(None)
params[col.key] = val
else:
# pull straight from the dict for
# pending objects
prop = mapper._columntoproperty[col]
value = state_dict.get(prop.key, None)
if value is None:
if col in pks:
has_all_pks = False
elif col.default is None and \
col.server_default is None:
params[col.key] = value
elif col.server_default is not None and \
mapper.base_mapper.eager_defaults:
has_all_defaults = False
elif isinstance(value, sql.ClauseElement):
value_params[col] = value
else:
params[col.key] = value
insert.append((state, state_dict, params, mapper,
connection, value_params, has_all_pks,
has_all_defaults))
return insert
def _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update):
"""Identify sets of values to use in UPDATE statements for a
list of states.
This function works intricately with the history system
to determine exactly what values should be updated
as well as how the row should be matched within an UPDATE
statement. Includes some tricky scenarios where the primary
key of an object might have been changed.
"""
update = []
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
value_params = {}
hasdata = hasnull = False
for col in mapper._cols_by_table[table]:
if col is mapper.version_id_col:
params[col._label] = \
mapper._get_committed_state_attr_by_column(
row_switch or state,
row_switch and row_switch.dict
or state_dict,
col)
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE
)
if history.added:
params[col.key] = history.added[0]
hasdata = True
else:
if mapper.version_id_generator is not False:
val = mapper.version_id_generator(params[col._label])
params[col.key] = val
# HACK: check for history, in case the
# history is only
# in a different table than the one
# where the version_id_col is.
for prop in mapper._columntoproperty.values():
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
hasdata = True
else:
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
if isinstance(history.added[0],
sql.ClauseElement):
value_params[col] = history.added[0]
else:
value = history.added[0]
params[col.key] = value
if col in pks:
if history.deleted and \
not row_switch:
# if passive_updates and sync detected
# this was a pk->pk sync, use the new
# value to locate the row, since the
# DB would already have set this
if ("pk_cascaded", state, col) in \
uowtransaction.attributes:
value = history.added[0]
params[col._label] = value
else:
# use the old value to
# locate the row
value = history.deleted[0]
params[col._label] = value
hasdata = True
else:
# row switch logic can reach us here
# remove the pk from the update params
# so the update doesn't
# attempt to include the pk in the
# update statement
del params[col.key]
value = history.added[0]
params[col._label] = value
if value is None:
hasnull = True
else:
hasdata = True
elif col in pks:
value = state.manager[prop.key].impl.get(
state, state_dict)
if value is None:
hasnull = True
params[col._label] = value
if hasdata:
if hasnull:
raise orm_exc.FlushError(
"Can't update table "
"using NULL for primary "
"key value")
update.append((state, state_dict, params, mapper,
connection, value_params))
return update
def _collect_post_update_commands(base_mapper, uowtransaction, table,
states_to_update, post_update_cols):
"""Identify sets of values to use in UPDATE statements for a
list of states within a post_update operation.
"""
update = []
for state, state_dict, mapper, connection in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
for col in mapper._cols_by_table[table]:
if col in pks:
params[col._label] = \
mapper._get_state_attr_by_column(
state,
state_dict, col)
elif col in post_update_cols:
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
value = history.added[0]
params[col.key] = value
hasdata = True
if hasdata:
update.append((state, state_dict, params, mapper,
connection))
return update
def _collect_delete_commands(base_mapper, uowtransaction, table,
states_to_delete):
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
delete = util.defaultdict(list)
for state, state_dict, mapper, has_identity, connection \
in states_to_delete:
if not has_identity or table not in mapper._pks_by_table:
continue
params = {}
delete[connection].append(params)
for col in mapper._pks_by_table[table]:
params[col.key] = \
value = \
mapper._get_state_attr_by_column(
state, state_dict, col)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table "
"using NULL for primary "
"key value")
if mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col):
params[mapper.version_id_col.key] = \
mapper._get_committed_state_attr_by_column(
state, state_dict,
mapper.version_id_col)
return delete
def _emit_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
needs_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
if needs_version_id:
clause.clauses.append(mapper.version_id_col ==\
sql.bindparam(mapper.version_id_col._label,
type_=mapper.version_id_col.type))
stmt = table.update(clause)
if mapper.base_mapper.eager_defaults:
stmt = stmt.return_defaults()
elif mapper.version_id_col is not None:
stmt = stmt.return_defaults(mapper.version_id_col)
return stmt
statement = base_mapper._memo(('update', table), update_stmt)
rows = 0
for state, state_dict, params, mapper, \
connection, value_params in update:
if value_params:
c = connection.execute(
statement.values(value_params),
params)
else:
c = cached_connections[connection].\
execute(statement, params)
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
if connection.dialect.supports_sane_rowcount:
if rows != len(update):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched." %
(table.description, len(update), rows))
elif needs_version_id:
util.warn("Dialect %s does not support updated rowcount "
"- versioning cannot be verified." %
c.dialect.dialect_description,
stacklevel=12)
def _emit_insert_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, insert):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
statement = base_mapper._memo(('insert', table), table.insert)
for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
records in groupby(insert,
lambda rec: (rec[4],
list(rec[2].keys()),
bool(rec[5]),
rec[6], rec[7])
):
if \
(
has_all_defaults
or not base_mapper.eager_defaults
or not connection.dialect.implicit_returning
) and has_all_pks and not hasvalue:
records = list(records)
multiparams = [rec[2] for rec in records]
c = cached_connections[connection].\
execute(statement, multiparams)
for (state, state_dict, params, mapper_rec,
conn, value_params, has_all_pks, has_all_defaults), \
last_inserted_params in \
zip(records, c.context.compiled_parameters):
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
last_inserted_params,
value_params)
else:
if not has_all_defaults and base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
for state, state_dict, params, mapper_rec, \
connection, value_params, \
has_all_pks, has_all_defaults in records:
if value_params:
result = connection.execute(
statement.values(value_params),
params)
else:
result = cached_connections[connection].\
execute(statement, params)
primary_key = result.context.inserted_primary_key<|fim▁hole|> if primary_key is not None:
# set primary key attributes
for pk, col in zip(primary_key,
mapper._pks_by_table[table]):
prop = mapper_rec._columntoproperty[col]
if state_dict.get(prop.key) is None:
# TODO: would rather say:
#state_dict[prop.key] = pk
mapper_rec._set_state_attr_by_column(
state,
state_dict,
col, pk)
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
result,
result.context.compiled_parameters[0],
value_params)
def _emit_post_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
return table.update(clause)
statement = base_mapper._memo(('post_update', table), update_stmt)
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
update, lambda rec: (rec[4], list(rec[2].keys()))
):
connection = key[0]
multiparams = [params for state, state_dict,
params, mapper, conn in grouper]
cached_connections[connection].\
execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
mapper, table, delete):
"""Emit DELETE statements corresponding to value lists collected
by _collect_delete_commands()."""
need_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
def delete_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col.key, type_=col.type))
if need_version_id:
clause.clauses.append(
mapper.version_id_col ==
sql.bindparam(
mapper.version_id_col.key,
type_=mapper.version_id_col.type
)
)
return table.delete(clause)
for connection, del_objects in delete.items():
statement = base_mapper._memo(('delete', table), delete_stmt)
connection = cached_connections[connection]
if need_version_id:
# TODO: need test coverage for this [ticket:1761]
if connection.dialect.supports_sane_rowcount:
rows = 0
# execute deletes individually so that versioned
# rows can be verified
for params in del_objects:
c = connection.execute(statement, params)
rows += c.rowcount
if rows != len(del_objects):
raise orm_exc.StaleDataError(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched." %
(table.description, len(del_objects), c.rowcount)
)
else:
util.warn(
"Dialect %s does not support deleted rowcount "
"- versioning cannot be verified." %
connection.dialect.dialect_description,
stacklevel=12)
connection.execute(statement, del_objects)
else:
connection.execute(statement, del_objects)
def _finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert + \
states_to_update:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
[p.key for p in mapper._readonly_props
if p.expire_on_flush or p.key not in state.dict]
)
if readonly:
state._expire_attributes(state.dict, readonly)
# if eager_defaults option is enabled, load
# all expired cols. Else if we have a version_id_col, make sure
# it isn't expired.
toload_now = []
if base_mapper.eager_defaults and state.unloaded:
toload_now.extend(state.unloaded)
elif mapper.version_id_col is not None and \
mapper.version_id_generator is False:
prop = mapper._columntoproperty[mapper.version_id_col]
if prop.key in state.unloaded:
toload_now.extend([prop.key])
if toload_now:
state.key = base_mapper._identity_key_from_state(state)
loading.load_on_ident(
uowtransaction.session.query(base_mapper),
state.key, refresh_state=state,
only_load_props=toload_now)
# call after_XXX extensions
if not has_identity:
mapper.dispatch.after_insert(mapper, connection, state)
else:
mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
state, dict_, result, params, value_params):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
prefetch_cols = result.context.prefetch_cols
postfetch_cols = result.context.postfetch_cols
returning_cols = result.context.returning_cols
if mapper.version_id_col is not None:
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
if returning_cols:
row = result.context.returned_defaults
if row is not None:
for col in returning_cols:
if col.primary_key:
continue
mapper._set_state_attr_by_column(state, dict_, col, row[col])
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
mapper._set_state_attr_by_column(state, dict_, c, params[c.key])
if postfetch_cols:
state._expire_attributes(state.dict,
[mapper._columntoproperty[c].key
for c in postfetch_cols if c in
mapper._columntoproperty]
)
# synchronize newly inserted ids from one table to the next
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
sync.populate(state, m, state, m,
equated_pairs,
uowtransaction,
mapper.passive_updates)
def _connections_for_states(base_mapper, uowtransaction, states):
"""Return an iterator of (state, state.dict, mapper, connection).
The states are sorted according to _sort_states, then paired
with the connection they should be using for the given
unit of work transaction.
"""
# if session has a connection callable,
# organize individual states with the connection
# to use for update
if uowtransaction.session.connection_callable:
connection_callable = \
uowtransaction.session.connection_callable
else:
connection = None
connection_callable = None
for state in _sort_states(states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
elif not connection:
connection = uowtransaction.transaction.connection(
base_mapper)
mapper = _state_mapper(state)
yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
# dictionary of connection->connection_with_cache_options.
return util.PopulateDict(
lambda conn: conn.execution_options(
compiled_cache=base_mapper._compiled_cache
))
def _sort_states(states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
return sorted(pending, key=operator.attrgetter("insert_order")) + \
sorted(persistent, key=lambda q: q.key[1])
class BulkUD(object):
"""Handle bulk update and deletes via a :class:`.Query`."""
def __init__(self, query):
self.query = query.enable_eagerloads(False)
@property
def session(self):
return self.query.session
@classmethod
def _factory(cls, lookup, synchronize_session, *arg):
try:
klass = lookup[synchronize_session]
except KeyError:
raise sa_exc.ArgumentError(
"Valid strategies for session synchronization "
"are %s" % (", ".join(sorted(repr(x)
for x in lookup))))
else:
return klass(*arg)
def exec_(self):
self._do_pre()
self._do_pre_synchronize()
self._do_exec()
self._do_post_synchronize()
self._do_post()
def _do_pre(self):
query = self.query
self.context = context = query._compile_context()
if len(context.statement.froms) != 1 or \
not isinstance(context.statement.froms[0], schema.Table):
self.primary_table = query._only_entity_zero(
"This operation requires only one Table or "
"entity be specified as the target."
).mapper.local_table
else:
self.primary_table = context.statement.froms[0]
session = query.session
if query._autoflush:
session._autoflush()
def _do_pre_synchronize(self):
pass
def _do_post_synchronize(self):
pass
class BulkEvaluate(BulkUD):
"""BulkUD which does the 'evaluate' method of session state resolution."""
def _additional_evaluators(self, evaluator_compiler):
pass
def _do_pre_synchronize(self):
query = self.query
try:
evaluator_compiler = evaluator.EvaluatorCompiler()
if query.whereclause is not None:
eval_condition = evaluator_compiler.process(
query.whereclause)
else:
def eval_condition(obj):
return True
self._additional_evaluators(evaluator_compiler)
except evaluator.UnevaluatableError:
raise sa_exc.InvalidRequestError(
"Could not evaluate current criteria in Python. "
"Specify 'fetch' or False for the "
"synchronize_session parameter.")
target_cls = query._mapper_zero().class_
#TODO: detect when the where clause is a trivial primary key match
self.matched_objects = [
obj for (cls, pk), obj in
query.session.identity_map.items()
if issubclass(cls, target_cls) and
eval_condition(obj)]
class BulkFetch(BulkUD):
"""BulkUD which does the 'fetch' method of session state resolution."""
def _do_pre_synchronize(self):
query = self.query
session = query.session
select_stmt = self.context.statement.with_only_columns(
self.primary_table.primary_key)
self.matched_rows = session.execute(
select_stmt,
params=query._params).fetchall()
class BulkUpdate(BulkUD):
"""BulkUD which handles UPDATEs."""
def __init__(self, query, values):
super(BulkUpdate, self).__init__(query)
self.query._no_select_modifiers("update")
self.values = values
@classmethod
def factory(cls, query, synchronize_session, values):
return BulkUD._factory({
"evaluate": BulkUpdateEvaluate,
"fetch": BulkUpdateFetch,
False: BulkUpdate
}, synchronize_session, query, values)
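    # For orientation, this factory is reached via the public Query API, e.g.
    # (hypothetical model/session names):
    #
    #   session.query(User).filter(User.name == 'ed').update(
    #       {'name': 'mary'}, synchronize_session='evaluate')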
def _do_exec(self):
update_stmt = sql.update(self.primary_table,
self.context.whereclause, self.values)
self.result = self.query.session.execute(
update_stmt, params=self.query._params)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
"""BulkUD which handles DELETEs."""
def __init__(self, query):
super(BulkDelete, self).__init__(query)
self.query._no_select_modifiers("delete")
@classmethod
def factory(cls, query, synchronize_session):
return BulkUD._factory({
"evaluate": BulkDeleteEvaluate,
"fetch": BulkDeleteFetch,
False: BulkDelete
}, synchronize_session, query)
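    # Likewise reached via the public Query API, e.g. (hypothetical names):
    #
    #   session.query(User).filter(User.name == 'ed').delete(
    #       synchronize_session='fetch')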
def _do_exec(self):
delete_stmt = sql.delete(self.primary_table,
self.context.whereclause)
self.result = self.query.session.execute(delete_stmt,
params=self.query._params)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_delete(self)
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
"""BulkUD which handles UPDATEs using the "evaluate"
method of session resolution."""
def _additional_evaluators(self, evaluator_compiler):
self.value_evaluators = {}
for key, value in self.values.items():
key = _attr_as_key(key)
self.value_evaluators[key] = evaluator_compiler.process(
expression._literal_as_binds(value))
def _do_post_synchronize(self):
session = self.query.session
states = set()
evaluated_keys = list(self.value_evaluators.keys())
for obj in self.matched_objects:
state, dict_ = attributes.instance_state(obj),\
attributes.instance_dict(obj)
# only evaluate unmodified attributes
to_evaluate = state.unmodified.intersection(
evaluated_keys)
for key in to_evaluate:
dict_[key] = self.value_evaluators[key](obj)
state._commit(dict_, list(to_evaluate))
# expire attributes with pending changes
# (there was no autoflush, so they are overwritten)
state._expire_attributes(dict_,
set(evaluated_keys).
difference(to_evaluate))
states.add(state)
session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
"""BulkUD which handles DELETEs using the "evaluate"
method of session resolution."""
def _do_post_synchronize(self):
self.query.session._remove_newly_deleted(
[attributes.instance_state(obj)
for obj in self.matched_objects])
class BulkUpdateFetch(BulkFetch, BulkUpdate):
"""BulkUD which handles UPDATEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
states = set([
attributes.instance_state(session.identity_map[identity_key])
for identity_key in [
target_mapper.identity_key_from_primary_key(
list(primary_key))
for primary_key in self.matched_rows
]
if identity_key in session.identity_map
])
attrib = [_attr_as_key(k) for k in self.values]
for state in states:
session._expire_state(state, attrib)
session._register_altered(states)
class BulkDeleteFetch(BulkFetch, BulkDelete):
"""BulkUD which handles DELETEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
for primary_key in self.matched_rows:
# TODO: inline this and call remove_newly_deleted
# once
identity_key = target_mapper.identity_key_from_primary_key(
list(primary_key))
if identity_key in session.identity_map:
session._remove_newly_deleted(
[attributes.instance_state(
session.identity_map[identity_key]
)]
)<|fim▁end|> | |
<|file_name|>view.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/views/view.h"
#include <algorithm>
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop.h"
#include "base/stringprintf.h"
#include "base/utf_string_conversions.h"
#include "third_party/skia/include/core/SkRect.h"
#include "ui/base/accessibility/accessibility_types.h"
#include "ui/base/dragdrop/drag_drop_types.h"
#include "ui/compositor/compositor.h"
#include "ui/compositor/layer.h"
#include "ui/compositor/layer_animator.h"
#include "ui/gfx/canvas.h"
#include "ui/gfx/interpolated_transform.h"
#include "ui/gfx/path.h"
#include "ui/gfx/point3.h"
#include "ui/gfx/transform.h"
#include "ui/views/background.h"
#include "ui/views/context_menu_controller.h"
#include "ui/views/drag_controller.h"
#include "ui/views/layout/layout_manager.h"
#include "ui/views/views_delegate.h"
#include "ui/views/widget/native_widget_private.h"
#include "ui/views/widget/root_view.h"
#include "ui/views/widget/tooltip_manager.h"
#include "ui/views/widget/widget.h"
#if defined(OS_WIN)
#include "base/win/scoped_gdi_object.h"
#include "ui/views/accessibility/native_view_accessibility_win.h"
#endif
namespace {
// Whether to use accelerated compositing when necessary (e.g. when a view has a
// transformation).
#if defined(USE_AURA)
bool use_acceleration_when_possible = true;
#else
bool use_acceleration_when_possible = false;
#endif
// Saves the drawing state, and restores the state when going out of scope.
class ScopedCanvas {
public:
explicit ScopedCanvas(gfx::Canvas* canvas) : canvas_(canvas) {
if (canvas_)
canvas_->Save();
}
~ScopedCanvas() {
if (canvas_)
canvas_->Restore();
}
void SetCanvas(gfx::Canvas* canvas) {
if (canvas_)
canvas_->Restore();
canvas_ = canvas;
canvas_->Save();
}
private:
gfx::Canvas* canvas_;
DISALLOW_COPY_AND_ASSIGN(ScopedCanvas);
};
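// Typical usage (illustrative only): constructing a ScopedCanvas at the top
// of a paint routine pairs the canvas->Save() with a Restore() on every
// return path, e.g.:
//
//   void PaintSomething(gfx::Canvas* canvas) {
//     ScopedCanvas scoped(canvas);  // Save() now, Restore() on scope exit.
//     // ... draw ...
//   }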
// Returns the top view in |view|'s hierarchy.
const views::View* GetHierarchyRoot(const views::View* view) {
const views::View* root = view;
while (root && root->parent())
root = root->parent();
return root;
}
} // namespace
namespace views {
// static
ViewsDelegate* ViewsDelegate::views_delegate = NULL;
// static
const char View::kViewClassName[] = "views/View";
////////////////////////////////////////////////////////////////////////////////
// View, public:
// Creation and lifetime -------------------------------------------------------
View::View()
: owned_by_client_(false),
id_(0),
group_(-1),
parent_(NULL),
visible_(true),
enabled_(true),
painting_enabled_(true),
notify_enter_exit_on_child_(false),
registered_for_visible_bounds_notification_(false),
clip_insets_(0, 0, 0, 0),
needs_layout_(true),
flip_canvas_on_paint_for_rtl_ui_(false),
paint_to_layer_(false),
accelerator_registration_delayed_(false),
accelerator_focus_manager_(NULL),
registered_accelerator_count_(0),
next_focusable_view_(NULL),
previous_focusable_view_(NULL),
focusable_(false),
accessibility_focusable_(false),
context_menu_controller_(NULL),
drag_controller_(NULL) {
}
View::~View() {
if (parent_)
parent_->RemoveChildView(this);
for (Views::const_iterator i(children_.begin()); i != children_.end(); ++i) {
(*i)->parent_ = NULL;
if (!(*i)->owned_by_client_)
delete *i;
}
#if defined(OS_WIN)
if (native_view_accessibility_win_.get())
native_view_accessibility_win_->set_view(NULL);
#endif
}
// Tree operations -------------------------------------------------------------
const Widget* View::GetWidget() const {
// The root view holds a reference to this view hierarchy's Widget.
return parent_ ? parent_->GetWidget() : NULL;
}
Widget* View::GetWidget() {
return const_cast<Widget*>(const_cast<const View*>(this)->GetWidget());
}
void View::AddChildView(View* view) {
if (view->parent_ == this)
return;
AddChildViewAt(view, child_count());
}
void View::AddChildViewAt(View* view, int index) {
CHECK_NE(view, this) << "You cannot add a view as its own child";
DCHECK_GE(index, 0);
DCHECK_LE(index, child_count());
// If |view| has a parent, remove it from its parent.
View* parent = view->parent_;
if (parent) {
if (parent == this) {
ReorderChildView(view, index);
return;
}
parent->RemoveChildView(view);
}
// Sets the prev/next focus views.
InitFocusSiblings(view, index);
// Let's insert the view.
view->parent_ = this;
children_.insert(children_.begin() + index, view);
for (View* v = this; v; v = v->parent_)
v->ViewHierarchyChangedImpl(false, true, this, view);
view->PropagateAddNotifications(this, view);
UpdateTooltip();
if (GetWidget())
RegisterChildrenForVisibleBoundsNotification(view);
if (layout_manager_.get())
layout_manager_->ViewAdded(this, view);
if (use_acceleration_when_possible)
ReorderLayers();
// Make sure the visibility of the child layers are correct.
// If any of the parent View is hidden, then the layers of the subtree
// rooted at |this| should be hidden. Otherwise, all the child layers should
// inherit the visibility of the owner View.
UpdateLayerVisibility();
}
void View::ReorderChildView(View* view, int index) {
DCHECK_EQ(view->parent_, this);
if (index < 0)
index = child_count() - 1;
else if (index >= child_count())
return;
if (children_[index] == view)
return;
const Views::iterator i(std::find(children_.begin(), children_.end(), view));
DCHECK(i != children_.end());
children_.erase(i);
// Unlink the view first
View* next_focusable = view->next_focusable_view_;
View* prev_focusable = view->previous_focusable_view_;
if (prev_focusable)
prev_focusable->next_focusable_view_ = next_focusable;
if (next_focusable)
next_focusable->previous_focusable_view_ = prev_focusable;
// Add it in the specified index now.
InitFocusSiblings(view, index);
children_.insert(children_.begin() + index, view);
if (use_acceleration_when_possible)
ReorderLayers();
}
void View::RemoveChildView(View* view) {
DoRemoveChildView(view, true, true, false);
}
void View::RemoveAllChildViews(bool delete_children) {
while (!children_.empty())
DoRemoveChildView(children_.front(), false, false, delete_children);
UpdateTooltip();
}
bool View::Contains(const View* view) const {
for (const View* v = view; v; v = v->parent_) {
if (v == this)
return true;
}
return false;
}
int View::GetIndexOf(const View* view) const {
Views::const_iterator i(std::find(children_.begin(), children_.end(), view));
return i != children_.end() ? static_cast<int>(i - children_.begin()) : -1;
}
// Size and disposition --------------------------------------------------------
void View::SetBounds(int x, int y, int width, int height) {
SetBoundsRect(gfx::Rect(x, y, std::max(0, width), std::max(0, height)));
}
void View::SetBoundsRect(const gfx::Rect& bounds) {
if (bounds == bounds_) {
if (needs_layout_) {
needs_layout_ = false;
Layout();
SchedulePaint();
}
return;
}
if (visible_) {
// Paint where the view is currently.
SchedulePaintBoundsChanged(
bounds_.size() == bounds.size() ? SCHEDULE_PAINT_SIZE_SAME :
SCHEDULE_PAINT_SIZE_CHANGED);
}
gfx::Rect prev = bounds_;
bounds_ = bounds;
BoundsChanged(prev);
}
void View::SetSize(const gfx::Size& size) {
SetBounds(x(), y(), size.width(), size.height());
}
void View::SetPosition(const gfx::Point& position) {
SetBounds(position.x(), position.y(), width(), height());
}
void View::SetX(int x) {
SetBounds(x, y(), width(), height());
}
void View::SetY(int y) {
SetBounds(x(), y, width(), height());
}
gfx::Rect View::GetContentsBounds() const {
gfx::Rect contents_bounds(GetLocalBounds());
if (border_.get()) {
gfx::Insets insets;
border_->GetInsets(&insets);
contents_bounds.Inset(insets);
}
return contents_bounds;
}
gfx::Rect View::GetLocalBounds() const {
return gfx::Rect(size());
}
gfx::Rect View::GetLayerBoundsInPixel() const {
return layer()->GetTargetBounds();
}
gfx::Insets View::GetInsets() const {
gfx::Insets insets;
if (border_.get())
border_->GetInsets(&insets);
return insets;
}
gfx::Rect View::GetVisibleBounds() const {
if (!IsDrawn())
return gfx::Rect();
gfx::Rect vis_bounds(GetLocalBounds());
gfx::Rect ancestor_bounds;
const View* view = this;
ui::Transform transform;
while (view != NULL && !vis_bounds.IsEmpty()) {
transform.ConcatTransform(view->GetTransform());
transform.ConcatTranslate(static_cast<float>(view->GetMirroredX()),
static_cast<float>(view->y()));
vis_bounds = view->ConvertRectToParent(vis_bounds);
const View* ancestor = view->parent_;
if (ancestor != NULL) {
ancestor_bounds.SetRect(0, 0, ancestor->width(), ancestor->height());
vis_bounds = vis_bounds.Intersect(ancestor_bounds);
} else if (!view->GetWidget()) {
// If the view has no Widget, we're not visible. Return an empty rect.
return gfx::Rect();
}
view = ancestor;
}
if (vis_bounds.IsEmpty())
return vis_bounds;
// Convert back to this views coordinate system.
transform.TransformRectReverse(&vis_bounds);
return vis_bounds;
}
gfx::Rect View::GetBoundsInScreen() const {
gfx::Point origin;
View::ConvertPointToScreen(this, &origin);
return gfx::Rect(origin, size());
}
gfx::Size View::GetPreferredSize() {
if (layout_manager_.get())
return layout_manager_->GetPreferredSize(this);
return gfx::Size();
}
int View::GetBaseline() const {
return -1;
}
void View::SizeToPreferredSize() {
gfx::Size prefsize = GetPreferredSize();
if ((prefsize.width() != width()) || (prefsize.height() != height()))
SetBounds(x(), y(), prefsize.width(), prefsize.height());
}
gfx::Size View::GetMinimumSize() {
return GetPreferredSize();
}
gfx::Size View::GetMaximumSize() {
return gfx::Size();
}
int View::GetHeightForWidth(int w) {
if (layout_manager_.get())
return layout_manager_->GetPreferredHeightForWidth(this, w);
return GetPreferredSize().height();
}
void View::SetVisible(bool visible) {
if (visible != visible_) {
// If the View is currently visible, schedule paint to refresh parent.
// TODO(beng): not sure we should be doing this if we have a layer.
if (visible_)
SchedulePaint();
visible_ = visible;
// Notify the parent.
if (parent_)
parent_->ChildVisibilityChanged(this);
// This notifies all sub-views recursively.
PropagateVisibilityNotifications(this, visible_);
UpdateLayerVisibility();
// If we are newly visible, schedule paint.
if (visible_)
SchedulePaint();
}
}
bool View::IsDrawn() const {
return visible_ && parent_ ? parent_->IsDrawn() : false;
}
void View::SetEnabled(bool enabled) {
if (enabled != enabled_) {
enabled_ = enabled;
OnEnabledChanged();
}
}
void View::OnEnabledChanged() {
SchedulePaint();
}
// Transformations -------------------------------------------------------------
const ui::Transform& View::GetTransform() const {
static const ui::Transform* no_op = new ui::Transform;
return layer() ? layer()->transform() : *no_op;
}
void View::SetTransform(const ui::Transform& transform) {
if (!transform.HasChange()) {
if (layer()) {
layer()->SetTransform(transform);
if (!paint_to_layer_)
DestroyLayer();
} else {
// Nothing.
}
} else {
if (!layer())
CreateLayer();
layer()->SetTransform(transform);
layer()->ScheduleDraw();
}
}
void View::SetPaintToLayer(bool paint_to_layer) {
paint_to_layer_ = paint_to_layer;
if (paint_to_layer_ && !layer()) {
CreateLayer();
} else if (!paint_to_layer_ && layer()) {
DestroyLayer();
}
}
ui::Layer* View::RecreateLayer() {
ui::Layer* layer = AcquireLayer();
if (!layer)
return NULL;
CreateLayer();
layer_->set_scale_content(layer->scale_content());
return layer;
}
// RTL positioning -------------------------------------------------------------
gfx::Rect View::GetMirroredBounds() const {
gfx::Rect bounds(bounds_);
bounds.set_x(GetMirroredX());
return bounds;
}
gfx::Point View::GetMirroredPosition() const {
return gfx::Point(GetMirroredX(), y());
}
int View::GetMirroredX() const {
return parent_ ? parent_->GetMirroredXForRect(bounds_) : x();
}
int View::GetMirroredXForRect(const gfx::Rect& bounds) const {
return base::i18n::IsRTL() ?
(width() - bounds.x() - bounds.width()) : bounds.x();
}
int View::GetMirroredXInView(int x) const {
return base::i18n::IsRTL() ? width() - x : x;
}
int View::GetMirroredXWithWidthInView(int x, int w) const {
return base::i18n::IsRTL() ? width() - x - w : x;
}
// Layout ----------------------------------------------------------------------
void View::Layout() {
needs_layout_ = false;
// If we have a layout manager, let it handle the layout for us.
if (layout_manager_.get())
layout_manager_->Layout(this);
// Make sure to propagate the Layout() call to any children that haven't
// received it yet through the layout manager and need to be laid out. This
// is needed for the case when the child requires a layout but its bounds
// weren't changed by the layout manager. If there is no layout manager, we
// just propagate the Layout() call down the hierarchy, so whoever receives
// the call can take appropriate action.
for (int i = 0, count = child_count(); i < count; ++i) {
View* child = child_at(i);
if (child->needs_layout_ || !layout_manager_.get()) {
child->needs_layout_ = false;
child->Layout();
}
}
}
void View::InvalidateLayout() {
// Always invalidate up. This is needed to handle the case of us already being
// valid, but not our parent.
needs_layout_ = true;
if (parent_)
parent_->InvalidateLayout();
}
LayoutManager* View::GetLayoutManager() const {
return layout_manager_.get();
}
void View::SetLayoutManager(LayoutManager* layout_manager) {
if (layout_manager_.get())
layout_manager_->Uninstalled(this);
layout_manager_.reset(layout_manager);
if (layout_manager_.get())
layout_manager_->Installed(this);
}
// Attributes ------------------------------------------------------------------
std::string View::GetClassName() const {
return kViewClassName;
}
View* View::GetAncestorWithClassName(const std::string& name) {
for (View* view = this; view; view = view->parent_) {
if (view->GetClassName() == name)
return view;
}
return NULL;
}
const View* View::GetViewByID(int id) const {
if (id == id_)
return const_cast<View*>(this);
for (int i = 0, count = child_count(); i < count; ++i) {
const View* view = child_at(i)->GetViewByID(id);
if (view)
return view;
}
return NULL;
}
View* View::GetViewByID(int id) {
return const_cast<View*>(const_cast<const View*>(this)->GetViewByID(id));
}
void View::SetGroup(int gid) {
// Don't change the group id once it's set.
DCHECK(group_ == -1 || group_ == gid);
group_ = gid;
}
int View::GetGroup() const {
return group_;
}
bool View::IsGroupFocusTraversable() const {
return true;
}
void View::GetViewsInGroup(int group, Views* views) {
if (group_ == group)
views->push_back(this);
for (int i = 0, count = child_count(); i < count; ++i)
child_at(i)->GetViewsInGroup(group, views);
}
View* View::GetSelectedViewForGroup(int group) {
Views views;
GetWidget()->GetRootView()->GetViewsInGroup(group, &views);
return views.empty() ? NULL : views[0];
}
// Coordinate conversion -------------------------------------------------------
// static
void View::ConvertPointToView(const View* source,
const View* target,
gfx::Point* point) {
if (source == target)
return;
// |source| can be NULL.
const View* root = GetHierarchyRoot(target);
if (source) {
CHECK_EQ(GetHierarchyRoot(source), root);
if (source != root)
source->ConvertPointForAncestor(root, point);
}
if (target != root)
target->ConvertPointFromAncestor(root, point);
// API defines NULL |source| as returning the point in screen coordinates.
if (!source) {
*point = point->Subtract(
root->GetWidget()->GetClientAreaBoundsInScreen().origin());
}
}
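// Usage sketch for ConvertPointToView (editor's illustration; |a| and |b| are
// hypothetical views in the same Widget hierarchy):
//
//   gfx::Point p(5, 5);                  // In |a|'s coordinate space.
//   View::ConvertPointToView(a, b, &p);  // Now in |b|'s coordinate space.
//
//   gfx::Point screen_p(400, 300);                 // Screen coordinates.
//   View::ConvertPointToView(NULL, b, &screen_p);  // Screen -> |b|.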
// static
void View::ConvertPointToWidget(const View* src, gfx::Point* p) {
DCHECK(src);
DCHECK(p);
src->ConvertPointForAncestor(NULL, p);
}
// static
void View::ConvertPointFromWidget(const View* dest, gfx::Point* p) {
DCHECK(dest);
DCHECK(p);
dest->ConvertPointFromAncestor(NULL, p);
}
// static
void View::ConvertPointToScreen(const View* src, gfx::Point* p) {
DCHECK(src);
DCHECK(p);
// If the view is not connected to a tree, there's nothing we can do.
const Widget* widget = src->GetWidget();
if (widget) {
ConvertPointToWidget(src, p);
gfx::Rect r = widget->GetClientAreaBoundsInScreen();
p->SetPoint(p->x() + r.x(), p->y() + r.y());
}
}
// static
void View::ConvertPointFromScreen(const View* dst, gfx::Point* p) {
DCHECK(dst);
DCHECK(p);
const views::Widget* widget = dst->GetWidget();
if (!widget)
return;
const gfx::Rect r = widget->GetClientAreaBoundsInScreen();
p->Offset(-r.x(), -r.y());
views::View::ConvertPointFromWidget(dst, p);
}
gfx::Rect View::ConvertRectToParent(const gfx::Rect& rect) const {
gfx::Rect x_rect = rect;
GetTransform().TransformRect(&x_rect);
x_rect.Offset(GetMirroredPosition());
return x_rect;
}
gfx::Rect View::ConvertRectToWidget(const gfx::Rect& rect) const {
gfx::Rect x_rect = rect;
for (const View* v = this; v; v = v->parent_)
x_rect = v->ConvertRectToParent(x_rect);
return x_rect;
}
// Painting --------------------------------------------------------------------
void View::SchedulePaint() {
SchedulePaintInRect(GetLocalBounds());
}
void View::SchedulePaintInRect(const gfx::Rect& rect) {
if (!visible_ || !painting_enabled_)
return;
if (layer()) {
layer()->SchedulePaint(rect);
} else if (parent_) {
// Translate the requested paint rect to the parent's coordinate system
// then pass this notification up to the parent.
parent_->SchedulePaintInRect(ConvertRectToParent(rect));
}
}
void View::Paint(gfx::Canvas* canvas) {
TRACE_EVENT0("views", "View::Paint");
ScopedCanvas scoped_canvas(canvas);
// Paint this View and its children, setting the clip rect to the bounds
// of this View and translating the origin to the local bounds' top left
// point.
//
// Note that the X (or left) position we pass to ClipRectInt takes into
// consideration whether or not the view uses a right-to-left layout so that
// we paint our view in its mirrored position if need be.
gfx::Rect clip_rect = bounds();
clip_rect.Inset(clip_insets_);
if (parent_)
clip_rect.set_x(parent_->GetMirroredXForRect(clip_rect));
if (!canvas->ClipRect(clip_rect))
return;
// Non-empty clip, translate the graphics such that 0,0 corresponds to
// where this view is located (related to its parent).
canvas->Translate(GetMirroredPosition());
canvas->Transform(GetTransform());
PaintCommon(canvas);
}
ThemeProvider* View::GetThemeProvider() const {
const Widget* widget = GetWidget();
return widget ? widget->GetThemeProvider() : NULL;
}
// Accelerated Painting --------------------------------------------------------
// static
void View::set_use_acceleration_when_possible(bool use) {
use_acceleration_when_possible = use;
}
// static
bool View::get_use_acceleration_when_possible() {
return use_acceleration_when_possible;
}
// Input -----------------------------------------------------------------------
View* View::GetEventHandlerForPoint(const gfx::Point& point) {
// Walk the child Views recursively looking for the View that most
// tightly encloses the specified point.
for (int i = child_count() - 1; i >= 0; --i) {
View* child = child_at(i);
if (!child->visible())
continue;
gfx::Point point_in_child_coords(point);
View::ConvertPointToView(this, child, &point_in_child_coords);
if (child->HitTest(point_in_child_coords))
return child->GetEventHandlerForPoint(point_in_child_coords);
}
return this;
}
gfx::NativeCursor View::GetCursor(const MouseEvent& event) {
#if defined(OS_WIN) && !defined(USE_AURA)
static HCURSOR arrow = LoadCursor(NULL, IDC_ARROW);
return arrow;
#else
return gfx::kNullCursor;
#endif
}
bool View::HitTest(const gfx::Point& l) const {
if (GetLocalBounds().Contains(l)) {
if (HasHitTestMask()) {
gfx::Path mask;
GetHitTestMask(&mask);
#if defined(USE_AURA)
      // TODO: should we use this everywhere?
SkRegion clip_region;
clip_region.setRect(0, 0, width(), height());
SkRegion mask_region;
return mask_region.setPath(mask, clip_region) &&
mask_region.contains(l.x(), l.y());
#elif defined(OS_WIN)
base::win::ScopedRegion rgn(mask.CreateNativeRegion());
return !!PtInRegion(rgn, l.x(), l.y());
#endif
}
// No mask, but inside our bounds.
return true;
}
// Outside our bounds.
return false;
}
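// Sketch (editor's illustration; HypotheticalCircleView is not part of this
// file): a subclass can restrict hit-testing to a non-rectangular region by
// overriding the mask hooks declared later in this file.
//
//   bool HypotheticalCircleView::HasHitTestMask() const { return true; }
//   void HypotheticalCircleView::GetHitTestMask(gfx::Path* mask) const {
//     mask->addCircle(SkIntToScalar(width() / 2),
//                     SkIntToScalar(height() / 2),
//                     SkIntToScalar(width() / 2));
//   }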
bool View::OnMousePressed(const MouseEvent& event) {
return false;
}
bool View::OnMouseDragged(const MouseEvent& event) {
return false;
}
void View::OnMouseReleased(const MouseEvent& event) {
}
void View::OnMouseCaptureLost() {
}
void View::OnMouseMoved(const MouseEvent& event) {
}
void View::OnMouseEntered(const MouseEvent& event) {
}
void View::OnMouseExited(const MouseEvent& event) {
}
ui::TouchStatus View::OnTouchEvent(const TouchEvent& event) {
return ui::TOUCH_STATUS_UNKNOWN;
}
ui::GestureStatus View::OnGestureEvent(const GestureEvent& event) {
return ui::GESTURE_STATUS_UNKNOWN;
}
void View::SetMouseHandler(View* new_mouse_handler) {
// |new_mouse_handler| may be NULL.
if (parent_)
parent_->SetMouseHandler(new_mouse_handler);
}
bool View::OnKeyPressed(const KeyEvent& event) {
return false;
}
bool View::OnKeyReleased(const KeyEvent& event) {
return false;
}
bool View::OnMouseWheel(const MouseWheelEvent& event) {
return false;
}
bool View::OnScrollEvent(const ScrollEvent& event) {
return false;
}
ui::TextInputClient* View::GetTextInputClient() {
return NULL;
}
InputMethod* View::GetInputMethod() {
Widget* widget = GetWidget();
return widget ? widget->GetInputMethod() : NULL;
}
// Accelerators ----------------------------------------------------------------
void View::AddAccelerator(const ui::Accelerator& accelerator) {
if (!accelerators_.get())
accelerators_.reset(new std::vector<ui::Accelerator>());
if (std::find(accelerators_->begin(), accelerators_->end(), accelerator) ==
accelerators_->end()) {
accelerators_->push_back(accelerator);
}
RegisterPendingAccelerators();
}
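// Usage sketch (editor's illustration; the ui::Accelerator constructor
// signature varies across revisions, so treat the exact call as an
// assumption): a view typically registers accelerators at construction,
//
//   AddAccelerator(ui::Accelerator(ui::VKEY_RETURN, ui::EF_NONE));
//
// and handles them in an AcceleratorPressed() override that returns true
// once the accelerator has been consumed.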
void View::RemoveAccelerator(const ui::Accelerator& accelerator) {
if (!accelerators_.get()) {
NOTREACHED() << "Removing non-existing accelerator";
return;
}
std::vector<ui::Accelerator>::iterator i(
std::find(accelerators_->begin(), accelerators_->end(), accelerator));
if (i == accelerators_->end()) {
NOTREACHED() << "Removing non-existing accelerator";
return;
}
size_t index = i - accelerators_->begin();
accelerators_->erase(i);
if (index >= registered_accelerator_count_) {
// The accelerator is not registered to FocusManager.
return;
}
--registered_accelerator_count_;
// Providing we are attached to a Widget and registered with a focus manager,
// we should de-register from that focus manager now.
if (GetWidget() && accelerator_focus_manager_)
accelerator_focus_manager_->UnregisterAccelerator(accelerator, this);
}
void View::ResetAccelerators() {
if (accelerators_.get())
UnregisterAccelerators(false);
}
bool View::AcceleratorPressed(const ui::Accelerator& accelerator) {
return false;
}
bool View::CanHandleAccelerators() const {
return enabled() && IsDrawn() && GetWidget() && GetWidget()->IsVisible();
}
// Focus -----------------------------------------------------------------------
bool View::HasFocus() const {
const FocusManager* focus_manager = GetFocusManager();
return focus_manager && (focus_manager->GetFocusedView() == this);
}
View* View::GetNextFocusableView() {
return next_focusable_view_;
}
const View* View::GetNextFocusableView() const {
return next_focusable_view_;
}
View* View::GetPreviousFocusableView() {
return previous_focusable_view_;
}
void View::SetNextFocusableView(View* view) {
if (view)
view->previous_focusable_view_ = this;
next_focusable_view_ = view;
}
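// Sketch (editor's illustration; |first| and |second| are hypothetical
// children): a container can impose a custom traversal order by chaining
// views explicitly; the call below also fixes up |second|'s back-link.
//
//   first->SetNextFocusableView(second);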
bool View::IsFocusable() const {
return focusable_ && enabled_ && IsDrawn();
}
bool View::IsAccessibilityFocusable() const {
return (focusable_ || accessibility_focusable_) && enabled_ && IsDrawn();
}
FocusManager* View::GetFocusManager() {
Widget* widget = GetWidget();
return widget ? widget->GetFocusManager() : NULL;
}
const FocusManager* View::GetFocusManager() const {
const Widget* widget = GetWidget();
return widget ? widget->GetFocusManager() : NULL;
}
void View::RequestFocus() {
FocusManager* focus_manager = GetFocusManager();
if (focus_manager && IsFocusable())
focus_manager->SetFocusedView(this);
}
bool View::SkipDefaultKeyEventProcessing(const KeyEvent& event) {
return false;
}
FocusTraversable* View::GetFocusTraversable() {
return NULL;
}
FocusTraversable* View::GetPaneFocusTraversable() {
return NULL;
}
// Tooltips --------------------------------------------------------------------
bool View::GetTooltipText(const gfx::Point& p, string16* tooltip) const {
return false;
}
bool View::GetTooltipTextOrigin(const gfx::Point& p, gfx::Point* loc) const {
return false;
}
// Context menus ---------------------------------------------------------------
void View::ShowContextMenu(const gfx::Point& p, bool is_mouse_gesture) {
if (!context_menu_controller_)
return;
context_menu_controller_->ShowContextMenuForView(this, p);
}
// Drag and drop ---------------------------------------------------------------
bool View::GetDropFormats(
int* formats,
std::set<OSExchangeData::CustomFormat>* custom_formats) {
return false;
}
bool View::AreDropTypesRequired() {
return false;
}
bool View::CanDrop(const OSExchangeData& data) {
// TODO(sky): when I finish up migration, this should default to true.
return false;
}
void View::OnDragEntered(const DropTargetEvent& event) {
}
int View::OnDragUpdated(const DropTargetEvent& event) {
return ui::DragDropTypes::DRAG_NONE;
}
void View::OnDragExited() {
}
int View::OnPerformDrop(const DropTargetEvent& event) {
return ui::DragDropTypes::DRAG_NONE;
}
void View::OnDragDone() {
}
// static
bool View::ExceededDragThreshold(int delta_x, int delta_y) {
return (abs(delta_x) > GetHorizontalDragThreshold() ||
abs(delta_y) > GetVerticalDragThreshold());
}
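// Worked example (editor's illustration, assuming a horizontal threshold of
// 4 pixels; the real values are platform-dependent): a press at (10, 10)
// followed by a move to (15, 10) gives delta_x = 5 > 4, so the gesture is
// treated as a drag (see ProcessMouseDragged() below).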
// Scrolling -------------------------------------------------------------------
void View::ScrollRectToVisible(const gfx::Rect& rect) {
// We must take RTL UI mirroring into account when adjusting the position of
// the region.
if (parent_) {
gfx::Rect scroll_rect(rect);
scroll_rect.Offset(GetMirroredX(), y());
parent_->ScrollRectToVisible(scroll_rect);
}
}
int View::GetPageScrollIncrement(ScrollView* scroll_view,
bool is_horizontal, bool is_positive) {
return 0;
}
int View::GetLineScrollIncrement(ScrollView* scroll_view,
bool is_horizontal, bool is_positive) {
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// View, protected:
// Size and disposition --------------------------------------------------------
void View::OnBoundsChanged(const gfx::Rect& previous_bounds) {
}
void View::PreferredSizeChanged() {
InvalidateLayout();
if (parent_)
parent_->ChildPreferredSizeChanged(this);
}
bool View::NeedsNotificationWhenVisibleBoundsChange() const {
return false;
}
void View::OnVisibleBoundsChanged() {
}
// Tree operations -------------------------------------------------------------
void View::ViewHierarchyChanged(bool is_add, View* parent, View* child) {
}
void View::VisibilityChanged(View* starting_from, bool is_visible) {
}
void View::NativeViewHierarchyChanged(bool attached,
gfx::NativeView native_view,
internal::RootView* root_view) {
FocusManager* focus_manager = GetFocusManager();
if (!accelerator_registration_delayed_ &&
accelerator_focus_manager_ &&
accelerator_focus_manager_ != focus_manager) {
UnregisterAccelerators(true);
accelerator_registration_delayed_ = true;
}
if (accelerator_registration_delayed_ && attached) {
if (focus_manager) {
RegisterPendingAccelerators();
accelerator_registration_delayed_ = false;
}
}
}
// Painting --------------------------------------------------------------------
void View::PaintChildren(gfx::Canvas* canvas) {
TRACE_EVENT0("views", "View::PaintChildren");
for (int i = 0, count = child_count(); i < count; ++i)
if (!child_at(i)->layer())
child_at(i)->Paint(canvas);
}
void View::OnPaint(gfx::Canvas* canvas) {
TRACE_EVENT0("views", "View::OnPaint");
OnPaintBackground(canvas);
OnPaintFocusBorder(canvas);
OnPaintBorder(canvas);
}
void View::OnPaintBackground(gfx::Canvas* canvas) {
if (background_.get()) {
TRACE_EVENT2("views", "View::OnPaintBackground",
"width", canvas->sk_canvas()->getDevice()->width(),
"height", canvas->sk_canvas()->getDevice()->height());
background_->Paint(canvas, this);
}
}
void View::OnPaintBorder(gfx::Canvas* canvas) {
if (border_.get()) {
TRACE_EVENT2("views", "View::OnPaintBorder",
"width", canvas->sk_canvas()->getDevice()->width(),
"height", canvas->sk_canvas()->getDevice()->height());
border_->Paint(*this, canvas);
}
}
void View::OnPaintFocusBorder(gfx::Canvas* canvas) {
if (HasFocus() && (focusable() || IsAccessibilityFocusable())) {
TRACE_EVENT2("views", "views::OnPaintFocusBorder",
"width", canvas->sk_canvas()->getDevice()->width(),
"height", canvas->sk_canvas()->getDevice()->height());
canvas->DrawFocusRect(GetLocalBounds());
}
}
// Accelerated Painting --------------------------------------------------------
void View::SetFillsBoundsOpaquely(bool fills_bounds_opaquely) {
// This method should not have the side-effect of creating the layer.
if (layer())
layer()->SetFillsBoundsOpaquely(fills_bounds_opaquely);
}
bool View::SetExternalTexture(ui::Texture* texture) {
DCHECK(texture);
SetPaintToLayer(true);
layer()->SetExternalTexture(texture);
// Child views must not paint into the external texture. So make sure each
// child view has its own layer to paint into.
for (Views::iterator i = children_.begin(); i != children_.end(); ++i)
(*i)->SetPaintToLayer(true);
SchedulePaintInRect(GetLocalBounds());
return true;
}
void View::CalculateOffsetToAncestorWithLayer(gfx::Point* offset,
ui::Layer** layer_parent) {
if (layer()) {
if (layer_parent)
*layer_parent = layer();
return;
}
if (!parent_)
return;
offset->Offset(GetMirroredX(), y());
parent_->CalculateOffsetToAncestorWithLayer(offset, layer_parent);
}
void View::MoveLayerToParent(ui::Layer* parent_layer,
const gfx::Point& point) {
gfx::Point local_point(point);
if (parent_layer != layer())
local_point.Offset(GetMirroredX(), y());
if (layer() && parent_layer != layer()) {
parent_layer->Add(layer());
SetLayerBounds(gfx::Rect(local_point.x(), local_point.y(),
width(), height()));
} else {
for (int i = 0, count = child_count(); i < count; ++i)
child_at(i)->MoveLayerToParent(parent_layer, local_point);
}
}
void View::UpdateLayerVisibility() {
if (!use_acceleration_when_possible)
return;
bool visible = visible_;
for (const View* v = parent_; visible && v && !v->layer(); v = v->parent_)
visible = v->visible();
UpdateChildLayerVisibility(visible);
}
void View::UpdateChildLayerVisibility(bool ancestor_visible) {
if (layer()) {
layer()->SetVisible(ancestor_visible && visible_);
} else {
for (int i = 0, count = child_count(); i < count; ++i)
child_at(i)->UpdateChildLayerVisibility(ancestor_visible && visible_);
}
}
void View::UpdateChildLayerBounds(const gfx::Point& offset) {
if (layer()) {
SetLayerBounds(gfx::Rect(offset.x(), offset.y(), width(), height()));
} else {
for (int i = 0, count = child_count(); i < count; ++i) {
gfx::Point new_offset(offset.x() + child_at(i)->GetMirroredX(),
offset.y() + child_at(i)->y());
child_at(i)->UpdateChildLayerBounds(new_offset);
}
}
}
void View::OnPaintLayer(gfx::Canvas* canvas) {
if (!layer() || !layer()->fills_bounds_opaquely())
canvas->DrawColor(SK_ColorBLACK, SkXfermode::kClear_Mode);
PaintCommon(canvas);
}
void View::OnDeviceScaleFactorChanged(float device_scale_factor) {
// Repainting with new scale factor will paint the content at the right scale.
}
base::Closure View::PrepareForLayerBoundsChange() {
return base::Closure();
}
void View::ReorderLayers() {
View* v = this;
while (v && !v->layer())
v = v->parent();
// Forward to widget in case we're in a NativeWidgetAura.
if (!v) {
if (GetWidget())
GetWidget()->ReorderLayers();
} else {
for (Views::const_iterator i(v->children_.begin());
i != v->children_.end();
++i)
(*i)->ReorderChildLayers(v->layer());
}
}
void View::ReorderChildLayers(ui::Layer* parent_layer) {
if (layer()) {
DCHECK_EQ(parent_layer, layer()->parent());
parent_layer->StackAtTop(layer());
} else {
for (Views::const_iterator i(children_.begin()); i != children_.end(); ++i)
(*i)->ReorderChildLayers(parent_layer);
}
}
// Input -----------------------------------------------------------------------
bool View::HasHitTestMask() const {
return false;
}
void View::GetHitTestMask(gfx::Path* mask) const {
DCHECK(mask);
}
// Focus -----------------------------------------------------------------------
void View::OnFocus() {
// TODO(beng): Investigate whether it's possible for us to move this to
// Focus().
// By default, we clear the native focus. This ensures that no visible native
  // view has the focus and that we still receive keyboard inputs.
FocusManager* focus_manager = GetFocusManager();
if (focus_manager)
focus_manager->ClearNativeFocus();
// TODO(beng): Investigate whether it's possible for us to move this to
// Focus().
// Notify assistive technologies of the focus change.
GetWidget()->NotifyAccessibilityEvent(
this, ui::AccessibilityTypes::EVENT_FOCUS, true);
}
void View::OnBlur() {
}
void View::Focus() {
SchedulePaint();
OnFocus();
}
void View::Blur() {
SchedulePaint();
OnBlur();
}
// Tooltips --------------------------------------------------------------------
void View::TooltipTextChanged() {
Widget* widget = GetWidget();
// TooltipManager may be null if there is a problem creating it.
if (widget && widget->native_widget_private()->GetTooltipManager()) {
widget->native_widget_private()->GetTooltipManager()->
TooltipTextChanged(this);
}
}
// Context menus ---------------------------------------------------------------
gfx::Point View::GetKeyboardContextMenuLocation() {
gfx::Rect vis_bounds = GetVisibleBounds();
gfx::Point screen_point(vis_bounds.x() + vis_bounds.width() / 2,
vis_bounds.y() + vis_bounds.height() / 2);
ConvertPointToScreen(this, &screen_point);
return screen_point;
}
// Drag and drop ---------------------------------------------------------------
int View::GetDragOperations(const gfx::Point& press_pt) {
return drag_controller_ ?
drag_controller_->GetDragOperationsForView(this, press_pt) :
ui::DragDropTypes::DRAG_NONE;
}
void View::WriteDragData(const gfx::Point& press_pt, OSExchangeData* data) {
DCHECK(drag_controller_);
drag_controller_->WriteDragDataForView(this, press_pt, data);
}
bool View::InDrag() {
Widget* widget = GetWidget();
return widget ? widget->dragged_view() == this : false;
}
// Debugging -------------------------------------------------------------------
#if !defined(NDEBUG)
std::string View::PrintViewGraph(bool first) {
return DoPrintViewGraph(first, this);
}
std::string View::DoPrintViewGraph(bool first, View* view_with_children) {
// 64-bit pointer = 16 bytes of hex + "0x" + '\0' = 19.
const size_t kMaxPointerStringLength = 19;
std::string result;
if (first)
result.append("digraph {\n");
// Node characteristics.
char p[kMaxPointerStringLength];
size_t baseNameIndex = GetClassName().find_last_of('/');
if (baseNameIndex == std::string::npos)
baseNameIndex = 0;
else
baseNameIndex++;
char bounds_buffer[512];
// Information about current node.
  base::snprintf(p, arraysize(p), "%p", view_with_children);
result.append(" N");
result.append(p+2);
result.append(" [label=\"");
result.append(GetClassName().substr(baseNameIndex).c_str());
base::snprintf(bounds_buffer,
arraysize(bounds_buffer),
"\\n bounds: (%d, %d), (%dx%d)",
this->bounds().x(),
this->bounds().y(),
this->bounds().width(),
this->bounds().height());
result.append(bounds_buffer);
if (GetTransform().HasChange()) {
gfx::Point translation;
float rotation;
gfx::Point3f scale;
if (ui::InterpolatedTransform::FactorTRS(GetTransform(),
&translation,
&rotation,
&scale)) {
if (translation != gfx::Point(0, 0)) {
base::snprintf(bounds_buffer,
arraysize(bounds_buffer),
"\\n translation: (%d, %d)",
translation.x(),
translation.y());
result.append(bounds_buffer);
}
if (fabs(rotation) > 1e-5) {
base::snprintf(bounds_buffer,
arraysize(bounds_buffer),
"\\n rotation: %3.2f", rotation);
result.append(bounds_buffer);
}
if (scale.AsPoint() != gfx::Point(0, 0)) {
base::snprintf(bounds_buffer,
arraysize(bounds_buffer),
"\\n scale: (%2.4f, %2.4f)",
scale.x(),
scale.y());
result.append(bounds_buffer);
}
}
}
result.append("\"");
if (!parent_)
result.append(", shape=box");
if (layer()) {
if (layer()->texture())
result.append(", color=green");
else
result.append(", color=red");
if (layer()->fills_bounds_opaquely())
result.append(", style=filled");
}
result.append("]\n");
// Link to parent.
if (parent_) {
char pp[kMaxPointerStringLength];
base::snprintf(pp, kMaxPointerStringLength, "%p", parent_);
result.append(" N");
result.append(pp+2);
result.append(" -> N");
result.append(p+2);
result.append("\n");
}
// Children.
for (int i = 0, count = view_with_children->child_count(); i < count; ++i)
result.append(view_with_children->child_at(i)->PrintViewGraph(false));
if (first)
result.append("}\n");
return result;
}
#endif
////////////////////////////////////////////////////////////////////////////////
// View, private:
// DropInfo --------------------------------------------------------------------
void View::DragInfo::Reset() {
possible_drag = false;
start_pt = gfx::Point();
}
void View::DragInfo::PossibleDrag(const gfx::Point& p) {
possible_drag = true;
start_pt = p;
}
// Painting --------------------------------------------------------------------
void View::SchedulePaintBoundsChanged(SchedulePaintType type) {
// If we have a layer and the View's size did not change, we do not need to
// schedule any paints since the layer will be redrawn at its new location
// during the next Draw() cycle in the compositor.
if (!layer() || type == SCHEDULE_PAINT_SIZE_CHANGED) {
// Otherwise, if the size changes or we don't have a layer then we need to
// use SchedulePaint to invalidate the area occupied by the View.
SchedulePaint();
} else if (parent_ && type == SCHEDULE_PAINT_SIZE_SAME) {
// The compositor doesn't Draw() until something on screen changes, so
// if our position changes but nothing is being animated on screen, then
// tell the compositor to redraw the scene. We know layer() exists due to
// the above if clause.
layer()->ScheduleDraw();
}
}
void View::PaintCommon(gfx::Canvas* canvas) {
if (!visible_ || !painting_enabled_)
return;
{
// If the View we are about to paint requested the canvas to be flipped, we
// should change the transform appropriately.
// The canvas mirroring is undone once the View is done painting so that we
// don't pass the canvas with the mirrored transform to Views that didn't
// request the canvas to be flipped.
ScopedCanvas scoped(canvas);
if (FlipCanvasOnPaintForRTLUI()) {
canvas->Translate(gfx::Point(width(), 0));
canvas->Scale(-1, 1);
}
OnPaint(canvas);
}
PaintChildren(canvas);
}
// Tree operations -------------------------------------------------------------
void View::DoRemoveChildView(View* view,
bool update_focus_cycle,
bool update_tool_tip,
bool delete_removed_view) {
DCHECK(view);
const Views::iterator i(std::find(children_.begin(), children_.end(), view));
scoped_ptr<View> view_to_be_deleted;
if (i != children_.end()) {
if (update_focus_cycle) {
// Let's remove the view from the focus traversal.
View* next_focusable = view->next_focusable_view_;
View* prev_focusable = view->previous_focusable_view_;
if (prev_focusable)
prev_focusable->next_focusable_view_ = next_focusable;
if (next_focusable)
next_focusable->previous_focusable_view_ = prev_focusable;
}
if (GetWidget())
UnregisterChildrenForVisibleBoundsNotification(view);
view->PropagateRemoveNotifications(this);
view->parent_ = NULL;
view->UpdateLayerVisibility();
if (delete_removed_view && !view->owned_by_client_)
view_to_be_deleted.reset(view);
children_.erase(i);
}
if (update_tool_tip)
UpdateTooltip();
if (layout_manager_.get())
layout_manager_->ViewRemoved(this, view);
}
void View::PropagateRemoveNotifications(View* parent) {
for (int i = 0, count = child_count(); i < count; ++i)
child_at(i)->PropagateRemoveNotifications(parent);
for (View* v = this; v; v = v->parent_)
v->ViewHierarchyChangedImpl(true, false, parent, this);
}
void View::PropagateAddNotifications(View* parent, View* child) {
for (int i = 0, count = child_count(); i < count; ++i)
child_at(i)->PropagateAddNotifications(parent, child);
ViewHierarchyChangedImpl(true, true, parent, child);
}
void View::PropagateNativeViewHierarchyChanged(bool attached,
gfx::NativeView native_view,
internal::RootView* root_view) {
for (int i = 0, count = child_count(); i < count; ++i)
child_at(i)->PropagateNativeViewHierarchyChanged(attached,
native_view,
root_view);
NativeViewHierarchyChanged(attached, native_view, root_view);
}
void View::ViewHierarchyChangedImpl(bool register_accelerators,
bool is_add,
View* parent,
View* child) {
if (register_accelerators) {
if (is_add) {
// If you get this registration, you are part of a subtree that has been
// added to the view hierarchy.
if (GetFocusManager()) {
RegisterPendingAccelerators();
} else {
// Delay accelerator registration until visible as we do not have
        // a focus manager until then.
accelerator_registration_delayed_ = true;
}
} else {
if (child == this)
UnregisterAccelerators(true);
}
}
if (is_add && layer() && !layer()->parent()) {
UpdateParentLayer();
Widget* widget = GetWidget();
if (widget)
widget->UpdateRootLayers();
} else if (!is_add && child == this) {
    // Make sure the layers belonging to the subtree rooted at |child| get
// removed from layers that do not belong in the same subtree.
OrphanLayers();
if (use_acceleration_when_possible) {
Widget* widget = GetWidget();
if (widget)
widget->UpdateRootLayers();
}
}
ViewHierarchyChanged(is_add, parent, child);
parent->needs_layout_ = true;
}
// Size and disposition --------------------------------------------------------
void View::PropagateVisibilityNotifications(View* start, bool is_visible) {
for (int i = 0, count = child_count(); i < count; ++i)
child_at(i)->PropagateVisibilityNotifications(start, is_visible);
VisibilityChangedImpl(start, is_visible);
}
void View::VisibilityChangedImpl(View* starting_from, bool is_visible) {
VisibilityChanged(starting_from, is_visible);
}
void View::BoundsChanged(const gfx::Rect& previous_bounds) {
if (visible_) {
// Paint the new bounds.
SchedulePaintBoundsChanged(
bounds_.size() == previous_bounds.size() ? SCHEDULE_PAINT_SIZE_SAME :
SCHEDULE_PAINT_SIZE_CHANGED);
}
if (use_acceleration_when_possible) {
if (layer()) {
if (parent_) {
gfx::Point offset;
parent_->CalculateOffsetToAncestorWithLayer(&offset, NULL);
offset.Offset(GetMirroredX(), y());
SetLayerBounds(gfx::Rect(offset, size()));
} else {
SetLayerBounds(bounds_);
}
// TODO(beng): this seems redundant with the SchedulePaint at the top of
      // this function. Explore collapsing.
if (previous_bounds.size() != bounds_.size() &&
!layer()->layer_updated_externally()) {
// If our bounds have changed then we need to update the complete
// texture.
layer()->SchedulePaint(GetLocalBounds());
}
} else {
// If our bounds have changed, then any descendant layer bounds may
// have changed. Update them accordingly.
gfx::Point offset;
CalculateOffsetToAncestorWithLayer(&offset, NULL);
UpdateChildLayerBounds(offset);
}
}
OnBoundsChanged(previous_bounds);
if (previous_bounds.size() != size()) {
needs_layout_ = false;
Layout();
}
if (NeedsNotificationWhenVisibleBoundsChange())
OnVisibleBoundsChanged();
// Notify interested Views that visible bounds within the root view may have
// changed.
if (descendants_to_notify_.get()) {
for (Views::iterator i(descendants_to_notify_->begin());
i != descendants_to_notify_->end(); ++i) {
(*i)->OnVisibleBoundsChanged();
}
}
}
// static
void View::RegisterChildrenForVisibleBoundsNotification(View* view) {
if (view->NeedsNotificationWhenVisibleBoundsChange())
view->RegisterForVisibleBoundsNotification();
for (int i = 0; i < view->child_count(); ++i)
RegisterChildrenForVisibleBoundsNotification(view->child_at(i));
}
// static
void View::UnregisterChildrenForVisibleBoundsNotification(View* view) {
if (view->NeedsNotificationWhenVisibleBoundsChange())
view->UnregisterForVisibleBoundsNotification();
for (int i = 0; i < view->child_count(); ++i)
UnregisterChildrenForVisibleBoundsNotification(view->child_at(i));
}
void View::RegisterForVisibleBoundsNotification() {
if (registered_for_visible_bounds_notification_)
return;
registered_for_visible_bounds_notification_ = true;
for (View* ancestor = parent_; ancestor; ancestor = ancestor->parent_)
ancestor->AddDescendantToNotify(this);
}
void View::UnregisterForVisibleBoundsNotification() {
if (!registered_for_visible_bounds_notification_)
return;
registered_for_visible_bounds_notification_ = false;
for (View* ancestor = parent_; ancestor; ancestor = ancestor->parent_)
ancestor->RemoveDescendantToNotify(this);
}
void View::AddDescendantToNotify(View* view) {
DCHECK(view);
if (!descendants_to_notify_.get())
descendants_to_notify_.reset(new Views);
descendants_to_notify_->push_back(view);
}
void View::RemoveDescendantToNotify(View* view) {
DCHECK(view && descendants_to_notify_.get());
Views::iterator i(std::find(
descendants_to_notify_->begin(), descendants_to_notify_->end(), view));
DCHECK(i != descendants_to_notify_->end());
descendants_to_notify_->erase(i);
if (descendants_to_notify_->empty())
descendants_to_notify_.reset();
}
void View::SetLayerBounds(const gfx::Rect& bounds) {
layer()->SetBounds(bounds);
}
// Transformations -------------------------------------------------------------
bool View::GetTransformRelativeTo(const View* ancestor,
ui::Transform* transform) const {
const View* p = this;
while (p && p != ancestor) {
transform->ConcatTransform(p->GetTransform());
transform->ConcatTranslate(static_cast<float>(p->GetMirroredX()),
static_cast<float>(p->y()));
p = p->parent_;
}
return p == ancestor;
}
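// Note (editor's illustration): for a parent chain V -> P -> A, the loop
// above accumulates
//   T = T_V * Translate_V * T_P * Translate_P
// via ConcatTransform/ConcatTranslate, so transforming a point with T maps it
// from V's coordinate space into ancestor A's, one mirrored offset and local
// transform at a time.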
// Coordinate conversion -------------------------------------------------------
bool View::ConvertPointForAncestor(const View* ancestor,
gfx::Point* point) const {
ui::Transform trans;
// TODO(sad): Have some way of caching the transformation results.
bool result = GetTransformRelativeTo(ancestor, &trans);
gfx::Point3f p(*point);
trans.TransformPoint(p);
*point = p.AsPoint();
return result;
}
bool View::ConvertPointFromAncestor(const View* ancestor,
gfx::Point* point) const {
ui::Transform trans;
bool result = GetTransformRelativeTo(ancestor, &trans);
gfx::Point3f p(*point);
trans.TransformPointReverse(p);
*point = p.AsPoint();
return result;
}
// Accelerated painting --------------------------------------------------------
void View::CreateLayer() {
// A new layer is being created for the view. So all the layers of the
// sub-tree can inherit the visibility of the corresponding view.
for (int i = 0, count = child_count(); i < count; ++i)
child_at(i)->UpdateChildLayerVisibility(true);
layer_ = new ui::Layer();
layer_owner_.reset(layer_);
layer_->set_delegate(this);
#if !defined(NDEBUG)
layer_->set_name(GetClassName());
#endif
UpdateParentLayers();
UpdateLayerVisibility();
// The new layer needs to be ordered in the layer tree according
// to the view tree. Children of this layer were added in order
// in UpdateParentLayers().
if (parent())
parent()->ReorderLayers();
Widget* widget = GetWidget();
if (widget)
widget->UpdateRootLayers();
}
void View::UpdateParentLayers() {
// Attach all top-level un-parented layers.
if (layer() && !layer()->parent()) {
UpdateParentLayer();
} else {
for (int i = 0, count = child_count(); i < count; ++i)
child_at(i)->UpdateParentLayers();
}
}
void View::UpdateParentLayer() {
if (!layer())
return;
ui::Layer* parent_layer = NULL;
gfx::Point offset(GetMirroredX(), y());
// TODO(sad): The NULL check here for parent_ essentially is to check if this
// is the RootView. Instead of doing this, this function should be made
// virtual and overridden from the RootView.
if (parent_)
parent_->CalculateOffsetToAncestorWithLayer(&offset, &parent_layer);
else if (!parent_ && GetWidget())
GetWidget()->CalculateOffsetToAncestorWithLayer(&offset, &parent_layer);
ReparentLayer(offset, parent_layer);
}
void View::OrphanLayers() {
if (layer()) {
if (layer()->parent())
layer()->parent()->Remove(layer());
// The layer belonging to this View has already been orphaned. It is not
// necessary to orphan the child layers.
return;
}
for (int i = 0, count = child_count(); i < count; ++i)
child_at(i)->OrphanLayers();
}
void View::ReparentLayer(const gfx::Point& offset, ui::Layer* parent_layer) {
layer_->SetBounds(gfx::Rect(offset.x(), offset.y(), width(), height()));
DCHECK_NE(layer(), parent_layer);
if (parent_layer)
parent_layer->Add(layer());
layer_->SchedulePaint(GetLocalBounds());
MoveLayerToParent(layer(), gfx::Point());
}
void View::DestroyLayer() {
ui::Layer* new_parent = layer()->parent();
std::vector<ui::Layer*> children = layer()->children();
for (size_t i = 0; i < children.size(); ++i) {
layer()->Remove(children[i]);
if (new_parent)
new_parent->Add(children[i]);
}
layer_ = NULL;
layer_owner_.reset();
if (new_parent)
ReorderLayers();
gfx::Point offset;
CalculateOffsetToAncestorWithLayer(&offset, NULL);
UpdateChildLayerBounds(offset);
SchedulePaint();
Widget* widget = GetWidget();
if (widget)
widget->UpdateRootLayers();
}
// Input -----------------------------------------------------------------------
bool View::ProcessMousePressed(const MouseEvent& event, DragInfo* drag_info) {
int drag_operations =
(enabled_ && event.IsOnlyLeftMouseButton() && HitTest(event.location())) ?
GetDragOperations(event.location()) : 0;
ContextMenuController* context_menu_controller = event.IsRightMouseButton() ?
context_menu_controller_ : 0;
const bool enabled = enabled_;
const bool result = OnMousePressed(event);
// WARNING: we may have been deleted, don't use any View variables.
if (!enabled)
return result;
if (drag_operations != ui::DragDropTypes::DRAG_NONE) {
drag_info->PossibleDrag(event.location());
return true;
}
return !!context_menu_controller || result;
}
bool View::ProcessMouseDragged(const MouseEvent& event, DragInfo* drag_info) {
  // Copy the field so that no harm is done if we're deleted during the drag
  // and drop.
ContextMenuController* context_menu_controller = context_menu_controller_;
const bool possible_drag = drag_info->possible_drag;
if (possible_drag && ExceededDragThreshold(
drag_info->start_pt.x() - event.x(),
drag_info->start_pt.y() - event.y())) {
if (!drag_controller_ ||
drag_controller_->CanStartDragForView(
this, drag_info->start_pt, event.location()))
DoDrag(event, drag_info->start_pt);
} else {
if (OnMouseDragged(event))
return true;
// Fall through to return value based on context menu controller.
}
// WARNING: we may have been deleted.
return (context_menu_controller != NULL) || possible_drag;
}
void View::ProcessMouseReleased(const MouseEvent& event) {
if (context_menu_controller_ && event.IsOnlyRightMouseButton()) {
// Assume that if there is a context menu controller we won't be deleted
// from mouse released.
gfx::Point location(event.location());
OnMouseReleased(event);
if (HitTest(location)) {
ConvertPointToScreen(this, &location);
ShowContextMenu(location, true);
}
} else {
OnMouseReleased(event);
}
// WARNING: we may have been deleted.
}
ui::TouchStatus View::ProcessTouchEvent(const TouchEvent& event) {
  // TODO(rjkroege): Implement a grab scheme similar to the one found in
  // MousePressed.
return OnTouchEvent(event);
}
ui::GestureStatus View::ProcessGestureEvent(const GestureEvent& event) {
if (context_menu_controller_ &&
(event.type() == ui::ET_GESTURE_LONG_PRESS ||
event.type() == ui::ET_GESTURE_TWO_FINGER_TAP)) {
gfx::Point location(event.location());
ConvertPointToScreen(this, &location);
ShowContextMenu(location, true);
return ui::GESTURE_STATUS_CONSUMED;
}
return OnGestureEvent(event);
}
// Accelerators ----------------------------------------------------------------
void View::RegisterPendingAccelerators() {
if (!accelerators_.get() ||
registered_accelerator_count_ == accelerators_->size()) {
// No accelerators are waiting for registration.
return;
}
if (!GetWidget()) {
// The view is not yet attached to a widget, defer registration until then.
return;
}
accelerator_focus_manager_ = GetFocusManager();
if (!accelerator_focus_manager_) {
// Some crash reports seem to show that we may get cases where we have no
// focus manager (see bug #1291225). This should never be the case, just
// making sure we don't crash.
NOTREACHED();
return;
}
for (std::vector<ui::Accelerator>::const_iterator i(
accelerators_->begin() + registered_accelerator_count_);
i != accelerators_->end(); ++i) {
accelerator_focus_manager_->RegisterAccelerator(
*i, ui::AcceleratorManager::kNormalPriority, this);
}
registered_accelerator_count_ = accelerators_->size();
}
void View::UnregisterAccelerators(bool leave_data_intact) {
if (!accelerators_.get())
return;
if (GetWidget()) {
if (accelerator_focus_manager_) {
// We may not have a FocusManager if the window containing us is being
// closed, in which case the FocusManager is being deleted so there is
// nothing to unregister.
accelerator_focus_manager_->UnregisterAccelerators(this);
accelerator_focus_manager_ = NULL;
}
if (!leave_data_intact) {
accelerators_->clear();
accelerators_.reset();
}
registered_accelerator_count_ = 0;
}
}
// Focus -----------------------------------------------------------------------
void View::InitFocusSiblings(View* v, int index) {
int count = child_count();
  if (count == 0) {
    v->next_focusable_view_ = NULL;
    v->previous_focusable_view_ = NULL;
  } else {
if (index == count) {
// We are inserting at the end, but the end of the child list may not be
// the last focusable element. Let's try to find an element with no next
// focusable element to link to.
View* last_focusable_view = NULL;
for (Views::iterator i(children_.begin()); i != children_.end(); ++i) {
if (!(*i)->next_focusable_view_) {
last_focusable_view = *i;
break;
}
}
if (last_focusable_view == NULL) {
        // Hmm... there is a cycle in the focus list. Let's just insert
        // ourselves after the last child.
View* prev = children_[index - 1];
v->previous_focusable_view_ = prev;
v->next_focusable_view_ = prev->next_focusable_view_;
prev->next_focusable_view_->previous_focusable_view_ = v;
prev->next_focusable_view_ = v;
} else {
last_focusable_view->next_focusable_view_ = v;
v->next_focusable_view_ = NULL;
v->previous_focusable_view_ = last_focusable_view;
}
} else {
View* prev = children_[index]->GetPreviousFocusableView();
v->previous_focusable_view_ = prev;
v->next_focusable_view_ = children_[index];
if (prev)
prev->next_focusable_view_ = v;
children_[index]->previous_focusable_view_ = v;
}
}
}
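// Note (editor's illustration): the focus order is a doubly-linked list
// threaded through next_focusable_view_/previous_focusable_view_. Inserting
// |v| at |index| splices it between its neighbors, e.g. for children [A, B]
// and index 1 the chain becomes A <-> v <-> B.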
// System events ---------------------------------------------------------------
void View::PropagateThemeChanged() {
for (int i = child_count() - 1; i >= 0; --i)
child_at(i)->PropagateThemeChanged();
OnThemeChanged();
}
void View::PropagateLocaleChanged() {
for (int i = child_count() - 1; i >= 0; --i)
child_at(i)->PropagateLocaleChanged();
OnLocaleChanged();
}
// Tooltips --------------------------------------------------------------------
void View::UpdateTooltip() {
Widget* widget = GetWidget();
// TODO(beng): The TooltipManager NULL check can be removed when we
// consolidate Init() methods and make views_unittests Init() all
// Widgets that it uses.
if (widget && widget->native_widget_private()->GetTooltipManager())
widget->native_widget_private()->GetTooltipManager()->UpdateTooltip();
}
// Drag and drop ---------------------------------------------------------------
bool View::DoDrag(const LocatedEvent& event, const gfx::Point& press_pt) {
#if !defined(OS_MACOSX)
int drag_operations = GetDragOperations(press_pt);
if (drag_operations == ui::DragDropTypes::DRAG_NONE)
return false;
OSExchangeData data;
WriteDragData(press_pt, &data);
// Message the RootView to do the drag and drop. That way if we're removed
// the RootView can detect it and avoid calling us back.
gfx::Point widget_location(event.location());
ConvertPointToWidget(this, &widget_location);
GetWidget()->RunShellDrag(this, data, widget_location, drag_operations);
return true;
#else
return false;
#endif // !defined(OS_MACOSX)
}
}  // namespace views