test_resource.py

# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
import os
import time
import mock
import unittest
from azure_devtools.scenario_tests.const import MOCKED_SUBSCRIPTION_ID
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.testsdk import ScenarioTest, LiveScenarioTest, ResourceGroupPreparer, create_random_name, live_only, record_only
from azure.cli.core.util import get_file_json
class ResourceGroupScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_rg_scenario')
def test_resource_group(self, resource_group):
self.cmd('group delete -n {rg} --yes')
self.cmd('group exists -n {rg}',
checks=self.check('@', False))
self.cmd('group create -n {rg} -l westus --tag a=b c', checks=[
self.check('name', '{rg}'),
self.check('tags', {'a': 'b', 'c': ''})
])
self.cmd('group exists -n {rg}',
checks=self.check('@', True))
self.cmd('group show -n {rg}', checks=[
self.check('name', '{rg}'),
self.check('tags', {'a': 'b', 'c': ''})
])
self.cmd('group list --tag a=b', checks=[
self.check('[0].name', '{rg}'),
self.check('[0].tags', {'a': 'b', 'c': ''})
])
# test --force-string
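# the value below is shell-escaped JSON: without --force-string the CLI
# parses it into a dict; with --force-string it is kept as a literal string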
self.kwargs.update({'tag': "\"{\\\"k\\\":\\\"v\\\"}\""})
self.cmd('group update -g {rg} --tags ""',
checks=self.check('tags', {}))
self.cmd('group update -g {rg} --set tags.a={tag}',
checks=self.check('tags.a', "{{'k': 'v'}}"))
self.cmd('group update -g {rg} --set tags.b={tag} --force-string',
checks=self.check('tags.b', '{{\"k\":\"v\"}}'))
class ResourceGroupNoWaitScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_rg_nowait_test')
def test_resource_group_no_wait(self, resource_group):
self.cmd('group delete -n {rg} --no-wait --yes',
checks=self.is_empty())
self.cmd('group wait --deleted -n {rg}',
checks=self.is_empty())
self.cmd('group exists -n {rg}',
checks=self.check('@', False))
self.cmd('group create -n {rg} -l westus',
checks=self.check('name', '{rg}'))
self.cmd('group exists -n {rg}',
checks=self.check('@', True))
self.cmd('group wait --exists -n {rg}',
checks=self.is_empty())
class ResourceScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_resource_scenario', location='southcentralus')
@AllowLargeResponse()
def test_resource_scenario(self, resource_group, resource_group_location):
self.kwargs.update({
'loc': resource_group_location,
'vnet': self.create_random_name('vnet-', 30),
'subnet': self.create_random_name('subnet-', 30),
'rt': 'Microsoft.Network/virtualNetworks'
})
vnet_count = self.cmd("resource list --query \"length([?name=='{vnet}'])\"").get_output_in_json() or 0
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet} --tags cli-test=test')
vnet_count += 1
self.cmd('resource list',
checks=self.check("length([?name=='{vnet}'])", vnet_count))
self.cmd('resource list -l {loc}',
checks=self.check("length([?location == '{loc}']) == length(@)", True))
self.cmd('resource list --resource-type {rt}',
checks=self.check("length([?name=='{vnet}'])", vnet_count))
self.cmd('resource list --name {vnet}',
checks=self.check("length([?name=='{vnet}'])", vnet_count))
self.cmd('resource list --tag cli-test',
checks=self.check("length([?name=='{vnet}'])", vnet_count))
self.cmd('resource list --tag cli-test=test',
checks=self.check("length([?name=='{vnet}'])", vnet_count))
# check for simple resource with tag
self.cmd('resource show -n {vnet} -g {rg} --resource-type Microsoft.Network/virtualNetworks', checks=[
self.check('name', '{vnet}'),
self.check('location', '{loc}'),
self.check('resourceGroup', '{rg}'),
self.check('tags', {'cli-test': 'test'})
])
# check for child resource
self.cmd('resource show -n {subnet} -g {rg} --namespace Microsoft.Network --parent virtualNetworks/{vnet} --resource-type subnets', checks=[
self.check('name', '{subnet}'),
self.check('resourceGroup', '{rg}')
])
# clear tag and verify
self.cmd('resource tag -n {vnet} -g {rg} --resource-type Microsoft.Network/virtualNetworks --tags')
self.cmd('resource show -n {vnet} -g {rg} --resource-type Microsoft.Network/virtualNetworks',
checks=self.check('tags', {}))
# delete and verify
self.cmd('resource delete -n {vnet} -g {rg} --resource-type {rt}')
time.sleep(10)
self.cmd('resource list', checks=self.check("length([?name=='{vnet}'])", 0))
class ResourceIDScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_resource_id')
def test_resource_id_scenario(self, resource_group):
self.kwargs.update({
'vnet': 'cli_test_resource_id_vnet',
'subnet': 'cli_test_resource_id_subnet'
})
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet}')
self.kwargs['sub'] = self.get_subscription_id()
self.kwargs['vnet_id'] = '/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetworks/{vnet}'.format(
**self.kwargs)
self.cmd('resource tag --id {vnet_id} --tags tag-vnet')
self.cmd('resource show --id {vnet_id}', checks=[
self.check('name', '{vnet}'),
self.check('resourceGroup', '{rg}'),
self.check('tags', {'tag-vnet': ''})
])
self.kwargs['subnet_id'] = '/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetworks/{vnet}/subnets/{subnet}'.format(
**self.kwargs)
self.cmd('resource show --id {subnet_id}', checks=[
self.check('name', '{subnet}'),
self.check('resourceGroup', '{rg}'),
self.check('properties.addressPrefix', '10.0.0.0/24')
])
self.cmd('resource update --id {subnet_id} --set properties.addressPrefix=10.0.0.0/22',
checks=self.check('properties.addressPrefix', '10.0.0.0/22'))
self.cmd('resource delete --id {subnet_id}', checks=self.is_empty())
self.cmd('resource delete --id {vnet_id}', checks=self.is_empty())
class ResourceGenericUpdate(LiveScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_resource_id')
def test_generic_update(self, resource_group):
self.kwargs.update({
'stor_1': self.create_random_name(prefix='stor1', length=10),
'stor_2': self.create_random_name(prefix='stor2', length=10)
})
# create storage accounts
self.cmd('az storage account create -g {rg} -n {stor_1}')
self.cmd('az storage account create -g {rg} -n {stor_2}')
# get ids
self.kwargs['stor_ids'] = " ".join(self.cmd('az storage account list -g {rg} --query "[].id"').get_output_in_json())
# update tags
self.cmd('az storage account update --ids {stor_ids} --set tags.isTag=True tags.isNotTag=False')
self.cmd('az storage account show --name {stor_1} -g {rg}', checks=[
self.check('tags.isTag', 'True'),
self.check('tags.isNotTag', 'False')
])
self.cmd('az storage account show --name {stor_2} -g {rg}', checks=[
self.check('tags.isTag', 'True'),
self.check('tags.isNotTag', 'False')
])
# delete tags.isTag
self.cmd('az storage account update --ids {stor_ids} --remove tags.isTag')
self.cmd('az storage account show --name {stor_1} -g {rg} --query "tags"', checks=[
self.check('isNotTag', 'False'),
self.check('isTag', None)
])
self.cmd('az storage account show --name {stor_2} -g {rg} --query "tags"', checks=[
self.check('isNotTag', 'False'),
self.check('isTag', None)
])
# delete tags.isNotTag
self.cmd('az storage account update --ids {stor_ids} --remove tags.isNotTag')
# check tags is empty.
self.cmd('az storage account show --name {stor_1} -g {rg} --query "tags"', checks=self.is_empty())
self.cmd('az storage account show --name {stor_2} -g {rg} --query "tags"', checks=self.is_empty())
class ResourceCreateAndShowScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_resource_create')
def test_resource_create_and_show(self, resource_group, resource_group_location):
self.kwargs.update({
'plan': 'cli_res_create_plan',
'app': 'clirescreateweb',
'loc': resource_group_location
})
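# --properties takes escaped JSON: '{{' yields a literal brace after the
# ScenarioTest format step, and '\\"' yields '\"' for the shell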
self.cmd('resource create -g {rg} -n {plan} --resource-type Microsoft.web/serverFarms --is-full-object --properties "{{\\"location\\":\\"{loc}\\",\\"sku\\":{{\\"name\\":\\"B1\\",\\"tier\\":\\"BASIC\\"}}}}"',
checks=self.check('name', '{plan}'))
result = self.cmd('resource create -g {rg} -n {app} --resource-type Microsoft.web/sites --properties "{{\\"serverFarmId\\":\\"{plan}\\"}}"',
checks=self.check('name', '{app}')).get_output_in_json()
self.kwargs['app_settings_id'] = result['id'] + '/config/appsettings'
self.kwargs['app_config_id'] = result['id'] + '/config/web'
self.cmd('resource create --id {app_settings_id} --properties "{{\\"key2\\":\\"value12\\"}}"',
checks=[self.check('properties.key2', 'value12')])
self.cmd('resource show --id {app_config_id}',
checks=self.check('properties.publishingUsername', '${app}'))
self.cmd('resource show --id {app_config_id} --include-response-body',
checks=self.check('responseBody.properties.publishingUsername', '${app}'))
class TagScenarioTest(ScenarioTest):
def test_tag_scenario(self):
self.kwargs.update({
'tag': 'cli_test_tag'
})
tag_values = self.cmd('tag list --query "[?tagName == \'{tag}\'].values[].tagValue"').get_output_in_json()
for tag_value in tag_values:
self.cmd('tag remove-value --value {} -n {{tag}}'.format(tag_value))
self.cmd('tag delete -n {tag}')
self.cmd('tag list --query "[?tagName == \'{tag}\']"', checks=self.is_empty())
self.cmd('tag create -n {tag}', checks=[
self.check('tagName', '{tag}'),
self.check('values', []),
self.check('count.value', 0)
])
self.cmd('tag add-value -n {tag} --value test')
self.cmd('tag add-value -n {tag} --value test2')
self.cmd('tag list --query "[?tagName == \'{tag}\']"',
checks=self.check('[].values[].tagValue', [u'test', u'test2']))
self.cmd('tag remove-value -n {tag} --value test')
self.cmd('tag list --query "[?tagName == \'{tag}\']"',
checks=self.check('[].values[].tagValue', [u'test2']))
self.cmd('tag remove-value -n {tag} --value test2')
self.cmd('tag list --query "[?tagName == \'{tag}\']"',
checks=self.check('[].values[].tagValue', []))
self.cmd('tag delete -n {tag}')
self.cmd('tag list --query "[?tagName == \'{tag}\']"',
checks=self.is_empty())
class ProviderRegistrationTest(ScenarioTest):
def test_provider_registration(self):
self.kwargs.update({'prov': 'TrendMicro.DeepSecurity'})
result = self.cmd('provider show -n {prov}').get_output_in_json()
if result['registrationState'] == 'Unregistered':
self.cmd('provider register -n {prov}')
result = self.cmd('provider show -n {prov}').get_output_in_json()
self.assertTrue(result['registrationState'] in ['Registering', 'Registered'])
self.cmd('provider unregister -n {prov}')
result = self.cmd('provider show -n {prov}').get_output_in_json()
self.assertTrue(result['registrationState'] in ['Unregistering', 'Unregistered'])
else:
self.cmd('provider unregister -n {prov}')
result = self.cmd('provider show -n {prov}').get_output_in_json()
self.assertTrue(result['registrationState'] in ['Unregistering', 'Unregistered'])
self.cmd('provider register -n {prov}')
result = self.cmd('provider show -n {prov}').get_output_in_json()
self.assertTrue(result['registrationState'] in ['Registering', 'Registered'])
class ProviderOperationTest(ScenarioTest):
def test_provider_operation(self):
self.cmd('provider operation show --namespace microsoft.compute', checks=[
self.check('id', '/providers/Microsoft.Authorization/providerOperations/Microsoft.Compute'),
self.check('type', 'Microsoft.Authorization/providerOperations')
])
self.cmd('provider operation show --namespace microsoft.storage', checks=[
self.check('id', '/providers/Microsoft.Authorization/providerOperations/Microsoft.Storage'),
self.check('type', 'Microsoft.Authorization/providerOperations')
])
class SubscriptionLevelDeploymentTest(LiveScenarioTest):
def tearDown(self):
self.cmd('policy assignment delete -n location-lock')
self.cmd('policy definition delete -n policy2')
def test_subscription_level_deployment(self):
curr_dir = os.path.dirname(os.path.realpath(__file__))
self.kwargs.update({
'tf': os.path.join(curr_dir, 'subscription_level_template.json').replace('\\', '\\\\'),
'params': os.path.join(curr_dir, 'subscription_level_parameters.json').replace('\\', '\\\\'),
# params-uri below is the raw file url of the subscription_level_parameters.json above
'params_uri': 'https://raw.githubusercontent.com/Azure/azure-cli/dev/src/command_modules/azure-cli-resource/azure/cli/command_modules/resource/tests/latest/subscription_level_parameters.json',
'dn': self.create_random_name('azure-cli-sub-level-deployment', 40)
})
self.cmd('group create --name cli_test_subscription_level_deployment --location WestUS', checks=[
self.check('properties.provisioningState', 'Succeeded')
])
self.cmd('deployment validate --location WestUS --template-file "{tf}" --parameters @"{params}"', checks=[
self.check('properties.provisioningState', 'Succeeded')
])
self.cmd('deployment validate --location WestUS --template-file "{tf}" --parameters @"{params_uri}"', checks=[
self.check('properties.provisioningState', 'Succeeded')
])
self.cmd('deployment create -n {dn} --location WestUS --template-file "{tf}" --parameters @"{params}"', checks=[
self.check('properties.provisioningState', 'Succeeded'),
])
self.cmd('deployment list', checks=[
self.check('[0].name', '{dn}'),
])
self.cmd('deployment show -n {dn}', checks=[
self.check('name', '{dn}')
])
self.cmd('deployment export -n {dn}', checks=[
])
self.cmd('deployment operation list -n {dn}', checks=[
self.check('length([])', 4)
])
class DeploymentTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_deployment_lite')
def test_group_deployment_lite(self, resource_group):
# ensures that a template that is missing "parameters" or "resources" still deploys
curr_dir = os.path.dirname(os.path.realpath(__file__))
self.kwargs.update({
'tf': os.path.join(curr_dir, 'test-template-lite.json').replace('\\', '\\\\'),
'dn': self.create_random_name('azure-cli-deployment', 30)
})
self.cmd('group deployment create -g {rg} -n {dn} --template-file "{tf}"', checks=[
self.check('properties.provisioningState', 'Succeeded'),
self.check('resourceGroup', '{rg}')
])
@ResourceGroupPreparer(name_prefix='cli_test_deployment')
def test_group_deployment(self, resource_group):
curr_dir = os.path.dirname(os.path.realpath(__file__))
self.kwargs.update({
'tf': os.path.join(curr_dir, 'test-template.json').replace('\\', '\\\\'),
'params': os.path.join(curr_dir, 'test-params.json').replace('\\', '\\\\'),
# params-uri below is the raw file url of the test-params.json above
'params_uri': 'https://raw.githubusercontent.com/Azure/azure-cli/dev/src/azure-cli/azure/cli/command_modules/resource/tests/latest/test-params.json',
'of': os.path.join(curr_dir, 'test-object.json').replace('\\', '\\\\'),
'dn': 'azure-cli-deployment'
})
self.kwargs['subnet_id'] = self.cmd('network vnet create -g {rg} -n vnet1 --subnet-name subnet1').get_output_in_json()['newVNet']['subnets'][0]['id']
self.cmd('group deployment validate -g {rg} --template-file "{tf}" --parameters @"{params}" --parameters subnetId="{subnet_id}" --parameters backendAddressPools=@"{of}"', checks=[
self.check('properties.provisioningState', 'Succeeded')
])
self.cmd('group deployment validate -g {rg} --template-file "{tf}" --parameters "{params_uri}" --parameters subnetId="{subnet_id}" --parameters backendAddressPools=@"{of}"', checks=[
self.check('properties.provisioningState', 'Succeeded')
])
self.cmd('group deployment create -g {rg} -n {dn} --template-file "{tf}" --parameters @"{params}" --parameters subnetId="{subnet_id}" --parameters backendAddressPools=@"{of}"', checks=[
self.check('properties.provisioningState', 'Succeeded'),
self.check('resourceGroup', '{rg}')
])
self.cmd('network lb show -g {rg} -n test-lb',
checks=self.check('tags', {'key': 'super=value'}))
self.cmd('group deployment list -g {rg}', checks=[
self.check('[0].name', '{dn}'),
self.check('[0].resourceGroup', '{rg}')
])
self.cmd('group deployment show -g {rg} -n {dn}', checks=[
self.check('name', '{dn}'),
self.check('resourceGroup', '{rg}')
])
self.cmd('group deployment operation list -g {rg} -n {dn}', checks=[
self.check('length([])', 2),
self.check('[0].resourceGroup', '{rg}')
])
@ResourceGroupPreparer(name_prefix='cli_test_on_error_deployment_lastsuccessful')
def test_group_on_error_deployment_lastsuccessful(self, resource_group):
curr_dir = os.path.dirname(os.path.realpath(__file__))
self.kwargs.update({
'tf': os.path.join(curr_dir, 'test-template-lite.json').replace('\\', '\\\\'),
'dn': self.create_random_name('azure-cli-deployment', 30),
'onErrorType': 'LastSuccessful',
'sdn': self.create_random_name('azure-cli-deployment', 30)
})
self.cmd('group deployment create -g {rg} -n {dn} --template-file "{tf}"', checks=[
self.check('properties.provisioningState', 'Succeeded'),
self.check('resourceGroup', '{rg}'),
self.check('properties.onErrorDeployment', None)
])
self.cmd('group deployment create -g {rg} -n {sdn} --template-file "{tf}" --rollback-on-error', checks=[
self.check('properties.provisioningState', 'Succeeded'),
self.check('resourceGroup', '{rg}'),
self.check('properties.onErrorDeployment.deploymentName', '{dn}'),
self.check('properties.onErrorDeployment.type', '{onErrorType}')
])
@ResourceGroupPreparer(name_prefix='cli_test_on_error_deployment_specificdeployment')
def test_group_on_error_deployment_specificdeployment(self, resource_group):
curr_dir = os.path.dirname(os.path.realpath(__file__))
self.kwargs.update({
'tf': os.path.join(curr_dir, 'test-template-lite.json').replace('\\', '\\\\'),
'dn': self.create_random_name('azure-cli-deployment', 30),
'onErrorType': 'SpecificDeployment',
'sdn': self.create_random_name('azure-cli-deployment', 30)
})
self.cmd('group deployment create -g {rg} -n {dn} --template-file "{tf}"', checks=[
self.check('properties.provisioningState', 'Succeeded'),
self.check('resourceGroup', '{rg}'),
self.check('properties.onErrorDeployment', None)
])
self.cmd('group deployment create -g {rg} -n {sdn} --template-file "{tf}" --rollback-on-error {dn}', checks=[
self.check('properties.provisioningState', 'Succeeded'),
self.check('resourceGroup', '{rg}'),
self.check('properties.onErrorDeployment.deploymentName', '{dn}'),
self.check('properties.onErrorDeployment.type', '{onErrorType}')
])
class DeploymentLiveTest(LiveScenarioTest):
@ResourceGroupPreparer()
def test_group_deployment_progress(self, resource_group):
from azure.cli.testsdk.utilities import force_progress_logging
curr_dir = os.path.dirname(os.path.realpath(__file__))
self.kwargs.update({
'tf': os.path.join(curr_dir, 'test-template.json').replace('\\', '\\\\'),
'params': os.path.join(curr_dir, 'test-params.json').replace('\\', '\\\\'),
'of': os.path.join(curr_dir, 'test-object.json').replace('\\', '\\\\'),
'dn': 'azure-cli-deployment2'
})
self.kwargs['subnet_id'] = self.cmd('network vnet create -g {rg} -n vnet1 --subnet-name subnet1').get_output_in_json()['newVNet']['subnets'][0]['id']
with force_progress_logging() as test_io:
self.cmd('group deployment create --verbose -g {rg} -n {dn} --template-file "{tf}" --parameters @"{params}" --parameters subnetId="{subnet_id}" --parameters backendAddressPools=@"{of}"')
# verify the progress
lines = test_io.getvalue().splitlines()
for line in lines:
self.assertTrue(line.split(':')[0] in ['Accepted', 'Succeeded'])
self.assertIn('Succeeded: {} (Microsoft.Resources/deployments)'.format(self.kwargs['dn']), lines)
class DeploymentNoWaitTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_group_deployment_no_wait')
def test_group_deployment_no_wait(self, resource_group):
curr_dir = os.path.dirname(os.path.realpath(__file__))
self.kwargs.update({
'tf': os.path.join(curr_dir, 'simple_deploy.json').replace('\\', '\\\\'),
'params': os.path.join(curr_dir, 'simple_deploy_parameters.json').replace('\\', '\\\\'),
'dn': 'azure-cli-deployment'
})
self.cmd('group deployment create -g {rg} -n {dn} --template-file "{tf}" --parameters @"{params}" --no-wait',
checks=self.is_empty())
self.cmd('group deployment wait -g {rg} -n {dn} --created',
checks=self.is_empty())
self.cmd('group deployment show -g {rg} -n {dn}',
checks=self.check('properties.provisioningState', 'Succeeded'))
class DeploymentThruUriTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_deployment_uri')
def test_group_deployment_thru_uri(self, resource_group):
self.resource_group = resource_group
curr_dir = os.path.dirname(os.path.realpath(__file__))
# same content as the sample template under the current folder, but referenced via URI
self.kwargs.update({
'tf': 'https://raw.githubusercontent.com/Azure/azure-cli/dev/src/azure-cli/azure/cli/command_modules/resource/tests/latest/simple_deploy.json',
'params': os.path.join(curr_dir, 'simple_deploy_parameters.json').replace('\\', '\\\\')
})
self.kwargs['dn'] = self.cmd('group deployment create -g {rg} --template-uri "{tf}" --parameters @"{params}"', checks=[
self.check('properties.provisioningState', 'Succeeded'),
self.check('resourceGroup', '{rg}'),
]).get_output_in_json()['name']
self.cmd('group deployment show -g {rg} -n {dn}',
checks=self.check('name', '{dn}'))
self.cmd('group deployment delete -g {rg} -n {dn}')
self.cmd('group deployment list -g {rg}',
checks=self.is_empty())
class ResourceMoveScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_resource_move_dest', parameter_name='resource_group_dest', key='rg2')
@ResourceGroupPreparer(name_prefix='cli_test_resource_move_source', key='rg1')
def test_resource_move(self, resource_group, resource_group_dest):
self.kwargs.update({
'nsg1': self.create_random_name('nsg-move', 20),
'nsg2': self.create_random_name('nsg-move', 20)
})
self.kwargs['nsg1_id'] = self.cmd('network nsg create -n {nsg1} -g {rg1}').get_output_in_json()['NewNSG']['id']
self.kwargs['nsg2_id'] = self.cmd('network nsg create -n {nsg2} -g {rg1}').get_output_in_json()['NewNSG']['id']
self.cmd('resource move --ids {nsg1_id} {nsg2_id} --destination-group {rg2}')
self.cmd('network nsg show -g {rg2} -n {nsg1}', checks=[
self.check('name', '{nsg1}')])
self.cmd('network nsg show -g {rg2} -n {nsg2}', checks=[
self.check('name', '{nsg2}')])
class PolicyScenarioTest(ScenarioTest):
def cmdstring(self, basic, management_group=None, subscription=None):
cmd = basic
if management_group:
cmd = cmd + ' --management-group {mg}'
if subscription:
cmd = cmd + ' --subscription {sub}'
return cmd
def applyPolicy(self):
# create a policy assignment on a resource group
self.kwargs.update({
'pan': self.create_random_name('azurecli-test-policy-assignment', 40),
'padn': self.create_random_name('test_assignment', 20)
})
self.cmd('policy assignment create --policy {pn} -n {pan} --display-name {padn} -g {rg} --params {params}', checks=[
self.check('name', '{pan}'),
self.check('displayName', '{padn}')
])
# create a policy assignment using a built in policy definition name
self.kwargs['pan2'] = self.create_random_name('azurecli-test-policy-assignment2', 40)
self.kwargs['bip'] = '06a78e20-9358-41c9-923c-fb736d382a4d'
self.cmd('policy assignment create --policy {bip} -n {pan2} --display-name {padn} -g {rg}', checks=[
self.check('name', '{pan2}'),
self.check('displayName', '{padn}')
])
self.cmd('policy assignment delete -n {pan2} -g {rg}')
# listing at subscription level won't find the assignment made at a resource group
import jmespath
try:
self.cmd('policy assignment list', checks=self.check("length([?name=='{pan}'])", 0))
except jmespath.exceptions.JMESPathTypeError: # ok if query fails on None result
pass
# but --disable-scope-strict-match will find it
self.cmd('policy assignment list --disable-scope-strict-match', checks=self.check("length([?name=='{pan}'])", 1))
# delete the assignment and validate it's gone
self.cmd('policy assignment delete -n {pan} -g {rg}')
self.cmd('policy assignment list --disable-scope-strict-match', checks=self.check("length([?name=='{pan}'])", 0))
def applyPolicyAtScope(self, scope, policyId):
# create a policy assignment at the given scope
self.kwargs.update({
'pol': policyId,
'pan': self.create_random_name('cli-test-polassg', 24), # limit is 24 characters at MG scope
'padn': self.create_random_name('test_assignment', 20),
'scope': scope
})
self.cmd('policy assignment create --policy {pol} -n {pan} --display-name {padn} --params {params} --scope {scope}', checks=[
self.check('name', '{pan}'),
self.check('displayName', '{padn}'),
self.check('sku.name', 'A0'),
self.check('sku.tier', 'Free')
])
# delete the assignment and validate it's gone
self.cmd('policy assignment delete -n {pan} --scope {scope}')
self.cmd('policy assignment list --disable-scope-strict-match', checks=self.check("length([?name=='{pan}'])", 0))
def resource_policy_operations(self, resource_group, management_group=None, subscription=None):
curr_dir = os.path.dirname(os.path.realpath(__file__))
self.kwargs.update({
'pn': self.create_random_name('azure-cli-test-policy', 30),
'pdn': self.create_random_name('test_policy', 20),
'desc': 'desc_for_test_policy_123',
'rf': os.path.join(curr_dir, 'sample_policy_rule.json').replace('\\', '\\\\'),
'pdf': os.path.join(curr_dir, 'sample_policy_param_def.json').replace('\\', '\\\\'),
'params': os.path.join(curr_dir, 'sample_policy_param.json').replace('\\', '\\\\'),
'mode': 'Indexed'
})
if management_group:
self.kwargs.update({'mg': management_group})
if subscription:
self.kwargs.update({'sub': subscription})
# create a policy
cmd = self.cmdstring('policy definition create -n {pn} --rules {rf} --params {pdf} --display-name {pdn} --description {desc} --mode {mode}', management_group, subscription)
self.cmd(cmd, checks=[
self.check('name', '{pn}'),
self.check('displayName', '{pdn}'),
self.check('description', '{desc}'),
self.check('mode', '{mode}')
])
# update it
self.kwargs['desc'] = self.kwargs['desc'] + '_new'
self.kwargs['pdn'] = self.kwargs['pdn'] + '_new'
cmd = self.cmdstring('policy definition update -n {pn} --description {desc} --display-name {pdn}', management_group, subscription)
self.cmd(cmd, checks=[
self.check('description', '{desc}'),
self.check('displayName', '{pdn}')
])
# list and show it
cmd = self.cmdstring('policy definition list', management_group, subscription)
self.cmd(cmd, checks=self.check("length([?name=='{pn}'])", 1))
cmd = self.cmdstring('policy definition show -n {pn}', management_group, subscription)
self.cmd(cmd, checks=[
self.check('name', '{pn}'),
self.check('displayName', '{pdn}')
])
# apply assignments
if management_group:
scope = '/providers/Microsoft.Management/managementGroups/{mg}'.format(mg=management_group)
policy = '{scope}/providers/Microsoft.Authorization/policyDefinitions/{pn}'.format(pn=self.kwargs['pn'], scope=scope)
self.applyPolicyAtScope(scope, policy)
elif subscription:
policy = '/subscriptions/{sub}/providers/Microsoft.Authorization/policyDefinitions/{pn}'.format(sub=subscription, pn=self.kwargs['pn'])
self.applyPolicyAtScope('/subscriptions/{sub}'.format(sub=subscription), policy)
else:
self.applyPolicy()
# delete the policy
cmd = self.cmdstring('policy definition delete -n {pn}', management_group, subscription)
self.cmd(cmd)
time.sleep(10) # ensure the policy is gone when run live.
cmd = self.cmdstring('policy definition list', management_group, subscription)
self.cmd(cmd, checks=self.check("length([?name=='{pn}'])", 0))
def resource_policyset_operations(self, resource_group, management_group=None, subscription=None):
curr_dir = os.path.dirname(os.path.realpath(__file__))
self.kwargs.update({
'pn': self.create_random_name('azure-cli-test-policy', 30),
'pdn': self.create_random_name('test_policy', 20),
'desc': 'desc_for_test_policy_123',
'psn': self.create_random_name('azure-cli-test-policyset', 30),
'psdn': self.create_random_name('test_policyset', 20),
'ps_desc': 'desc_for_test_policyset_123',
'rf': os.path.join(curr_dir, 'sample_policy_rule.json').replace('\\', '\\\\'),
'psf': os.path.join(curr_dir, 'sample_policy_set.json').replace('\\', '\\\\'),
'pdf': os.path.join(curr_dir, 'sample_policy_param_def.json').replace('\\', '\\\\')
})
if management_group:
self.kwargs.update({'mg': management_group})
if subscription:
self.kwargs.update({'sub': subscription})
# create a policy
cmd = self.cmdstring('policy definition create -n {pn} --rules {rf} --params {pdf} --display-name {pdn} --description {desc}', management_group, subscription)
policy = self.cmd(cmd).get_output_in_json()
# create a policy set
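# point the sample set definition at the policy created above; note this
# rewrites sample_policy_set.json on disk before passing it to the CLI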
policyset = get_file_json(self.kwargs['psf'])
policyset[0]['policyDefinitionId'] = policy['id']
with open(os.path.join(curr_dir, 'sample_policy_set.json'), 'w') as outfile:
json.dump(policyset, outfile)
cmd = self.cmdstring('policy set-definition create -n {psn} --definitions @"{psf}" --display-name {psdn} --description {ps_desc}', management_group, subscription)
self.cmd(cmd, checks=[
self.check('name', '{psn}'),
self.check('displayName', '{psdn}'),
self.check('description', '{ps_desc}')
])
# update it
self.kwargs['ps_desc'] = self.kwargs['ps_desc'] + '_new'
self.kwargs['psdn'] = self.kwargs['psdn'] + '_new'
cmd = self.cmdstring('policy set-definition update -n {psn} --display-name {psdn} --description {ps_desc}', management_group, subscription)
self.cmd(cmd, checks=[
self.check('description', '{ps_desc}'),
self.check('displayName', '{psdn}')
])
# list and show it
cmd = self.cmdstring('policy set-definition list', management_group, subscription)
self.cmd(cmd, checks=self.check("length([?name=='{psn}'])", 1))
cmd = self.cmdstring('policy set-definition show -n {psn}', management_group, subscription)
self.cmd(cmd, checks=[
self.check('name', '{psn}'),
self.check('displayName', '{psdn}')
])
# create a policy assignment on a resource group
if not management_group and not subscription:
self.kwargs.update({
'pan': self.create_random_name('azurecli-test-policy-assignment', 40),
'padn': self.create_random_name('test_assignment', 20)
})
self.cmd('policy assignment create -d {psn} -n {pan} --display-name {padn} -g {rg}', checks=[
self.check('name', '{pan}'),
self.check('displayName', '{padn}'),
self.check('sku.name', 'A0'),
self.check('sku.tier', 'Free'),
])
# delete the assignment and validate it's gone
self.cmd('policy assignment delete -n {pan} -g {rg}')
self.cmd('policy assignment list --disable-scope-strict-match', checks=self.check("length([?name=='{pan}'])", 0))
# delete the policy set
cmd = self.cmdstring('policy set-definition delete -n {psn}', management_group, subscription)
self.cmd(cmd)
time.sleep(10) # ensure the policy is gone when run live.
cmd = self.cmdstring('policy set-definition list', management_group, subscription)
self.cmd(cmd, checks=self.check("length([?name=='{psn}'])", 0))
# delete the policy
cmd = self.cmdstring('policy definition delete -n {pn}', management_group, subscription)
self.cmd(cmd)
time.sleep(10) # ensure the policy is gone when run live.
cmd = self.cmdstring('policy definition list', management_group, subscription)
self.cmd(cmd, checks=self.check("length([?name=='{pn}'])", 0))
@ResourceGroupPreparer(name_prefix='cli_test_policy')
@AllowLargeResponse(8192)
def test_resource_policy_default(self, resource_group):
self.resource_policy_operations(resource_group)
@record_only()
@unittest.skip("mock doesn't work when the subscription comes from --scope")
@ResourceGroupPreparer(name_prefix='cli_test_policy_subscription_id')
@AllowLargeResponse()
def test_resource_policy_subscription_id(self, resource_group):
# under playback, we mock it so the subscription id will be '00000000...' and it will match
# the same sanitized value in the recording
if not self.in_recording:
with mock.patch('azure.cli.command_modules.resource.custom._get_subscription_id_from_subscription',
return_value=MOCKED_SUBSCRIPTION_ID):
self.resource_policy_operations(resource_group, None, 'e8a0d3c2-c26a-4363-ba6b-f56ac74c5ae0')
else:
self.resource_policy_operations(resource_group, None, 'e8a0d3c2-c26a-4363-ba6b-f56ac74c5ae0')
@AllowLargeResponse(8192)
def test_show_built_in_policy(self):
# get the list of builtins, then retrieve each via show and validate the results match
results = self.cmd('policy definition list --query "[?policyType==\'BuiltIn\']"').get_output_in_json()
for result in results:
self.kwargs['pn'] = result['name']
self.kwargs['dn'] = result['displayName']
self.kwargs['desc'] = result['description']
self.kwargs['id'] = result['id']
self.cmd('policy definition show -n {pn}', checks=[
self.check('name', '{pn}'),
self.check('description', '{desc}'),
self.check('displayName', '{dn}'),
self.check('id', '{id}')
])
class ManagedAppDefinitionScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
def test_managedappdef_inline(self, resource_group):
curr_dir = os.path.dirname(os.path.realpath(__file__))
self.kwargs.update({
'loc': 'eastus',
'adn': self.create_random_name('testappdefname', 20),
'addn': self.create_random_name('test_appdef', 20),
'ad_desc': 'test_appdef_123',
'auth': '5e91139a-c94b-462e-a6ff-1ee95e8aac07:8e3af657-a8ff-443c-a75c-2fe8c4bcb635',
'lock': 'None',
'ui_file': os.path.join(curr_dir, 'sample_create_ui_definition.json').replace('\\', '\\\\'),
'main_file': os.path.join(curr_dir, 'sample_main_template.json').replace('\\', '\\\\')
})
# create a managedapp definition with inline params for create-ui-definition and main-template
self.kwargs['ad_id'] = self.cmd('managedapp definition create -n {adn} --create-ui-definition @"{ui_file}" --main-template @"{main_file}" --display-name {addn} --description {ad_desc} -l {loc} -a {auth} --lock-level {lock} -g {rg}', checks=[
self.check('name', '{adn}'),
self.check('displayName', '{addn}'),
self.check('description', '{ad_desc}'),
self.check('authorizations[0].principalId', '5e91139a-c94b-462e-a6ff-1ee95e8aac07'),
self.check('authorizations[0].roleDefinitionId', '8e3af657-a8ff-443c-a75c-2fe8c4bcb635'),
self.check('artifacts[0].name', 'ApplicationResourceTemplate'),
self.check('artifacts[0].type', 'Template'),
self.check('artifacts[1].name', 'CreateUiDefinition'),
self.check('artifacts[1].type', 'Custom')
]).get_output_in_json()['id']
self.cmd('managedapp definition list -g {rg}',
checks=self.check('[0].name', '{adn}'))
self.cmd('managedapp definition show --ids {ad_id}', checks=[
self.check('name', '{adn}'),
self.check('displayName', '{addn}'),
self.check('description', '{ad_desc}'),
self.check('authorizations[0].principalId', '5e91139a-c94b-462e-a6ff-1ee95e8aac07'),
self.check('authorizations[0].roleDefinitionId', '8e3af657-a8ff-443c-a75c-2fe8c4bcb635'),
self.check('artifacts[0].name', 'ApplicationResourceTemplate'),
self.check('artifacts[0].type', 'Template'),
self.check('artifacts[1].name', 'CreateUiDefinition'),
self.check('artifacts[1].type', 'Custom')
])
self.cmd('managedapp definition delete -g {rg} -n {adn}')
self.cmd('managedapp definition list -g {rg}', checks=self.is_empty())
# TODO: Change back to ScenarioTest and re-record when issue #5110 is fixed.
class ManagedAppScenarioTest(LiveScenarioTest):
@ResourceGroupPreparer()
def test_managedapp(self, resource_group):
self.kwargs.update({
'loc': 'westcentralus',
'adn': 'testappdefname',
'addn': 'test_appdef_123',
'ad_desc': 'test_appdef_123',
'uri': 'https://wud.blob.core.windows.net/appliance/SingleStorageAccount.zip',
'auth': '5e91139a-c94b-462e-a6ff-1ee95e8aac07:8e3af657-a8ff-443c-a75c-2fe8c4bcb635',
'lock': 'None',
'sub': self.get_subscription_id()
})
self.kwargs['ad_id'] = self.cmd('managedapp definition create -n {adn} --package-file-uri {uri} --display-name {addn} --description {ad_desc} -l {loc} -a {auth} --lock-level {lock} -g {rg}').get_output_in_json()['id']
# create a managedapp
self.kwargs.update({
'man': 'mymanagedapp',
'ma_loc': 'westcentralus',
'ma_kind': 'servicecatalog',
'ma_rg': self.create_random_name('climanagedapp', 25)
})
self.kwargs['ma_rg_id'] = '/subscriptions/{sub}/resourceGroups/{ma_rg}'.format(**self.kwargs)
self.kwargs['ma_id'] = self.cmd('managedapp create -n {man} -g {rg} -l {ma_loc} --kind {ma_kind} -m {ma_rg_id} -d {ad_id}', checks=[
self.check('name', '{man}'),
self.check('type', 'Microsoft.Solutions/applications'),
self.check('kind', 'servicecatalog'),
self.check('managedResourceGroupId', '{ma_rg_id}')
]).get_output_in_json()['id']
self.cmd('managedapp list -g {rg}', checks=self.check('[0].name', '{man}'))
self.cmd('managedapp show --ids {ma_id}', checks=[
self.check('name', '{man}'),
self.check('type', 'Microsoft.Solutions/applications'),
self.check('kind', 'servicecatalog'),
self.check('managedResourceGroupId', '{ma_rg_id}')
])
self.cmd('managedapp delete -g {rg} -n {man}')
self.cmd('managedapp list -g {rg}', checks=self.is_empty())
class CrossRGDeploymentScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_cross_rg_alt', parameter_name='resource_group_cross')
@ResourceGroupPreparer(name_prefix='cli_test_cross_rg_deploy')
def test_group_deployment_crossrg(self, resource_group, resource_group_cross):
curr_dir = os.path.dirname(os.path.realpath(__file__))
self.kwargs.update({
'rg1': resource_group,
'rg2': resource_group_cross,
'tf': os.path.join(curr_dir, 'crossrg_deploy.json').replace('\\', '\\\\'),
'dn': self.create_random_name('azure-cli-crossrgdeployment', 40),
'sa1': create_random_name(prefix='crossrg'),
'sa2': create_random_name(prefix='crossrg')
})
self.cmd('group deployment validate -g {rg1} --template-file "{tf}" --parameters CrossRg={rg2} StorageAccountName1={sa1} StorageAccountName2={sa2}', checks=[
self.check('properties.provisioningState', 'Succeeded')
])
self.cmd('group deployment create -g {rg1} -n {dn} --template-file "{tf}" --parameters CrossRg={rg2}', checks=[
self.check('properties.provisioningState', 'Succeeded'),
self.check('resourceGroup', '{rg1}'),
])
self.cmd('group deployment list -g {rg1}', checks=[
self.check('[0].name', '{dn}'),
self.check('[0].resourceGroup', '{rg1}')
])
self.cmd('group deployment show -g {rg1} -n {dn}', checks=[
self.check('name', '{dn}'),
self.check('resourceGroup', '{rg1}')
])
self.cmd('group deployment operation list -g {rg1} -n {dn}', checks=[
self.check('length([])', 3),
self.check('[0].resourceGroup', '{rg1}')
])
class InvokeActionTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_invoke_action')
def test_invoke_action(self, resource_group):
self.kwargs.update({
'vm': self.create_random_name('cli-test-vm', 30),
'user': 'ubuntu',
'pass': self.create_random_name('Longpassword#1', 30)
})
self.kwargs['vm_id'] = self.cmd('vm create -g {rg} -n {vm} --use-unmanaged-disk --image UbuntuLTS --admin-username {user} --admin-password {pass} --authentication-type password --nsg-rule None').get_output_in_json()['id']
self.cmd('resource invoke-action --action powerOff --ids {vm_id}')
self.cmd('resource invoke-action --action generalize --ids {vm_id}')
self.cmd('resource invoke-action --action deallocate --ids {vm_id}')
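# the capture action takes a JSON request body; quotes are backslash-escaped
# so they survive shell parsing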
self.kwargs['request_body'] = '{\\"vhdPrefix\\":\\"myPrefix\\",\\"destinationContainerName\\":\\"container\\",\\"overwriteVhds\\":\\"true\\"}'
self.cmd('resource invoke-action --action capture --ids {vm_id} --request-body {request_body}')
class GlobalIdsScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_global_ids')
def test_global_ids(self, resource_group):
self.kwargs.update({
'vnet': 'vnet1'
})
self.kwargs['vnet_id'] = self.cmd('network vnet create -g {rg} -n {vnet}').get_output_in_json()['newVNet']['id']
# command will fail if the other parameters were actually used
self.cmd('network vnet show --subscription fakesub --resource-group fakerg -n fakevnet --ids {vnet_id}')
if __name__ == '__main__':
unittest.main()
progressbar-rounded.scenario.js

/*
Copyright (c) 2018-2020 Uber Technologies, Inc.

This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
*/
// @flow
import * as React from 'react';
import {ProgressBarRounded, SIZE} from '../index.js';
export default function Scenario() {
return (
<React.Fragment>
<ProgressBarRounded
progress={0.5}
size={SIZE.small}
animate={false}
inline
/>
<ProgressBarRounded
progress={0.5}
size={SIZE.medium}
animate={false}
inline
/>
<ProgressBarRounded
progress={0.5}
size={SIZE.large}
animate={false}
inline
/>
</React.Fragment>
);
}
ci.go

package yaml
import (
"bytes"
"fmt"
"github.com/alydnh/go-micro-ci-common/utils"
"gopkg.in/yaml.v3"
"io/ioutil"
"os"
"strings"
"text/template"
)
type CI struct {
Variables map[string]string `yaml:"variables"`
CommonEnvs map[string]string `yaml:"commonEnvs"`
ThirdServices map[string]*Service `yaml:"thirdServices"`
Services map[string]*Service `yaml:"services"`
Registry *Registry `yaml:"registry"`
Credentials []*Credential `yaml:"credentials"`
Metadata map[string]string `yaml:"metadata"`
CIName string `yaml:"name"`
Namespace string `yaml:"namespace"`
sequencedServiceNames []string
}
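// OpenCI loads a CI definition from a YAML file at path and optionally
// validates it. A minimal call sketch (file name assumed for illustration):
//
//	ci, err := OpenCI("ci.yaml", true)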
func OpenCI(path string, doValidate bool) (ci *CI, err error) {
if _, err := os.Stat(path); nil != err {
return nil, err
}
ci = &CI{}
templateBuffer := &bytes.Buffer{}
var data []byte
data, err = ioutil.ReadFile(path)
if nil != err {
return
}
if err = yaml.Unmarshal(data, ci); nil != err {
return
}
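// Second pass: the Unmarshal above only needs to populate ci.Variables.
// When variables are present, the raw file is rendered as a Go
// text/template with them and the rendered output is parsed again.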
if nil != ci.Variables && len(ci.Variables) > 0 {
var tpl *template.Template
tpl, err = template.New("t").Parse(string(data))
if nil != err {
return
}
if err = tpl.Execute(templateBuffer, ci.Variables); nil != err {
return
}
if err = yaml.Unmarshal(templateBuffer.Bytes(), &ci); nil != err {
return
}
}
// Initialize must run even when no template variables are present.
if err := ci.Initialize(doValidate); nil != err {
return nil, err
}
return
}
func NewCI(name string) *CI {
return &CI{
Variables: make(map[string]string),
CommonEnvs: make(map[string]string),
ThirdServices: make(map[string]*Service),
Services: make(map[string]*Service),
Credentials: make([]*Credential, 0),
Metadata: make(map[string]string),
CIName: name,
sequencedServiceNames: make([]string, 0),
}
}
func (y CI) GetSequencedContainerNames() []string {
return utils.Select(y.sequencedServiceNames, y.GetContainerName).([]string)
}
func (y CI) GetContainerName(serviceName string) string {
return fmt.Sprintf("%s-%s", y.Name(), serviceName)
}
func (y CI) GetService(name string) *Service {
if service, ok := y.ThirdServices[name]; ok {
return service
} else if service, ok = y.Services[name]; ok {
return service
}
serviceName := strings.TrimPrefix(name, fmt.Sprintf("%s-", y.Name()))
if service, ok := y.ThirdServices[serviceName]; ok {
return service
}
return y.Services[serviceName]
}
func (y *CI) SetMetadata(key, value string) {
if nil == y.Metadata {
y.Metadata = make(map[string]string)
}
if utils.EmptyOrWhiteSpace(value) {
delete(y.Metadata, key)
} else {
y.Metadata[key] = value
}
}
func (y CI) GetMetadata(key string) string {
if nil == y.Metadata {
return utils.EmptyString
}
return y.Metadata[key]
}
func (y CI) Name() string {
return y.CIName
}
func (y *CI) AddOrUpdateService(service *Service) {
if service.IsThird() {
y.ThirdServices[service.Name()] = service
} else {
y.Services[service.Name()] = service
}
}
func (y *CI) RemoveService(service *Service) {
if service.IsThird() {
delete(y.ThirdServices, service.Name())
} else {
delete(y.Services, service.Name())
}
}
func (y *CI) Initialize(doValidate bool) error {
if nil == y.Metadata {
y.Metadata = make(map[string]string)
}
if doValidate {
if len(y.Metadata) > 0 {
return fmt.Errorf("metadata只读")
}
}
if nil == y.CommonEnvs {
y.CommonEnvs = make(map[string]string)
}
if nil != y.Credentials {
for index, credential := range y.Credentials {
if err := credential.Validate(); nil != err {
return fmt.Errorf("credentials[%d].%s", index, err.Error())
}
}
} else {
y.Credentials = make([]*Credential, 0)
}
serviceNames := make(map[string]bool)
if nil == y.ThirdServices {
y.ThirdServices = make(map[string]*Service)
}
context := &dependsOnContext{
processed: make([]*Service, 0, 10),
unprocessed: make([]*Service, 0, 10),
chain: make([]*Service, 0),
}
if len(y.ThirdServices) > 0 {
for name, service := range y.ThirdServices {
if err := service.Initialize(name, true, doValidate); nil != err {
return fmt.Errorf("thirdServices.%s", err.Error())
}
if _, ok := serviceNames[name]; ok {
return fmt.Errorf("thirdService.%s 名字重复", name)
}
serviceNames[name] = true
context.add(service)
}
for _, service := range y.ThirdServices {
if service.HasBaseService() {
targetService, ok := y.ThirdServices[service.BaseService]
if !ok {
return fmt.Errorf("thirdService.%s.baseService:%s 未找到", service.Name(), service.BaseService)
}
if strings.Compare(targetService.Name(), service.Name()) == 0 {
return fmt.Errorf("thirdService.%s.baseService:%s 不能是服务自身", service.Name(), service.BaseService)
}
if targetService.HasBaseService() {
return fmt.Errorf("thirdService.%s.baseService:%s 不能也是引用服务", service.Name(), service.BaseService)
}
service.ApplyBaseService(targetService)
}
}
}
if doValidate && (nil == y.Services || len(y.Services) == 0) {
return fmt.Errorf("未找到services定义")
}
for name, service := range y.Services {
if err := service.Initialize(name, false, doValidate); nil != err {
return fmt.Errorf("services.%s", err.Error())
}
if _, ok := serviceNames[name]; ok {
return fmt.Errorf("services.%s 名字重复", name)
}
serviceNames[name] = true
context.add(service)
}
for _, service := range y.Services {
if service.HasBaseService() {
targetService, ok := y.Services[service.BaseService]
if !ok {
return fmt.Errorf("service.%s.baseService:%s 未找到", service.Name(), service.BaseService)
}
if strings.Compare(targetService.Name(), service.Name()) == 0 {
return fmt.Errorf("service.%s.baseService:%s 不能是服务自身", service.Name(), service.BaseService)
}
if targetService.HasBaseService() {
return fmt.Errorf("service.%s.baseService:%s 不能也是引用服务", service.Name(), service.BaseService)
}
service.ApplyBaseService(targetService)
}
}
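// Resolve dependsOn ordering: each outer pass hands the first unprocessed
// service to processDependsOn (which is expected to move it and its
// dependencies into context.processed) and repeats until none remain.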
for !context.finished() {
for _, service := range context.unprocessed {
if err := service.processDependsOn(context); nil != err {
return err
}
break
}
}
y.sequencedServiceNames = utils.Select(context.processed, func(s *Service) string { return s.name }).([]string)
return nil
}
func (y CI) GetCredential(name string, ref *string) *Credential {
if v, ok := utils.FirstOrDefault(y.Credentials, func(c *Credential) bool {
return c.Match(name, ref)
}); ok {
return v.(*Credential)
}
return nil
}
func (y CI) Clone() *CI {
cloned := &CI{
Variables: utils.CopyMap(y.Variables).(map[string]string),
CommonEnvs: utils.CopyMap(y.CommonEnvs).(map[string]string),
ThirdServices: make(map[string]*Service),
Services: make(map[string]*Service),
Credentials: make([]*Credential, 0),
sequencedServiceNames: append([]string{}, y.sequencedServiceNames...),
}
if nil != y.ThirdServices {
for name, service := range y.ThirdServices {
cloned.ThirdServices[name] = service.Clone()
}
}
if nil != y.Services {
for name, service := range y.Services {
cloned.Services[name] = service.Clone()
}
}
if nil != y.Credentials {
cloned.Credentials = make([]*Credential, 0, len(y.Credentials))
for _, credential := range y.Credentials {
cloned.Credentials = append(cloned.Credentials, credential.Clone())
}
}
return cloned
}
config.rs

// This file is generated by rust-protobuf 2.22.1. Do not edit
// @generated
// https://github.com/rust-lang/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![allow(unused_attributes)]
#![cfg_attr(rustfmt, rustfmt::skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unused_imports)]
#![allow(unused_results)]
//! Generated file from `src/config/internal/config.proto`
/// Generated files are compatible only with the same version
/// of protobuf runtime.
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_22_1;
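// A sketch of the schema implied by the generated messages below,
// reconstructed from the field comments (not the authoritative .proto):
//
//     message Api { string address = 1; uint32 port = 2; }
//     message Dns {
//         message Ips { repeated string values = 1; }
//         repeated string servers = 1;
//         string bind = 2;
//         map<string, Ips> hosts = 3;
//     }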
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Api {
// message fields
pub address: ::std::string::String,
pub port: u32,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Api {
fn default() -> &'a Api {
<Api as ::protobuf::Message>::default_instance()
}
}
impl Api {
pub fn new() -> Api {
::std::default::Default::default()
}
// string address = 1;
pub fn get_address(&self) -> &str {
&self.address
}
// uint32 port = 2;
pub fn get_port(&self) -> u32 {
self.port
}
}
impl ::protobuf::Message for Api {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.address)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.port = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.address.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.address);
}
if self.port != 0 {
my_size += ::protobuf::rt::value_size(2, self.port, ::protobuf::wire_format::WireTypeVarint);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.address.is_empty() {
os.write_string(1, &self.address)?;
}
if self.port != 0 {
os.write_uint32(2, self.port)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Api {
Api::new()
}
fn default_instance() -> &'static Api {
static instance: ::protobuf::rt::LazyV2<Api> = ::protobuf::rt::LazyV2::INIT;
instance.get(Api::new)
}
}
impl ::protobuf::Clear for Api {
fn clear(&mut self) {
self.address.clear();
self.port = 0;
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for Api {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
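// A minimal round-trip sketch for Api (assumes the rust-protobuf 2.22.x
// runtime; illustrative, not part of the generated output):
//
//     use protobuf::Message;
//
//     let mut api = Api::new();
//     api.address = "127.0.0.1".to_string();
//     api.port = 9090;
//     let bytes = api.write_to_bytes().expect("serialize");
//     let decoded = Api::parse_from_bytes(&bytes).expect("parse");
//     assert_eq!(decoded.get_port(), 9090);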
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Dns {
// message fields
pub servers: ::protobuf::RepeatedField<::std::string::String>,
pub bind: ::std::string::String,
pub hosts: ::std::collections::HashMap<::std::string::String, Dns_Ips>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Dns {
fn default() -> &'a Dns {
<Dns as ::protobuf::Message>::default_instance()
}
}
impl Dns {
pub fn new() -> Dns {
::std::default::Default::default()
}
// repeated string servers = 1;
pub fn get_servers(&self) -> &[::std::string::String] {
&self.servers
}
// string bind = 2;
pub fn get_bind(&self) -> &str {
&self.bind
}
// repeated .Dns.HostsEntry hosts = 3;
pub fn get_hosts(&self) -> &::std::collections::HashMap<::std::string::String, Dns_Ips> {
&self.hosts
}
}
impl ::protobuf::Message for Dns {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.servers)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.bind)?;
},
3 => {
::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<Dns_Ips>>(wire_type, is, &mut self.hosts)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.servers {
my_size += ::protobuf::rt::string_size(1, &value);
};
if !self.bind.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.bind);
}
my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<Dns_Ips>>(3, &self.hosts);
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.servers {
os.write_string(1, &v)?;
};
if !self.bind.is_empty() {
os.write_string(2, &self.bind)?;
}
::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<Dns_Ips>>(3, &self.hosts, os)?;
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Dns {
Dns::new()
}
fn default_instance() -> &'static Dns {
static instance: ::protobuf::rt::LazyV2<Dns> = ::protobuf::rt::LazyV2::INIT;
instance.get(Dns::new)
}
}
impl ::protobuf::Clear for Dns {
fn clear(&mut self) {
self.servers.clear();
self.bind.clear();
self.hosts.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for Dns {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Dns_Ips {
// message fields
pub values: ::protobuf::RepeatedField<::std::string::String>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Dns_Ips {
fn default() -> &'a Dns_Ips {
<Dns_Ips as ::protobuf::Message>::default_instance()
}
}
impl Dns_Ips {
pub fn new() -> Dns_Ips {
::std::default::Default::default()
}
// repeated string values = 1;
pub fn get_values(&self) -> &[::std::string::String] {
&self.values
}
}
impl ::protobuf::Message for Dns_Ips {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.values)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.values {
my_size += ::protobuf::rt::string_size(1, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.values {
os.write_string(1, &v)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Dns_Ips {
Dns_Ips::new()
}
fn default_instance() -> &'static Dns_Ips {
static instance: ::protobuf::rt::LazyV2<Dns_Ips> = ::protobuf::rt::LazyV2::INIT;
instance.get(Dns_Ips::new)
}
}
impl ::protobuf::Clear for Dns_Ips {
fn clear(&mut self) {
self.values.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for Dns_Ips {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
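// --- Illustrative example (not generator output): a minimal round-trip
// sketch for `Dns` / `Dns_Ips`. It assumes `Dns.servers` is a repeated
// string field and `Dns.hosts` uses the usual rust-protobuf map codegen
// (`std::collections::HashMap<String, Dns_Ips>`); the sample addresses
// are placeholders, not values from any real configuration.
#[cfg(test)]
mod dns_roundtrip_example {
    use super::*;
    use ::protobuf::Message;

    #[test]
    fn dns_roundtrip() {
        let mut ips = Dns_Ips::new();
        ips.values.push("93.184.216.34".to_string());

        let mut dns = Dns::new();
        // `servers` assumed to be RepeatedField<String>, `hosts` a HashMap.
        dns.servers.push("1.1.1.1".to_string());
        dns.bind = "0.0.0.0".to_string();
        dns.hosts.insert("example.com".to_string(), ips);

        // Serialize with the `Message` trait, then parse the bytes back.
        let bytes = dns.write_to_bytes().expect("serialize");
        let parsed = Dns::parse_from_bytes(&bytes).expect("parse");
        assert_eq!(dns, parsed);
    }
}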
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Log {
// message fields
pub level: Log_Level,
pub output: Log_Output,
pub output_file: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Log {
fn default() -> &'a Log {
<Log as ::protobuf::Message>::default_instance()
}
}
impl Log {
pub fn new() -> Log {
::std::default::Default::default()
}
// .Log.Level level = 1;
pub fn get_level(&self) -> Log_Level {
self.level
}
// .Log.Output output = 2;
pub fn get_output(&self) -> Log_Output {
self.output
}
// string output_file = 3;
pub fn get_output_file(&self) -> &str {
&self.output_file
}
}
impl ::protobuf::Message for Log {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.level, 1, &mut self.unknown_fields)?
},
2 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.output, 2, &mut self.unknown_fields)?
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.output_file)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.level != Log_Level::TRACE {
my_size += ::protobuf::rt::enum_size(1, self.level);
}
if self.output != Log_Output::CONSOLE {
my_size += ::protobuf::rt::enum_size(2, self.output);
}
if !self.output_file.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.output_file);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.level != Log_Level::TRACE {
os.write_enum(1, ::protobuf::ProtobufEnum::value(&self.level))?;
}
if self.output != Log_Output::CONSOLE {
os.write_enum(2, ::protobuf::ProtobufEnum::value(&self.output))?;
}
if !self.output_file.is_empty() {
os.write_string(3, &self.output_file)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Log {
Log::new()
}
fn default_instance() -> &'static Log {
static instance: ::protobuf::rt::LazyV2<Log> = ::protobuf::rt::LazyV2::INIT;
instance.get(Log::new)
}
}
impl ::protobuf::Clear for Log {
fn clear(&mut self) {
self.level = Log_Level::TRACE;
self.output = Log_Output::CONSOLE;
self.output_file.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for Log {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum Log_Level {
TRACE = 0,
DEBUG = 1,
INFO = 2,
WARN = 3,
ERROR = 4,
}
impl ::protobuf::ProtobufEnum for Log_Level {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<Log_Level> {
match value {
0 => ::std::option::Option::Some(Log_Level::TRACE),
1 => ::std::option::Option::Some(Log_Level::DEBUG),
2 => ::std::option::Option::Some(Log_Level::INFO),
3 => ::std::option::Option::Some(Log_Level::WARN),
4 => ::std::option::Option::Some(Log_Level::ERROR),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [Log_Level] = &[
Log_Level::TRACE,
Log_Level::DEBUG,
Log_Level::INFO,
Log_Level::WARN,
Log_Level::ERROR,
];
values
}
}
impl ::std::marker::Copy for Log_Level {
}
impl ::std::default::Default for Log_Level {
fn default() -> Self {
Log_Level::TRACE
}
}
impl ::protobuf::reflect::ProtobufValue for Log_Level {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self))
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum Log_Output {
CONSOLE = 0,
FILE = 1,
}
impl ::protobuf::ProtobufEnum for Log_Output {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<Log_Output> {
match value {
0 => ::std::option::Option::Some(Log_Output::CONSOLE),
1 => ::std::option::Option::Some(Log_Output::FILE),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [Log_Output] = &[
Log_Output::CONSOLE,
Log_Output::FILE,
];
values
}
}
impl ::std::marker::Copy for Log_Output {
}
impl ::std::default::Default for Log_Output {
fn default() -> Self {
Log_Output::CONSOLE
}
}
impl ::protobuf::reflect::ProtobufValue for Log_Output {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self))
}
}
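// --- Illustrative example (not generator output): how the proto3 enum
// helpers above behave. `from_i32` returns `None` for discriminants outside
// the declared range, and the `Default` impls mirror the proto3 zero values
// (`TRACE` / `CONSOLE`), which is also why `compute_size` and the writer
// skip those defaults on the wire.
#[cfg(test)]
mod log_enum_example {
    use super::*;
    use ::protobuf::ProtobufEnum;

    #[test]
    fn enum_defaults_and_range() {
        assert_eq!(Log_Level::default(), Log_Level::TRACE);
        assert_eq!(Log_Output::default(), Log_Output::CONSOLE);
        assert_eq!(Log_Level::from_i32(2), Some(Log_Level::INFO));
        // Unknown discriminants are rejected rather than mapped to a default.
        assert_eq!(Log_Level::from_i32(42), None);
        assert_eq!(Log_Level::ERROR.value(), 4);
    }
}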
#[derive(PartialEq,Clone,Default,Debug)]
pub struct TunInboundSettings {
// message fields
pub fd: i32,
pub auto: bool,
pub name: ::std::string::String,
pub address: ::std::string::String,
pub gateway: ::std::string::String,
pub netmask: ::std::string::String,
pub mtu: i32,
pub fake_dns_exclude: ::protobuf::RepeatedField<::std::string::String>,
pub fake_dns_include: ::protobuf::RepeatedField<::std::string::String>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TunInboundSettings {
fn default() -> &'a TunInboundSettings {
<TunInboundSettings as ::protobuf::Message>::default_instance()
}
}
impl TunInboundSettings {
pub fn new() -> TunInboundSettings {
::std::default::Default::default()
}
// int32 fd = 1;
pub fn get_fd(&self) -> i32 {
self.fd
}
// bool auto = 9;
pub fn get_auto(&self) -> bool {
self.auto
}
// string name = 2;
pub fn get_name(&self) -> &str {
&self.name
}
// string address = 3;
pub fn get_address(&self) -> &str {
&self.address
}
// string gateway = 4;
pub fn get_gateway(&self) -> &str {
&self.gateway
}
// string netmask = 5;
pub fn get_netmask(&self) -> &str {
&self.netmask
}
// int32 mtu = 6;
pub fn get_mtu(&self) -> i32 {
self.mtu
}
// repeated string fake_dns_exclude = 7;
pub fn get_fake_dns_exclude(&self) -> &[::std::string::String] {
&self.fake_dns_exclude
}
// repeated string fake_dns_include = 8;
pub fn get_fake_dns_include(&self) -> &[::std::string::String] {
&self.fake_dns_include
}
}
impl ::protobuf::Message for TunInboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.fd = tmp;
},
9 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.auto = tmp;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.name)?;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.address)?;
},
4 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.gateway)?;
},
5 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.netmask)?;
},
6 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int32()?;
self.mtu = tmp;
},
7 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.fake_dns_exclude)?;
},
8 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.fake_dns_include)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.fd != 0 {
my_size += ::protobuf::rt::value_size(1, self.fd, ::protobuf::wire_format::WireTypeVarint);
}
if self.auto {
my_size += 2;
}
if !self.name.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.name);
}
if !self.address.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.address);
}
if !self.gateway.is_empty() {
my_size += ::protobuf::rt::string_size(4, &self.gateway);
}
if !self.netmask.is_empty() {
my_size += ::protobuf::rt::string_size(5, &self.netmask);
}
if self.mtu != 0 {
my_size += ::protobuf::rt::value_size(6, self.mtu, ::protobuf::wire_format::WireTypeVarint);
}
for value in &self.fake_dns_exclude {
my_size += ::protobuf::rt::string_size(7, &value);
};
for value in &self.fake_dns_include {
my_size += ::protobuf::rt::string_size(8, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.fd != 0 {
os.write_int32(1, self.fd)?;
}
if self.auto {
os.write_bool(9, self.auto)?;
}
if !self.name.is_empty() {
os.write_string(2, &self.name)?;
}
if !self.address.is_empty() {
os.write_string(3, &self.address)?;
}
if !self.gateway.is_empty() {
os.write_string(4, &self.gateway)?;
}
if !self.netmask.is_empty() {
os.write_string(5, &self.netmask)?;
}
if self.mtu != 0 {
os.write_int32(6, self.mtu)?;
}
for v in &self.fake_dns_exclude {
os.write_string(7, &v)?;
};
for v in &self.fake_dns_include {
os.write_string(8, &v)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TunInboundSettings {
TunInboundSettings::new()
}
fn default_instance() -> &'static TunInboundSettings {
static instance: ::protobuf::rt::LazyV2<TunInboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(TunInboundSettings::new)
}
}
impl ::protobuf::Clear for TunInboundSettings {
fn clear(&mut self) {
self.fd = 0;
self.auto = false;
self.name.clear();
self.address.clear();
self.gateway.clear();
self.netmask.clear();
self.mtu = 0;
self.fake_dns_exclude.clear();
self.fake_dns_include.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for TunInboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
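// --- Illustrative example (not generator output): proto3 default-value
// elision for `TunInboundSettings`. A freshly constructed message carries
// only default values, so the writer emits nothing; once a field diverges
// from its default it shows up in the serialized bytes. The field values
// used here are placeholders.
#[cfg(test)]
mod tun_settings_example {
    use super::*;
    use ::protobuf::Message;

    #[test]
    fn defaults_are_elided() {
        let empty = TunInboundSettings::new();
        // fd == 0, auto == false, all strings empty: zero bytes on the wire.
        assert!(empty.write_to_bytes().expect("serialize").is_empty());

        let mut settings = TunInboundSettings::new();
        settings.name = "utun8".to_string();
        settings.mtu = 1500;
        let bytes = settings.write_to_bytes().expect("serialize");
        assert!(!bytes.is_empty());
        let parsed = TunInboundSettings::parse_from_bytes(&bytes).expect("parse");
        assert_eq!(settings, parsed);
    }
}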
#[derive(PartialEq,Clone,Default,Debug)]
pub struct ShadowsocksInboundSettings {
// message fields
pub method: ::std::string::String,
pub password: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a ShadowsocksInboundSettings {
fn default() -> &'a ShadowsocksInboundSettings {
<ShadowsocksInboundSettings as ::protobuf::Message>::default_instance()
}
}
impl ShadowsocksInboundSettings {
pub fn new() -> ShadowsocksInboundSettings {
::std::default::Default::default()
}
// string method = 1;
pub fn get_method(&self) -> &str {
&self.method
}
// string password = 2;
pub fn get_password(&self) -> &str {
&self.password
}
}
impl ::protobuf::Message for ShadowsocksInboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.method)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.password)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.method.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.method);
}
if !self.password.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.password);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.method.is_empty() {
os.write_string(1, &self.method)?;
}
if !self.password.is_empty() {
os.write_string(2, &self.password)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> ShadowsocksInboundSettings {
ShadowsocksInboundSettings::new()
}
fn default_instance() -> &'static ShadowsocksInboundSettings {
static instance: ::protobuf::rt::LazyV2<ShadowsocksInboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(ShadowsocksInboundSettings::new)
}
}
impl ::protobuf::Clear for ShadowsocksInboundSettings {
fn clear(&mut self) {
self.method.clear();
self.password.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for ShadowsocksInboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct TrojanInboundSettings {
// message fields
pub password: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TrojanInboundSettings {
fn default() -> &'a TrojanInboundSettings {
<TrojanInboundSettings as ::protobuf::Message>::default_instance()
}
}
impl TrojanInboundSettings {
pub fn new() -> TrojanInboundSettings {
::std::default::Default::default()
}
// string password = 3;
pub fn get_password(&self) -> &str {
&self.password
}
}
impl ::protobuf::Message for TrojanInboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.password)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.password.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.password);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.password.is_empty() {
os.write_string(3, &self.password)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TrojanInboundSettings {
TrojanInboundSettings::new()
}
fn default_instance() -> &'static TrojanInboundSettings {
static instance: ::protobuf::rt::LazyV2<TrojanInboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(TrojanInboundSettings::new)
}
}
impl ::protobuf::Clear for TrojanInboundSettings {
fn clear(&mut self) {
self.password.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for TrojanInboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct WebSocketInboundSettings {
// message fields
pub path: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a WebSocketInboundSettings {
fn default() -> &'a WebSocketInboundSettings {
<WebSocketInboundSettings as ::protobuf::Message>::default_instance()
}
}
impl WebSocketInboundSettings {
pub fn new() -> WebSocketInboundSettings {
::std::default::Default::default()
}
// string path = 1;
pub fn get_path(&self) -> &str {
&self.path
}
}
impl ::protobuf::Message for WebSocketInboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.path)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.path.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.path);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.path.is_empty() {
os.write_string(1, &self.path)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> WebSocketInboundSettings {
WebSocketInboundSettings::new()
}
fn default_instance() -> &'static WebSocketInboundSettings {
static instance: ::protobuf::rt::LazyV2<WebSocketInboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(WebSocketInboundSettings::new)
}
}
impl ::protobuf::Clear for WebSocketInboundSettings {
fn clear(&mut self) {
self.path.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for WebSocketInboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct AMuxInboundSettings {
// message fields
pub actors: ::protobuf::RepeatedField<::std::string::String>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a AMuxInboundSettings {
fn default() -> &'a AMuxInboundSettings {
<AMuxInboundSettings as ::protobuf::Message>::default_instance()
}
}
impl AMuxInboundSettings {
pub fn new() -> AMuxInboundSettings {
::std::default::Default::default()
}
// repeated string actors = 1;
pub fn get_actors(&self) -> &[::std::string::String] {
&self.actors
}
}
impl ::protobuf::Message for AMuxInboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.actors)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.actors {
my_size += ::protobuf::rt::string_size(1, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.actors {
os.write_string(1, &v)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> AMuxInboundSettings {
AMuxInboundSettings::new()
}
fn default_instance() -> &'static AMuxInboundSettings {
static instance: ::protobuf::rt::LazyV2<AMuxInboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(AMuxInboundSettings::new)
}
}
impl ::protobuf::Clear for AMuxInboundSettings {
fn clear(&mut self) {
self.actors.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for AMuxInboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct QuicInboundSettings {
// message fields
pub certificate: ::std::string::String,
pub certificate_key: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a QuicInboundSettings {
fn default() -> &'a QuicInboundSettings {
<QuicInboundSettings as ::protobuf::Message>::default_instance()
}
}
impl QuicInboundSettings {
pub fn new() -> QuicInboundSettings {
::std::default::Default::default()
}
// string certificate = 1;
pub fn get_certificate(&self) -> &str {
&self.certificate
}
// string certificate_key = 2;
pub fn get_certificate_key(&self) -> &str {
&self.certificate_key
}
}
impl ::protobuf::Message for QuicInboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.certificate)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.certificate_key)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.certificate.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.certificate);
}
if !self.certificate_key.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.certificate_key);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.certificate.is_empty() {
os.write_string(1, &self.certificate)?;
}
if !self.certificate_key.is_empty() {
os.write_string(2, &self.certificate_key)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> QuicInboundSettings {
QuicInboundSettings::new()
}
fn default_instance() -> &'static QuicInboundSettings {
static instance: ::protobuf::rt::LazyV2<QuicInboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(QuicInboundSettings::new)
}
}
impl ::protobuf::Clear for QuicInboundSettings {
fn clear(&mut self) {
self.certificate.clear();
self.certificate_key.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for QuicInboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct TlsInboundSettings {
// message fields
pub certificate: ::std::string::String,
pub certificate_key: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TlsInboundSettings {
fn default() -> &'a TlsInboundSettings {
<TlsInboundSettings as ::protobuf::Message>::default_instance()
}
}
impl TlsInboundSettings {
pub fn new() -> TlsInboundSettings {
::std::default::Default::default()
}
// string certificate = 1;
pub fn get_certificate(&self) -> &str {
&self.certificate
}
// string certificate_key = 2;
pub fn get_certificate_key(&self) -> &str {
&self.certificate_key
}
}
impl ::protobuf::Message for TlsInboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.certificate)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.certificate_key)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.certificate.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.certificate);
}
if !self.certificate_key.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.certificate_key);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.certificate.is_empty() {
os.write_string(1, &self.certificate)?;
}
if !self.certificate_key.is_empty() {
os.write_string(2, &self.certificate_key)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TlsInboundSettings {
TlsInboundSettings::new()
}
fn default_instance() -> &'static TlsInboundSettings {
static instance: ::protobuf::rt::LazyV2<TlsInboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(TlsInboundSettings::new)
}
}
impl ::protobuf::Clear for TlsInboundSettings {
fn clear(&mut self) {
self.certificate.clear();
self.certificate_key.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for TlsInboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct ChainInboundSettings {
// message fields
pub actors: ::protobuf::RepeatedField<::std::string::String>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a ChainInboundSettings {
fn default() -> &'a ChainInboundSettings {
<ChainInboundSettings as ::protobuf::Message>::default_instance()
}
}
impl ChainInboundSettings {
pub fn new() -> ChainInboundSettings {
::std::default::Default::default()
}
// repeated string actors = 1;
pub fn get_actors(&self) -> &[::std::string::String] {
&self.actors
}
}
impl ::protobuf::Message for ChainInboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.actors)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.actors {
my_size += ::protobuf::rt::string_size(1, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.actors {
os.write_string(1, &v)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> ChainInboundSettings {
ChainInboundSettings::new()
}
fn default_instance() -> &'static ChainInboundSettings {
static instance: ::protobuf::rt::LazyV2<ChainInboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(ChainInboundSettings::new)
}
}
impl ::protobuf::Clear for ChainInboundSettings {
fn clear(&mut self) {
self.actors.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for ChainInboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Inbound {
// message fields
pub tag: ::std::string::String,
pub protocol: ::std::string::String,
pub address: ::std::string::String,
pub port: u32,
pub settings: ::std::vec::Vec<u8>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Inbound {
fn default() -> &'a Inbound {
<Inbound as ::protobuf::Message>::default_instance()
}
}
impl Inbound {
pub fn new() -> Inbound {
::std::default::Default::default()
}
// string tag = 1;
pub fn get_tag(&self) -> &str {
&self.tag
}
// string protocol = 2;
pub fn get_protocol(&self) -> &str {
&self.protocol
}
// string address = 3;
pub fn get_address(&self) -> &str {
&self.address
}
// uint32 port = 4;
pub fn get_port(&self) -> u32 {
self.port
}
// bytes settings = 5;
pub fn get_settings(&self) -> &[u8] {
&self.settings
}
}
impl ::protobuf::Message for Inbound {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.tag)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.protocol)?;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.address)?;
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.port = tmp;
},
5 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.settings)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.tag.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.tag);
}
if !self.protocol.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.protocol);
}
if !self.address.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.address);
}
if self.port != 0 {
my_size += ::protobuf::rt::value_size(4, self.port, ::protobuf::wire_format::WireTypeVarint);
}
if !self.settings.is_empty() {
my_size += ::protobuf::rt::bytes_size(5, &self.settings);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.tag.is_empty() {
os.write_string(1, &self.tag)?;
}
if !self.protocol.is_empty() {
os.write_string(2, &self.protocol)?;
}
if !self.address.is_empty() {
os.write_string(3, &self.address)?;
}
if self.port != 0 {
os.write_uint32(4, self.port)?;
}
if !self.settings.is_empty() {
os.write_bytes(5, &self.settings)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Inbound {
Inbound::new()
}
fn default_instance() -> &'static Inbound {
static instance: ::protobuf::rt::LazyV2<Inbound> = ::protobuf::rt::LazyV2::INIT;
instance.get(Inbound::new)
}
}
impl ::protobuf::Clear for Inbound {
fn clear(&mut self) {
self.tag.clear();
self.protocol.clear();
self.address.clear();
self.port = 0;
self.settings.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for Inbound {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
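// --- Illustrative example (not generator output): `Inbound.settings` is a
// plain `bytes` field, so a natural use is to carry a serialized
// protocol-specific settings message. Pairing `protocol == "ws"` with
// `WebSocketInboundSettings` is an assumption made for illustration; the
// generated code itself does not fix that mapping.
#[cfg(test)]
mod inbound_settings_example {
    use super::*;
    use ::protobuf::Message;

    #[test]
    fn pack_and_unpack_settings() {
        let mut ws = WebSocketInboundSettings::new();
        ws.path = "/ws".to_string();

        let mut inbound = Inbound::new();
        inbound.tag = "ws-in".to_string();
        inbound.protocol = "ws".to_string();
        inbound.port = 8080;
        // Embed the nested settings message as opaque bytes.
        inbound.settings = ws.write_to_bytes().expect("serialize settings");

        // A consumer would dispatch on `protocol` and parse accordingly.
        let unpacked =
            WebSocketInboundSettings::parse_from_bytes(&inbound.settings).expect("parse");
        assert_eq!(unpacked.get_path(), "/ws");
    }
}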
#[derive(PartialEq,Clone,Default,Debug)]
pub struct RedirectOutboundSettings {
// message fields
pub address: ::std::string::String,
pub port: u32,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a RedirectOutboundSettings {
fn default() -> &'a RedirectOutboundSettings {
<RedirectOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl RedirectOutboundSettings {
pub fn new() -> RedirectOutboundSettings {
::std::default::Default::default()
}
// string address = 1;
pub fn get_address(&self) -> &str {
&self.address
}
// uint32 port = 2;
pub fn get_port(&self) -> u32 {
self.port
}
}
impl ::protobuf::Message for RedirectOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.address)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.port = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.address.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.address);
}
if self.port != 0 {
my_size += ::protobuf::rt::value_size(2, self.port, ::protobuf::wire_format::WireTypeVarint);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.address.is_empty() {
os.write_string(1, &self.address)?;
}
if self.port != 0 {
os.write_uint32(2, self.port)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> RedirectOutboundSettings {
RedirectOutboundSettings::new()
}
fn default_instance() -> &'static RedirectOutboundSettings {
static instance: ::protobuf::rt::LazyV2<RedirectOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(RedirectOutboundSettings::new)
}
}
impl ::protobuf::Clear for RedirectOutboundSettings {
fn clear(&mut self) {
self.address.clear();
self.port = 0;
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for RedirectOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct SocksOutboundSettings {
// message fields
pub address: ::std::string::String,
pub port: u32,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a SocksOutboundSettings {
fn default() -> &'a SocksOutboundSettings {
<SocksOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl SocksOutboundSettings {
pub fn new() -> SocksOutboundSettings {
::std::default::Default::default()
}
// string address = 1;
pub fn get_address(&self) -> &str {
&self.address
}
// uint32 port = 2;
pub fn get_port(&self) -> u32 {
self.port
}
}
impl ::protobuf::Message for SocksOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.address)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.port = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.address.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.address);
}
if self.port != 0 {
my_size += ::protobuf::rt::value_size(2, self.port, ::protobuf::wire_format::WireTypeVarint);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.address.is_empty() {
os.write_string(1, &self.address)?;
}
if self.port != 0 {
os.write_uint32(2, self.port)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> SocksOutboundSettings {
SocksOutboundSettings::new()
}
fn default_instance() -> &'static SocksOutboundSettings {
static instance: ::protobuf::rt::LazyV2<SocksOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(SocksOutboundSettings::new)
}
}
impl ::protobuf::Clear for SocksOutboundSettings {
fn clear(&mut self) {
self.address.clear();
self.port = 0;
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for SocksOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
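// --- Illustrative example (not generator output): reusing one message
// instance across parses. `Clear::clear` resets every field to its proto3
// default (String/Vec clears keep their allocations), after which
// `Message::merge_from_bytes` can fill the same instance again. The
// address and port values are placeholders.
#[cfg(test)]
mod socks_reuse_example {
    use super::*;
    use ::protobuf::{Clear, Message};

    #[test]
    fn clear_then_merge() {
        let mut first = SocksOutboundSettings::new();
        first.address = "127.0.0.1".to_string();
        first.port = 1080;
        let bytes = first.write_to_bytes().expect("serialize");

        let mut reused = SocksOutboundSettings::new();
        reused.address = "stale".to_string();
        reused.clear();
        assert_eq!(reused, SocksOutboundSettings::new());

        reused.merge_from_bytes(&bytes).expect("merge");
        assert_eq!(reused, first);
    }
}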
#[derive(PartialEq,Clone,Default,Debug)]
pub struct ShadowsocksOutboundSettings {
// message fields
pub address: ::std::string::String,
pub port: u32,
pub method: ::std::string::String,
pub password: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a ShadowsocksOutboundSettings {
fn default() -> &'a ShadowsocksOutboundSettings {
<ShadowsocksOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl ShadowsocksOutboundSettings {
pub fn new() -> ShadowsocksOutboundSettings {
::std::default::Default::default()
}
// string address = 1;
pub fn get_address(&self) -> &str {
&self.address
}
// uint32 port = 2;
pub fn get_port(&self) -> u32 {
self.port
}
// string method = 3;
pub fn get_method(&self) -> &str {
&self.method
}
// string password = 4;
pub fn get_password(&self) -> &str {
&self.password
}
}
impl ::protobuf::Message for ShadowsocksOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.address)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.port = tmp;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.method)?;
},
4 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.password)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.address.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.address);
}
if self.port != 0 {
my_size += ::protobuf::rt::value_size(2, self.port, ::protobuf::wire_format::WireTypeVarint);
}
if !self.method.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.method);
}
if !self.password.is_empty() {
my_size += ::protobuf::rt::string_size(4, &self.password);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.address.is_empty() {
os.write_string(1, &self.address)?;
}
if self.port != 0 {
os.write_uint32(2, self.port)?;
}
if !self.method.is_empty() {
os.write_string(3, &self.method)?;
}
if !self.password.is_empty() {
os.write_string(4, &self.password)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> ShadowsocksOutboundSettings {
ShadowsocksOutboundSettings::new()
}
fn default_instance() -> &'static ShadowsocksOutboundSettings {
static instance: ::protobuf::rt::LazyV2<ShadowsocksOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(ShadowsocksOutboundSettings::new)
}
}
impl ::protobuf::Clear for ShadowsocksOutboundSettings {
fn clear(&mut self) {
self.address.clear();
self.port = 0;
self.method.clear();
self.password.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for ShadowsocksOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct TrojanOutboundSettings {
// message fields
pub address: ::std::string::String,
pub port: u32,
pub password: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TrojanOutboundSettings {
fn default() -> &'a TrojanOutboundSettings {
<TrojanOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl TrojanOutboundSettings {
pub fn new() -> TrojanOutboundSettings {
::std::default::Default::default()
}
// string address = 1;
pub fn get_address(&self) -> &str {
&self.address
}
// uint32 port = 2;
pub fn get_port(&self) -> u32 {
self.port
}
// string password = 3;
pub fn get_password(&self) -> &str {
&self.password
}
}
impl ::protobuf::Message for TrojanOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.address)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.port = tmp;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.password)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.address.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.address);
}
if self.port != 0 {
my_size += ::protobuf::rt::value_size(2, self.port, ::protobuf::wire_format::WireTypeVarint);
}
if !self.password.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.password);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.address.is_empty() {
os.write_string(1, &self.address)?;
}
if self.port != 0 {
os.write_uint32(2, self.port)?;
}
if !self.password.is_empty() {
os.write_string(3, &self.password)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TrojanOutboundSettings {
TrojanOutboundSettings::new()
}
fn default_instance() -> &'static TrojanOutboundSettings {
static instance: ::protobuf::rt::LazyV2<TrojanOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(TrojanOutboundSettings::new)
}
}
impl ::protobuf::Clear for TrojanOutboundSettings {
fn clear(&mut self) {
self.address.clear();
self.port = 0;
self.password.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for TrojanOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct VMessOutboundSettings {
// message fields
pub address: ::std::string::String,
pub port: u32,
pub uuid: ::std::string::String,
pub security: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a VMessOutboundSettings {
fn default() -> &'a VMessOutboundSettings {
<VMessOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl VMessOutboundSettings {
pub fn new() -> VMessOutboundSettings {
::std::default::Default::default()
}
// string address = 1;
pub fn get_address(&self) -> &str {
&self.address
}
// uint32 port = 2;
pub fn get_port(&self) -> u32 {
self.port
}
// string uuid = 3;
pub fn get_uuid(&self) -> &str {
&self.uuid
}
// string security = 4;
pub fn get_security(&self) -> &str {
&self.security
}
}
impl ::protobuf::Message for VMessOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.address)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.port = tmp;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.uuid)?;
},
4 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.security)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute the serialized size of this message and cache it for write_to_with_cached_sizes
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.address.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.address);
}
if self.port != 0 {
my_size += ::protobuf::rt::value_size(2, self.port, ::protobuf::wire_format::WireTypeVarint);
}
if !self.uuid.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.uuid);
}
if !self.security.is_empty() {
my_size += ::protobuf::rt::string_size(4, &self.security);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.address.is_empty() {
os.write_string(1, &self.address)?;
}
if self.port != 0 {
os.write_uint32(2, self.port)?;
}
if !self.uuid.is_empty() {
os.write_string(3, &self.uuid)?;
}
if !self.security.is_empty() {
os.write_string(4, &self.security)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> VMessOutboundSettings {
VMessOutboundSettings::new()
}
fn default_instance() -> &'static VMessOutboundSettings {
static instance: ::protobuf::rt::LazyV2<VMessOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(VMessOutboundSettings::new)
}
}
impl ::protobuf::Clear for VMessOutboundSettings {
fn clear(&mut self) {
self.address.clear();
self.port = 0;
self.uuid.clear();
self.security.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for VMessOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
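// --- Usage sketch (not generated code): a minimal round trip through the
// proto3 wire format, assuming the rust-protobuf 2.x `Message` trait this
// file is generated against. All field values below are placeholders.
#[cfg(test)]
mod vmess_outbound_settings_roundtrip_sketch {
    use super::VMessOutboundSettings;
    use ::protobuf::Message;

    #[test]
    fn roundtrip() -> ::protobuf::ProtobufResult<()> {
        let mut settings = VMessOutboundSettings::new();
        settings.address = "example.org".to_string(); // hypothetical host
        settings.port = 443;
        settings.uuid = "00000000-0000-0000-0000-000000000000".to_string();
        settings.security = "chacha20-poly1305".to_string();

        // write_to_bytes() runs compute_size() and then
        // write_to_with_cached_sizes() over an in-memory buffer.
        let bytes = settings.write_to_bytes()?;

        // merge_from_bytes() drives merge_from() over a CodedInputStream.
        let mut decoded = VMessOutboundSettings::new();
        decoded.merge_from_bytes(&bytes)?;
        assert_eq!(settings, decoded);
        Ok(())
    }
}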
#[derive(PartialEq,Clone,Default,Debug)]
pub struct TlsOutboundSettings {
// message fields
pub server_name: ::std::string::String,
pub alpn: ::protobuf::RepeatedField<::std::string::String>,
pub certificate: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TlsOutboundSettings {
fn default() -> &'a TlsOutboundSettings {
<TlsOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl TlsOutboundSettings {
pub fn new() -> TlsOutboundSettings {
::std::default::Default::default()
}
// string server_name = 1;
pub fn get_server_name(&self) -> &str {
&self.server_name
}
// repeated string alpn = 2;
pub fn get_alpn(&self) -> &[::std::string::String] {
&self.alpn
}
// string certificate = 3;
pub fn get_certificate(&self) -> &str {
&self.certificate
}
}
impl ::protobuf::Message for TlsOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.server_name)?;
},
2 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.alpn)?;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.certificate)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.server_name.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.server_name);
}
for value in &self.alpn {
my_size += ::protobuf::rt::string_size(2, &value);
};
if !self.certificate.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.certificate);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.server_name.is_empty() {
os.write_string(1, &self.server_name)?;
}
for v in &self.alpn {
os.write_string(2, &v)?;
};
if !self.certificate.is_empty() {
os.write_string(3, &self.certificate)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TlsOutboundSettings {
TlsOutboundSettings::new()
}
fn default_instance() -> &'static TlsOutboundSettings {
static instance: ::protobuf::rt::LazyV2<TlsOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(TlsOutboundSettings::new)
}
}
impl ::protobuf::Clear for TlsOutboundSettings {
fn clear(&mut self) {
self.server_name.clear();
self.alpn.clear();
self.certificate.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for TlsOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct WebSocketOutboundSettings {
// message fields
pub path: ::std::string::String,
pub headers: ::std::collections::HashMap<::std::string::String, ::std::string::String>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a WebSocketOutboundSettings {
fn default() -> &'a WebSocketOutboundSettings {
<WebSocketOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl WebSocketOutboundSettings {
    pub fn new() -> WebSocketOutboundSettings {
        ::std::default::Default::default()
    }
// string path = 1;
pub fn get_path(&self) -> &str {
&self.path
}
// repeated .WebSocketOutboundSettings.HeadersEntry headers = 2;
pub fn get_headers(&self) -> &::std::collections::HashMap<::std::string::String, ::std::string::String> {
&self.headers
}
}
impl ::protobuf::Message for WebSocketOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.path)?;
},
2 => {
::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(wire_type, is, &mut self.headers)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.path.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.path);
}
my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(2, &self.headers);
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.path.is_empty() {
os.write_string(1, &self.path)?;
}
::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(2, &self.headers, os)?;
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> WebSocketOutboundSettings {
WebSocketOutboundSettings::new()
}
fn default_instance() -> &'static WebSocketOutboundSettings {
static instance: ::protobuf::rt::LazyV2<WebSocketOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(WebSocketOutboundSettings::new)
}
}
impl ::protobuf::Clear for WebSocketOutboundSettings {
fn clear(&mut self) {
self.path.clear();
self.headers.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for WebSocketOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
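// --- Usage sketch (not generated code): the proto3 map field `headers` is
// exposed directly as a std HashMap. Path and header values are illustrative.
#[allow(dead_code)]
fn websocket_outbound_settings_sketch() -> WebSocketOutboundSettings {
    let mut ws = WebSocketOutboundSettings::new();
    ws.path = "/ws".to_string();
    ws.headers
        .insert("Host".to_string(), "example.org".to_string());
    ws
}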
#[derive(PartialEq,Clone,Default,Debug)]
pub struct HTTP2OutboundSettings {
// message fields
pub path: ::std::string::String,
pub host: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a HTTP2OutboundSettings {
fn default() -> &'a HTTP2OutboundSettings {
<HTTP2OutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl HTTP2OutboundSettings {
pub fn new() -> HTTP2OutboundSettings {
::std::default::Default::default()
}
// string path = 1;
pub fn get_path(&self) -> &str {
&self.path
}
// string host = 2;
pub fn get_host(&self) -> &str {
&self.host
}
}
impl ::protobuf::Message for HTTP2OutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.path)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.host)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.path.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.path);
}
if !self.host.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.host);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.path.is_empty() {
os.write_string(1, &self.path)?;
}
if !self.host.is_empty() {
os.write_string(2, &self.host)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> HTTP2OutboundSettings {
HTTP2OutboundSettings::new()
}
fn default_instance() -> &'static HTTP2OutboundSettings {
static instance: ::protobuf::rt::LazyV2<HTTP2OutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(HTTP2OutboundSettings::new)
}
}
impl ::protobuf::Clear for HTTP2OutboundSettings {
fn clear(&mut self) {
self.path.clear();
self.host.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for HTTP2OutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct TryAllOutboundSettings {
// message fields
pub actors: ::protobuf::RepeatedField<::std::string::String>,
pub delay_base: u32,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TryAllOutboundSettings {
fn default() -> &'a TryAllOutboundSettings {
<TryAllOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl TryAllOutboundSettings {
pub fn new() -> TryAllOutboundSettings {
::std::default::Default::default()
}
// repeated string actors = 1;
pub fn get_actors(&self) -> &[::std::string::String] {
&self.actors
}
// uint32 delay_base = 2;
pub fn get_delay_base(&self) -> u32 {
self.delay_base
}
}
impl ::protobuf::Message for TryAllOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.actors)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.delay_base = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.actors {
my_size += ::protobuf::rt::string_size(1, &value);
};
if self.delay_base != 0 {
my_size += ::protobuf::rt::value_size(2, self.delay_base, ::protobuf::wire_format::WireTypeVarint);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.actors {
os.write_string(1, &v)?;
};
if self.delay_base != 0 {
os.write_uint32(2, self.delay_base)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TryAllOutboundSettings {
TryAllOutboundSettings::new()
}
fn default_instance() -> &'static TryAllOutboundSettings {
static instance: ::protobuf::rt::LazyV2<TryAllOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(TryAllOutboundSettings::new)
}
}
impl ::protobuf::Clear for TryAllOutboundSettings {
fn clear(&mut self) {
self.actors.clear();
self.delay_base = 0;
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for TryAllOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct RandomOutboundSettings {
// message fields
pub actors: ::protobuf::RepeatedField<::std::string::String>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a RandomOutboundSettings {
fn default() -> &'a RandomOutboundSettings {
<RandomOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl RandomOutboundSettings {
pub fn new() -> RandomOutboundSettings {
::std::default::Default::default()
}
// repeated string actors = 1;
pub fn get_actors(&self) -> &[::std::string::String] {
&self.actors
}
}
impl ::protobuf::Message for RandomOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.actors)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.actors {
my_size += ::protobuf::rt::string_size(1, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.actors {
os.write_string(1, &v)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> RandomOutboundSettings {
RandomOutboundSettings::new()
}
fn default_instance() -> &'static RandomOutboundSettings {
static instance: ::protobuf::rt::LazyV2<RandomOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(RandomOutboundSettings::new)
}
}
impl ::protobuf::Clear for RandomOutboundSettings {
fn clear(&mut self) {
self.actors.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for RandomOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct RROutboundSettings {
// message fields
pub actors: ::protobuf::RepeatedField<::std::string::String>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a RROutboundSettings {
fn default() -> &'a RROutboundSettings {
<RROutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl RROutboundSettings {
pub fn new() -> RROutboundSettings {
::std::default::Default::default()
}
// repeated string actors = 1;
pub fn get_actors(&self) -> &[::std::string::String] {
&self.actors
}
}
impl ::protobuf::Message for RROutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.actors)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.actors {
my_size += ::protobuf::rt::string_size(1, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.actors {
os.write_string(1, &v)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> RROutboundSettings {
RROutboundSettings::new()
}
fn default_instance() -> &'static RROutboundSettings {
static instance: ::protobuf::rt::LazyV2<RROutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(RROutboundSettings::new)
}
}
impl ::protobuf::Clear for RROutboundSettings {
fn clear(&mut self) {
self.actors.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for RROutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct AMuxOutboundSettings {
// message fields
pub address: ::std::string::String,
pub port: u32,
pub actors: ::protobuf::RepeatedField<::std::string::String>,
pub max_accepts: u32,
pub concurrency: u32,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a AMuxOutboundSettings {
fn default() -> &'a AMuxOutboundSettings {
<AMuxOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl AMuxOutboundSettings {
pub fn new() -> AMuxOutboundSettings {
::std::default::Default::default()
}
// string address = 1;
pub fn get_address(&self) -> &str {
&self.address
}
// uint32 port = 2;
pub fn get_port(&self) -> u32 {
self.port
}
// repeated string actors = 3;
pub fn get_actors(&self) -> &[::std::string::String] {
&self.actors
}
// uint32 max_accepts = 4;
pub fn get_max_accepts(&self) -> u32 {
self.max_accepts
}
// uint32 concurrency = 5;
pub fn get_concurrency(&self) -> u32 {
self.concurrency
}
}
impl ::protobuf::Message for AMuxOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.address)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.port = tmp;
},
3 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.actors)?;
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.max_accepts = tmp;
},
5 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.concurrency = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.address.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.address);
}
if self.port != 0 {
my_size += ::protobuf::rt::value_size(2, self.port, ::protobuf::wire_format::WireTypeVarint);
}
for value in &self.actors {
my_size += ::protobuf::rt::string_size(3, &value);
};
if self.max_accepts != 0 {
my_size += ::protobuf::rt::value_size(4, self.max_accepts, ::protobuf::wire_format::WireTypeVarint);
}
if self.concurrency != 0 {
my_size += ::protobuf::rt::value_size(5, self.concurrency, ::protobuf::wire_format::WireTypeVarint);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.address.is_empty() {
os.write_string(1, &self.address)?;
}
if self.port != 0 {
os.write_uint32(2, self.port)?;
}
for v in &self.actors {
os.write_string(3, &v)?;
};
if self.max_accepts != 0 {
os.write_uint32(4, self.max_accepts)?;
}
if self.concurrency != 0 {
os.write_uint32(5, self.concurrency)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> AMuxOutboundSettings {
AMuxOutboundSettings::new()
}
fn default_instance() -> &'static AMuxOutboundSettings {
static instance: ::protobuf::rt::LazyV2<AMuxOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(AMuxOutboundSettings::new)
}
}
impl ::protobuf::Clear for AMuxOutboundSettings {
fn clear(&mut self) {
self.address.clear();
self.port = 0;
self.actors.clear();
self.max_accepts = 0;
self.concurrency = 0;
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for AMuxOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct QuicOutboundSettings {
// message fields
pub address: ::std::string::String,
pub port: u32,
pub server_name: ::std::string::String,
pub certificate: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a QuicOutboundSettings {
fn default() -> &'a QuicOutboundSettings {
<QuicOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl QuicOutboundSettings {
pub fn new() -> QuicOutboundSettings {
::std::default::Default::default()
}
// string address = 1;
pub fn get_address(&self) -> &str {
&self.address
}
// uint32 port = 2;
pub fn get_port(&self) -> u32 {
self.port
}
// string server_name = 3;
pub fn get_server_name(&self) -> &str {
&self.server_name
}
// string certificate = 4;
pub fn get_certificate(&self) -> &str {
&self.certificate
}
}
impl ::protobuf::Message for QuicOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.address)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.port = tmp;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.server_name)?;
},
4 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.certificate)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.address.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.address);
}
if self.port != 0 {
my_size += ::protobuf::rt::value_size(2, self.port, ::protobuf::wire_format::WireTypeVarint);
}
if !self.server_name.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.server_name);
}
if !self.certificate.is_empty() {
my_size += ::protobuf::rt::string_size(4, &self.certificate);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.address.is_empty() {
os.write_string(1, &self.address)?;
}
if self.port != 0 {
os.write_uint32(2, self.port)?;
}
if !self.server_name.is_empty() {
os.write_string(3, &self.server_name)?;
}
if !self.certificate.is_empty() {
os.write_string(4, &self.certificate)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> QuicOutboundSettings {
QuicOutboundSettings::new()
}
fn default_instance() -> &'static QuicOutboundSettings {
static instance: ::protobuf::rt::LazyV2<QuicOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(QuicOutboundSettings::new)
}
}
impl ::protobuf::Clear for QuicOutboundSettings {
fn clear(&mut self) {
self.address.clear();
self.port = 0;
self.server_name.clear();
self.certificate.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for QuicOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct ChainOutboundSettings {
// message fields
pub actors: ::protobuf::RepeatedField<::std::string::String>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a ChainOutboundSettings {
fn default() -> &'a ChainOutboundSettings {
<ChainOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl ChainOutboundSettings {
pub fn new() -> ChainOutboundSettings {
::std::default::Default::default()
}
// repeated string actors = 1;
pub fn get_actors(&self) -> &[::std::string::String] {
&self.actors
}
}
impl ::protobuf::Message for ChainOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.actors)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.actors {
my_size += ::protobuf::rt::string_size(1, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.actors {
os.write_string(1, &v)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> ChainOutboundSettings {
ChainOutboundSettings::new()
}
fn default_instance() -> &'static ChainOutboundSettings {
static instance: ::protobuf::rt::LazyV2<ChainOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(ChainOutboundSettings::new)
}
}
impl ::protobuf::Clear for ChainOutboundSettings {
fn clear(&mut self) {
self.actors.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for ChainOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct RetryOutboundSettings {
// message fields
pub actors: ::protobuf::RepeatedField<::std::string::String>,
pub attempts: u32,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a RetryOutboundSettings {
fn default() -> &'a RetryOutboundSettings {
<RetryOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl RetryOutboundSettings {
pub fn new() -> RetryOutboundSettings {
::std::default::Default::default()
}
// repeated string actors = 1;
pub fn get_actors(&self) -> &[::std::string::String] {
&self.actors
}
// uint32 attempts = 2;
pub fn get_attempts(&self) -> u32 {
self.attempts
}
}
impl ::protobuf::Message for RetryOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.actors)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.attempts = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.actors {
my_size += ::protobuf::rt::string_size(1, &value);
};
if self.attempts != 0 {
my_size += ::protobuf::rt::value_size(2, self.attempts, ::protobuf::wire_format::WireTypeVarint);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.actors {
os.write_string(1, &v)?;
};
if self.attempts != 0 {
os.write_uint32(2, self.attempts)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> RetryOutboundSettings {
RetryOutboundSettings::new()
}
fn default_instance() -> &'static RetryOutboundSettings {
static instance: ::protobuf::rt::LazyV2<RetryOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(RetryOutboundSettings::new)
}
}
impl ::protobuf::Clear for RetryOutboundSettings {
fn clear(&mut self) {
self.actors.clear();
self.attempts = 0;
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for RetryOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct FailOverOutboundSettings {
// message fields
pub actors: ::protobuf::RepeatedField<::std::string::String>,
pub fail_timeout: u32,
pub health_check: bool,
pub check_interval: u32,
pub failover: bool,
pub fallback_cache: bool,
pub cache_size: u32,
pub cache_timeout: u32,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a FailOverOutboundSettings {
fn default() -> &'a FailOverOutboundSettings {
<FailOverOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl FailOverOutboundSettings {
pub fn new() -> FailOverOutboundSettings {
::std::default::Default::default()
}
// repeated string actors = 1;
pub fn get_actors(&self) -> &[::std::string::String] {
&self.actors
}
// uint32 fail_timeout = 2;
pub fn get_fail_timeout(&self) -> u32 {
self.fail_timeout
}
// bool health_check = 3;
pub fn get_health_check(&self) -> bool {
self.health_check
}
// uint32 check_interval = 4;
pub fn get_check_interval(&self) -> u32 {
self.check_interval
}
// bool failover = 5;
pub fn get_failover(&self) -> bool {
self.failover
}
// bool fallback_cache = 6;
pub fn get_fallback_cache(&self) -> bool {
self.fallback_cache
}
// uint32 cache_size = 7;
pub fn get_cache_size(&self) -> u32 {
self.cache_size
}
// uint32 cache_timeout = 8;
pub fn get_cache_timeout(&self) -> u32 {
self.cache_timeout
}
}
impl ::protobuf::Message for FailOverOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.actors)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.fail_timeout = tmp;
},
3 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.health_check = tmp;
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.check_interval = tmp;
},
5 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.failover = tmp;
},
6 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.fallback_cache = tmp;
},
7 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.cache_size = tmp;
},
8 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.cache_timeout = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.actors {
my_size += ::protobuf::rt::string_size(1, &value);
};
if self.fail_timeout != 0 {
my_size += ::protobuf::rt::value_size(2, self.fail_timeout, ::protobuf::wire_format::WireTypeVarint);
}
if self.health_check != false {
my_size += 2;
}
if self.check_interval != 0 {
my_size += ::protobuf::rt::value_size(4, self.check_interval, ::protobuf::wire_format::WireTypeVarint);
}
if self.failover != false {
my_size += 2;
}
if self.fallback_cache != false {
my_size += 2;
}
if self.cache_size != 0 {
my_size += ::protobuf::rt::value_size(7, self.cache_size, ::protobuf::wire_format::WireTypeVarint);
}
if self.cache_timeout != 0 {
my_size += ::protobuf::rt::value_size(8, self.cache_timeout, ::protobuf::wire_format::WireTypeVarint);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.actors {
os.write_string(1, &v)?;
};
if self.fail_timeout != 0 {
os.write_uint32(2, self.fail_timeout)?;
}
if self.health_check != false {
os.write_bool(3, self.health_check)?;
}
if self.check_interval != 0 {
os.write_uint32(4, self.check_interval)?;
}
if self.failover != false {
os.write_bool(5, self.failover)?;
}
if self.fallback_cache != false {
os.write_bool(6, self.fallback_cache)?;
}
if self.cache_size != 0 {
os.write_uint32(7, self.cache_size)?;
}
if self.cache_timeout != 0 {
os.write_uint32(8, self.cache_timeout)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> FailOverOutboundSettings {
FailOverOutboundSettings::new()
}
fn default_instance() -> &'static FailOverOutboundSettings {
static instance: ::protobuf::rt::LazyV2<FailOverOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(FailOverOutboundSettings::new)
}
}
impl ::protobuf::Clear for FailOverOutboundSettings {
fn clear(&mut self) {
self.actors.clear();
self.fail_timeout = 0;
self.health_check = false;
self.check_interval = 0;
self.failover = false;
self.fallback_cache = false;
self.cache_size = 0;
self.cache_timeout = 0;
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for FailOverOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct SelectOutboundSettings {
// message fields
pub actors: ::protobuf::RepeatedField<::std::string::String>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a SelectOutboundSettings {
fn default() -> &'a SelectOutboundSettings {
<SelectOutboundSettings as ::protobuf::Message>::default_instance()
}
}
impl SelectOutboundSettings {
pub fn new() -> SelectOutboundSettings {
::std::default::Default::default()
}
// repeated string actors = 1;
pub fn get_actors(&self) -> &[::std::string::String] {
&self.actors
}
}
impl ::protobuf::Message for SelectOutboundSettings {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.actors)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.actors {
my_size += ::protobuf::rt::string_size(1, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.actors {
os.write_string(1, &v)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> SelectOutboundSettings {
SelectOutboundSettings::new()
}
fn default_instance() -> &'static SelectOutboundSettings {
static instance: ::protobuf::rt::LazyV2<SelectOutboundSettings> = ::protobuf::rt::LazyV2::INIT;
instance.get(SelectOutboundSettings::new)
}
}
impl ::protobuf::Clear for SelectOutboundSettings {
fn clear(&mut self) {
self.actors.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for SelectOutboundSettings {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Outbound {
// message fields
pub tag: ::std::string::String,
pub protocol: ::std::string::String,
pub bind: ::std::string::String,
pub settings: ::std::vec::Vec<u8>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Outbound {
fn default() -> &'a Outbound {
<Outbound as ::protobuf::Message>::default_instance()
}
}
impl Outbound {
pub fn new() -> Outbound {
::std::default::Default::default()
}
// string tag = 1;
pub fn get_tag(&self) -> &str {
&self.tag
}
// string protocol = 2;
pub fn get_protocol(&self) -> &str {
&self.protocol
}
// string bind = 3;
pub fn get_bind(&self) -> &str {
&self.bind
}
// bytes settings = 4;
pub fn get_settings(&self) -> &[u8] {
&self.settings
}
}
impl ::protobuf::Message for Outbound {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.tag)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.protocol)?;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.bind)?;
},
4 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.settings)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.tag.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.tag);
}
if !self.protocol.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.protocol);
}
if !self.bind.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.bind);
}
if !self.settings.is_empty() {
my_size += ::protobuf::rt::bytes_size(4, &self.settings);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.tag.is_empty() {
os.write_string(1, &self.tag)?;
}
if !self.protocol.is_empty() {
os.write_string(2, &self.protocol)?;
}
if !self.bind.is_empty() {
os.write_string(3, &self.bind)?;
}
if !self.settings.is_empty() {
os.write_bytes(4, &self.settings)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Outbound {
Outbound::new()
}
fn default_instance() -> &'static Outbound {
static instance: ::protobuf::rt::LazyV2<Outbound> = ::protobuf::rt::LazyV2::INIT;
instance.get(Outbound::new)
}
}
impl ::protobuf::Clear for Outbound {
fn clear(&mut self) {
self.tag.clear();
self.protocol.clear();
self.bind.clear();
self.settings.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for Outbound {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
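// --- Usage sketch (not generated code): `Outbound.settings` is an opaque
// bytes field; it appears intended to carry the encoded per-protocol
// settings message (e.g. VMessOutboundSettings when `protocol` is "vmess").
// That pairing is an assumption here, and all values are placeholders.
#[allow(dead_code)]
fn build_vmess_outbound_sketch() -> ::protobuf::ProtobufResult<Outbound> {
    use ::protobuf::Message;

    let mut vmess = VMessOutboundSettings::new();
    vmess.address = "example.org".to_string(); // hypothetical endpoint
    vmess.port = 443;

    let mut outbound = Outbound::new();
    outbound.tag = "proxy".to_string();
    outbound.protocol = "vmess".to_string();
    // Embed the encoded settings message in the opaque bytes field.
    outbound.settings = vmess.write_to_bytes()?;
    Ok(outbound)
}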
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Router {
// message fields
pub rules: ::protobuf::RepeatedField<Router_Rule>,
pub domain_resolve: bool,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Router {
fn default() -> &'a Router {
<Router as ::protobuf::Message>::default_instance()
}
}
impl Router {
pub fn new() -> Router {
::std::default::Default::default()
}
// repeated .Router.Rule rules = 1;
pub fn get_rules(&self) -> &[Router_Rule] {
&self.rules
}
// bool domain_resolve = 2;
pub fn get_domain_resolve(&self) -> bool {
self.domain_resolve
}
}
impl ::protobuf::Message for Router {
fn is_initialized(&self) -> bool {
for v in &self.rules {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.rules)?;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.domain_resolve = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.rules {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
if self.domain_resolve != false {
my_size += 2;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.rules {
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
if self.domain_resolve != false {
os.write_bool(2, self.domain_resolve)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Router {
Router::new()
}
fn default_instance() -> &'static Router {
static instance: ::protobuf::rt::LazyV2<Router> = ::protobuf::rt::LazyV2::INIT;
instance.get(Router::new)
}
}
impl ::protobuf::Clear for Router {
fn clear(&mut self) {
self.rules.clear();
self.domain_resolve = false;
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for Router {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
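// --- Usage sketch (not generated code): building a Router with one nested
// Rule; `RepeatedField::push` appends like `Vec::push`. The tag, CIDR, and
// port range below are illustrative only.
#[allow(dead_code)]
fn build_router_sketch() -> Router {
    let mut rule = Router_Rule::new();
    rule.target_tag = "proxy".to_string();
    rule.ip_cidrs.push("10.0.0.0/8".to_string());
    rule.port_ranges.push("80-443".to_string());

    let mut router = Router::new();
    router.rules.push(rule);
    // Presumably resolves domain names so IP-based rules can match them.
    router.domain_resolve = true;
    router
}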
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Router_Rule {
// message fields
pub target_tag: ::std::string::String,
pub domains: ::protobuf::RepeatedField<Router_Rule_Domain>,
pub ip_cidrs: ::protobuf::RepeatedField<::std::string::String>,
pub mmdbs: ::protobuf::RepeatedField<Router_Rule_Mmdb>,
pub port_ranges: ::protobuf::RepeatedField<::std::string::String>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Router_Rule {
fn default() -> &'a Router_Rule {
<Router_Rule as ::protobuf::Message>::default_instance()
}
}
impl Router_Rule {
pub fn new() -> Router_Rule {
::std::default::Default::default()
}
// string target_tag = 1;
pub fn get_target_tag(&self) -> &str {
&self.target_tag
}
// repeated .Router.Rule.Domain domains = 2;
pub fn get_domains(&self) -> &[Router_Rule_Domain] {
&self.domains
}
// repeated string ip_cidrs = 3;
pub fn get_ip_cidrs(&self) -> &[::std::string::String] {
&self.ip_cidrs
}
// repeated .Router.Rule.Mmdb mmdbs = 4;
pub fn get_mmdbs(&self) -> &[Router_Rule_Mmdb] {
&self.mmdbs
}
// repeated string port_ranges = 5;
pub fn get_port_ranges(&self) -> &[::std::string::String] {
&self.port_ranges
}
}
impl ::protobuf::Message for Router_Rule {
fn is_initialized(&self) -> bool {
for v in &self.domains {
if !v.is_initialized() {
return false;
}
};
for v in &self.mmdbs {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.target_tag)?;
},
2 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.domains)?;
},
3 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.ip_cidrs)?;
},
4 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.mmdbs)?;
},
5 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.port_ranges)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.target_tag.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.target_tag);
}
for value in &self.domains {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in &self.ip_cidrs {
my_size += ::protobuf::rt::string_size(3, &value);
};
for value in &self.mmdbs {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in &self.port_ranges {
my_size += ::protobuf::rt::string_size(5, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.target_tag.is_empty() {
os.write_string(1, &self.target_tag)?;
}
for v in &self.domains {
os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
for v in &self.ip_cidrs {
os.write_string(3, &v)?;
};
for v in &self.mmdbs {
os.write_tag(4, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
for v in &self.port_ranges {
os.write_string(5, &v)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Router_Rule {
Router_Rule::new()
}
fn default_instance() -> &'static Router_Rule {
static instance: ::protobuf::rt::LazyV2<Router_Rule> = ::protobuf::rt::LazyV2::INIT;
instance.get(Router_Rule::new)
}
}
impl ::protobuf::Clear for Router_Rule {
fn clear(&mut self) {
self.target_tag.clear();
self.domains.clear();
self.ip_cidrs.clear();
self.mmdbs.clear();
self.port_ranges.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for Router_Rule {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Router_Rule_Domain {
// message fields
pub field_type: Router_Rule_Domain_Type,
pub value: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Router_Rule_Domain {
fn default() -> &'a Router_Rule_Domain {
<Router_Rule_Domain as ::protobuf::Message>::default_instance()
}
}
impl Router_Rule_Domain {
pub fn new() -> Router_Rule_Domain {
::std::default::Default::default()
}
// .Router.Rule.Domain.Type type = 1;
pub fn get_field_type(&self) -> Router_Rule_Domain_Type {
self.field_type
}
// string value = 2;
pub fn get_value(&self) -> &str {
&self.value
}
}
impl ::protobuf::Message for Router_Rule_Domain {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.field_type, 1, &mut self.unknown_fields)?
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.value)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.field_type != Router_Rule_Domain_Type::PLAIN {
my_size += ::protobuf::rt::enum_size(1, self.field_type);
}
if !self.value.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.value);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.field_type != Router_Rule_Domain_Type::PLAIN {
os.write_enum(1, ::protobuf::ProtobufEnum::value(&self.field_type))?;
}
if !self.value.is_empty() {
os.write_string(2, &self.value)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Router_Rule_Domain {
Router_Rule_Domain::new()
}
fn default_instance() -> &'static Router_Rule_Domain {
static instance: ::protobuf::rt::LazyV2<Router_Rule_Domain> = ::protobuf::rt::LazyV2::INIT;
instance.get(Router_Rule_Domain::new)
}
}
impl ::protobuf::Clear for Router_Rule_Domain {
fn clear(&mut self) {
self.field_type = Router_Rule_Domain_Type::PLAIN;
self.value.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for Router_Rule_Domain {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum Router_Rule_Domain_Type {
PLAIN = 0,
DOMAIN = 1,
FULL = 2,
}
impl ::protobuf::ProtobufEnum for Router_Rule_Domain_Type {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<Router_Rule_Domain_Type> {
match value {
0 => ::std::option::Option::Some(Router_Rule_Domain_Type::PLAIN),
1 => ::std::option::Option::Some(Router_Rule_Domain_Type::DOMAIN),
2 => ::std::option::Option::Some(Router_Rule_Domain_Type::FULL),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [Router_Rule_Domain_Type] = &[
Router_Rule_Domain_Type::PLAIN,
Router_Rule_Domain_Type::DOMAIN,
Router_Rule_Domain_Type::FULL,
];
values
}
}
impl ::std::marker::Copy for Router_Rule_Domain_Type {
}
impl ::std::default::Default for Router_Rule_Domain_Type {
fn default() -> Self {
Router_Rule_Domain_Type::PLAIN
}
}
impl ::protobuf::reflect::ProtobufValue for Router_Rule_Domain_Type {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self))
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Router_Rule_Mmdb {
// message fields
pub file: ::std::string::String,
pub country_code: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Router_Rule_Mmdb {
fn default() -> &'a Router_Rule_Mmdb {
<Router_Rule_Mmdb as ::protobuf::Message>::default_instance()
}
}
impl Router_Rule_Mmdb {
pub fn new() -> Router_Rule_Mmdb {
::std::default::Default::default()
}
// string file = 1;
pub fn get_file(&self) -> &str {
&self.file
}
// string country_code = 2;
pub fn get_country_code(&self) -> &str {
&self.country_code
}
}
impl ::protobuf::Message for Router_Rule_Mmdb {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.file)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.country_code)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.file.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.file);
}
if !self.country_code.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.country_code);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.file.is_empty() {
os.write_string(1, &self.file)?;
}
if !self.country_code.is_empty() {
os.write_string(2, &self.country_code)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Router_Rule_Mmdb {
Router_Rule_Mmdb::new()
}
fn default_instance() -> &'static Router_Rule_Mmdb {
static instance: ::protobuf::rt::LazyV2<Router_Rule_Mmdb> = ::protobuf::rt::LazyV2::INIT;
instance.get(Router_Rule_Mmdb::new)
}
}
impl ::protobuf::Clear for Router_Rule_Mmdb {
fn clear(&mut self) {
self.file.clear();
self.country_code.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for Router_Rule_Mmdb {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Config {
// message fields
pub log: ::protobuf::SingularPtrField<Log>,
pub inbounds: ::protobuf::RepeatedField<Inbound>,
pub outbounds: ::protobuf::RepeatedField<Outbound>,
pub router: ::protobuf::SingularPtrField<Router>,
pub dns: ::protobuf::SingularPtrField<Dns>,
pub api: ::protobuf::SingularPtrField<Api>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Config {
fn default() -> &'a Config {
<Config as ::protobuf::Message>::default_instance()
}
}
impl Config {
pub fn new() -> Config {
::std::default::Default::default()
}
// .Log log = 1;
pub fn get_log(&self) -> &Log {
self.log.as_ref().unwrap_or_else(|| <Log as ::protobuf::Message>::default_instance())
}
// repeated .Inbound inbounds = 2;
pub fn get_inbounds(&self) -> &[Inbound] {
&self.inbounds
}
// repeated .Outbound outbounds = 3;
pub fn get_outbounds(&self) -> &[Outbound] {
&self.outbounds
}
// .Router router = 4;
pub fn get_router(&self) -> &Router {
self.router.as_ref().unwrap_or_else(|| <Router as ::protobuf::Message>::default_instance())
}
// .Dns dns = 5;
pub fn get_dns(&self) -> &Dns {
self.dns.as_ref().unwrap_or_else(|| <Dns as ::protobuf::Message>::default_instance())
}
// .Api api = 6;
pub fn get_api(&self) -> &Api {
self.api.as_ref().unwrap_or_else(|| <Api as ::protobuf::Message>::default_instance())
}
}
impl ::protobuf::Message for Config {
fn is_initialized(&self) -> bool {
for v in &self.log {
if !v.is_initialized() {
return false;
}
};
for v in &self.inbounds {
if !v.is_initialized() {
return false;
}
};
for v in &self.outbounds {
if !v.is_initialized() {
return false;
}
};
for v in &self.router {
if !v.is_initialized() {
return false;
}
};
for v in &self.dns {
if !v.is_initialized() {
return false;
}
};
for v in &self.api {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.log)?;
},
2 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.inbounds)?;
},
3 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.outbounds)?;
},
4 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.router)?;
},
5 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.dns)?;
},
6 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.api)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if let Some(ref v) = self.log.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
for value in &self.inbounds {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in &self.outbounds {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
if let Some(ref v) = self.router.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if let Some(ref v) = self.dns.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if let Some(ref v) = self.api.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if let Some(ref v) = self.log.as_ref() {
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
for v in &self.inbounds {
os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
for v in &self.outbounds {
os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
if let Some(ref v) = self.router.as_ref() {
os.write_tag(4, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if let Some(ref v) = self.dns.as_ref() {
os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if let Some(ref v) = self.api.as_ref() {
os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Config {
Config::new()
}
fn default_instance() -> &'static Config {
static instance: ::protobuf::rt::LazyV2<Config> = ::protobuf::rt::LazyV2::INIT;
instance.get(Config::new)
}
}
impl ::protobuf::Clear for Config {
fn clear(&mut self) {
self.log.clear();
self.inbounds.clear();
self.outbounds.clear();
self.router.clear();
self.dns.clear();
self.api.clear();
self.unknown_fields.clear();
}
}
impl ::protobuf::reflect::ProtobufValue for Config {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
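// Illustrative round-trip sketch (an addition, not produced by protoc): it
// exercises the Message impls above — compute_size, write_to_with_cached_sizes
// and merge_from — via the rust-protobuf 2.x helpers write_to_bytes and
// parse_from_bytes, assuming the usual `protobuf` crate is available.
#[cfg(test)]
mod roundtrip_sketch {
use super::*;
use protobuf::Message;
#[test]
fn router_rule_roundtrip() {
let mut domain = Router_Rule_Domain::new();
domain.field_type = Router_Rule_Domain_Type::FULL;
domain.value = "example.com".to_string();
let mut rule = Router_Rule::new();
rule.target_tag = "proxy".to_string(); // string target_tag = 1
rule.domains.push(domain); // repeated Domain domains = 2
rule.ip_cidrs.push("10.0.0.0/8".to_string()); // repeated string ip_cidrs = 3
let bytes = rule.write_to_bytes().unwrap();
let decoded = Router_Rule::parse_from_bytes(&bytes).unwrap();
assert_eq!(rule, decoded);
}
}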
transmuxer_test.go | package ffmpeg
import (
"fmt"
"os"
"testing"
)
func TestTransmuxer_Join(t *testing.T) {
run, dir := setupTest(t)
defer os.RemoveAll(dir)
cmd := `
# run segmenter and sanity check frame counts. Hardcoded for now.
ffmpeg -loglevel warning -i "$1"/../transcoder/test.ts -c:a copy -c:v copy -f hls test.m3u8
ffprobe -loglevel warning -select_streams v -count_frames -show_streams test0.ts | grep nb_read_frames=120
ffprobe -loglevel warning -select_streams v -count_frames -show_streams test1.ts | grep nb_read_frames=120
ffprobe -loglevel warning -select_streams v -count_frames -show_streams test2.ts | grep nb_read_frames=120
ffprobe -loglevel warning -select_streams v -count_frames -show_streams test3.ts | grep nb_read_frames=120
`
run(cmd)
tc := NewTranscoder()
out := []TranscodeOptions{
{
Oname: fmt.Sprintf("%s/out.mp4", dir),
VideoEncoder: ComponentOptions{
Name: "copy",
},
AudioEncoder: ComponentOptions{
Name: "copy",
},
Profile: VideoProfile{Format: FormatNone},
Muxer: ComponentOptions{
Name: "mp4",
Opts: map[string]string{"movflags": "frag_keyframe+negative_cts_offsets+omit_tfhd_offset+disable_chpl+default_base_moof"},
},
},
}
for i := 0; i < 4; i++ {
in := &TranscodeOptionsIn{
Fname: fmt.Sprintf("%s/test%d.ts", dir, i),
Transmuxing: true,
}
res, err := tc.Transcode(in, out)
if err != nil {
t.Fatal(err)
}
if res.Decoded.Frames != 120 {
t.Error(in.Fname, " Mismatched frame count: expected 120 got ", res.Decoded.Frames)
}
}
tc.StopTranscoder()
cmd = `
ffprobe -loglevel warning -select_streams v -count_frames -show_streams out.mp4 | grep nb_read_frames=480
`
run(cmd)
}
func TestTransmuxer_Discontinuity(t *testing.T) {
run, dir := setupTest(t)
defer os.RemoveAll(dir)
cmd := `
# run segmenter and sanity check frame counts. Hardcoded for now.
ffmpeg -loglevel warning -i "$1"/../transcoder/test.ts -c:a copy -c:v copy -f hls test.m3u8
ffprobe -loglevel warning -select_streams v -count_frames -show_streams test0.ts | grep nb_read_frames=120
ffprobe -loglevel warning -select_streams v -count_frames -show_streams test1.ts | grep nb_read_frames=120
ffprobe -loglevel warning -select_streams v -count_frames -show_streams test2.ts | grep nb_read_frames=120
ffprobe -loglevel warning -select_streams v -count_frames -show_streams test3.ts | grep nb_read_frames=120
`
run(cmd)
tc := NewTranscoder()
out := []TranscodeOptions{
{
Oname: fmt.Sprintf("%s/out.mp4", dir),
VideoEncoder: ComponentOptions{
Name: "copy",
},
AudioEncoder: ComponentOptions{
Name: "copy",
},
Profile: VideoProfile{Format: FormatNone},
Muxer: ComponentOptions{
Name: "mp4",
Opts: map[string]string{"movflags": "frag_keyframe+negative_cts_offsets+omit_tfhd_offset+disable_chpl+default_base_moof"},
},
},
}
for i := 0; i < 4; i++ {
in := &TranscodeOptionsIn{
Fname: fmt.Sprintf("%s/test%d.ts", dir, i),
Transmuxing: true,
}
res, err := tc.Transcode(in, out)
if err != nil {
t.Fatal(err)
}
if res.Decoded.Frames != 120 {
t.Error(in.Fname, " Mismatched frame count: expected 120 got ", res.Decoded.Frames)
}
}
tc.Discontinuity()
for i := 0; i < 4; i++ {
in := &TranscodeOptionsIn{
Fname: fmt.Sprintf("%s/test%d.ts", dir, i),
Transmuxing: true,
}
res, err := tc.Transcode(in, out)
if err != nil {
t.Fatal(err)
}
if res.Decoded.Frames != 120 {
t.Error(in.Fname, " Mismatched frame count: expected 120 got ", res.Decoded.Frames)
}
}
tc.StopTranscoder()
cmd = `
ffprobe -loglevel warning -select_streams v -count_frames -show_streams out.mp4 | grep nb_read_frames=960
ffprobe -loglevel warning -select_streams v -count_frames -show_streams -show_frames out.mp4 | grep pkt_pts=1444380
`
run(cmd)
}
util.py | import time
from threading import Thread
def timestamp_datetime(value):
format = '%Y-%m-%d %H:%M:%S'
value = time.localtime(value)
dt = time.strftime(format, value)
return dt
def log(s):
print("[",timestamp_datetime(time.time()),"]",s)
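# Hypothetical usage sketch (not part of the original module); it also shows the
# intended role of the otherwise-unused Thread import: logging from a worker thread.
if __name__ == "__main__":
worker = Thread(target=log, args=("worker started",))
worker.start()
worker.join()
log("main thread done")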
objobject.rs | use super::objdict::PyDictRef;
use super::objlist::PyList;
use super::objproperty::PropertyBuilder;
use super::objstr::PyStringRef;
use super::objtype::{self, PyClassRef};
use crate::function::{OptionalArg, PyFuncArgs};
use crate::pyhash;
use crate::pyobject::{
IdProtocol, ItemProtocol, PyAttributes, PyContext, PyObject, PyObjectRef, PyResult, PyValue,
TryFromObject, TypeProtocol,
};
use crate::vm::VirtualMachine;
#[derive(Debug)]
pub struct PyInstance;
impl PyValue for PyInstance {
fn class(vm: &VirtualMachine) -> PyClassRef {
vm.ctx.object()
}
}
pub fn new_instance(vm: &VirtualMachine, mut args: PyFuncArgs) -> PyResult {
// more or less __new__ operator
let cls = PyClassRef::try_from_object(vm, args.shift())?;
let dict = if cls.is(&vm.ctx.object()) {
None
} else {
Some(vm.ctx.new_dict())
};
Ok(PyObject::new(PyInstance, cls, dict))
}
fn object_eq(_zelf: PyObjectRef, _other: PyObjectRef, vm: &VirtualMachine) -> PyObjectRef {
vm.ctx.not_implemented()
}
fn object_ne(_zelf: PyObjectRef, _other: PyObjectRef, vm: &VirtualMachine) -> PyObjectRef {
vm.ctx.not_implemented()
}
fn object_lt(_zelf: PyObjectRef, _other: PyObjectRef, vm: &VirtualMachine) -> PyObjectRef {
vm.ctx.not_implemented()
}
fn object_le(_zelf: PyObjectRef, _other: PyObjectRef, vm: &VirtualMachine) -> PyObjectRef {
vm.ctx.not_implemented()
}
fn object_gt(_zelf: PyObjectRef, _other: PyObjectRef, vm: &VirtualMachine) -> PyObjectRef {
vm.ctx.not_implemented()
}
fn object_ge(_zelf: PyObjectRef, _other: PyObjectRef, vm: &VirtualMachine) -> PyObjectRef {
vm.ctx.not_implemented()
}
fn object_hash(zelf: PyObjectRef, _vm: &VirtualMachine) -> pyhash::PyHash {
zelf.get_id() as pyhash::PyHash
}
fn object_setattr(
obj: PyObjectRef,
attr_name: PyStringRef,
value: PyObjectRef,
vm: &VirtualMachine,
) -> PyResult<()> {
vm_trace!("object.__setattr__({:?}, {}, {:?})", obj, attr_name, value);
let cls = obj.class();
if let Some(attr) = objtype::class_get_attr(&cls, attr_name.as_str()) {
if let Some(descriptor) = objtype::class_get_attr(&attr.class(), "__set__") {
return vm
.invoke(&descriptor, vec![attr, obj.clone(), value])
.map(|_| ());
}
}
if let Some(ref dict) = obj.clone().dict {
dict.borrow().set_item(attr_name.as_str(), value, vm)?;
Ok(())
} else {
Err(vm.new_attribute_error(format!(
"'{}' object has no attribute '{}'",
obj.class().name,
attr_name.as_str()
)))
}
}
fn object_delattr(obj: PyObjectRef, attr_name: PyStringRef, vm: &VirtualMachine) -> PyResult<()> {
let cls = obj.class();
if let Some(attr) = objtype::class_get_attr(&cls, attr_name.as_str()) {
if let Some(descriptor) = objtype::class_get_attr(&attr.class(), "__delete__") {
return vm.invoke(&descriptor, vec![attr, obj.clone()]).map(|_| ());
}
}
if let Some(ref dict) = obj.dict {
dict.borrow().del_item(attr_name.as_str(), vm)?;
Ok(())
} else {
Err(vm.new_attribute_error(format!(
"'{}' object has no attribute '{}'",
obj.class().name,
attr_name.as_str()
)))
}
}
fn object_str(zelf: PyObjectRef, vm: &VirtualMachine) -> PyResult {
vm.call_method(&zelf, "__repr__", vec![])
}
fn object_repr(zelf: PyObjectRef, _vm: &VirtualMachine) -> String {
format!("<{} object at 0x{:x}>", zelf.class().name, zelf.get_id())
}
fn object_subclasshook(vm: &VirtualMachine, _args: PyFuncArgs) -> PyResult {
Ok(vm.ctx.not_implemented())
}
pub fn object_dir(obj: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyList> {
let attributes: PyAttributes = objtype::get_attributes(obj.class());
let dict = PyDictRef::from_attributes(attributes, vm)?;
// Get instance attributes:
if let Some(object_dict) = &obj.dict {
vm.invoke(
&vm.get_attribute(dict.clone().into_object(), "update")?,
object_dict.borrow().clone().into_object(),
)?;
}
let attributes: Vec<_> = dict.into_iter().map(|(k, _v)| k).collect();
Ok(PyList::from(attributes))
}
fn object_format(
obj: PyObjectRef,
format_spec: PyStringRef,
vm: &VirtualMachine,
) -> PyResult<PyStringRef> {
if format_spec.as_str().is_empty() {
vm.to_str(&obj)
} else {
Err(vm.new_type_error("unsupported format string passed to object.__format__".to_string()))
}
}
pub fn init(context: &PyContext) {
let object = &context.types.object_type;
let object_doc = "The most base type";
extend_class!(context, object, {
(slot new) => new_instance,
// yeah, it's `type_new`, but we're putting it here so it's available on every object
"__new__" => context.new_classmethod(objtype::type_new),
"__init__" => context.new_method(object_init),
"__class__" =>
PropertyBuilder::new(context)
.add_getter(object_class)
.add_setter(object_class_setter)
.create(),
"__eq__" => context.new_method(object_eq),
"__ne__" => context.new_method(object_ne),
"__lt__" => context.new_method(object_lt),
"__le__" => context.new_method(object_le),
"__gt__" => context.new_method(object_gt),
"__ge__" => context.new_method(object_ge),
"__setattr__" => context.new_method(object_setattr),
"__delattr__" => context.new_method(object_delattr),
"__dict__" =>
PropertyBuilder::new(context)
.add_getter(object_dict)
.add_setter(object_dict_setter)
.create(),
"__dir__" => context.new_method(object_dir),
"__hash__" => context.new_method(object_hash),
"__str__" => context.new_method(object_str),
"__repr__" => context.new_method(object_repr),
"__format__" => context.new_method(object_format),
"__getattribute__" => context.new_method(object_getattribute),
"__subclasshook__" => context.new_classmethod(object_subclasshook),
"__reduce__" => context.new_method(object_reduce),
"__reduce_ex__" => context.new_method(object_reduce_ex),
"__doc__" => context.new_str(object_doc.to_string()),
});
}
fn object_init(vm: &VirtualMachine, _args: PyFuncArgs) -> PyResult {
Ok(vm.ctx.none())
}
fn object_class(obj: PyObjectRef, _vm: &VirtualMachine) -> PyObjectRef {
obj.class().into_object()
}
fn object_class_setter(
instance: PyObjectRef,
_value: PyObjectRef,
vm: &VirtualMachine,
) -> PyResult<()> {
let type_repr = vm.to_pystr(&instance.class())?;
Err(vm.new_type_error(format!("can't change class of type '{}'", type_repr)))
}
fn object_dict(object: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyDictRef> {
if let Some(ref dict) = object.dict {
Ok(dict.borrow().clone())
} else {
Err(vm.new_attribute_error("no dictionary.".to_string()))
}
}
fn object_dict_setter(
instance: PyObjectRef,
value: PyDictRef,
vm: &VirtualMachine,
) -> PyResult<()> {
if let Some(dict) = &instance.dict {
*dict.borrow_mut() = value;
Ok(())
} else {
Err(vm.new_attribute_error(format!(
"'{}' object has no attribute '__dict__'",
instance.class().name
)))
}
}
fn object_getattribute(obj: PyObjectRef, name: PyStringRef, vm: &VirtualMachine) -> PyResult {
vm_trace!("object.__getattribute__({:?}, {:?})", obj, name);
vm.generic_getattribute(obj.clone(), name.clone())?
.ok_or_else(|| vm.new_attribute_error(format!("{} has no attribute '{}'", obj, name)))
}
fn object_reduce(obj: PyObjectRef, proto: OptionalArg<usize>, vm: &VirtualMachine) -> PyResult {
common_reduce(obj, proto.unwrap_or(0), vm)
}
fn object_reduce_ex(obj: PyObjectRef, proto: usize, vm: &VirtualMachine) -> PyResult {
let cls = obj.class();
if let Some(reduce) = objtype::class_get_attr(&cls, "__reduce__") {
let object_reduce =
objtype::class_get_attr(&vm.ctx.types.object_type, "__reduce__").unwrap();
if !reduce.is(&object_reduce) {
return vm.invoke(&reduce, vec![]);
}
}
common_reduce(obj, proto, vm)
}
fn common_reduce(obj: PyObjectRef, proto: usize, vm: &VirtualMachine) -> PyResult {
if proto >= 2 {
let reducelib = vm.import("__reducelib", &[], 0)?;
let reduce_2 = vm.get_attribute(reducelib, "reduce_2")?;
vm.invoke(&reduce_2, vec![obj])
} else {
let copyreg = vm.import("copyreg", &[], 0)?;
let reduce_ex = vm.get_attribute(copyreg, "_reduce_ex")?;
vm.invoke(&reduce_ex, vec![obj, vm.new_int(proto)])
}
}
setting.py | # RT Lib - Setting
from typing import (
TYPE_CHECKING, TypedDict, Optional, Union, Literal, Dict, Tuple, List,
overload, get_origin, get_args
)
from discord.ext import commands
import discord
from collections import defaultdict
from aiohttp import ClientSession
from functools import partial
from datetime import datetime
from ujson import dumps
from time import time
from pytz import utc
from . import websocket
from .slash import Option
if TYPE_CHECKING:
from .typed import RT
class CommandRunData(TypedDict):
command: str
kwargs: Dict[str, Union[str, int, float, bool]]
guild_id: Union[int, Literal[0]]
category: str
user_id: int
ip: str
class Setting:
@overload
def __init__(
_, mode: str, name: Optional[str] = None,
help_command: Tuple[str, str] = None, **kwargs
):
...
def __new__(cls, mode, name=None, help_command=None, **kwargs):
return lambda func: func
self = super().__new__(cls)
self.mode, self.name, self.kwargs = mode, name, kwargs
self.help_command = help_command
def _decorator(func):
func._setting = self
return func
return _decorator
class Context:
"ダッシュボードから呼ばれたコマンドで実行されるContextです。"
def __init__(
self, cog: "SettingManager", data: CommandRunData,
command: commands.Command, **kwargs
):
# Convert IDs from strings to integers.
for key, value in list(data.items()):
if key.endswith("id"):
data[key] = int(value)
# Build up the attributes.
self.data = data
self.setting_manager = cog
self.bot: "RT" = self.setting_manager.bot
self.guild: Optional[discord.Guild] = self.bot.get_guild(data["guild_id"])
self.created_at: datetime = datetime.now(utc)
self.edited_at = None
self.__setting_context__ = True
self.channel: Optional[
Union[discord.abc.GuildChannel, discord.DMChannel]
] = (
self.guild.get_channel(data["kwargs"].pop(
"channel_id", data["kwargs"].pop(
"channel", data["kwargs"].pop("Channel", 0)
)
))
if data["category"].endswith("guild")
else self.bot.get_user(data["user_id"])
)
self.author: Union[discord.User, discord.Member] = (
self.guild.get_member(data["user_id"]) if self.guild
else self.bot.get_user(data["user_id"])
)
for key, value in list(kwargs.items()):
setattr(self, key, value)
self.command = command
self.cog = command.cog
self.voice_client: Optional[discord.VoiceClient] = \
getattr(self.guild, "voice_client", None)
self.prefix = "r2!" if self.bot.test else "rt!"
self.me: Union[discord.Member, discord.ClientUser] = \
getattr(self.guild, "me", self.bot.user)
self.message = self
self.reply = self.send
async def trigger_typing(self):
...
async def send(
self, content: str = None, embed: discord.Embed = None, *args, **kwargs
):
"返信をします。"
content = self.bot.cogs["Language"].get_text(
embed if embed else content, self.author.id
)
if isinstance(content, discord.Embed):
content = content.to_dict()
async with self.setting_manager.session.post(
f"{self.bot.get_url()}/api/settings/reply/{self.data['ip']}",
json={"data": content}
) as r:
self.bot.print(
"[SettingManager]", "[Reply]",
f"Response: {await r.text()}, Content: {content}"
)
@overload
async def reply(
self, content: str = None, embed: discord.Embed = None, *args, **kwargs
):
...
async def delete(self) -> None:
...
class SettingManager(commands.Cog):
SUPPORTED_DISCORD_ANNOTATIONS = (
"Member", "User", "TextChannel", "VoiceChannel", "StageChannel",
"Thread", "Role"
)
SUPPORTED_ANNOTATIONS = (str, int, float, bool)
def __init__(self, bot: "RT"):
self.bot = bot
self.data: Dict[
str, Tuple[commands.Command, Setting]
] = {}
self.before = {}
@property
def session(self) -> ClientSession:
if not hasattr(self, "_session"):
self._session = ClientSession(
loop=self.bot.loop, json_serialize=partial(
dumps, ensure_ascii=False
)
)
return self._session
def get_parsed_args(self, annotation: object) -> Union[str, List[str]]:
"渡されたオブジェクトから設定項目の型の名前を判定し返します。"
if isinstance(annotation, Option):
annotation = annotation.annotation
if annotation in self.SUPPORTED_ANNOTATIONS:
return annotation.__name__
elif getattr(annotation, "__name__", "") in self.SUPPORTED_DISCORD_ANNOTATIONS:
return annotation.__name__.replace("Text", "").replace("Voice", "") \
.replace("Stage", "").replace("Thread", "Channel").replace("User", "Member")
elif (origin := get_origin(annotation)) == Union:
return ["Union"] + [self.get_parsed_args(arg) for arg in get_args(annotation)]
elif origin == Literal:
return ["Literal"] + list(get_args(annotation))
else:
return "str"
def reset(self):
self.data = {}
def add_command(self, command: commands.Command) -> None:
self.data[command.qualified_name] = (command, command.callback._setting)
@commands.Cog.listener()
async def on_command_add(self, command: commands.Command):
if hasattr(command.callback, "_setting"):
self.add_command(command)
@commands.Cog.listener("on_update_api")
async def update(self):
"APIにBotにあるコマンドの設定のJSONデータを送る。"
# Build the data for the backend.
data = defaultdict(dict)
for command, setting in self.data.values():
kwargs = {
parameter.name: (
ant := self.get_parsed_args(parameter.annotation),
"" if parameter.default == parameter.empty
else parameter.default,
parameter.kind == parameter.KEYWORD_ONLY \
and ant == "str"
) for parameter in command.clean_params.values()
}
kwargs.update({
key: (self.get_parsed_args(value), "", False)
for key, value in setting.kwargs.items()
})
data[setting.mode][command.qualified_name] = {
"help": (
self.bot.cogs["BotGeneral"].get_help_url(*setting.help_command)
if setting.help_command
else self.bot.cogs["BotGeneral"].get_command_url(command)
), "kwargs": kwargs, "sub_category": getattr(
command.parent, "name", None
), "headding": (
command.extras.get("headding")
or command.__original_kwargs__.get("headding")
), "display_name": setting.name or command.name
}
# Send the data.
async with self.bot.session.post(
f"{self.bot.get_url()}/api/settings/commands/update",
json=data
) as r:
self.bot.print("[SettingManager]", "[Updater]", time(), await r.text())
self.before = data
@websocket.websocket("/api/settings/websocket", auto_connect=True, reconnect=True)
async def setting_websocket(self, ws: websocket.WebSocket, _):
# This exists so the bot can react immediately when a user updates settings from the dashboard.
await ws.send("on_ready")
@setting_websocket.event("on_post")
async def post(self, ws: websocket.WebSocket, data: CommandRunData):
if isinstance(data, dict):
self.bot.loop.create_task(
self.run_command(self.data[data["command"]][0], data),
name=f"UpdateSetting[{data.get('command')}]: {data.get('user_id')}"
)
await ws.send("on_posted")
@setting_websocket.event("on_posted")
async def posted(self, ws: websocket.WebSocket, _):
await self.setting_websocket(ws, None)
async def run_command(self, command: commands.Command, data: CommandRunData):
"コマンドを走らせます。"
ctx = None
try:
# Assemble the command message.
content = f"{self.bot.command_prefix[0]}{command.qualified_name}"
for parameter in command.clean_params.values():
tentative = f' "{data["kwargs"].get(parameter.name, "")}"'
if parameter.kind == parameter.KEYWORD_ONLY:
tentative = f" {tentative[2:-1]}"
content += tentative
# Check that it can run, then execute the command with the original Context.
ctx = Context(self, data, command)
ctx.content = content
ctx._state = self.bot.http
parsed_ctx = await self.bot.get_context(ctx)
ctx.view = parsed_ctx.view
ctx.args, ctx.kwargs = parsed_ctx.args, parsed_ctx.kwargs
for name in dir(parsed_ctx):
if not name.startswith(
(
"__", "send", "reply", "trigger", "typing", "created",
"channel", "message", "guild"
)
):
setattr(ctx, name, getattr(parsed_ctx, name))
return await self.bot.invoke(ctx.message)
except Exception as e:
if ctx:
self.bot.dispatch("command_error", ctx, e)
def cog_unload(self):
if hasattr(self, "_session"):
self.bot.loop.create_task(self._session.close())
def setup(bot):
return
bot.add_cog(SettingManager(bot))
test_paper_collection.py | #!/usr/bin/env python
"""Tests for `paper_collection` package."""
import unittest
from paper_collection import paper_collection
import pandas as pd
import numpy as np
class TestPaper_collection(unittest.TestCase):
"""Tests for `paper_collection` package."""
def setUp(self):
"""Set up test fixtures, if any."""
self.df_papers = pd.read_csv('tests/jw_papers_mag2019.tsv', sep='\t')
self.df_papers.drop_duplicates(subset=['PaperId'], inplace=True)
self.num_papers = len(self.df_papers)
self.df_citations = pd.read_csv('tests/jw_citations_mag2019.tsv', sep='\t')
self.num_citations = len(self.df_citations)
self.df_authors = pd.read_csv('tests/jw_PaperAuthorAffiliations_mag2019.tsv', sep='\t')
self.authors_by_paper = self.get_authors_by_paper(self.df_authors)
def tearDown(self):
"""Tear down test fixtures, if any."""
def get_authors_by_paper(self, df_authors):
"""Get a dictionary mapping paper_id to author data
"""
author_data = {}
for paper_id, group in df_authors.groupby('PaperId'):
group = group.sort_values('AuthorSequenceNumber')
this_authors = []
for _, row in group.iterrows():
this_authors.append({'name': row.OriginalAuthor, 'author_id': row.AuthorId})
author_data[paper_id] = this_authors
return author_data
def load_paper(self, prow):
paper_id = prow.PaperId
authors = self.authors_by_paper[paper_id]
return paper_collection.Paper(dataset='mag',
dataset_version='mag-2019-11-22',
paper_id=paper_id,
title=prow.PaperTitle,
display_title=prow.OriginalTitle,
doi=prow.Doi,
pub_date=prow.Date,
year=prow.Year,
venue=prow.OriginalVenue,
authors=authors,
node_rank=prow.flow)
def test_000_single_paper(self):
"""Load a single paper"""
prow = self.df_papers.iloc[0]
p = self.load_paper(prow)
assert p.display_title is not None
assert len(p.display_title)
def test_001_collection(self):
"""Load a collection"""
coll = paper_collection.PaperCollection(description="Paper Collection")
for _, prow in self.df_papers.iterrows():
p = self.load_paper(prow)
coll.papers.append(p)
assert len(coll) == self.num_papers
def test_002_graph(self):
"""Construct graph"""
coll = paper_collection.PaperCollection(description="Paper Collection")
for _, prow in self.df_papers.iterrows():
p = self.load_paper(prow)
coll.papers.append(p)
for _, row in self.df_citations.iterrows():
coll.citations.append((row.PaperId, row.PaperReferenceId))
G = coll.construct_graph()
assert G.number_of_nodes() == self.num_papers
assert G.number_of_edges() == self.num_citations
virtual_network_usage_name.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualNetworkUsageName(Model):
"""Usage strings container.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar localized_value: Localized subnet size and usage string.
:vartype localized_value: str
:ivar value: Subnet size and usage string.
:vartype value: str
"""
_validation = {
'localized_value': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'localized_value': {'key': 'localizedValue', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(self):
self.localized_value = None
self.value = None
error.rs | #[derive(Debug)]
pub enum Uma2Error {
NoUma2Discovered,
AudienceFieldRequired,
NoResourceSetEndpoint,
NoPermissionsEndpoint,
NoPolicyAssociationEndpoint,
ResourceSetEndpointMalformed,
PolicyAssociationEndpointMalformed,
}
impl std::error::Error for Uma2Error {
fn description(&self) -> &str {
"UMA2 API error"
}
}
impl std::fmt::Display for Uma2Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
write!(
f,
"{}",
match *self {
Uma2Error::NoUma2Discovered => "No UMA2 discovered",
Uma2Error::AudienceFieldRequired => "Audience field required",
Uma2Error::NoResourceSetEndpoint => "No resource_set endpoint discovered",
Uma2Error::NoPermissionsEndpoint => "No permissions endpoint discovered",
Uma2Error::NoPolicyAssociationEndpoint =>
"No permissions policy association endpoint discovered",
Uma2Error::ResourceSetEndpointMalformed => "resource_set endpoint is malformed",
Uma2Error::PolicyAssociationEndpointMalformed => "policy_endpoint is malformed",
}
)
}
}
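// Illustrative caller sketch (assumed, not part of this module): since
// Uma2Error implements std::error::Error, it boxes like any other error type.
#[allow(dead_code)]
fn require_uma2(discovered: bool) -> Result<(), Box<dyn std::error::Error>> {
if !discovered {
return Err(Box::new(Uma2Error::NoUma2Discovered));
}
Ok(())
}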
radar_chart.py | """
http://matplotlib.org/examples/api/radar_chart.html
Example of creating a radar chart (a.k.a. a spider or star chart) [1]_.
Although this example allows a frame of either 'circle' or 'polygon', polygon
frames don't have proper gridlines (the lines are circles instead of polygons).
It's possible to get a polygon grid by setting GRIDLINE_INTERPOLATION_STEPS in
matplotlib.axis to the desired number of vertices, but the orientation of the
polygon is not aligned with the radial axes.
.. [1] http://en.wikipedia.org/wiki/Radar_chart
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
def radar_factory(num_vars, frame='circle'):
"""Create a radar chart with `num_vars` axes.
This function creates a RadarAxes projection and registers it.
Parameters
----------
num_vars : int
Number of variables for radar chart.
frame : {'circle' | 'polygon'}
Shape of frame surrounding axes.
"""
# calculate evenly-spaced axis angles
theta = 2*np.pi * np.linspace(0, 1-1./num_vars, num_vars)
# rotate theta such that the first axis is at the top
theta += np.pi/2
def draw_poly_patch(self):
verts = unit_poly_verts(theta)
return plt.Polygon(verts, closed=True, edgecolor='k')
def draw_circle_patch(self):
# unit circle centered on (0.5, 0.5)
return plt.Circle((0.5, 0.5), 0.5)
patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}
if frame not in patch_dict:
raise ValueError('unknown value for `frame`: %s' % frame)
class RadarAxes(PolarAxes):
name = 'radar'
# use 1 line segment to connect specified points
RESOLUTION = 1
# define draw_frame method
draw_patch = patch_dict[frame]
def fill(self, *args, **kwargs):
"""Override fill so that line is closed by default"""
closed = kwargs.pop('closed', True)
return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
def plot(self, *args, **kwargs):
"""Override plot so that line is closed by default"""
lines = super(RadarAxes, self).plot(*args, **kwargs)
for line in lines:
self._close_line(line)
def _close_line(self, line):
x, y = line.get_data()
# FIXME: markers at x[0], y[0] get doubled-up
if x[0] != x[-1]:
x = np.concatenate((x, [x[0]]))
y = np.concatenate((y, [y[0]]))
line.set_data(x, y)
def set_varlabels(self, labels):
self.set_thetagrids(theta * 180/np.pi, labels)
def _gen_axes_patch(self):
return self.draw_patch()
def _gen_axes_spines(self):
if frame == 'circle':
return PolarAxes._gen_axes_spines(self)
# The following is a hack to get the spines (i.e. the axes frame)
# to draw correctly for a polygon frame.
# spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.
spine_type = 'circle'
verts = unit_poly_verts(theta)
# close off polygon by repeating first vertex
verts.append(verts[0])
path = Path(verts)
spine = Spine(self, spine_type, path)
spine.set_transform(self.transAxes)
return {'polar': spine}
register_projection(RadarAxes)
return theta
def unit_poly_verts(theta):
"""Return vertices of polygon for subplot axes.
This polygon is circumscribed by a unit circle centered at (0.5, 0.5)
"""
x0, y0, r = [0.5] * 3
verts = [(r*np.cos(t) + x0, r*np.sin(t) + y0) for t in theta]
return verts
if __name__ == "__main__":
n_spokes = 5
theta = radar_factory(n_spokes, frame="polygon")
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection="radar")
datapoints = np.random.random(n_spokes)
ax.plot(theta, datapoints)
ax.fill(theta, datapoints)
plt.show()
installed_apps.rs | // Copyright 2017, Igor Shaula
// Licensed under the MIT License <LICENSE or
// http://opensource.org/licenses/MIT>. This file
// may not be copied, modified, or distributed
// except according to those terms.
#[macro_use]
extern crate serde_derive;
extern crate winreg;
use winreg::enums::*;
use std::collections::HashMap;
use std::fmt;
#[allow(non_snake_case)]
#[derive(Debug, Serialize, Deserialize)]
struct InstalledApp {
DisplayName: Option<String>,
DisplayVersion: Option<String>,
UninstallString: Option<String>
}
macro_rules! str_from_opt {
($s:expr) => { $s.as_ref().map(|x| &**x).unwrap_or("") }
}
impl fmt::Display for InstalledApp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}-{}",
str_from_opt!(self.DisplayName),
str_from_opt!(self.DisplayVersion))
}
}
fn main() {
let hklm = winreg::RegKey::predef(HKEY_LOCAL_MACHINE);
let uninstall_key = hklm.open_subkey("SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall")
.expect("key is missing");
let apps: HashMap<String, InstalledApp> = uninstall_key.decode().expect("deserialization failed");
for (_k, v) in &apps {
println!("{}", v);
}
}
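// Note: `decode` comes from winreg's serde integration; this example assumes the
// crate is built with the `serialization-serde` feature enabled in Cargo.toml.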
|
unnecessary_cast_fixable.rs | // run-rustfix
#![warn(clippy::unnecessary_cast)]
#![allow(clippy::no_effect, clippy::unnecessary_operation, clippy::nonstandard_macro_braces)]
fn main() {
// casting integer literal to float is unnecessary
100 as f32;
100 as f64;
100_i32 as f64;
let _ = -100 as f32;
let _ = -100 as f64;
let _ = -100_i32 as f64;
100. as f32;
100. as f64;
// Should not trigger
#[rustfmt::skip]
let v = vec!(1);
&v as &[i32];
0x10 as f32;
0o10 as f32;
0b10 as f32;
0x11 as f64;
0o11 as f64;
0b11 as f64;
1 as u32;
0x10 as i32;
0b10 as usize;
0o73 as u16;
1_000_000_000 as u32;
1.0 as f64;
0.5 as f32;
1.0 as u16;
let _ = -1 as i32;
let _ = -1.0 as f32;
}
error_handlers.py | """Plugin for adding external error handlers
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def init(api):
"""initialize the error_handlers plugin.
@api.errorhandler(<MyError>)
def _my_error(err):
return {'message': str(err),
'status': http.client.BAD_REQUEST}, http.client.BAD_REQUEST
"""
del api |
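# Hypothetical concrete handler (names are made up, not part of this plugin):
# the docstring's pattern spelled out for an imagined MyError exception.
#
# import http.client
#
# class MyError(Exception):
#     pass
#
# def init(api):
#     @api.errorhandler(MyError)
#     def _my_error(err):
#         return {'message': str(err),
#                 'status': http.client.BAD_REQUEST}, http.client.BAD_REQUEST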
|
cmd.go | // Copyright 2016 Palantir Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"fmt"
"strings"
"github.com/nmiyake/pkg/dirs"
"github.com/palantir/amalgomate/amalgomated"
"github.com/palantir/pkg/cli"
"github.com/palantir/pkg/cli/flag"
"github.com/pkg/errors"
"github.com/palantir/godel/cmd"
)
const (
cmdName = "verify"
apply = "apply"
skipFormat = "skip-format"
skipGenerate = "skip-generate"
skipImports = "skip-imports"
skipLicense = "skip-license"
skipCheck = "skip-check"
skipTest = "skip-test"
junitOutputPath = "junit-output"
)
func globalFlags(ctx cli.Context) ([]string, error) {
var globalArgs []string
for _, f := range cmd.GlobalCLIFlags() {
if ctx.Has(f.MainName()) {
var flagValue string
switch f.(type) {
case flag.BoolFlag:
flagValue = fmt.Sprintf("%v", ctx.Bool(f.MainName()))
case flag.StringFlag:
flagValue = ctx.String(f.MainName())
default:
return nil, errors.Errorf("Unhandled flag type %T for flag %v", f, f)
}
globalArgs = append(globalArgs, "--"+f.MainName(), flagValue)
}
}
return globalArgs, nil
}
func runCmd(cmder amalgomated.Cmder, args []string, wd string, ctx cli.Context) error {
cmd := cmder.Cmd(args, wd)
cmd.Stdout = ctx.App.Stdout
cmd.Stderr = ctx.App.Stderr
return cmd.Run()
}
func Command(gödelPath string) cli.Command {
return cli.Command{
Name: cmdName,
Usage: "Run format, generate, imports, license, check and test tasks",
Flags: []flag.Flag{
flag.BoolFlag{Name: apply, Usage: "Apply changes when possible", Value: true},
flag.BoolFlag{Name: skipFormat, Usage: "Skip 'format' task"},
flag.BoolFlag{Name: skipGenerate, Usage: "Skip 'generate' task"},
flag.BoolFlag{Name: skipImports, Usage: "Skip 'imports' task"},
flag.BoolFlag{Name: skipLicense, Usage: "Skip 'license' task"},
flag.BoolFlag{Name: skipCheck, Usage: "Skip 'check' task"},
flag.BoolFlag{Name: skipTest, Usage: "Skip 'test' task"},
flag.StringFlag{Name: junitOutputPath, Usage: "Path to JUnit XML output (only used if 'test' task is run)"},
},
Action: func(ctx cli.Context) error {
wd, err := dirs.GetwdEvalSymLinks()
if err != nil {
return err
}
globalFlags, err := globalFlags(ctx)
if err != nil {
return err
}
cmder := amalgomated.PathCmder(gödelPath, globalFlags...)
var failedChecks []string
if !ctx.Bool(skipFormat) {
args := []string{"format", "-v"}
if !ctx.Bool(apply) {
args = append(args, "-l")
}
if err := runCmd(cmder, args, wd, ctx); err != nil {
failedChecks = append(failedChecks, strings.Join(args, " "))
}
}
if !ctx.Bool(skipGenerate) {
args := []string{"generate"}
if !ctx.Bool(apply) {
args = append(args, "--verify")
}
ctx.Println("Running gogenerate...")
if err := runCmd(cmder, args, wd, ctx); err != nil {
failedChecks = append(failedChecks, strings.Join(args, " "))
}
}
if !ctx.Bool(skipImports) {
args := []string{"imports"}
if !ctx.Bool(apply) {
args = append(args, "--verify")
}
ctx.Println("Running gocd...")
if err := runCmd(cmder, args, wd, ctx); err != nil {
failedChecks = append(failedChecks, strings.Join(args, " "))
}
}
if !ctx.Bool(skipLicense) {
args := []string{"license"}
if !ctx.Bool(apply) {
args = append(args, "--verify")
}
ctx.Println("Running golicense...")
if err := runCmd(cmder, args, wd, ctx); err != nil {
failedChecks = append(failedChecks, strings.Join(args, " "))
}
}
if !ctx.Bool(skipCheck) {
if err := runCmd(cmder, []string{"check"}, wd, ctx); err != nil {
failedChecks = append(failedChecks, "check")
}
}
if !ctx.Bool(skipTest) {
args := []string{"test"}
if ctx.Has(junitOutputPath) {
args = append(args, "--"+junitOutputPath, ctx.String(junitOutputPath))
}
if err := runCmd(cmder, args, wd, ctx); err != nil {
failedChecks = append(failedChecks, "test")
}
}
if len(failedChecks) != 0 {
msgParts := []string{"Failed tasks:"}
for _, check := range failedChecks {
msgParts = append(msgParts, "\t"+check)
}
return fmt.Errorf(strings.Join(msgParts, "\n"))
}
return nil
},
}
}
|
lesa_mask_rcnn_r50_dconv_c3-c5_8x1_20e.py | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance_1024.py',
'../_base_/schedules/schedule_20e.py',
'../_base_/default_runtime.py',
]
optimizer = dict(lr=0.01)
model = dict(
pretrained=\
'./checkpoints/lesa_pretrained_imagenet/'+\
'lesa_resnet50_pretrained/'+\
'lesa_resnet50/'+\
'checkpoint.pth',
backbone=dict(
type='ResNet',
strides=(1,2,2,2),
wrn=False,
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
stage_spatial_res=[256, 128, 64, 32], # 1024: [256, 128, 64, 32], 1280: [320, 160, 80, 40]
stage_with_first_conv = [True, True, True, False],
lesa=dict(
type='LESA',
with_cp_UB_terms_only=True, # cp used on the unary and binary terms only.
pe_type='detection_qr', # ('classification', 'detection_qr')
groups = 8,
df_channel_shrink = [2], # df: dynamic fusion
df_kernel_size = [1,1],
df_group = [1,1],
),
stage_with_lesa = (False, False, True, True),
),
rpn_head=dict(
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
),
)
data_root = 'data/coco/'
data = dict(
samples_per_gpu=1,
workers_per_gpu=1,
# test=dict(
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# img_prefix=data_root + 'test2017/'),
)
tests_impl.rs | use super::super::*;
// Test target self-consistency and JSON encoding/decoding roundtrip.
pub(super) fn test_target(target: Target) {
target.check_consistency();
assert_eq!(Target::from_json(target.to_json()).map(|(j, _)| j), Ok(target));
}
impl Target {
fn check_consistency(&self) {
assert!(self.is_like_windows || !self.is_like_msvc);
// Check that LLD with the given flavor is treated identically to the linker it emulates.
// If your target really needs to deviate from the rules below, make an exception
// for it and document the reasons.
assert_eq!(
self.linker_flavor == LinkerFlavor::Msvc
|| self.linker_flavor == LinkerFlavor::Lld(LldFlavor::Link),
self.lld_flavor == LldFlavor::Link,
);
assert_eq!(self.is_like_msvc, self.lld_flavor == LldFlavor::Link);
for args in &[
&self.pre_link_args,
&self.late_link_args,
&self.late_link_args_dynamic,
&self.late_link_args_static,
&self.post_link_args,
] {
assert_eq!(
args.get(&LinkerFlavor::Msvc),
args.get(&LinkerFlavor::Lld(LldFlavor::Link)),
);
if args.contains_key(&LinkerFlavor::Msvc) {
assert_eq!(self.lld_flavor, LldFlavor::Link);
}
}
assert!(
(self.pre_link_objects_fallback.is_empty()
&& self.post_link_objects_fallback.is_empty())
|| self.crt_objects_fallback.is_some()
);
// Keep the default "unknown" vendor instead.
assert_ne!(self.vendor, "");
if !self.can_use_os_unknown() {
// Keep the default "none" for bare metal targets instead.
assert_ne!(self.os, "unknown");
}
}
// Add your target to the whitelist if it has a `std` library
// and you certainly want "unknown" for the OS name.
fn can_use_os_unknown(&self) -> bool {
self.llvm_target == "wasm32-unknown-unknown"
|| self.llvm_target == "wasm64-unknown-unknown"
|| self.llvm_target == "bpf"
|| (self.env == "sgx" && self.vendor == "fortanix")
}
}
cousins-in-binary-tree.go | //time: O(n)
//space: O(n)
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
//Two nodes of a binary tree are cousins
//if they have the same depth with different parents
//Each node has a unique value.
func isCousins(root *TreeNode, x int, y int) bool {
var foundX, foundY bool
var xDepth, yDepth int
var xParent, yParent int
postOrder(root, root, 0, func(nodeV, parentV, curDepth int) bool {
switch nodeV {
case x:
foundX = true
xDepth = curDepth
xParent = parentV
case y:
foundY = true
yDepth = curDepth
yParent = parentV
}
if foundX == true && foundY == true {
return true
}
return false
})
return xDepth == yDepth && xParent != yParent
}
//post -> LRD
func postOrder(node *TreeNode, parent *TreeNode, curDepth int, f func(node, parentV, curDepth int) bool) {
if node != nil {
postOrder(node.Left, node, curDepth+1, f)
postOrder(node.Right, node, curDepth+1, f)
foundBoth := f(node.Val, parent.Val, curDepth)
if foundBoth { return }
}
}
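// Hypothetical usage sketch (tree values are made up):
// root := &TreeNode{Val: 1,
//     Left:  &TreeNode{Val: 2, Right: &TreeNode{Val: 4}},
//     Right: &TreeNode{Val: 3, Right: &TreeNode{Val: 5}}}
// isCousins(root, 4, 5) == true  // depth 2 for both, parents 2 and 3 differ
// isCousins(root, 2, 3) == false // same depth, but both share the root as parent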
tick_test.go | package stake
import (
"testing"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestGetInflation(t *testing.T) {
ctx, _, keeper := createTestInput(t, false, 0)
pool := keeper.GetPool(ctx)
params := keeper.GetParams(ctx)
hrsPerYrRat := sdk.NewRat(hrsPerYr)
// Governing Mechanism:
// bondedRatio = BondedPool / TotalSupply
// inflationRateChangePerYear = (1- bondedRatio/ GoalBonded) * MaxInflationRateChange
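// e.g. for the 50%-bonded case ("test 3" below): with GoalBonded = 0.67 and
// InflationRateChange = 0.13, the hourly change is
// (1 - 0.5/0.67) * 0.13 / 8766 ~ 3.8e-6, so inflation drifts up toward the cap.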
tests := []struct {
name string
setBondedPool, setTotalSupply int64
setInflation, expectedChange sdk.Rat
}{
// with 0% bonded atom supply the inflation should increase by InflationRateChange
{"test 1", 0, 0, sdk.NewRat(7, 100), params.InflationRateChange.Quo(hrsPerYrRat).Round(precision)},
// 100% bonded, starting at 20% inflation and being reduced
// (1 - (1/0.67))*(0.13/8766)
{"test 2", 1, 1, sdk.NewRat(20, 100),
sdk.OneRat().Sub(sdk.OneRat().Quo(params.GoalBonded)).Mul(params.InflationRateChange).Quo(hrsPerYrRat).Round(precision)},
// 50% bonded, starting at 10% inflation and being increased
{"test 3", 1, 2, sdk.NewRat(10, 100),
sdk.OneRat().Sub(sdk.NewRat(1, 2).Quo(params.GoalBonded)).Mul(params.InflationRateChange).Quo(hrsPerYrRat).Round(precision)},
// test 7% minimum stop (testing with 100% bonded)
{"test 4", 1, 1, sdk.NewRat(7, 100), sdk.ZeroRat()},
{"test 5", 1, 1, sdk.NewRat(70001, 1000000), sdk.NewRat(-1, 1000000).Round(precision)},
// test 20% maximum stop (testing with 0% bonded)
{"test 6", 0, 0, sdk.NewRat(20, 100), sdk.ZeroRat()},
{"test 7", 0, 0, sdk.NewRat(199999, 1000000), sdk.NewRat(1, 1000000).Round(precision)},
// perfect balance shouldn't change inflation
{"test 8", 67, 100, sdk.NewRat(15, 100), sdk.ZeroRat()},
}
for _, tc := range tests {
pool.BondedPool, pool.TotalSupply = tc.setBondedPool, tc.setTotalSupply
pool.Inflation = tc.setInflation
keeper.setPool(ctx, pool)
inflation := keeper.nextInflation(ctx)
diffInflation := inflation.Sub(tc.setInflation)
assert.True(t, diffInflation.Equal(tc.expectedChange),
"Name: %v\nDiff: %v\nExpected: %v\n", tc.name, diffInflation, tc.expectedChange)
}
}
func TestProcessProvisions(t *testing.T) {
ctx, _, keeper := createTestInput(t, false, 0)
params := defaultParams()
keeper.setParams(ctx, params)
pool := keeper.GetPool(ctx)
// create some candidates some bonded, some unbonded
candidates := make([]Candidate, 10)
for i := 0; i < 10; i++ {
c := Candidate{
Status: Unbonded,
PubKey: pks[i],
Address: addrs[i],
Assets: sdk.NewRat(0),
Liabilities: sdk.NewRat(0),
}
if i < 5 {
c.Status = Bonded
}
mintedTokens := int64((i + 1) * 10000000)
pool.TotalSupply += mintedTokens
pool, c, _ = pool.candidateAddTokens(c, mintedTokens)
keeper.setCandidate(ctx, c)
candidates[i] = c
}
keeper.setPool(ctx, pool)
var totalSupply int64 = 550000000
var bondedShares int64 = 150000000
var unbondedShares int64 = 400000000
assert.Equal(t, totalSupply, pool.TotalSupply)
assert.Equal(t, bondedShares, pool.BondedPool)
assert.Equal(t, unbondedShares, pool.UnbondedPool)
// initial bonded ratio ~ 27%
assert.True(t, pool.bondedRatio().Equal(sdk.NewRat(bondedShares, totalSupply)), "%v", pool.bondedRatio())
// test the value of candidate shares
assert.True(t, pool.bondedShareExRate().Equal(sdk.OneRat()), "%v", pool.bondedShareExRate())
initialSupply := pool.TotalSupply
initialUnbonded := pool.TotalSupply - pool.BondedPool
// process the provisions a year
for hr := 0; hr < 8766; hr++ {
pool := keeper.GetPool(ctx)
expInflation := keeper.nextInflation(ctx).Round(1000000000)
expProvisions := (expInflation.Mul(sdk.NewRat(pool.TotalSupply)).Quo(hrsPerYrRat)).Evaluate()
startBondedPool := pool.BondedPool
startTotalSupply := pool.TotalSupply
pool = keeper.processProvisions(ctx)
keeper.setPool(ctx, pool)
//fmt.Printf("hr %v, startBondedPool %v, expProvisions %v, pool.BondedPool %v\n", hr, startBondedPool, expProvisions, pool.BondedPool)
require.Equal(t, startBondedPool+expProvisions, pool.BondedPool, "hr %v", hr)
require.Equal(t, startTotalSupply+expProvisions, pool.TotalSupply)
}
pool = keeper.GetPool(ctx)
assert.NotEqual(t, initialSupply, pool.TotalSupply)
assert.Equal(t, initialUnbonded, pool.UnbondedPool)
//panic(fmt.Sprintf("debug total %v, bonded %v, diff %v\n", p.TotalSupply, p.BondedPool, pool.TotalSupply-pool.BondedPool))
// the bonded ratio increases from ~27% to ~35% as provisions accrue to bonded holders' share of the total supply
assert.True(t, pool.bondedRatio().Equal(sdk.NewRat(211813022, 611813022)), "%v", pool.bondedRatio())
// global supply
assert.Equal(t, int64(611813022), pool.TotalSupply)
assert.Equal(t, int64(211813022), pool.BondedPool)
assert.Equal(t, unbondedShares, pool.UnbondedPool)
// test the value of candidate shares
assert.True(t, pool.bondedShareExRate().Mul(sdk.NewRat(bondedShares)).Equal(sdk.NewRat(211813022)), "%v", pool.bondedShareExRate())
}
| TestGetInflation |
footer.js | import React from 'react'
import { StyleSheet, css } from 'aphrodite'
import squareLogo from '../images/square-logo.png'
const styles = StyleSheet.create({
outerContainer: { | backgroundColor: '#1b2126',
marginBottom: '0',
position: 'sticky',
top: '0',
color: "#eceef1",
marginTop: "96pt"
},
innerContainer: {
margin: '0 auto',
maxWidth: 1400,
padding: '1.45rem 1.0875rem',
},
columnGrid: {
display: "grid",
gridTemplateColumns: "repeat(3, 1fr)",
gridColumnGap: "24px",
paddingTop: "24pt",
paddingBottom: "48pt"
},
logo: {
width: "32pt",
height: "32pt"
},
contributorList: {
listStyle: "none",
margin: "0",
padding: "0"
}
});
const Footer = ({ siteTitle }) => (
<div className={css(styles.outerContainer)} >
<div className={css(styles.innerContainer)}>
<div className={css(styles.columnGrid)}>
<div>
<img src={squareLogo} alt="logo" className={css(styles.logo)} />
</div>
<div>
<ul className={css(styles.contributorList)}>
<li>© 2019 Square, Inc.</li>
</ul>
</div>
<div>
<ul className={css(styles.contributorList)}>
<li>David Apgar</li>
<li>Tim Donnelly</li>
<li>Zach Klippenstein</li>
<li>Ray Ryan</li>
</ul>
</div>
</div>
</div>
</div>
)
export default Footer | |
env.go | package context
import (
"fmt"
"os"
"github.com/baetyl/baetyl-go/v2/errors"
)
// All keys
const (
KeyBaetyl = "BAETYL"
KeyConfFile = "BAETYL_CONF_FILE"
KeyNodeName = "BAETYL_NODE_NAME"
KeyAppName = "BAETYL_APP_NAME"
KeyAppVersion = "BAETYL_APP_VERSION"
KeySvcName = "BAETYL_SERVICE_NAME"
KeySysConf = "BAETYL_SYSTEM_CONF"
KeyRunMode = "BAETYL_RUN_MODE"
KeyServiceDynamicPort = "BAETYL_SERVICE_DYNAMIC_PORT"
KeyBaetylHostPathLib = "BAETYL_HOST_PATH_LIB"
)
const (
RunModeKube = "kube"
RunModeNative = "native"
)
const (
baetylEdgeNamespace = "baetyl-edge"
baetylEdgeSystemNamespace = "baetyl-edge-system"
baetylBrokerSystemPort = "50010"
baetylFunctionSystemHttpPort = "50011"
baetylFunctionSystemGrpcPort = "50012"
defaultHostPathLib = "/var/lib/baetyl"
)
// HostPathLib returns the host path of the baetyl lib directory.
func HostPathLib() (string, error) {
var hostPathLib string
if val := os.Getenv(KeyBaetylHostPathLib); val == "" {
err := os.Setenv(KeyBaetylHostPathLib, defaultHostPathLib)
if err != nil {
return "", errors.Trace(err)
}
hostPathLib = defaultHostPathLib
} else {
hostPathLib = val
}
return hostPathLib, nil
}
// RunMode returns the run mode of the edge.
func RunMode() string {
mode := os.Getenv(KeyRunMode)
if mode != RunModeNative {
mode = RunModeKube
}
return mode
}
// EdgeNamespace returns the namespace of the edge.
func | () string {
return baetylEdgeNamespace
}
// EdgeSystemNamespace returns the system namespace of the edge.
func EdgeSystemNamespace() string {
return baetylEdgeSystemNamespace
}
// BrokerPort returns the broker port.
func BrokerPort() string {
return baetylBrokerSystemPort
}
// FunctionHttpPort returns the HTTP port of the function service.
func FunctionHttpPort() string {
return baetylFunctionSystemHttpPort
}
// BrokerHost returns the broker host.
func BrokerHost() string {
if RunMode() == RunModeNative {
return "127.0.0.1"
}
return fmt.Sprintf("%s.%s", "baetyl-broker", baetylEdgeNamespace)
}
// FunctionHost returns the function host.
func FunctionHost() string {
if RunMode() == RunModeNative {
return "127.0.0.1"
}
return fmt.Sprintf("%s.%s", "baetyl-function", baetylEdgeSystemNamespace)
}
func getBrokerAddress() string {
return fmt.Sprintf("%s://%s:%s", "ssl", BrokerHost(), BrokerPort())
}
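// For example, in kube mode getBrokerAddress() yields
// "ssl://baetyl-broker.baetyl-edge:50010", and in native mode
// "ssl://127.0.0.1:50010".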
func getFunctionAddress() string {
return fmt.Sprintf("%s://%s:%s", "https", FunctionHost(), FunctionHttpPort())
}
| EdgeNamespace |
tx.go | package cli
import (
"fmt"
"os"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/context"
"github.com/cosmos/cosmos-sdk/client/utils"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
authtxb "github.com/cosmos/cosmos-sdk/x/auth/client/txbuilder"
"github.com/cosmos/cosmos-sdk/x/stake"
| // GetCmdCreateValidator implements the create validator command handler.
func GetCmdCreateValidator(cdc *codec.Codec) *cobra.Command {
cmd := &cobra.Command{
Use: "create-validator",
Short: "create new validator initialized with a self-delegation to it",
RunE: func(cmd *cobra.Command, args []string) error {
txBldr := authtxb.NewTxBuilderFromCLI().WithCodec(cdc)
cliCtx := context.NewCLIContext().
WithCodec(cdc).
WithAccountDecoder(cdc)
cliCtx, txBldr, msg, err := BuildCreateValidatorMsg(cliCtx, txBldr)
if err != nil {
return err
}
if viper.GetBool(FlagGenesisFormat) || cliCtx.GenerateOnly {
return utils.PrintUnsignedStdTx(os.Stdout, txBldr, cliCtx, []sdk.Msg{msg}, true)
}
// build and sign the transaction, then broadcast to Tendermint
return utils.CompleteAndBroadcastTxCli(txBldr, cliCtx, []sdk.Msg{msg})
},
}
cmd.Flags().AddFlagSet(FsPk)
cmd.Flags().AddFlagSet(FsAmount)
cmd.Flags().AddFlagSet(fsDescriptionCreate)
cmd.Flags().AddFlagSet(FsCommissionCreate)
cmd.Flags().AddFlagSet(fsDelegator)
cmd.Flags().Bool(FlagGenesisFormat, false, "Export the transaction in gen-tx format; it implies --generate-only")
cmd.Flags().String(FlagIP, "", fmt.Sprintf("Node's public IP. It takes effect only when used in combination with --%s", FlagGenesisFormat))
cmd.Flags().String(FlagNodeID, "", "Node's ID")
cmd.MarkFlagRequired(client.FlagFrom)
cmd.MarkFlagRequired(FlagAmount)
cmd.MarkFlagRequired(FlagPubKey)
cmd.MarkFlagRequired(FlagMoniker)
return cmd
}
// GetCmdEditValidator implements the edit validator command.
func GetCmdEditValidator(cdc *codec.Codec) *cobra.Command {
cmd := &cobra.Command{
Use: "edit-validator",
Short: "edit and existing validator account",
RunE: func(cmd *cobra.Command, args []string) error {
txBldr := authtxb.NewTxBuilderFromCLI().WithCodec(cdc)
cliCtx := context.NewCLIContext().
WithCodec(cdc).
WithAccountDecoder(cdc)
valAddr, err := cliCtx.GetFromAddress()
if err != nil {
return err
}
description := stake.Description{
Moniker: viper.GetString(FlagMoniker),
Identity: viper.GetString(FlagIdentity),
Website: viper.GetString(FlagWebsite),
Details: viper.GetString(FlagDetails),
}
var newRate *sdk.Dec
commissionRate := viper.GetString(FlagCommissionRate)
if commissionRate != "" {
rate, err := sdk.NewDecFromStr(commissionRate)
if err != nil {
return fmt.Errorf("invalid new commission rate: %v", err)
}
newRate = &rate
}
msg := stake.NewMsgEditValidator(sdk.ValAddress(valAddr), description, newRate)
if cliCtx.GenerateOnly {
return utils.PrintUnsignedStdTx(os.Stdout, txBldr, cliCtx, []sdk.Msg{msg}, false)
}
// build and sign the transaction, then broadcast to Tendermint
return utils.CompleteAndBroadcastTxCli(txBldr, cliCtx, []sdk.Msg{msg})
},
}
cmd.Flags().AddFlagSet(fsDescriptionEdit)
cmd.Flags().AddFlagSet(fsCommissionUpdate)
return cmd
}
// GetCmdDelegate implements the delegate command.
func GetCmdDelegate(cdc *codec.Codec) *cobra.Command {
cmd := &cobra.Command{
Use: "delegate",
Short: "delegate liquid tokens to an validator",
RunE: func(cmd *cobra.Command, args []string) error {
txBldr := authtxb.NewTxBuilderFromCLI().WithCodec(cdc)
cliCtx := context.NewCLIContext().
WithCodec(cdc).
WithAccountDecoder(cdc)
amount, err := sdk.ParseCoin(viper.GetString(FlagAmount))
if err != nil {
return err
}
delAddr, err := cliCtx.GetFromAddress()
if err != nil {
return err
}
valAddr, err := sdk.ValAddressFromBech32(viper.GetString(FlagAddressValidator))
if err != nil {
return err
}
msg := stake.NewMsgDelegate(delAddr, valAddr, amount)
if cliCtx.GenerateOnly {
return utils.PrintUnsignedStdTx(os.Stdout, txBldr, cliCtx, []sdk.Msg{msg}, false)
}
// build and sign the transaction, then broadcast to Tendermint
return utils.CompleteAndBroadcastTxCli(txBldr, cliCtx, []sdk.Msg{msg})
},
}
cmd.Flags().AddFlagSet(FsAmount)
cmd.Flags().AddFlagSet(fsValidator)
return cmd
}
// GetCmdRedelegate implements the begin redelegation command.
func GetCmdRedelegate(storeName string, cdc *codec.Codec) *cobra.Command {
cmd := &cobra.Command{
Use: "redelegate",
Short: "redelegate illiquid tokens from one validator to another",
RunE: func(cmd *cobra.Command, args []string) error {
txBldr := authtxb.NewTxBuilderFromCLI().WithCodec(cdc)
cliCtx := context.NewCLIContext().
WithCodec(cdc).
WithAccountDecoder(cdc)
var err error
delAddr, err := cliCtx.GetFromAddress()
if err != nil {
return err
}
valSrcAddr, err := sdk.ValAddressFromBech32(viper.GetString(FlagAddressValidatorSrc))
if err != nil {
return err
}
valDstAddr, err := sdk.ValAddressFromBech32(viper.GetString(FlagAddressValidatorDst))
if err != nil {
return err
}
// get the shares amount
sharesAmountStr := viper.GetString(FlagSharesAmount)
sharesFractionStr := viper.GetString(FlagSharesFraction)
sharesAmount, err := getShares(
storeName, cdc, sharesAmountStr, sharesFractionStr,
delAddr, valSrcAddr,
)
if err != nil {
return err
}
msg := stake.NewMsgBeginRedelegate(delAddr, valSrcAddr, valDstAddr, sharesAmount)
if cliCtx.GenerateOnly {
return utils.PrintUnsignedStdTx(os.Stdout, txBldr, cliCtx, []sdk.Msg{msg}, false)
}
// build and sign the transaction, then broadcast to Tendermint
return utils.CompleteAndBroadcastTxCli(txBldr, cliCtx, []sdk.Msg{msg})
},
}
cmd.Flags().AddFlagSet(fsShares)
cmd.Flags().AddFlagSet(fsRedelegation)
return cmd
}
// GetCmdUnbond implements the unbond validator command.
func GetCmdUnbond(storeName string, cdc *codec.Codec) *cobra.Command {
cmd := &cobra.Command{
Use: "unbond",
Short: "unbond shares from a validator",
RunE: func(cmd *cobra.Command, args []string) error {
txBldr := authtxb.NewTxBuilderFromCLI().WithCodec(cdc)
cliCtx := context.NewCLIContext().
WithCodec(cdc).
WithAccountDecoder(cdc)
delAddr, err := cliCtx.GetFromAddress()
if err != nil {
return err
}
valAddr, err := sdk.ValAddressFromBech32(viper.GetString(FlagAddressValidator))
if err != nil {
return err
}
// get the shares amount
sharesAmountStr := viper.GetString(FlagSharesAmount)
sharesFractionStr := viper.GetString(FlagSharesFraction)
sharesAmount, err := getShares(
storeName, cdc, sharesAmountStr, sharesFractionStr,
delAddr, valAddr,
)
if err != nil {
return err
}
msg := stake.NewMsgBeginUnbonding(delAddr, valAddr, sharesAmount)
if cliCtx.GenerateOnly {
return utils.PrintUnsignedStdTx(os.Stdout, txBldr, cliCtx, []sdk.Msg{msg}, false)
}
// build and sign the transaction, then broadcast to Tendermint
return utils.CompleteAndBroadcastTxCli(txBldr, cliCtx, []sdk.Msg{msg})
},
}
cmd.Flags().AddFlagSet(fsShares)
cmd.Flags().AddFlagSet(fsValidator)
return cmd
}
// BuildCreateValidatorMsg makes a new MsgCreateValidator.
func BuildCreateValidatorMsg(cliCtx context.CLIContext, txBldr authtxb.TxBuilder) (context.CLIContext, authtxb.TxBuilder, sdk.Msg, error) {
amountStr := viper.GetString(FlagAmount)
amount, err := sdk.ParseCoin(amountStr)
if err != nil {
return cliCtx, txBldr, nil, err
}
valAddr, err := cliCtx.GetFromAddress()
if err != nil {
return cliCtx, txBldr, nil, err
}
pkStr := viper.GetString(FlagPubKey)
pk, err := sdk.GetConsPubKeyBech32(pkStr)
if err != nil {
return cliCtx, txBldr, nil, err
}
description := stake.NewDescription(
viper.GetString(FlagMoniker),
viper.GetString(FlagIdentity),
viper.GetString(FlagWebsite),
viper.GetString(FlagDetails),
)
// get the initial validator commission parameters
rateStr := viper.GetString(FlagCommissionRate)
maxRateStr := viper.GetString(FlagCommissionMaxRate)
maxChangeRateStr := viper.GetString(FlagCommissionMaxChangeRate)
commissionMsg, err := buildCommissionMsg(rateStr, maxRateStr, maxChangeRateStr)
if err != nil {
return cliCtx, txBldr, nil, err
}
var msg sdk.Msg
if viper.GetString(FlagAddressDelegator) != "" {
delAddr, err := sdk.AccAddressFromBech32(viper.GetString(FlagAddressDelegator))
if err != nil {
return cliCtx, txBldr, nil, err
}
msg = stake.NewMsgCreateValidatorOnBehalfOf(
delAddr, sdk.ValAddress(valAddr), pk, amount, description, commissionMsg,
)
} else {
msg = stake.NewMsgCreateValidator(
sdk.ValAddress(valAddr), pk, amount, description, commissionMsg,
)
}
if viper.GetBool(FlagGenesisFormat) {
ip := viper.GetString(FlagIP)
nodeID := viper.GetString(FlagNodeID)
if nodeID != "" && ip != "" {
txBldr = txBldr.WithMemo(fmt.Sprintf("%s@%s:26656", nodeID, ip))
}
}
return cliCtx, txBldr, msg, nil
} | "github.com/spf13/cobra"
"github.com/spf13/viper"
)
|
ibc.applications.interchain_accounts.v1.rs | /// Metadata defines a set of protocol specific data encoded into the ICS27 channel version bytestring
/// See ICS004: <https://github.com/cosmos/ibc/tree/master/spec/core/ics-004-channel-and-packet-semantics#Versioning>
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Metadata {
/// version defines the ICS27 protocol version
#[prost(string, tag="1")]
pub version: ::prost::alloc::string::String,
/// controller_connection_id is the connection identifier associated with the controller chain
#[prost(string, tag="2")]
pub controller_connection_id: ::prost::alloc::string::String,
/// host_connection_id is the connection identifier associated with the host chain
#[prost(string, tag="3")]
pub host_connection_id: ::prost::alloc::string::String,
/// address defines the interchain account address to be fulfilled upon the OnChanOpenTry handshake step
/// NOTE: the address field is empty on the OnChanOpenInit handshake step
#[prost(string, tag="4")]
pub address: ::prost::alloc::string::String,
/// encoding defines the supported codec format
#[prost(string, tag="5")]
pub encoding: ::prost::alloc::string::String,
/// tx_type defines the type of transactions the interchain account can execute
#[prost(string, tag="6")]
pub tx_type: ::prost::alloc::string::String,
}
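// Illustrative note (not part of the generated code): in ICS27 this metadata
// is JSON-encoded into the channel version string, e.g.
// {"version":"ics27-1","controller_connection_id":"connection-0",
//  "host_connection_id":"connection-0","address":"","encoding":"proto3",
//  "tx_type":"sdk_multi_msg"}; the concrete identifiers are assumed here
// for illustration.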
/// GenesisState defines the interchain accounts genesis state
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenesisState {
#[prost(message, optional, tag="1")]
pub controller_genesis_state: ::core::option::Option<ControllerGenesisState>,
#[prost(message, optional, tag="2")]
pub host_genesis_state: ::core::option::Option<HostGenesisState>,
}
/// ControllerGenesisState defines the interchain accounts controller genesis state
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ControllerGenesisState {
#[prost(message, repeated, tag="1")]
pub active_channels: ::prost::alloc::vec::Vec<ActiveChannel>,
#[prost(message, repeated, tag="2")]
pub interchain_accounts: ::prost::alloc::vec::Vec<RegisteredInterchainAccount>,
#[prost(string, repeated, tag="3")]
pub ports: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
#[prost(message, optional, tag="4")]
pub params: ::core::option::Option<super::controller::v1::Params>,
}
/// HostGenesisState defines the interchain accounts host genesis state
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct HostGenesisState {
#[prost(message, repeated, tag="1")]
pub active_channels: ::prost::alloc::vec::Vec<ActiveChannel>,
#[prost(message, repeated, tag="2")]
pub interchain_accounts: ::prost::alloc::vec::Vec<RegisteredInterchainAccount>,
#[prost(string, tag="3")]
pub port: ::prost::alloc::string::String,
#[prost(message, optional, tag="4")]
pub params: ::core::option::Option<super::host::v1::Params>,
}
/// ActiveChannel contains a connection ID, port ID and associated active channel ID
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ActiveChannel {
#[prost(string, tag="1")]
pub connection_id: ::prost::alloc::string::String,
#[prost(string, tag="2")]
pub port_id: ::prost::alloc::string::String,
#[prost(string, tag="3")]
pub channel_id: ::prost::alloc::string::String,
}
/// RegisteredInterchainAccount contains a connection ID, port ID and associated interchain account address
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RegisteredInterchainAccount {
#[prost(string, tag="1")]
pub connection_id: ::prost::alloc::string::String,
#[prost(string, tag="2")]
pub port_id: ::prost::alloc::string::String,
#[prost(string, tag="3")]
pub account_address: ::prost::alloc::string::String,
}
/// InterchainAccountPacketData is comprised of a raw transaction, type of transaction and optional memo field.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct | {
#[prost(enumeration="Type", tag="1")]
pub r#type: i32,
#[prost(bytes="vec", tag="2")]
pub data: ::prost::alloc::vec::Vec<u8>,
#[prost(string, tag="3")]
pub memo: ::prost::alloc::string::String,
}
/// CosmosTx contains a list of sdk.Msg's. It should be used when sending transactions to an SDK host chain.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CosmosTx {
#[prost(message, repeated, tag="1")]
pub messages: ::prost::alloc::vec::Vec<::prost_types::Any>,
}
/// Type defines a classification of message issued from a controller chain to its associated interchain accounts
/// host
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Type {
/// Default zero value enumeration
Unspecified = 0,
/// Execute a transaction on an interchain accounts host chain
ExecuteTx = 1,
}
/// An InterchainAccount is defined as a BaseAccount & the address of the account owner on the controller chain
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InterchainAccount {
#[prost(message, optional, tag="1")]
pub base_account: ::core::option::Option<super::super::super::super::cosmos::auth::v1beta1::BaseAccount>,
#[prost(string, tag="2")]
pub account_owner: ::prost::alloc::string::String,
}
| InterchainAccountPacketData |
txn_test.go | // Copyright 2021 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package txnimpl
import (
"fmt"
"math/rand"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/buffer"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/catalog"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/common"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/container/compute"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/dataio/mockio"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/iface/txnif"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/tables"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/tables/updates"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/testutils"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/txn/txnbase"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/wal"
"github.com/RoaringBitmap/roaring"
"github.com/RoaringBitmap/roaring/roaring64"
"github.com/matrixorigin/matrixone/pkg/container/nulls"
"github.com/matrixorigin/matrixone/pkg/container/types"
gvec "github.com/matrixorigin/matrixone/pkg/container/vector"
"github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/assert"
)
const (
ModuleName = "TAETXN"
)
// 1. 30 concurrency
// 2. 10000 node
// 3. 512K buffer
// 4. 1K(30%), 4K(25%), 8K(20%), 16K(15%), 32K(10%)
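// getNodes below draws each worker's insert-node count from this size
// distribution: 30% -> 2 nodes, 25% -> 4, 20% -> 6, 15% -> 8, 10% -> 10.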
func init() {
rand.Seed(time.Now().UnixNano())
}
func getNodes() int {
v := rand.Intn(100)
if v < 30 {
return 1 * 2
} else if v < 55 {
return 2 * 2
} else if v < 75 {
return 3 * 2
} else if v < 90 {
return 4 * 2
}
return 5 * 2
}
func makeTable(t *testing.T, dir string, colCnt int, bufSize uint64) *txnTable {
mgr := buffer.NewNodeManager(bufSize, nil)
driver := wal.NewDriver(dir, "store", nil)
id := common.NextGlobalSeqNum()
schema := catalog.MockSchemaAll(colCnt)
rel := mockTestRelation(id, schema)
txn := txnbase.NewTxn(nil, nil, common.NextGlobalSeqNum(), common.NextGlobalSeqNum(), nil)
store := newStore(nil, driver, mgr, nil)
store.BindTxn(txn)
return newTxnTable(store, rel)
}
func TestInsertNode(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
tbl := makeTable(t, dir, 2, common.K*6)
defer tbl.store.driver.Close()
tbl.GetSchema().PrimaryKey = 1
bat := compute.MockBatch(tbl.GetSchema().Types(), common.K, int(tbl.GetSchema().PrimaryKey), nil)
p, _ := ants.NewPool(5)
var wg sync.WaitGroup
var all uint64
worker := func(id uint64) func() {
return func() {
defer wg.Done()
cnt := getNodes()
nodes := make([]*insertNode, cnt)
for i := 0; i < cnt; i++ {
var cid common.ID
cid.BlockID = id
cid.Idx = uint16(i)
n := NewInsertNode(tbl, tbl.store.nodesMgr, cid, tbl.store.driver)
nodes[i] = n
h := tbl.store.nodesMgr.Pin(n)
var err error
if err = n.Expand(common.K*1, func() error {
n.Append(bat, 0)
return nil
}); err != nil {
err = n.Expand(common.K*1, func() error {
n.Append(bat, 0)
return nil
})
}
if err != nil {
assert.NotNil(t, err)
}
h.Close()
}
for _, n := range nodes {
// n.ToTransient()
n.Close()
}
atomic.AddUint64(&all, uint64(len(nodes)))
}
}
idAlloc := common.NewIdAlloctor(1)
for {
id := idAlloc.Alloc()
if id > 10 {
break
}
wg.Add(1)
p.Submit(worker(id))
}
wg.Wait()
t.Log(all)
t.Log(tbl.store.nodesMgr.String())
t.Log(common.GPool.String())
}
func TestTable(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer c.Close()
defer mgr.Stop()
schema := catalog.MockSchemaAll(3)
schema.BlockMaxRows = 10000
schema.SegmentMaxBlocks = 10
schema.PrimaryKey = 2
{
txn := mgr.StartTxn(nil)
db, _ := txn.CreateDatabase("db")
rel, _ := db.CreateRelation(schema)
bat := compute.MockBatch(schema.Types(), common.K*100, int(schema.PrimaryKey), nil)
bats := compute.SplitBatch(bat, 100)
for _, data := range bats {
err := rel.Append(data)
assert.Nil(t, err)
}
tbl, _ := txn.GetStore().(*txnStore).getOrSetTable(rel.ID())
tbl.RangeDeleteLocalRows(1024+20, 1024+30)
tbl.RangeDeleteLocalRows(1024*2+38, 1024*2+40)
assert.True(t, tbl.IsLocalDeleted(1024+20))
assert.True(t, tbl.IsLocalDeleted(1024+30))
assert.False(t, tbl.IsLocalDeleted(1024+19))
assert.False(t, tbl.IsLocalDeleted(1024+31))
err := txn.Commit()
assert.Nil(t, err)
}
}
func TestUpdateUncommitted(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer c.Close()
defer mgr.Stop()
schema := catalog.MockSchemaAll(3)
schema.BlockMaxRows = 10000
schema.SegmentMaxBlocks = 10
schema.PrimaryKey = 1
bat := compute.MockBatch(schema.Types(), 1000, int(schema.PrimaryKey), nil)
bats := compute.SplitBatch(bat, 2)
txn := mgr.StartTxn(nil)
db, _ := txn.CreateDatabase("db")
rel, _ := db.CreateRelation(schema)
for _, b := range bats {
err := rel.Append(b)
assert.Nil(t, err)
}
tbl, _ := txn.GetStore().(*txnStore).getOrSetTable(rel.ID())
row := uint32(9)
assert.False(t, tbl.IsLocalDeleted(row))
rows := tbl.Rows()
err := tbl.UpdateLocalValue(row, 0, 999)
assert.Nil(t, err)
assert.True(t, tbl.IsLocalDeleted(row))
assert.Equal(t, rows+1, tbl.Rows())
}
func TestAppend(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer c.Close()
defer mgr.Stop()
schema := catalog.MockSchemaAll(3)
schema.BlockMaxRows = 10000
schema.SegmentMaxBlocks = 10
schema.PrimaryKey = 1
txn := mgr.StartTxn(nil)
db, _ := txn.CreateDatabase("db")
rel, _ := db.CreateRelation(schema)
table, _ := txn.GetStore().(*txnStore).getOrSetTable(rel.ID())
tbl := table.(*txnTable)
rows := uint64(txnbase.MaxNodeRows) / 8 * 3
brows := rows / 3
bat := compute.MockBatch(tbl.GetSchema().Types(), rows, int(tbl.GetSchema().PrimaryKey), nil)
bats := compute.SplitBatch(bat, 3)
err := tbl.BatchDedupLocal(bats[0])
assert.Nil(t, err)
err = tbl.Append(bats[0])
assert.Nil(t, err)
assert.Equal(t, int(brows), int(tbl.Rows()))
assert.Equal(t, int(brows), int(tbl.index.Count()))
err = tbl.BatchDedupLocal(bats[0])
assert.NotNil(t, err)
err = tbl.BatchDedupLocal(bats[1])
assert.Nil(t, err)
err = tbl.Append(bats[1])
assert.Nil(t, err)
assert.Equal(t, 2*int(brows), int(tbl.Rows()))
assert.Equal(t, 2*int(brows), int(tbl.index.Count()))
err = tbl.BatchDedupLocal(bats[2])
assert.Nil(t, err)
err = tbl.Append(bats[2])
assert.Nil(t, err)
assert.Equal(t, 3*int(brows), int(tbl.Rows()))
assert.Equal(t, 3*int(brows), int(tbl.index.Count()))
}
func TestIndex(t *testing.T) {
index := NewSimpleTableIndex()
err := index.Insert(1, 10)
assert.Nil(t, err)
err = index.Insert("one", 10)
assert.Nil(t, err)
row, err := index.Find("one")
assert.Nil(t, err)
assert.Equal(t, 10, int(row))
err = index.Delete("one")
assert.Nil(t, err)
_, err = index.Find("one")
assert.NotNil(t, err)
schema := catalog.MockSchemaAll(14)
schema.PrimaryKey = 1
bat := compute.MockBatch(schema.Types(), 500, int(schema.PrimaryKey), nil)
idx := NewSimpleTableIndex()
err = idx.BatchDedup(bat.Vecs[0])
assert.Nil(t, err)
err = idx.BatchInsert(bat.Vecs[0], 0, gvec.Length(bat.Vecs[0]), 0, false)
assert.NotNil(t, err)
err = idx.BatchDedup(bat.Vecs[1])
assert.Nil(t, err)
err = idx.BatchInsert(bat.Vecs[1], 0, gvec.Length(bat.Vecs[1]), 0, false)
assert.Nil(t, err)
window := gvec.New(bat.Vecs[1].Typ)
gvec.Window(bat.Vecs[1], 20, 22, window)
assert.Equal(t, 2, gvec.Length(window))
err = idx.BatchDedup(window)
assert.NotNil(t, err)
schema.PrimaryKey = 12
bat = compute.MockBatch(schema.Types(), 500, int(schema.PrimaryKey), nil)
idx = NewSimpleTableIndex()
err = idx.BatchDedup(bat.Vecs[12])
assert.Nil(t, err)
err = idx.BatchInsert(bat.Vecs[12], 0, gvec.Length(bat.Vecs[12]), 0, false)
assert.Nil(t, err)
window = gvec.New(bat.Vecs[12].Typ)
gvec.Window(bat.Vecs[12], 20, 22, window)
assert.Equal(t, 2, gvec.Length(window))
err = idx.BatchDedup(window)
assert.NotNil(t, err)
}
func TestLoad(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer c.Close()
defer mgr.Stop()
schema := catalog.MockSchemaAll(14)
schema.BlockMaxRows = 10000
schema.SegmentMaxBlocks = 10
schema.PrimaryKey = 13
bat := compute.MockBatch(schema.Types(), 60000, int(schema.PrimaryKey), nil)
bats := compute.SplitBatch(bat, 5)
txn := mgr.StartTxn(nil)
db, _ := txn.CreateDatabase("db")
rel, _ := db.CreateRelation(schema)
table, _ := txn.GetStore().(*txnStore).getOrSetTable(rel.ID())
tbl := table.(*txnTable)
err := tbl.Append(bats[0])
assert.Nil(t, err)
t.Log(tbl.store.nodesMgr.String())
v, err := tbl.GetLocalValue(100, 0)
assert.Nil(t, err)
t.Log(tbl.store.nodesMgr.String())
t.Logf("Row %d, Col %d, Val %v", 100, 0, v)
}
func TestNodeCommand(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer c.Close()
defer mgr.Stop()
schema := catalog.MockSchemaAll(14)
schema.BlockMaxRows = 10000
schema.SegmentMaxBlocks = 10
schema.PrimaryKey = 13
bat := compute.MockBatch(schema.Types(), 15000, int(schema.PrimaryKey), nil)
txn := mgr.StartTxn(nil)
db, _ := txn.CreateDatabase("db")
rel, _ := db.CreateRelation(schema)
table, _ := txn.GetStore().(*txnStore).getOrSetTable(rel.ID())
tbl := table.(*txnTable)
err := tbl.Append(bat)
assert.Nil(t, err)
err = tbl.RangeDeleteLocalRows(100, 200)
assert.Nil(t, err)
for i, inode := range tbl.inodes {
cmd, entry, err := inode.MakeCommand(uint32(i), false)
assert.Nil(t, err)
if i == 0 {
assert.Equal(t, 2, len(cmd.(*AppendCmd).Cmds))
} else {
assert.Equal(t, 1, len(cmd.(*AppendCmd).Cmds))
}
if entry != nil {
entry.WaitDone()
entry.Free()
}
t.Log(cmd.String())
}
}
func TestBuildCommand(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer c.Close()
defer mgr.Stop()
schema := catalog.MockSchemaAll(14)
schema.BlockMaxRows = 10000
schema.SegmentMaxBlocks = 10
schema.PrimaryKey = 13
bat := compute.MockBatch(schema.Types(), 55000, int(schema.PrimaryKey), nil)
txn := mgr.StartTxn(nil)
db, _ := txn.CreateDatabase("db")
rel, _ := db.CreateRelation(schema)
table, _ := txn.GetStore().(*txnStore).getOrSetTable(rel.ID())
tbl := table.(*txnTable)
err := tbl.Append(bat)
assert.Nil(t, err)
err = tbl.RangeDeleteLocalRows(100, 200)
assert.Nil(t, err)
t.Log(tbl.store.nodesMgr.String())
cmdSeq := uint32(1)
cmd, entries, err := tbl.buildCommitCmd(&cmdSeq)
assert.Nil(t, err)
tbl.Close()
assert.Equal(t, 0, tbl.store.nodesMgr.Count())
t.Log(cmd.String())
for _, e := range entries {
e.WaitDone()
e.Free()
}
t.Log(tbl.store.nodesMgr.String())
}
func TestApplyToColumn1(t *testing.T) {
deletes := &roaring.Bitmap{}
deletes.Add(1)
ts := common.NextGlobalSeqNum()
chain := updates.MockColumnUpdateChain()
node := updates.NewCommittedColumnNode(ts, ts, nil, nil)
node.AttachTo(chain)
node.UpdateLocked(3, []byte("update"))
deletes.AddRange(3, 4)
vec := &gvec.Vector{}
vec.Typ.Oid = types.T_varchar
col := &types.Bytes{
Data: make([]byte, 0),
Offsets: make([]uint32, 0),
Lengths: make([]uint32, 0),
}
for i := 0; i < 5; i++ {
col.Offsets = append(col.Offsets, uint32(len(col.Data)))
data := "val" + strconv.Itoa(i)
col.Data = append(col.Data, []byte(data)...)
col.Lengths = append(col.Lengths, uint32(len(data)))
}
vec.Col = col
vec.Nsp = &nulls.Nulls{}
vec.Nsp.Np = &roaring64.Bitmap{}
vec.Nsp.Np.Add(2)
// vec.Nsp.Np.Add(1)
// vec.Nsp.Np.Add(3)
vec.Nsp.Np.Add(4)
// vec.Nsp.Np.Add(0)
fmt.Printf("%s\n%v\n->\n", vec.Col, vec.Nsp.Np)
res := node.ApplyToColumn(vec, deletes)
fmt.Printf("%s\n%v\n", res.Col, res.Nsp.Np)
}
func TestApplyToColumn2(t *testing.T) {
deletes := &roaring.Bitmap{}
deletes.Add(1)
ts := common.NextGlobalSeqNum()
chain := updates.MockColumnUpdateChain()
node := updates.NewCommittedColumnNode(ts, ts, nil, nil)
node.AttachTo(chain)
node.UpdateLocked(0, int32(8))
deletes.AddRange(2, 4)
vec := &gvec.Vector{}
vec.Typ.Oid = types.T_int32
vec.Col = []int32{1, 2, 3, 4}
vec.Nsp = &nulls.Nulls{}
vec.Nsp.Np = &roaring64.Bitmap{}
vec.Nsp.Np.Add(2)
vec.Nsp.Np.Add(1)
vec.Nsp.Np.Add(3)
vec.Nsp.Np.Add(0)
fmt.Printf("%v\n%v\n->\n", vec.Col, vec.Nsp.Np)
res := node.ApplyToColumn(vec, deletes)
fmt.Printf("%v\n%v\n", res.Col, res.Nsp.Np)
}
func TestApplyToColumn3(t *testing.T) {
ts := common.NextGlobalSeqNum()
chain := updates.MockColumnUpdateChain()
node := updates.NewCommittedColumnNode(ts, ts, nil, nil)
node.AttachTo(chain)
node.UpdateLocked(3, []byte("update"))
vec := &gvec.Vector{}
vec.Typ.Oid = types.T_varchar
col := &types.Bytes{
Data: make([]byte, 0),
Offsets: make([]uint32, 0),
Lengths: make([]uint32, 0),
}
for i := 0; i < 5; i++ {
col.Offsets = append(col.Offsets, uint32(len(col.Data)))
data := "val" + strconv.Itoa(i)
col.Data = append(col.Data, []byte(data)...)
col.Lengths = append(col.Lengths, uint32(len(data)))
}
vec.Col = col
deletes := &roaring.Bitmap{}
deletes.Add(1)
fmt.Printf("%s\n->\n", vec.Col)
res := node.ApplyToColumn(vec, deletes)
fmt.Printf("%s\n", res.Col)
}
func TestApplyToColumn4(t *testing.T) |
func TestTxnManager1(t *testing.T) {
mgr := txnbase.NewTxnManager(TxnStoreFactory(nil, nil, nil, nil), TxnFactory(nil))
mgr.Start()
txn := mgr.StartTxn(nil)
txn.MockIncWriteCnt()
lock := sync.Mutex{}
seqs := make([]int, 0)
txn.SetPrepareCommitFn(func(i interface{}) error {
time.Sleep(time.Millisecond * 100)
lock.Lock()
seqs = append(seqs, 2)
lock.Unlock()
return nil
})
var wg sync.WaitGroup
short := func() {
defer wg.Done()
txn2 := mgr.StartTxn(nil)
txn2.MockIncWriteCnt()
txn2.SetPrepareCommitFn(func(i interface{}) error {
lock.Lock()
seqs = append(seqs, 4)
lock.Unlock()
return nil
})
time.Sleep(10 * time.Millisecond)
lock.Lock()
seqs = append(seqs, 1)
lock.Unlock()
txn.GetTxnState(true)
lock.Lock()
seqs = append(seqs, 3)
lock.Unlock()
txn2.Commit()
}
for i := 0; i < 1; i++ {
wg.Add(1)
go short()
}
txn.Commit()
wg.Wait()
defer mgr.Stop()
expected := []int{1, 2, 3, 4}
assert.Equal(t, expected, seqs)
}
func initTestContext(t *testing.T, dir string) (*catalog.Catalog, *txnbase.TxnManager, wal.Driver) {
c := catalog.MockCatalog(dir, "mock", nil, nil)
driver := wal.NewDriver(dir, "store", nil)
txnBufMgr := buffer.NewNodeManager(common.G, nil)
mutBufMgr := buffer.NewNodeManager(common.G, nil)
factory := tables.NewDataFactory(mockio.SegmentFileMockFactory, mutBufMgr, nil)
// factory := tables.NewDataFactory(dataio.SegmentFileMockFactory, mutBufMgr)
mgr := txnbase.NewTxnManager(TxnStoreFactory(c, driver, txnBufMgr, factory), TxnFactory(c))
mgr.Start()
return c, mgr, driver
}
// 1. Txn1 create database "db" and table "tb1". Commit
// 2. Txn2 drop database
// 3. Txn3 create table "tb2"
// 4. Txn2 commit
// 5. Txn3 commit
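// Expected outcome: Txn3's commit is rolled back, because Txn2 dropped the
// database before Txn3 committed (asserted via TxnStateRollbacked below).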
func TestTransaction1(t *testing.T) {
// dir := initTestPath(t)
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer c.Close()
defer mgr.Stop()
txn1 := mgr.StartTxn(nil)
name := "db"
schema := catalog.MockSchema(1)
db, err := txn1.CreateDatabase(name)
assert.Nil(t, err)
_, err = db.CreateRelation(schema)
assert.Nil(t, err)
err = txn1.Commit()
assert.Nil(t, err)
txn2 := mgr.StartTxn(nil)
db2, err := txn2.DropDatabase(name)
assert.Nil(t, err)
t.Log(db2.String())
txn3 := mgr.StartTxn(nil)
db3, err := txn3.GetDatabase(name)
assert.Nil(t, err)
t.Log(db3.String())
schema = catalog.MockSchema(1)
rel, err := db3.CreateRelation(schema)
assert.Nil(t, err)
t.Log(rel.String())
err = txn2.Commit()
assert.Nil(t, err)
err = txn3.Commit()
assert.Equal(t, txnif.TxnStateRollbacked, txn3.GetTxnState(true))
t.Log(txn3.String())
// assert.NotNil(t, err)
t.Log(db2.String())
t.Log(rel.String())
t.Log(c.SimplePPString(common.PPL1))
}
func TestTransaction2(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer c.Close()
defer mgr.Stop()
name := "db"
txn1 := mgr.StartTxn(nil)
db, err := txn1.CreateDatabase(name)
assert.Nil(t, err)
t.Log(db.String())
schema := catalog.MockSchema(1)
rel, err := db.CreateRelation(schema)
assert.Nil(t, err)
t.Log(rel.String())
err = txn1.Commit()
assert.Nil(t, err)
t.Log(db.String())
assert.Equal(t, txn1.GetCommitTS(), db.GetMeta().(*catalog.DBEntry).CreateAt)
assert.Nil(t, db.GetMeta().(*catalog.DBEntry).Txn)
assert.Equal(t, txn1.GetCommitTS(), rel.GetMeta().(*catalog.TableEntry).CreateAt)
assert.Nil(t, rel.GetMeta().(*catalog.TableEntry).Txn)
txn2 := mgr.StartTxn(nil)
get, err := txn2.GetDatabase(name)
assert.Nil(t, err)
t.Log(get.String())
dropped, err := txn2.DropDatabase(name)
assert.Nil(t, err)
t.Log(dropped.String())
get, err = txn2.GetDatabase(name)
assert.Equal(t, catalog.ErrNotFound, err)
t.Log(err)
txn3 := mgr.StartTxn(nil)
err = txn3.UseDatabase(name)
assert.Nil(t, err)
err = txn3.UseDatabase("xx")
assert.NotNil(t, err)
db3, err := txn3.GetDatabase(name)
assert.Nil(t, err)
rel, err = db3.GetRelationByName(schema.Name)
assert.Nil(t, err)
t.Log(rel.String())
}
func TestTransaction3(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer mgr.Stop()
defer c.Close()
pool, _ := ants.NewPool(20)
var wg sync.WaitGroup
flow := func(i int) func() {
return func() {
defer wg.Done()
txn := mgr.StartTxn(nil)
name := fmt.Sprintf("db-%d", i)
db, err := txn.CreateDatabase(name)
assert.Nil(t, err)
schema := catalog.MockSchemaAll(13)
_, err = db.CreateRelation(schema)
assert.Nil(t, err)
err = txn.Commit()
assert.Nil(t, err)
}
}
for i := 0; i < 100; i++ {
wg.Add(1)
pool.Submit(flow(i))
}
wg.Wait()
}
func TestSegment1(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer mgr.Stop()
defer c.Close()
txn1 := mgr.StartTxn(nil)
name := "db"
schema := catalog.MockSchema(1)
db, err := txn1.CreateDatabase(name)
assert.Nil(t, err)
rel, err := db.CreateRelation(schema)
assert.Nil(t, err)
_, err = rel.CreateSegment()
assert.Nil(t, err)
err = txn1.Commit()
assert.Nil(t, err)
txn2 := mgr.StartTxn(nil)
db, err = txn2.GetDatabase(name)
assert.Nil(t, err)
rel, err = db.GetRelationByName(schema.Name)
assert.Nil(t, err)
segIt := rel.MakeSegmentIt()
cnt := 0
for segIt.Valid() {
iseg := segIt.GetSegment()
t.Log(iseg.String())
cnt++
segIt.Next()
}
assert.Equal(t, 1, cnt)
_, err = rel.CreateSegment()
assert.Nil(t, err)
segIt = rel.MakeSegmentIt()
cnt = 0
for segIt.Valid() {
iseg := segIt.GetSegment()
t.Log(iseg.String())
cnt++
segIt.Next()
}
assert.Equal(t, 2, cnt)
txn3 := mgr.StartTxn(nil)
db, _ = txn3.GetDatabase(name)
rel, _ = db.GetRelationByName(schema.Name)
segIt = rel.MakeSegmentIt()
cnt = 0
for segIt.Valid() {
iseg := segIt.GetSegment()
t.Log(iseg.String())
cnt++
segIt.Next()
}
assert.Equal(t, 1, cnt)
err = txn2.Commit()
assert.Nil(t, err)
segIt = rel.MakeSegmentIt()
cnt = 0
for segIt.Valid() {
iseg := segIt.GetSegment()
t.Log(iseg.String())
cnt++
segIt.Next()
}
assert.Equal(t, 1, cnt)
}
func TestSegment2(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer mgr.Stop()
defer c.Close()
txn1 := mgr.StartTxn(nil)
db, _ := txn1.CreateDatabase("db")
schema := catalog.MockSchema(1)
rel, _ := db.CreateRelation(schema)
segCnt := 10
for i := 0; i < segCnt; i++ {
_, err := rel.CreateSegment()
assert.Nil(t, err)
}
it := rel.MakeSegmentIt()
cnt := 0
for it.Valid() {
cnt++
// iseg := it.GetSegment()
it.Next()
}
assert.Equal(t, segCnt, cnt)
// err := txn1.Commit()
// assert.Nil(t, err)
t.Log(c.SimplePPString(common.PPL1))
}
func TestBlock1(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer mgr.Stop()
defer c.Close()
txn1 := mgr.StartTxn(nil)
db, _ := txn1.CreateDatabase("db")
schema := catalog.MockSchema(1)
rel, _ := db.CreateRelation(schema)
seg, _ := rel.CreateSegment()
blkCnt := 100
for i := 0; i < blkCnt; i++ {
_, err := seg.CreateBlock()
assert.Nil(t, err)
}
it := seg.MakeBlockIt()
cnt := 0
for it.Valid() {
cnt++
it.Next()
}
assert.Equal(t, blkCnt, cnt)
err := txn1.Commit()
assert.Nil(t, err)
txn2 := mgr.StartTxn(nil)
db, _ = txn2.GetDatabase("db")
rel, _ = db.GetRelationByName(schema.Name)
segIt := rel.MakeSegmentIt()
cnt = 0
for segIt.Valid() {
seg = segIt.GetSegment()
it = seg.MakeBlockIt()
for it.Valid() {
cnt++
it.Next()
}
segIt.Next()
}
assert.Equal(t, blkCnt, cnt)
}
func TestDedup1(t *testing.T) {
dir := testutils.InitTestEnv(ModuleName, t)
c, mgr, driver := initTestContext(t, dir)
defer driver.Close()
defer c.Close()
defer mgr.Stop()
schema := catalog.MockSchemaAll(4)
schema.BlockMaxRows = 20
schema.SegmentMaxBlocks = 4
schema.PrimaryKey = 2
cnt := uint64(10)
rows := uint64(schema.BlockMaxRows) / 2 * cnt
bat := compute.MockBatch(schema.Types(), rows, int(schema.PrimaryKey), nil)
bats := compute.SplitBatch(bat, int(cnt))
{
txn := mgr.StartTxn(nil)
db, _ := txn.CreateDatabase("db")
db.CreateRelation(schema)
assert.Nil(t, txn.Commit())
}
{
txn := mgr.StartTxn(nil)
db, _ := txn.GetDatabase("db")
rel, _ := db.GetRelationByName(schema.Name)
err := rel.Append(bats[0])
assert.Nil(t, err)
err = rel.Append(bats[0])
assert.NotNil(t, err)
assert.Nil(t, txn.Rollback())
}
{
txn := mgr.StartTxn(nil)
db, _ := txn.GetDatabase("db")
rel, _ := db.GetRelationByName(schema.Name)
err := rel.Append(bats[0])
assert.Nil(t, err)
assert.Nil(t, txn.Commit())
}
{
txn := mgr.StartTxn(nil)
db, _ := txn.GetDatabase("db")
rel, _ := db.GetRelationByName(schema.Name)
err := rel.Append(bats[0])
assert.NotNil(t, err)
assert.Nil(t, txn.Rollback())
}
{
txn := mgr.StartTxn(nil)
db, _ := txn.GetDatabase("db")
rel, _ := db.GetRelationByName(schema.Name)
err := rel.Append(bats[1])
assert.Nil(t, err)
txn2 := mgr.StartTxn(nil)
db2, _ := txn2.GetDatabase("db")
rel2, _ := db2.GetRelationByName(schema.Name)
err = rel2.Append(bats[2])
assert.Nil(t, err)
err = rel2.Append(bats[3])
assert.Nil(t, err)
assert.Nil(t, txn2.Commit())
txn3 := mgr.StartTxn(nil)
db3, _ := txn3.GetDatabase("db")
rel3, _ := db3.GetRelationByName(schema.Name)
err = rel3.Append(bats[4])
assert.Nil(t, err)
err = rel3.Append(bats[5])
assert.Nil(t, err)
assert.Nil(t, txn3.Commit())
err = rel.Append(bats[3])
assert.Nil(t, err)
err = txn.Commit()
t.Log(txn.String())
assert.NotNil(t, err)
}
t.Log(c.SimplePPString(common.PPL1))
}
| {
ts := common.NextGlobalSeqNum()
chain := updates.MockColumnUpdateChain()
node := updates.NewCommittedColumnNode(ts, ts, nil, nil)
node.AttachTo(chain)
node.UpdateLocked(3, int32(8))
vec := &gvec.Vector{}
vec.Typ.Oid = types.T_int32
vec.Col = []int32{1, 2, 3, 4}
fmt.Printf("%v\n->\n", vec.Col)
res := node.ApplyToColumn(vec, nil)
fmt.Printf("%v\n", res.Col)
} |
Solution.py | #!/bin/python3
# https://www.hackerrank.com/challenges/30-loops/problem
import math
import os
import random
import re
import sys
def printMultiples(number):
|
if __name__ == '__main__':
n = int(input())
printMultiples(n) | for i in range(1,11):
print(str(number)+" x "+str(i)+" = "+str(number*i)) |
config_flow.py | """Config flow to configure Neato integration."""
import logging
from pybotvac import Account, Neato, Vorwerk
from pybotvac.exceptions import NeatoLoginException, NeatoRobotException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
# pylint: disable=unused-import
from .const import CONF_VENDOR, NEATO_DOMAIN, VALID_VENDORS
DOCS_URL = "https://www.home-assistant.io/integrations/neato"
DEFAULT_VENDOR = "neato"
_LOGGER = logging.getLogger(__name__)
class NeatoConfigFlow(config_entries.ConfigFlow, domain=NEATO_DOMAIN):
"""Neato integration config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize flow."""
self._username = vol.UNDEFINED
self._password = vol.UNDEFINED
self._vendor = vol.UNDEFINED
async def | (self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if self._async_current_entries():
return self.async_abort(reason="already_configured")
if user_input is not None:
self._username = user_input["username"]
self._password = user_input["password"]
self._vendor = user_input["vendor"]
error = await self.hass.async_add_executor_job(
self.try_login, self._username, self._password, self._vendor
)
if error:
errors["base"] = error
else:
return self.async_create_entry(
title=user_input[CONF_USERNAME],
data=user_input,
description_placeholders={"docs_url": DOCS_URL},
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Optional(CONF_VENDOR, default=DEFAULT_VENDOR): vol.In(VALID_VENDORS),
}
),
description_placeholders={"docs_url": DOCS_URL},
errors=errors,
)
async def async_step_import(self, user_input):
"""Import a config flow from configuration."""
if self._async_current_entries():
return self.async_abort(reason="already_configured")
username = user_input[CONF_USERNAME]
password = user_input[CONF_PASSWORD]
vendor = user_input[CONF_VENDOR]
error = await self.hass.async_add_executor_job(
self.try_login, username, password, vendor
)
if error is not None:
_LOGGER.error(error)
return self.async_abort(reason=error)
return self.async_create_entry(
title=f"{username} (from configuration)",
data={
CONF_USERNAME: username,
CONF_PASSWORD: password,
CONF_VENDOR: vendor,
},
)
@staticmethod
def try_login(username, password, vendor):
"""Try logging in to device and return any errors."""
this_vendor = None
if vendor == "vorwerk":
this_vendor = Vorwerk()
else: # Neato
this_vendor = Neato()
try:
Account(username, password, this_vendor)
except NeatoLoginException:
return "invalid_auth"
except NeatoRobotException:
return "unknown"
return None
| async_step_user |
test612.js |
var callbackArguments = [];
var argument1 = function (d) {
callbackArguments.push(arguments)
return d3.select(this).node().classList[1] !== liClass;
};
var argument2 = function () {
callbackArguments.push(arguments)
for (i = 0; i < len; i++) {
if (jQuery.contains(self[i], this)) {
return true;
}
}
};
var argument3 = false;
var argument4 = function () {
callbackArguments.push(arguments)
return this.name && !this.disabled && (this.checked || rselectTextarea.test(this.nodeName) || rinput.test(this.message));
};
var argument5 = 2.0669808705907619e+307;
var argument6 = true;
var argument7 = function (value) {
callbackArguments.push(arguments)
return !_.include(other, value);
};
var argument8 = "";
var base_0 = [714,242,893,655,213,25]
var r_0= undefined
try {
r_0 = base_0.filter(argument1)
}
catch(e) {
r_0= "Error"
}
var base_1 = [714,242,893,655,213,25]
var r_1= undefined
try {
r_1 = base_1.filter(argument2,argument3)
}
catch(e) {
r_1= "Error"
}
var base_2 = [714,242,893,655,213,25]
var r_2= undefined
try {
r_2 = base_2.filter(argument4,argument5,argument6)
}
catch(e) {
r_2= "Error"
}
var base_3 = [714,242,893,655,213,25]
var r_3= undefined
try {
r_3 = base_3.filter(argument7,argument8)
}
catch(e) {
r_3= "Error"
}
function | (array){
return array.map(function(a){
if (a === null || a == undefined) return a;
var name = a.constructor.name;
if (name==='Object' || name=='Boolean'|| name=='Array'||name=='Number'||name=='String')
return JSON.stringify(a);
return name;
});
}
setTimeout(function(){
require("fs").writeFileSync("./experiments/filter/filterMined/test612.json",JSON.stringify({"baseObjects":serialize([base_0,base_1,base_2,base_3]),"returnObjects":serialize([r_0,r_1,r_2,r_3]),"callbackArgs":callbackArguments}))
},300) | serialize |
requirements.txt.py | XX XXXXXXXXXXXXXXXXXXXXXXXXXXXX |
||
HailRounded.js | import createSvgIcon from './utils/createSvgIcon'; | d: "M12 6c-1.1 0-2-.9-2-2s.9-2 2-2 2 .9 2 2-.9 2-2 2zm5.95-4c.59 0 1.06.51 1 1.09-.02.15-.21 4.06-3.95 5.31V21c0 .55-.45 1-1 1s-1-.45-1-1v-5h-2v5c0 .55-.45 1-1 1s-1-.45-1-1V10.1c-.3.1-.5.2-.6.3-.46.36-1.17.87-1.36 2.67-.05.52-.47.93-1 .93-.58 0-1.05-.49-1-1.07.13-1.6.62-2.98 2.07-4.22C8.21 7.81 10 7 12 7s2.68-.46 3.48-1.06c.43-.34 1.28-.99 1.48-3.02.05-.52.47-.92.99-.92zM5 16h1c.55 0 1 .45 1 1v4c0 .55-.45 1-1 1H5c-.55 0-1-.45-1-1v-4c0-.55.45-1 1-1z"
}), 'HailRounded'); | import { jsx as _jsx } from "react/jsx-runtime";
export default createSvgIcon( /*#__PURE__*/_jsx("path", { |
inference_test.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import sys
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.base_proposer import (
BaseProposer,
)
from beanmachine.ppl.world import World, init_from_prior
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
@bm.functional
def baz(self):
return self.bar() * 2.0
class SampleDoubleModel:
@bm.random_variable
def foo(self):
return dist.Normal(torch.tensor(0.0).double(), torch.tensor(1.0).double())
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0).double())
@pytest.mark.parametrize("multiprocess", [False, True])
def test_inference(multiprocess):
if multiprocess and sys.platform.startswith("win"):
pytest.skip(
"Windows does not support fork-based multiprocessing (which is necessary "
"for running parallel inference within pytest."
)
model = SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
queries = [model.foo(), model.baz()]
observations = {model.bar(): torch.tensor(0.5)}
num_samples = 30
num_chains = 2
samples = mh.infer(
queries,
observations,
num_samples,
num_adaptive_samples=num_samples,
num_chains=num_chains,
run_in_parallel=multiprocess,
mp_context="fork",
)
assert model.foo() in samples
assert isinstance(samples[model.foo()], torch.Tensor)
assert samples[model.foo()].shape == (num_chains, num_samples)
assert samples.get_num_samples(include_adapt_steps=True) == num_samples * 2
# make sure that the RNG state for each chain is different
assert not torch.equal(
samples.get_chain(0)[model.foo()], samples.get_chain(1)[model.foo()]
)
def test_get_proposers():
|
def test_initialize_world():
model = SampleModel()
nuts = bm.GlobalNoUTurnSampler()
world = nuts._initialize_world([model.bar()], {})
assert model.foo() in world
assert model.bar() in world
def test_initialize_from_prior():
mh = bm.SingleSiteAncestralMetropolisHastings()
model = SampleModel()
queries = [model.foo()]
samples_from_prior = []
for _ in range(10000):
world = mh._initialize_world(queries, {}, init_from_prior)
val = world.get(model.foo())
samples_from_prior.append(val.item())
assert samples_from_prior[0] != samples_from_prior[1]
assert math.isclose(sum(samples_from_prior) / 10000.0, 0.0, abs_tol=1e-2)
def test_initialization_resampling():
mh = bm.SingleSiteAncestralMetropolisHastings()
@bm.random_variable
def foo():
return dist.Uniform(3.0, 5.0)
# verify that the method re-samples as expected
retries = 0
def init_after_three_tries(d: dist.Distribution):
nonlocal retries
retries += 1
return torch.tensor(float("nan")) if retries < 3 else d.sample()
sampler = mh.sampler(
[foo()], {}, num_samples=10, initialize_fn=init_after_three_tries
)
for world in sampler:
assert not torch.isinf(world.log_prob()) and not torch.isnan(world.log_prob())
# an extreme case where the init value is always out of the support
def init_to_zero(d: dist.Distribution):
return torch.zeros_like(d.sample())
with pytest.raises(ValueError, match="Cannot find a valid initialization"):
mh.infer([foo()], {}, num_samples=10, initialize_fn=init_to_zero)
@pytest.mark.parametrize(
"algorithm",
[
bm.GlobalNoUTurnSampler(),
bm.GlobalHamiltonianMonteCarlo(trajectory_length=1.0),
bm.SingleSiteAncestralMetropolisHastings(),
bm.SingleSiteNewtonianMonteCarlo(),
bm.SingleSiteUniformMetropolisHastings(),
],
)
def test_inference_with_double_dtype(algorithm):
model = SampleDoubleModel()
queries = [model.foo()]
bar_val = torch.tensor(0.5).double()
# make sure that the inference can run successfully
samples = algorithm.infer(
queries,
{model.bar(): bar_val},
num_samples=20,
num_chains=1,
)
assert samples[model.foo()].dtype == bar_val.dtype
| world = World()
model = SampleModel()
world.call(model.bar())
nuts = bm.GlobalNoUTurnSampler()
proposers = nuts.get_proposers(world, world.latent_nodes, 10)
assert all(isinstance(proposer, BaseProposer) for proposer in proposers) |
schedule.go | /*
Copyright 2017 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// SchedulesGetter has a method to return a ScheduleInterface.
// A group's client should implement this interface.
type SchedulesGetter interface {
Schedules(namespace string) ScheduleInterface
}
// ScheduleInterface has methods to work with Schedule resources.
type ScheduleInterface interface {
Create(*v1.Schedule) (*v1.Schedule, error)
Update(*v1.Schedule) (*v1.Schedule, error)
UpdateStatus(*v1.Schedule) (*v1.Schedule, error)
Delete(name string, options *meta_v1.DeleteOptions) error
DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error
Get(name string, options meta_v1.GetOptions) (*v1.Schedule, error)
List(opts meta_v1.ListOptions) (*v1.ScheduleList, error)
Watch(opts meta_v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Schedule, err error)
ScheduleExpansion
}
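// Usage sketch (identifiers are hypothetical): given an ArkV1Client c,
// a typical call chain is
//   c.Schedules("heptio-ark").Get("daily-backup", meta_v1.GetOptions{})
// where "heptio-ark" is the namespace and "daily-backup" the schedule name.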
// schedules implements ScheduleInterface
type schedules struct {
client rest.Interface
ns string
}
// newSchedules returns a Schedules
func newSchedules(c *ArkV1Client, namespace string) *schedules |
// Get takes name of the schedule, and returns the corresponding schedule object, and an error if there is any.
func (c *schedules) Get(name string, options meta_v1.GetOptions) (result *v1.Schedule, err error) {
result = &v1.Schedule{}
err = c.client.Get().
Namespace(c.ns).
Resource("schedules").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of Schedules that match those selectors.
func (c *schedules) List(opts meta_v1.ListOptions) (result *v1.ScheduleList, err error) {
result = &v1.ScheduleList{}
err = c.client.Get().
Namespace(c.ns).
Resource("schedules").
VersionedParams(&opts, scheme.ParameterCodec).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested schedules.
func (c *schedules) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("schedules").
VersionedParams(&opts, scheme.ParameterCodec).
Watch()
}
// Create takes the representation of a schedule and creates it. Returns the server's representation of the schedule, and an error, if there is any.
func (c *schedules) Create(schedule *v1.Schedule) (result *v1.Schedule, err error) {
result = &v1.Schedule{}
err = c.client.Post().
Namespace(c.ns).
Resource("schedules").
Body(schedule).
Do().
Into(result)
return
}
// Update takes the representation of a schedule and updates it. Returns the server's representation of the schedule, and an error, if there is any.
func (c *schedules) Update(schedule *v1.Schedule) (result *v1.Schedule, err error) {
result = &v1.Schedule{}
err = c.client.Put().
Namespace(c.ns).
Resource("schedules").
Name(schedule.Name).
Body(schedule).
Do().
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *schedules) UpdateStatus(schedule *v1.Schedule) (result *v1.Schedule, err error) {
result = &v1.Schedule{}
err = c.client.Put().
Namespace(c.ns).
Resource("schedules").
Name(schedule.Name).
SubResource("status").
Body(schedule).
Do().
Into(result)
return
}
// Delete takes name of the schedule and deletes it. Returns an error if one occurs.
func (c *schedules) Delete(name string, options *meta_v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("schedules").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *schedules) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("schedules").
VersionedParams(&listOptions, scheme.ParameterCodec).
Body(options).
Do().
Error()
}
// Patch applies the patch and returns the patched schedule.
func (c *schedules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Schedule, err error) {
result = &v1.Schedule{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("schedules").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
| {
return &schedules{
client: c.RESTClient(),
ns: namespace,
}
} |
getListsIdClients.js | /**
* Auto-generated trigger file for "Mailchimp Marketing API" API.
*
* Generated at: 2022-01-05T12:34:40.158Z
* Mass generator version: 1.0.0
*
* : mailchimp-newcomp
* Copyright © 2020, AG
*
* All files of this connector are licensed under the Apache 2.0 License. For details
* see the file LICENSE on the toplevel directory.
*
*
* Operation: 'getListsIdClients'
* Endpoint Path: '/lists/{list_id}/clients'
* Method: 'get'
*
*/
const Swagger = require('swagger-client');
const spec = require('../spec.json');
// this wrapper offers a simplified emitData(data) function
module.exports = { process: processTrigger };
// parameter names for this call
const PARAMETERS = [
"fields",
"exclude_fields",
"list_id"
];
// mappings from connector field names to API field names
const FIELD_MAP = {
"fields": "fields",
"exclude_fields": "exclude_fields",
"list_id": "list_id"
};
function processTrigger(msg, cfg) {
  const isVerbose = process.env.debug || cfg.verbose;
  // Only log the message/config when verbose is on; cfg carries credentials.
  if (isVerbose) {
    console.log(`---MSG: ${JSON.stringify(msg)}`);
    console.log(`---CFG: ${JSON.stringify(cfg)}`);
    console.log(`---ENV: ${JSON.stringify(process.env)}`);
  }
const contentType = undefined;
const body = msg.data;
mapFieldNames(body);
let parameters = {};
for (let param of PARAMETERS) {
parameters[param] = body[param];
}
  const oihUid = msg.metadata?.oihUid ?? 'oihUid not set yet';
  const recordUid = msg.metadata?.recordUid;
  const applicationUid = msg.metadata?.applicationUid;
const newElement = {};
const oihMeta = {
applicationUid,
oihUid,
recordUid,
};
// credentials for this operation
let securities = {};
  securities['basicAuth'] = { username: cfg.username, password: cfg.passphrase };
if (cfg.otherServer) {
if (!spec.servers) {
spec.servers = [];
}
spec.servers.push({ url: cfg.otherServer });
}
let callParams = {
spec: spec,
operationId: 'getListsIdClients',
pathName: '/lists/{list_id}/clients',
method: 'get',
parameters: parameters,
requestContentType: contentType,
requestBody: body,
securities: { authorized: securities },
server: spec.servers[cfg.server] || cfg.otherServer,
};
if (callParams.method === 'get') {
delete callParams.requestBody;
}
if (isVerbose) {
let out = Object.assign({}, callParams);
out.spec = '[omitted]';
console.log(`--SWAGGER CALL: ${JSON.stringify(out)}`);
}
// Call operation via Swagger client
return Swagger.execute(callParams).then((data) => {
delete data.uid;
newElement.metadata = oihMeta;
const response = JSON.parse(data.data);
    // Guard nodeSettings: it is absent when no splitting key was configured.
    if (!cfg.nodeSettings || !cfg.nodeSettings.arraySplittingKey) {
      newElement.data = response;
    } else {
      newElement.data = cfg.nodeSettings.arraySplittingKey
        .split('.')
        .reduce((p, c) => (p && p[c]) || null, response);
    }
    if (Array.isArray(newElement.data)) {
      for (const item of newElement.data) {
        // Emit a fresh object per array element; reusing `newElement` would
        // emit the same mutated reference for every element.
        this.emit('data', { metadata: oihMeta, data: item });
      }
    } else {
      this.emit('data', newElement);
    }
}); | function mapFieldNames(obj) {
  if (Array.isArray(obj)) {
    obj.forEach(mapFieldNames);
  } else if (typeof obj === 'object' && obj) {
    // Strip null/undefined values in place; reassigning the `obj` parameter,
    // as the generated code did, is never visible to the caller.
    for (const key of Object.keys(obj)) {
      if (obj[key] == null) {
        delete obj[key];
      }
    }
  }
} | }
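// Usage sketch (not part of the generated trigger): invoking the exported
// `process` function locally. The framework normally supplies `this.emit`;
// the stub below is an assumption for local testing, and the credentials and
// list_id are placeholders.
const trigger = require('./getListsIdClients');
const testMsg = { data: { list_id: 'abc123' }, metadata: {} };
const testCfg = { username: 'user', passphrase: 'api-key', server: 0, nodeSettings: {} };
trigger.process
  .call({ emit: (event, payload) => console.log(event, payload) }, testMsg, testCfg)
  .catch((err) => console.error('trigger failed:', err));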
|
group.tsx | import * as React from "react";
import { useRegisterItem, useUpdateItem } from "../context";
import { uuid } from "../util";
import { TouchBar } from "./touch-bar";
import { ItemType } from "../types";
export interface GroupProps {
/**
* For debug purposes
*/
id?: string;
children: React.ReactNode;
}
function | ({ id, children, ...props }: GroupProps) {
const componentId = React.useRef(id ?? uuid());
useRegisterItem(componentId.current, ItemType.TouchBarGroup, props);
useUpdateItem(componentId.current, props);
return (
<TouchBar
id={componentId.current}
// @ts-expect-error - "private property"
registerOnly
>
{children}
</TouchBar>
);
}
export const Group = React.memo(GroupComponent);
| GroupComponent |
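// Usage sketch (assumption: sibling item components such as a Button exist in
// this renderer; only Group itself is defined above). Children rendered under
// Group are registered against its TouchBarGroup item.
//
// import { Group } from "./group";
//
// const FormatGroup = () => (
//   <Group id="format-group">
//     <Button label="Bold" />
//     <Button label="Italic" />
//   </Group>
// );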
lattice.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # Lattice Variables
//!
//! This file contains generic code for operating on inference variables
//! that are characterized by an upper- and lower-bound. The logic and
//! reasoning is explained in detail in the large comment in `infer.rs`.
//!
//! The code in here is defined quite generically so that it can be
//! applied both to type variables, which represent types being inferred,
//! and fn variables, which represent function types being inferred.
//! It may eventually be applied to their types as well, who knows.
//! In some cases, the functions are also generic with respect to the
//! operation on the lattice (GLB vs LUB).
//!
//! Although all the functions are generic, we generally write the
//! comments in a way that is specific to type variables and the LUB
//! operation. It's just easier that way.
//!
//! In general all of the functions are defined parametrically
//! over a `LatticeValue`, which is a value defined with respect to
//! a lattice.
use super::InferCtxt;
use super::type_variable::TypeVariableOrigin;
use traits::ObligationCause;
use ty::TyVar;
use ty::{self, Ty};
use ty::relate::{RelateResult, TypeRelation};
pub trait LatticeDir<'f, 'gcx: 'f+'tcx, 'tcx: 'f> : TypeRelation<'f, 'gcx, 'tcx> {
fn infcx(&self) -> &'f InferCtxt<'f, 'gcx, 'tcx>;
fn cause(&self) -> &ObligationCause<'tcx>;
// Relates the type `v` to `a` and `b` such that `v` represents
// the LUB/GLB of `a` and `b` as appropriate.
fn relate_bound(&mut self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()>;
}
pub fn | <'a, 'gcx, 'tcx, L>(this: &mut L,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> RelateResult<'tcx, Ty<'tcx>>
where L: LatticeDir<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
debug!("{}.lattice_tys({:?}, {:?})",
this.tag(),
a,
b);
if a == b {
return Ok(a);
}
let infcx = this.infcx();
let a = infcx.type_variables.borrow_mut().replace_if_possible(a);
let b = infcx.type_variables.borrow_mut().replace_if_possible(b);
match (&a.sty, &b.sty) {
(&ty::TyInfer(TyVar(..)), &ty::TyInfer(TyVar(..)))
if infcx.type_var_diverges(a) && infcx.type_var_diverges(b) => {
let v = infcx.next_diverging_ty_var(
TypeVariableOrigin::LatticeVariable(this.cause().span));
this.relate_bound(v, a, b)?;
Ok(v)
}
(&ty::TyInfer(TyVar(..)), _) |
(_, &ty::TyInfer(TyVar(..))) => {
let v = infcx.next_ty_var(TypeVariableOrigin::LatticeVariable(this.cause().span));
this.relate_bound(v, a, b)?;
Ok(v)
}
_ => {
infcx.super_combine_tys(this, a, b)
}
}
}
| super_lattice_tys |
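// Toy illustration of the LUB shape implemented above (not rustc code): equal
// types join to themselves, and any join involving an inference variable is
// deferred to a fresh variable that stands for the bound of both sides.
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Debug)]
enum ToyTy { Int, Bool, Var(u32) }

#[allow(dead_code)]
fn toy_lub(next_var: &mut u32, a: ToyTy, b: ToyTy) -> Result<ToyTy, String> {
    if a == b {
        return Ok(a); // fast path, mirrors the `if a == b` check above
    }
    match (a, b) {
        (ToyTy::Var(_), _) | (_, ToyTy::Var(_)) => {
            *next_var += 1;
            Ok(ToyTy::Var(*next_var)) // fresh variable related to both sides
        }
        _ => Err(format!("no LUB for {:?} and {:?}", a, b)),
    }
}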
styleclip_mapper.py | import math
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import Module
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
rest_dim = [1] * (input.ndim - bias.ndim - 1)
input = input.cuda()
if input.ndim == 3:
return (
F.leaky_relu(
input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope
)
* scale
)
else:
return (
F.leaky_relu(
input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope
)
* scale
)
class PixelNorm(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
class EqualLinear(nn.Module):
def __init__(
self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = (1 / math.sqrt(in_dim)) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(
input, self.weight * self.scale, bias=self.bias * self.lr_mul
)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class Mapper(Module):
def __init__(self, opts):
super(Mapper, self).__init__()
self.opts = opts
layers = [PixelNorm()]
for i in range(4):
layers.append(
EqualLinear(
512, 512, lr_mul=0.01, activation='fused_lrelu'
)
)
self.mapping = nn.Sequential(*layers)
def forward(self, x):
x = self.mapping(x)
return x
class SingleMapper(Module):
def | (self, opts):
super(SingleMapper, self).__init__()
self.opts = opts
self.mapping = Mapper(opts)
def forward(self, x):
out = self.mapping(x)
return out
class LevelsMapper(Module):
def __init__(self, opts):
super(LevelsMapper, self).__init__()
self.opts = opts
if not opts.no_coarse_mapper:
self.course_mapping = Mapper(opts)
if not opts.no_medium_mapper:
self.medium_mapping = Mapper(opts)
if not opts.no_fine_mapper:
self.fine_mapping = Mapper(opts)
def forward(self, x):
x_coarse = x[:, :4, :]
x_medium = x[:, 4:8, :]
x_fine = x[:, 8:, :]
if not self.opts.no_coarse_mapper:
x_coarse = self.course_mapping(x_coarse)
else:
x_coarse = torch.zeros_like(x_coarse)
if not self.opts.no_medium_mapper:
x_medium = self.medium_mapping(x_medium)
else:
x_medium = torch.zeros_like(x_medium)
if not self.opts.no_fine_mapper:
x_fine = self.fine_mapping(x_fine)
else:
x_fine = torch.zeros_like(x_fine)
out = torch.cat([x_coarse, x_medium, x_fine], dim=1)
return out
def get_keys(d, name):
if 'state_dict' in d:
d = d['state_dict']
d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}
return d_filt
class StyleCLIPMapper(nn.Module):
def __init__(self, opts):
super().__init__()
self.opts = opts
# Define architecture
self.mapper = self.set_mapper()
# Load weights if needed
self.load_weights()
def set_mapper(self):
if self.opts.mapper_type == 'SingleMapper':
mapper = SingleMapper(self.opts)
elif self.opts.mapper_type == 'LevelsMapper':
mapper = LevelsMapper(self.opts)
else:
raise Exception('{} is not a valid mapper'.format(self.opts.mapper_type))
return mapper
def load_weights(self):
if self.opts.checkpoint_path is not None:
print('Loading from checkpoint: {}'.format(self.opts.checkpoint_path))
ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')
self.mapper.load_state_dict(get_keys(ckpt, 'mapper'), strict=True)
| __init__ |
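# Usage sketch (not part of the file): running the mapper on a batch of W+
# latents. The `opts` fields below match the attributes read above; a CUDA
# device is required because fused_leaky_relu moves activations to the GPU.
from types import SimpleNamespace

if __name__ == '__main__':
    opts = SimpleNamespace(
        mapper_type='LevelsMapper',
        checkpoint_path=None,      # skip weight loading for the sketch
        no_coarse_mapper=False,
        no_medium_mapper=False,
        no_fine_mapper=False,
    )
    net = StyleCLIPMapper(opts).cuda()
    w_plus = torch.randn(2, 18, 512).cuda()   # 2 latents, 18 style layers
    delta = net.mapper(w_plus)                # per-layer edit directions
    print(delta.shape)                        # torch.Size([2, 18, 512])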
ParserTest.py | import unittest
from ParserTest.TestUtil import *
class TestParserMethods(unittest.TestCase):
DIRECT_KOs_ID = 2
PASSIVE_KOs_ID = 3
DEATHS_ID = 4
def test_direct_KO(self):
pokemon_data = {
"Raichu-Alola": ["p1", "Stokin' Dude!"],
"Magikarp": ["p2", "A Karp"]
}
simulator = ParserSimulator(pokemon_data)
simulator.load_all()
simulator.switch_in_all()
simulator.move("Stokin' Dude!", "Thunderbolt", "A Karp")
simulator.damage("A Karp")
def test_toxic_spikes(self):
pokemon_data = {
"Toxapex": ["p1", "The Worst"],
"Magikarp": ["p2", "Sushi Incarnate"],
"Pichu": ["p2", "Baby Pikachu"]
}
simulator = ParserSimulator(pokemon_data)
simulator.load_all()
simulator.switch_in_species("Toxapex")
simulator.switch_in_species("Magikarp")
simulator.move("The Worst", "Toxic Spikes", "Sushi Incarnate")
simulator.move("Sushi Incarnate", "Splash", "The Worst")
simulator.switch_in_species("Pichu")
simulator.damage("Baby Pikachu", "psn")
simulator.faint("Baby Pikachu")
def test_stealth_rocks(self):
pass
| unittest.main() |
if __name__ == '__main__': |
shim_test.go | /*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package shim
import (
"os"
"strings"
"testing"
"github.com/hyperledger/fabric/common/flogging"
"github.com/op/go-logging"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
)
// Test Go shim functionality that can be tested outside of a real chaincode
// context.
// TestShimLogging simply tests that the APIs are working. These tests test
// for correct control over the shim's logging object and the LogLevel
// function.
func TestShimLogging(t *testing.T) {
SetLoggingLevel(LogCritical)
if shimLoggingLevel != LogCritical {
t.Errorf("shimLoggingLevel is not LogCritical as expected")
}
if chaincodeLogger.IsEnabledFor(logging.DEBUG) {
t.Errorf("The chaincodeLogger should not be enabled for DEBUG")
}
if !chaincodeLogger.IsEnabledFor(logging.CRITICAL) {
t.Errorf("The chaincodeLogger should be enabled for CRITICAL")
}
var level LoggingLevel
var err error
level, err = LogLevel("debug")
if err != nil {
t.Errorf("LogLevel(debug) failed")
}
if level != LogDebug {
t.Errorf("LogLevel(debug) did not return LogDebug")
}
level, err = LogLevel("INFO")
if err != nil {
t.Errorf("LogLevel(INFO) failed")
}
if level != LogInfo {
t.Errorf("LogLevel(INFO) did not return LogInfo")
}
level, err = LogLevel("Notice")
if err != nil {
t.Errorf("LogLevel(Notice) failed")
}
if level != LogNotice {
t.Errorf("LogLevel(Notice) did not return LogNotice")
}
level, err = LogLevel("WaRnInG")
if err != nil {
t.Errorf("LogLevel(WaRnInG) failed")
}
if level != LogWarning {
t.Errorf("LogLevel(WaRnInG) did not return LogWarning")
}
level, err = LogLevel("ERRor")
if err != nil {
t.Errorf("LogLevel(ERRor) failed")
}
if level != LogError {
t.Errorf("LogLevel(ERRor) did not return LogError")
}
level, err = LogLevel("critiCAL")
if err != nil {
t.Errorf("LogLevel(critiCAL) failed")
}
if level != LogCritical {
t.Errorf("LogLevel(critiCAL) did not return LogCritical")
}
level, err = LogLevel("foo")
if err == nil {
t.Errorf("LogLevel(foo) did not fail")
}
if level != LogError {
t.Errorf("LogLevel(foo) did not return LogError")
}
}
// TestChaincodeLogging tests the logging APIs for chaincodes.
func TestChaincodeLogging(t *testing.T) {
// From start() - We can't call start() from this test
format := logging.MustStringFormatter("%{time:15:04:05.000} [%{module}] %{level:.4s} : %{message}")
backend := logging.NewLogBackend(os.Stderr, "", 0)
backendFormatter := logging.NewBackendFormatter(backend, format)
logging.SetBackend(backendFormatter).SetLevel(logging.Level(shimLoggingLevel), "shim")
foo := NewLogger("foo")
bar := NewLogger("bar")
foo.Debugf("Foo is debugging: %d", 10)
bar.Infof("Bar is informational? %s.", "Yes")
foo.Noticef("NOTE NOTE NOTE")
bar.Warningf("Danger, Danger %s %s", "Will", "Robinson!")
foo.Errorf("I'm sorry Dave, I'm afraid I can't do that.")
bar.Criticalf("PI is not equal to 3.14, we computed it as %.2f", 4.13)
bar.Debug("Foo is debugging:", 10)
foo.Info("Bar is informational?", "Yes.")
bar.Notice("NOTE NOTE NOTE")
foo.Warning("Danger, Danger", "Will", "Robinson!")
bar.Error("I'm sorry Dave, I'm afraid I can't do that.")
foo.Critical("PI is not equal to", 3.14, ", we computed it as", 4.13)
foo.SetLevel(LogWarning)
if foo.IsEnabledFor(LogDebug) {
t.Errorf("'foo' should not be enabled for LogDebug")
}
if !foo.IsEnabledFor(LogCritical) {
t.Errorf("'foo' should be enabled for LogCritical")
}
bar.SetLevel(LogCritical)
if bar.IsEnabledFor(LogDebug) {
t.Errorf("'bar' should not be enabled for LogDebug")
}
if !bar.IsEnabledFor(LogCritical) {
t.Errorf("'bar' should be enabled for LogCritical")
}
}
func TestNilEventName(t *testing.T) {
stub := ChaincodeStub{}
if err := stub.SetEvent("", []byte("event payload")); err == nil {
t.Error("Event name can not be nil string.")
}
}
type testCase struct {
name string
ccLogLevel string
shimLogLevel string
}
func TestSetupChaincodeLogging_shim(t *testing.T) {
var tc []testCase
tc = append(tc,
testCase{"ValidLevels", "debug", "warning"},
testCase{"EmptyLevels", "", ""},
testCase{"BadShimLevel", "debug", "war"},
testCase{"BadCCLevel", "deb", "notice"},
testCase{"EmptyShimLevel", "error", ""},
testCase{"EmptyCCLevel", "", "critical"},
)
assert := assert.New(t)
for i := 0; i < len(tc); i++ {
t.Run(tc[i].name, func(t *testing.T) {
viper.Set("chaincode.logging.level", tc[i].ccLogLevel)
viper.Set("chaincode.logging.shim", tc[i].shimLogLevel)
SetupChaincodeLogging()
_, ccErr := logging.LogLevel(tc[i].ccLogLevel)
_, shimErr := logging.LogLevel(tc[i].shimLogLevel)
if ccErr == nil {
assert.Equal(strings.ToUpper(tc[i].ccLogLevel), flogging.GetModuleLevel("ccLogger"), "Test case '%s' failed", tc[i].name)
if shimErr == nil {
assert.Equal(strings.ToUpper(tc[i].shimLogLevel), flogging.GetModuleLevel("shim"), "Test case '%s' failed", tc[i].name)
} else {
assert.Equal(strings.ToUpper(tc[i].ccLogLevel), flogging.GetModuleLevel("shim"), "Test case '%s' failed", tc[i].name)
}
} else {
assert.Equal(flogging.DefaultLevel(), flogging.GetModuleLevel("ccLogger"), "Test case '%s' failed", tc[i].name)
if shimErr == nil | else {
assert.Equal(flogging.DefaultLevel(), flogging.GetModuleLevel("shim"), "Test case '%s' failed", tc[i].name)
}
}
})
}
}
| {
assert.Equal(strings.ToUpper(tc[i].shimLogLevel), flogging.GetModuleLevel("shim"), "Test case '%s' failed", tc[i].name)
} |
test_pipeline.py | import unittest
from gr_nlp_toolkit.labels.dp_labels import dp_labels
from gr_nlp_toolkit.labels.ner_labels import ner_labels
from gr_nlp_toolkit.labels.pos_labels import pos_labels, pos_properties
from gr_nlp_toolkit.pipeline.pipeline import Pipeline
class TestPipeline(unittest.TestCase):
def test_using_all_processors(self):
| eline('dp,pos,ner')
doc = nlp("Η Ιταλία κέρδισε την Αγγλία στον τελικό του Euro το 2021")
deprels_preds = []
upos_preds = []
ner_preds = []
for token in doc.tokens:
deprels_preds.append(token.deprel)
upos_preds.append(token.upos)
ner_preds.append(token.ner)
nlp = Pipeline('dp')
doc = nlp("Η Ιταλία κέρδισε την Αγγλία στον τελικό του Euro το 2021")
new_deprels_preds = []
for token in doc.tokens:
new_deprels_preds.append(token.deprel)
nlp = Pipeline('pos')
doc = nlp("Η Ιταλία κέρδισε την Αγγλία στον τελικό του Euro το 2021")
new_upos_preds =[]
for token in doc.tokens:
new_upos_preds.append(token.upos)
nlp = Pipeline('ner')
doc = nlp("Η Ιταλία κέρδισε την Αγγλία στον τελικό του Euro το 2021")
new_ner_preds =[]
for token in doc.tokens:
new_ner_preds.append(token.ner)
self.assertEqual(new_deprels_preds, deprels_preds)
self.assertEqual(new_upos_preds, upos_preds)
self.assertEqual(new_ner_preds, ner_preds)
def test_using_only_one_processor(self):
nlp = Pipeline('ner')
doc = nlp("Η Ιταλία κέρδισε την Αγγλία στον τελικό του Euro το 2021")
for token in doc.tokens:
self.assertIsNotNone(token.ner)
self.assertTrue(token.ner in ner_labels)
self.assertIsNone(token.head)
self.assertIsNone(token.deprel)
self.assertFalse(token.head in range(0, len(doc.tokens)))
self.assertFalse(token.deprel in dp_labels)
self.assertIsNone(token.upos)
self.assertFalse(token.upos in pos_labels['upos'])
for feat, value in token.feats.items():
self.assertFalse(feat in pos_properties[token.upos])
self.assertFalse(value in pos_labels[feat])
if __name__ == '__main__':
unittest.main()
| nlp = Pipeline('dp,pos,ner')
sentences = ["Η Ιταλία κέρδισε την Αγγλία στον τελικό του Euro το 2021",
"Το ποιηματάκι το έγραψε ο διάσημος ποιητής, Νίκος Νικολαϊδης"]
for sent in sentences:
doc = nlp(sent)
for token in doc.tokens:
print(token.text, token.ner, token.upos, token.feats, token.head, token.deprel)
self.assertIsNotNone(token.ner)
self.assertTrue(token.ner in ner_labels)
self.assertIsNotNone(token.head)
self.assertIsNotNone(token.deprel)
# We have to add plus one, because the cls token is removed
self.assertTrue(token.head in range(0, len(doc.tokens) + 1))
self.assertTrue(token.deprel in dp_labels)
self.assertIsNotNone(token.upos)
self.assertTrue(token.upos in pos_labels['upos'])
self.assertIsNotNone(token.feats)
self.assertEqual(len(list(token.feats.keys())), len(pos_properties[token.upos]))
for feat, value in token.feats.items():
self.assertTrue(feat in pos_properties[token.upos])
self.assertTrue(value in pos_labels[feat])
def test_annotations_are_same_with_multiple_configurations(self):
nlp = Pip |
server.go | package server
import (
"context"
"net/http"
"github.com/rancher/norman/api/builtin"
"github.com/rancher/norman/pkg/subscribe"
rancherapi "github.com/rancher/rancher/pkg/api"
"github.com/rancher/rancher/pkg/api/controllers/dynamicschema"
"github.com/rancher/rancher/pkg/api/controllers/samlconfig"
"github.com/rancher/rancher/pkg/api/controllers/settings"
"github.com/rancher/rancher/pkg/api/controllers/usercontrollers"
whitelistproxyKontainerDriver "github.com/rancher/rancher/pkg/api/controllers/whitelistproxy/kontainerdriver"
whitelistproxyNodeDriver "github.com/rancher/rancher/pkg/api/controllers/whitelistproxy/nodedriver"
"github.com/rancher/rancher/pkg/api/server/managementstored"
"github.com/rancher/rancher/pkg/api/server/userstored"
"github.com/rancher/rancher/pkg/clustermanager"
clusterSchema "github.com/rancher/types/apis/cluster.cattle.io/v3/schema"
managementSchema "github.com/rancher/types/apis/management.cattle.io/v3/schema"
projectSchema "github.com/rancher/types/apis/project.cattle.io/v3/schema"
"github.com/rancher/types/config"
)
func | (ctx context.Context, scaledContext *config.ScaledContext, clusterManager *clustermanager.Manager,
k8sProxy http.Handler) (http.Handler, error) {
subscribe.Register(&builtin.Version, scaledContext.Schemas)
subscribe.Register(&managementSchema.Version, scaledContext.Schemas)
subscribe.Register(&clusterSchema.Version, scaledContext.Schemas)
subscribe.Register(&projectSchema.Version, scaledContext.Schemas)
if err := managementstored.Setup(ctx, scaledContext, clusterManager, k8sProxy); err != nil {
return nil, err
}
if err := userstored.Setup(ctx, scaledContext, clusterManager, k8sProxy); err != nil {
return nil, err
}
server, err := rancherapi.NewServer(scaledContext.Schemas)
if err != nil {
return nil, err
}
server.AccessControl = scaledContext.AccessControl
dynamicschema.Register(ctx, scaledContext, server.Schemas)
whitelistproxyNodeDriver.Register(ctx, scaledContext)
whitelistproxyKontainerDriver.Register(ctx, scaledContext)
samlconfig.Register(ctx, scaledContext)
usercontrollers.Register(ctx, scaledContext, clusterManager)
err = settings.Register(scaledContext)
return server, err
}
| New |
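// Wiring sketch (caller side, not from this package): the handler returned by
// New is typically mounted on the process's HTTP listener. The address and
// variable names here are illustrative.
//
//   handler, err := server.New(ctx, scaledContext, clusterManager, k8sProxy)
//   if err != nil {
//       log.Fatal(err)
//   }
//   log.Fatal(http.ListenAndServe(":8080", handler))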
highlightAnnotations.ts | /**
*
* Copyright (c) 2021 Aspose.PDF Cloud
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
import { HighlightAnnotation } from "./highlightAnnotation";
import { LinkElement } from "./linkElement";
/**
* Object representing a list of highlight annotations.
*/
export class Hi | xtends LinkElement {
/**
* List of highlight annotations.
*/
'list': Array<HighlightAnnotation>;
static discriminator = undefined;
static attributeTypeMap: Array<{name: string, baseName: string, type: string}> = [
{
"name": "list",
"baseName": "List",
"type": "Array<HighlightAnnotation>"
} ];
static getAttributeTypeMap() {
return super.getAttributeTypeMap().concat(HighlightAnnotations.attributeTypeMap);
}
}
| ghlightAnnotations e |
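// Consumption sketch (assumption: the SDK's generated ObjectSerializer reads
// attributeTypeMap/discriminator; the serializer itself is not in this file).
//
// const parsed: HighlightAnnotations =
//     ObjectSerializer.deserialize(JSON.parse(body), "HighlightAnnotations");
// parsed.list.forEach((a) => console.log(a));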
actions.ts | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
import { Store } from '../../types';
import { ActionTypes } from './../../../constants/index';
|
export const clearUndoHistory = (store: Store) => store.dispatch({ type: ActionTypes.HISTORY_CLEAR }); | export const undo = (store: Store) => store.dispatch({ type: ActionTypes.UNDO });
export const redo = (store: Store) => store.dispatch({ type: ActionTypes.REDO }); |
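// Usage sketch: each helper just dispatches its action type against the store
// passed in, e.g. from a keyboard-shortcut handler (store wiring assumed):
//
// undo(store);              // dispatches { type: ActionTypes.UNDO }
// redo(store);              // dispatches { type: ActionTypes.REDO }
// clearUndoHistory(store);  // dispatches { type: ActionTypes.HISTORY_CLEAR }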
regex_epsilon_node.py | from lexer.regex_ast.regex_atomic_node import AtomicNode
from automaton import Automaton
| class EpsilonNode(AtomicNode):
def eval(self):
return Automaton(number_of_states=1, initial_state=0, finalStates=[0], transitions={}) |
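# Behavior sketch (the AtomicNode constructor signature is assumed, and an
# Automaton matching method is not shown in this file): the automaton built
# here has a single state 0 that is both initial and final, with no
# transitions, so it accepts exactly the empty string -- the semantics of the
# epsilon symbol in a regex AST.
#
# eps_nfa = EpsilonNode(...).eval()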
|
bitcoin_zh_CN.ts | <TS language="zh_CN" version="2.1">
<context>
<name>AddressBookPage</name>
<message>
<source>Right-click to edit address or label</source>
<translation>鼠标右击编辑地址或标签</translation>
</message>
<message>
<source>Create a new address</source>
<translation>创建新地址</translation>
</message>
<message>
<source>&New</source>
<translation>新建(&N)</translation>
</message>
<message>
<source>Copy the currently selected address to the system clipboard</source>
<translation>复制当前选中的地址到系统剪贴板</translation>
</message>
<message>
<source>&Copy</source>
<translation>复制(&C)</translation>
</message>
<message>
<source>C&lose</source>
<translation>关闭(&l)</translation>
</message>
<message>
<source>Delete the currently selected address from the list</source>
<translation>从列表中删除选中的地址</translation>
</message>
<message>
<source>Export the data in the current tab to a file</source>
<translation>导出当前分页里的数据到文件</translation>
</message>
<message>
<source>&Export</source>
<translation>导出(&E)</translation>
</message>
<message>
<source>&Delete</source>
<translation>删除(&D)</translation>
</message>
<message>
<source>Choose the address to send coins to</source>
<translation>选择要付钱过去的地址</translation>
</message>
<message>
<source>Choose the address to receive coins with</source>
<translation>选择要收钱进来的地址</translation>
</message>
<message>
<source>C&hoose</source>
<translation>选择</translation>
</message>
<message>
<source>Sending addresses</source>
<translation>付款地址</translation>
</message>
<message>
<source>Receiving addresses</source>
<translation>收款地址</translation>
</message>
<message>
<source>These are your BLAST addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>These are your BLAST addresses for receiving payments. It is recommended to use a new receiving address for each transaction.</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>&Copy Address</source>
<translation>复制地址</translation>
</message>
<message>
<source>Copy &Label</source>
<translation>复制标签</translation>
</message>
<message>
<source>&Edit</source>
<translation>编辑</translation>
</message>
<message>
<source>Export Address List</source>
<translation>导出地址列表</translation>
</message>
<message>
<source>Comma separated file (*.csv)</source>
<translation>逗号分隔文件 (*.csv)</translation>
</message>
<message>
<source>Exporting Failed</source>
<translation>导出失败</translation>
</message>
<message>
<source>There was an error trying to save the address list to %1. Please try again.</source>
<translation>存储地址列表到 %1 时发生错误。请再试一次。</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<source>Label</source>
<translation>标签</translation>
</message>
<message>
<source>Address</source>
<translation>地址</translation>
</message>
<message>
<source>(no label)</source>
<translation>(无标签)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<source>Passphrase Dialog</source>
<translation>密码对话框</translation>
</message>
<message>
<source>Enter passphrase</source>
<translation>输入密码</translation>
</message>
<message>
<source>New passphrase</source>
<translation>新密码</translation>
</message>
<message>
<source>Repeat new passphrase</source>
<translation>重复新密码</translation>
</message>
<message>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>ten or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>输入钱包的新密码。<br/>密码请用<b>10 个以上的随机字符</b>,或是<b>8 个以上的字词</b>。</translation>
</message>
<message>
<source>Encrypt wallet</source>
<translation>加密钱包</translation>
</message>
<message>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>这个操作需要你的钱包密码来解锁钱包。</translation>
</message>
<message>
<source>Unlock wallet</source>
<translation>解锁钱包</translation>
</message>
<message>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>这个操作需要你的钱包密码来把钱包解密。</translation>
</message>
<message>
<source>Decrypt wallet</source>
<translation>解密钱包</translation>
</message>
<message>
<source>Change passphrase</source>
<translation>修改密码</translation>
</message>
<message>
<source>Enter the old passphrase and new passphrase to the wallet.</source>
<translation>请输入钱包的旧密码和新密码。</translation>
</message>
<message>
<source>Confirm wallet encryption</source>
<translation>确认钱包加密</translation>
</message>
<message>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR BLAST</b>!</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>你确定要把钱包加密吗?</translation>
</message>
<message>
<source>Wallet encrypted</source>
<translation>钱包已加密</translation>
</message>
<message>
<source>%1 will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your blasts from being stolen by malware infecting your computer.</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>重要: 请改用新产生的有加密的钱包文件,来取代旧钱包文件的备份。为了安全性,当你开始使用新的有加密的钱包后,旧钱包文件的备份就不能再使用了。</translation>
</message>
<message>
<source>Wallet encryption failed</source>
<translation>钱包加密失败</translation>
</message>
<message>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>因为内部错误导致钱包加密失败。你的钱包还是没加密。</translation>
</message>
<message>
<source>The supplied passphrases do not match.</source>
<translation>提供的密码不yi'zhi。</translation>
</message>
<message>
<source>Wallet unlock failed</source>
<translation>钱包解锁失败</translation>
</message>
<message>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>输入用来解密钱包的密码不正确。</translation>
</message>
<message>
<source>Wallet decryption failed</source>
<translation>钱包解密失败</translation>
</message>
<message>
<source>Wallet passphrase was successfully changed.</source>
<translation>钱包密码修改成功。</translation>
</message>
<message>
<source>Warning: The Caps Lock key is on!</source>
<translation>警告: 大写字母锁定已开启!</translation>
</message>
</context>
<context>
<name>BanTableModel</name>
<message>
<source>IP/Netmask</source>
<translation>IP/网络掩码</translation>
</message>
<message>
<source>Banned Until</source>
<translation>在此之前禁止:</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<source>Sign &message...</source>
<translation>消息签名(&M)...</translation>
</message>
<message>
<source>Synchronizing with network...</source>
<translation>正在与网络同步...</translation>
</message>
<message>
<source>&Overview</source>
<translation>概况(&O)</translation>
</message>
<message>
<source>Node</source>
<translation>节点</translation>
</message>
<message>
<source>Show general overview of wallet</source>
<translation>显示钱包概况</translation>
</message>
<message>
<source>&Transactions</source>
<translation>交易记录(&T)</translation>
</message>
<message>
<source>Browse transaction history</source>
<translation>查看交易历史</translation>
</message>
<message>
<source>E&xit</source>
<translation>退出(&X)</translation>
</message>
<message>
<source>Quit application</source>
<translation>退出程序</translation>
</message>
<message>
<source>&About %1</source>
<translation>关于 %1</translation>
</message>
<message>
<source>Show information about %1</source>
<translation>显示 %1 相关信息</translation>
</message>
<message>
<source>About &Qt</source>
<translation>关于Qt(&Q)</translation>
</message>
<message>
<source>Show information about Qt</source>
<translation>显示 Qt 相关信息</translation>
</message>
<message>
<source>&Options...</source>
<translation>选项(&O)...</translation>
</message>
<message>
<source>Modify configuration options for %1</source>
<translation>修改%1配置选项</translation>
</message>
<message>
<source>&Encrypt Wallet...</source>
<translation>加密钱包(&E)...</translation>
</message>
<message>
<source>&Backup Wallet...</source>
<translation>备份钱包(&B)...</translation>
</message>
<message>
<source>&Change Passphrase...</source>
<translation>更改密码(&C)...</translation>
</message>
<message>
<source>&Sending addresses...</source>
<translation>正在发送地址(&S)...</translation>
</message>
<message>
<source>&Receiving addresses...</source>
<translation>正在接收地址(&R)...</translation>
</message>
<message>
<source>Open &URI...</source>
<translation>打开 &URI...</translation>
</message>
<message>
<source>Reindexing blocks on disk...</source>
<translation>正在为数据块重建索引...</translation>
</message>
<message>
<source>Send coins to a BLAST address</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Backup wallet to another location</source>
<translation>备份钱包到其他文件夹</translation>
</message>
<message>
<source>Change the passphrase used for wallet encryption</source>
<translation>更改钱包加密口令</translation>
</message>
<message>
<source>&Debug window</source>
<translation>调试窗口(&D)</translation>
</message>
<message>
<source>Open debugging and diagnostic console</source>
<translation>打开调试和诊断控制台</translation>
</message>
<message>
<source>&Verify message...</source>
<translation>验证消息(&V)...</translation>
</message>
<message>
<source>BLAST</source>
<translation>BLAST</translation>
</message>
<message>
<source>Wallet</source>
<translation>钱包</translation>
</message>
<message>
<source>&Send</source>
<translation>发送(&S)</translation>
</message>
<message>
<source>&Receive</source>
<translation>接收(&R)</translation>
</message>
<message>
<source>&Show / Hide</source>
<translation>显示 / 隐藏(&S)</translation>
</message>
<message>
<source>Show or hide the main Window</source>
<translation>显示或隐藏主窗口</translation>
</message>
<message>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>对钱包中的私钥加密</translation>
</message>
<message>
<source>Sign messages with your BLAST addresses to prove you own them</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Verify messages to ensure they were signed with specified BLAST addresses</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>&File</source>
<translation>文件(&F)</translation>
</message>
<message>
<source>&Settings</source>
<translation>设置(&S)</translation>
</message>
<message>
<source>&Help</source>
<translation>帮助(&H)</translation>
</message>
<message>
<source>Tabs toolbar</source>
<translation>分页工具栏</translation>
</message>
<message>
<source>Request payments (generates QR codes and blast: URIs)</source>
<translation>请求支付 (生成二维码和 blast: URI)</translation>
</message>
<message>
<source>Show the list of used sending addresses and labels</source>
<translation>显示用过的发送地址和标签的列表</translation>
</message>
<message>
<source>Show the list of used receiving addresses and labels</source>
<translation>显示用过的接收地址和标签的列表</translation>
</message>
<message>
<source>Open a blast: URI or payment request</source>
<translation>打开一个 blast: URI 或支付请求</translation>
</message>
<message>
<source>&Command-line options</source>
<translation>命令行选项(&C)</translation>
</message>
<message numerus="yes">
<source>%n active connection(s) to BLAST network</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Indexing blocks on disk...</source>
<translation>正在为数据块建立索引...</translation>
</message>
<message>
<source>Processing blocks on disk...</source>
<translation>正在处理数据块...</translation>
</message>
<message>
<source>No block source available...</source>
<translation>沒有可用的区块来源...</translation>
</message>
<message numerus="yes">
<source>Processed %n block(s) of transaction history.</source>
<translation><numerusform>已处理 %n 个交易历史数据块。</numerusform></translation>
</message>
<message numerus="yes">
<source>%n hour(s)</source>
<translation><numerusform>%n 小时</numerusform></translation>
</message>
<message numerus="yes">
<source>%n day(s)</source>
<translation><numerusform>%n 天</numerusform></translation>
</message>
<message numerus="yes">
<source>%n week(s)</source>
<translation><numerusform>%n 周</numerusform></translation>
</message>
<message>
<source>%1 and %2</source>
<translation>%1 和 %2</translation>
</message>
<message numerus="yes">
<source>%n year(s)</source>
<translation><numerusform>%n 年</numerusform></translation>
</message>
<message>
<source>%1 behind</source>
<translation>落后 %1 </translation>
</message>
<message>
<source>Last received block was generated %1 ago.</source>
<translation>最新收到的区块产生于 %1。</translation>
</message>
<message>
<source>Transactions after this will not yet be visible.</source>
<translation>在此之后的交易尚未可见</translation>
</message>
<message>
<source>Error</source>
<translation>错误</translation>
</message>
<message>
<source>Warning</source>
<translation>警告</translation>
</message>
<message>
<source>Information</source>
<translation>信息</translation>
</message>
<message>
<source>Up to date</source>
<translation>已是最新</translation>
</message>
<message>
<source>%1 client</source>
<translation>%1 客戶</translation>
</message>
<message>
<source>Catching up...</source>
<translation>更新中...</translation>
</message>
<message>
<source>Date: %1
</source>
<translation>日期: %1
</translation>
</message>
<message>
<source>Amount: %1
</source>
<translation>金额: %1
</translation>
</message>
<message>
<source>Type: %1
</source>
<translation>类型: %1
</translation>
</message>
<message>
<source>Label: %1
</source>
<translation>标签: %1
</translation>
</message>
<message>
<source>Address: %1
</source>
<translation>地址: %1
</translation>
</message>
<message>
<source>Sent transaction</source>
<translation>发送交易</translation>
</message>
<message>
<source>Incoming transaction</source>
<translation>流入交易</translation>
</message>
<message>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>钱包已被<b>加密</b>,当前为<b>解锁</b>状态</translation>
</message>
<message>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>钱包已被<b>加密</b>,当前为<b>锁定</b>状态</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<source>Coin Selection</source>
<translation>选择钱币</translation>
</message>
<message>
<source>Quantity:</source>
<translation>总量:</translation>
</message>
<message>
<source>Bytes:</source>
<translation>字节:</translation>
</message>
<message>
<source>Amount:</source>
<translation>金额:</translation>
</message>
<message>
<source>Priority:</source>
<translation>优先级:</translation>
</message>
<message>
<source>Fee:</source>
<translation>费用:</translation>
</message>
<message>
<source>Dust:</source>
<translation>小额:</translation>
</message>
<message>
<source>After Fee:</source>
<translation>加上交易费用后:</translation>
</message>
<message>
<source>Change:</source>
<translation>变更 : </translation>
</message>
<message>
<source>(un)select all</source>
<translation>(不)全选</translation>
</message>
<message>
<source>Tree mode</source>
<translation>树状模式</translation>
</message>
<message>
<source>List mode</source>
<translation>列表模式</translation>
</message>
<message>
<source>Amount</source>
<translation>金额</translation>
</message>
<message>
<source>Received with label</source>
<translation>按标签收款</translation>
</message>
<message>
<source>Received with address</source>
<translation>按地址收款</translation>
</message>
<message>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<source>Confirmations</source>
<translation>确认</translation>
</message>
<message>
<source>Confirmed</source>
<translation>已确认</translation>
</message>
<message>
<source>Priority</source>
<translation>优先级</translation>
</message>
<message>
<source>Copy address</source>
<translation>复制地址</translation>
</message>
<message>
<source>Copy label</source>
<translation>复制标签</translation>
</message>
<message>
<source>Copy amount</source>
<translation>复制金额</translation>
</message>
<message>
<source>Copy transaction ID</source>
<translation>复制交易识别码</translation>
</message>
<message>
<source>Lock unspent</source>
<translation>锁定未花费</translation>
</message>
<message>
<source>Unlock unspent</source>
<translation>解锁未花费</translation>
</message>
<message>
<source>Copy quantity</source>
<translation>复制数目</translation>
</message>
<message>
<source>Copy fee</source>
<translation>复制手续费</translation>
</message>
<message>
<source>Copy after fee</source>
<translation>复制计费后金额</translation>
</message>
<message>
<source>Copy bytes</source>
<translation>复制字节数</translation>
</message>
<message>
<source>Copy priority</source>
<translation>复制优先度</translation>
</message>
<message>
<source>Copy dust</source>
<translation>复制零散金额</translation>
</message>
<message>
<source>Copy change</source>
<translation>复制找零金额</translation>
</message>
<message>
<source>highest</source>
<translation>最高</translation>
</message>
<message>
<source>higher</source>
<translation>很高</translation>
</message>
<message>
<source>high</source>
<translation>高</translation>
</message>
<message>
<source>medium-high</source>
<translation>中高</translation>
</message>
<message>
<source>medium</source>
<translation>中等</translation>
</message>
<message>
<source>low-medium</source>
<translation>中低</translation>
</message>
<message>
<source>low</source>
<translation>低</translation>
</message>
<message>
<source>lower</source>
<translation>很低</translation>
</message>
<message>
<source>lowest</source>
<translation>最低</translation>
</message>
<message>
<source>(%1 locked)</source>
<translation>(锁定 %1 枚)</translation>
</message>
<message>
<source>none</source>
<translation>无</translation>
</message>
<message>
<source>yes</source>
<translation>是</translation>
</message>
<message>
<source>no</source>
<translation>否</translation>
</message>
<message>
<source>This label turns red if the transaction size is greater than 1000 bytes.</source>
<translation>当交易大小大于 1000 字节时,文字会变红色。</translation>
</message>
<message>
<source>This means a fee of at least %1 per kB is required.</source>
<translation>表示每一千字节(kB)需要至少 %1 的手续费。</translation>
</message>
<message>
<source>Can vary +/- 1 byte per input.</source>
<translation>每组输入可能会误差多或少 1 个字节。</translation>
</message>
<message>
<source>Transactions with higher priority are more likely to get included into a block.</source>
<translation>优先度较高的交易比较有可能被接受放进区块中。</translation>
</message>
<message>
<source>This label turns red if the priority is smaller than "medium".</source>
<translation>当优先度低于“中等”时,文字会变红色。</translation>
</message>
<message>
<source>This label turns red if any recipient receives an amount smaller than the current dust threshold.</source>
<translation>当任何一个收款金额小于目前的零散金额上限时,文字会变红色。</translation>
</message>
<message>
<source>Can vary +/- %1 satoshi(s) per input.</source>
<translation>每组输入可能有 +/- %1 个 satoshi 的误差。</translation>
</message>
<message>
<source>(no label)</source>
<translation>(无标签)</translation>
</message>
<message>
<source>change from %1 (%2)</source>
<translation>找零前是 %1 (%2)</translation>
</message>
<message>
<source>(change)</source>
<translation>(找零)</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<source>Edit Address</source>
<translation>编辑地址</translation>
</message>
<message>
<source>&Label</source>
<translation>标签(&L)</translation>
</message>
<message>
<source>The label associated with this address list entry</source>
<translation>与此地址相关的标签项</translation>
</message>
<message>
<source>The address associated with this address list entry. This can only be modified for sending addresses.</source>
<translation>该地址已与地址列表中的条目关联,只能被发送地址修改。</translation>
</message>
<message>
<source>&Address</source>
<translation>地址(&A)</translation>
</message>
<message>
<source>New receiving address</source>
<translation>新建收款地址</translation>
</message>
<message>
<source>New sending address</source>
<translation>新建付款地址</translation>
</message>
<message>
<source>Edit receiving address</source>
<translation>编辑收款地址</translation>
</message>
<message>
<source>Edit sending address</source>
<translation>编辑付款地址</translation>
</message>
<message>
<source>The entered address "%1" is not a valid BLAST address.</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>The entered address "%1" is already in the address book.</source>
<translation>输入的地址 %1 已经存在地址簿。</translation>
</message>
<message>
<source>Could not unlock wallet.</source>
<translation>无法将钱包解锁。</translation>
</message>
<message>
<source>New key generation failed.</source>
<translation>产生新的密钥失败了。</translation>
</message>
</context>
<context>
<name>FreespaceChecker</name>
<message>
<source>A new data directory will be created.</source>
<translation>一个新的数据目录将被创建。</translation>
</message>
<message>
<source>name</source>
<translation>名称</translation>
</message>
<message>
<source>Directory already exists. Add %1 if you intend to create a new directory here.</source>
<translation>目录已存在。如果您打算在这里创建一个新目录,添加 %1。</translation>
</message>
<message>
<source>Path already exists, and is not a directory.</source>
<translation>路径已存在,并且不是一个目录。</translation> | <message>
<source>Cannot create data directory here.</source>
<translation>无法在此创建数据目录。</translation>
</message>
</context>
<context>
<name>HelpMessageDialog</name>
<message>
<source>version</source>
<translation>版本</translation>
</message>
<message>
<source>(%1-bit)</source>
<translation>(%1 位)</translation>
</message>
<message>
<source>About %1</source>
<translation>關於 %1</translation>
</message>
<message>
<source>Command-line options</source>
<translation>命令行选项</translation>
</message>
<message>
<source>Usage:</source>
<translation>使用:</translation>
</message>
<message>
<source>command-line options</source>
<translation>命令行选项</translation>
</message>
<message>
<source>UI Options:</source>
<translation>界面选项:</translation>
</message>
<message>
<source>Choose data directory on startup (default: %u)</source>
<translation>在启动时选择目录(默认%u)</translation>
</message>
<message>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>设置语言, 例如“zh-CN”(默认:系统语言)</translation>
</message>
<message>
<source>Start minimized</source>
<translation>启动时最小化</translation>
</message>
<message>
<source>Set SSL root certificates for payment request (default: -system-)</source>
<translation>设置付款请求的SSL根证书(默认:-系统-)</translation>
</message>
<message>
<source>Show splash screen on startup (default: %u)</source>
<translation>显示启动画面(默认:%u)</translation>
</message>
<message>
<source>Reset all settings changed in the GUI</source>
<translation>重置图形界面所有的变更设置</translation>
</message>
</context>
<context>
<name>Intro</name>
<message>
<source>Welcome</source>
<translation>欢迎</translation>
</message>
<message>
<source>Welcome to %1.</source>
<translation>
歡迎來到 %1</translation>
</message>
<message>
<source>As this is the first time the program is launched, you can choose where %1 will store its data.</source>
<translation>由于这是第一次启动此程序,您可以选择%1的数据所存储的位置</translation>
</message>
<message>
<source>%1 will download and store a copy of the BLAST block chain. At least %2GB of data will be stored in this directory, and it will grow over time. The wallet will also be stored in this directory.</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Use the default data directory</source>
<translation>使用默认的数据目录</translation>
</message>
<message>
<source>Use a custom data directory:</source>
<translation>使用自定义的数据目录:</translation>
</message>
<message>
<source>Error: Specified data directory "%1" cannot be created.</source>
<translation>错误:无法创建 指定的数据目录 "%1" </translation>
</message>
<message>
<source>Error</source>
<translation>错误</translation>
</message>
<message numerus="yes">
<source>%n GB of free space available</source>
<translation><numerusform>有 %n GB 空闲空间</numerusform></translation>
</message>
<message numerus="yes">
<source>(of %n GB needed)</source>
<translation><numerusform>(需要%n GB空间)</numerusform></translation>
</message>
</context>
<context>
<name>OpenURIDialog</name>
<message>
<source>Open URI</source>
<translation>打开 URI</translation>
</message>
<message>
<source>Open payment request from URI or file</source>
<translation>打开来自URI或文件的付款请求 </translation>
</message>
<message>
<source>URI:</source>
<translation>URI: </translation>
</message>
<message>
<source>Select payment request file</source>
<translation>选择付款请求文件 </translation>
</message>
<message>
<source>Select payment request file to open</source>
<translation>选择要打开的付款请求文件</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<source>Options</source>
<translation>选项</translation>
</message>
<message>
<source>&Main</source>
<translation>主要(&M)</translation>
</message>
<message>
<source>Automatically start %1 after logging in to the system.</source>
<translation>在登入系统后自动启动 %1</translation>
</message>
<message>
<source>&Start %1 on system login</source>
<translation>系统登入时启动 %1</translation>
</message>
<message>
<source>Size of &database cache</source>
<translation>数据库缓存大小(&D)</translation>
</message>
<message>
<source>MB</source>
<translation>MB</translation>
</message>
<message>
<source>Number of script &verification threads</source>
<translation>脚本验证线程数(&V)</translation>
</message>
<message>
<source>Accept connections from outside</source>
<translation>接收外部连接</translation>
</message>
<message>
<source>Allow incoming connections</source>
<translation>允许流入连接</translation>
</message>
<message>
<source>IP address of the proxy (e.g. IPv4: 127.0.0.1 / IPv6: ::1)</source>
<translation>代理的 IP 地址 (例如 IPv4: 127.0.0.1 / IPv6: ::1)</translation>
</message>
<message>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Exit in the menu.</source>
<translation>窗口被关闭时最小化而不是退出应用程序。当此选项启用时,应用程序只会在菜单中选择退出时退出。</translation>
</message>
<message>
<source>Third party URLs (e.g. a block explorer) that appear in the transactions tab as context menu items. %s in the URL is replaced by transaction hash. Multiple URLs are separated by vertical bar |.</source>
<translation>出现在交易的选项卡的上下文菜单项的第三方网址 (例如:区块链接查询) 。 %s的URL被替换为交易哈希。多个的URL需要竖线 | 分隔。</translation>
</message>
<message>
<source>Third party transaction URLs</source>
<translation>第三方交易网址</translation>
</message>
<message>
<source>Active command-line options that override above options:</source>
<translation>有效的命令行参数覆盖上述选项:</translation>
</message>
<message>
<source>Reset all client options to default.</source>
<translation>恢复客户端的缺省设置</translation>
</message>
<message>
<source>&Reset Options</source>
<translation>恢复缺省设置(&R)</translation>
</message>
<message>
<source>&Network</source>
<translation>网络(&N)</translation>
</message>
<message>
<source>(0 = auto, <0 = leave that many cores free)</source>
<translation>(0 = 自动, <0 = 离开很多免费的核心)</translation>
</message>
<message>
<source>W&allet</source>
<translation>钱包(&A)</translation>
</message>
<message>
<source>Expert</source>
<translation>专家</translation>
</message>
<message>
<source>Enable coin &control features</source>
<translation>启动货币控制功能(&C)</translation>
</message>
<message>
<source>If you disable the spending of unconfirmed change, the change from a transaction cannot be used until that transaction has at least one confirmation. This also affects how your balance is computed.</source>
<translation>如果禁用未确认的零钱,则零钱至少需要1个确认才能使用。同时账户余额计算会受到影响。</translation>
</message>
<message>
<source>&Spend unconfirmed change</source>
<translation>使用未经确认的零钱(&S)</translation>
</message>
<message>
<source>Automatically open the BLAST client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Map port using &UPnP</source>
<translation>使用 &UPnP 映射端口</translation>
</message>
<message>
<source>Connect to the BLAST network through a SOCKS5 proxy.</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>&Connect through SOCKS5 proxy (default proxy):</source>
<translation>通过 SO&CKS5 代理连接(默认代理):</translation>
</message>
<message>
<source>Proxy &IP:</source>
<translation>代理服务器 &IP:</translation>
</message>
<message>
<source>&Port:</source>
<translation>端口(&P):</translation>
</message>
<message>
<source>Port of the proxy (e.g. 9050)</source>
<translation>代理端口(例如 9050)</translation>
</message>
<message>
<source>Used for reaching peers via:</source>
<translation>连接到同伴的方式:</translation>
</message>
<message>
<source>Shows, if the supplied default SOCKS5 proxy is used to reach peers via this network type.</source>
<translation>如果默认的SOCKS5代理被用于在该网络下连接同伴,则显示</translation>
</message>
<message>
<source>IPv4</source>
<translation>IPv4</translation>
</message>
<message>
<source>IPv6</source>
<translation>IPv6</translation>
</message>
<message>
<source>Tor</source>
<translation>Tor</translation>
</message>
<message>
<source>Connect to the BLAST network through a separate SOCKS5 proxy for Tor hidden services.</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Use separate SOCKS5 proxy to reach peers via Tor hidden services:</source>
<translation>通过Tor隐藏服务连接节点时 使用不同的SOCKS5代理</translation>
</message>
<message>
<source>&Window</source>
<translation>窗口(&W)</translation>
</message>
<message>
<source>&Hide the icon from the system tray.</source>
<translation>不在通知区显示图标</translation>
</message>
<message>
<source>Hide tray icon</source>
<translation>不显示通知区图标</translation>
</message>
<message>
<source>Show only a tray icon after minimizing the window.</source>
<translation>最小化窗口后仅显示托盘图标</translation>
</message>
<message>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>最小化到托盘(&M)</translation>
</message>
<message>
<source>M&inimize on close</source>
<translation>单击关闭按钮最小化(&I)</translation>
</message>
<message>
<source>&Display</source>
<translation>显示(&D)</translation>
</message>
<message>
<source>User Interface &language:</source>
<translation>用户界面语言(&L):</translation>
</message>
<message>
<source>The user interface language can be set here. This setting will take effect after restarting %1.</source>
<translation>可以在这里设定用户界面的语言。这个设定在重启 %1 后才会生效。</translation>
</message>
<message>
<source>&Unit to show amounts in:</source>
<translation>比特币金额单位(&U):</translation>
</message>
<message>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>选择比特币单位。</translation>
</message>
<message>
<source>Whether to show coin control features or not.</source>
<translation>是否需要交易源地址控制功能。</translation>
</message>
<message>
<source>&OK</source>
<translation>确定(&O)</translation>
</message>
<message>
<source>&Cancel</source>
<translation>取消(&C)</translation>
</message>
<message>
<source>default</source>
<translation>默认</translation>
</message>
<message>
<source>none</source>
<translation>无</translation>
</message>
<message>
<source>Confirm options reset</source>
<translation>确认恢复缺省设置</translation>
</message>
<message>
<source>Client restart required to activate changes.</source>
<translation>更改生效需要重启客户端。</translation>
</message>
<message>
<source>Client will be shut down. Do you want to proceed?</source>
<translation>客户端即将关闭,您想继续吗?</translation>
</message>
<message>
<source>This change would require a client restart.</source>
<translation>此更改需要重启客户端。</translation>
</message>
<message>
<source>The supplied proxy address is invalid.</source>
<translation>提供的代理服务器地址无效。</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<source>Form</source>
<translation>表单</translation>
</message>
<message>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the BLAST network after a connection is established, but this process has not completed yet.</source>
<translation>显示的信息可能已过期。与 BLAST 网络建立连接后,您的钱包会自动同步,但这一过程尚未完成。</translation>
</message>
<message>
<source>Watch-only:</source>
<translation>仅观察:</translation>
</message>
<message>
<source>Available:</source>
<translation>可使用的余额:</translation>
</message>
<message>
<source>Your current spendable balance</source>
<translation>您当前可使用的余额</translation>
</message>
<message>
<source>Pending:</source>
<translation>等待中的余额:</translation>
</message>
<message>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the spendable balance</source>
<translation>尚未确认的交易总额,未计入当前余额</translation>
</message>
<message>
<source>Immature:</source>
<translation>未成熟的:</translation>
</message>
<message>
<source>Mined balance that has not yet matured</source>
<translation>尚未成熟的挖矿收入余额</translation>
</message>
<message>
<source>Balances</source>
<translation>余额</translation>
</message>
<message>
<source>Total:</source>
<translation>总额:</translation>
</message>
<message>
<source>Your current total balance</source>
<translation>您当前的总余额</translation>
</message>
<message>
<source>Your current balance in watch-only addresses</source>
<translation>您当前 观察地址(watch-only address)的余额 </translation>
</message>
<message>
<source>Spendable:</source>
<translation>可使用:</translation>
</message>
<message>
<source>Recent transactions</source>
<translation>最近交易记录</translation>
</message>
<message>
<source>Unconfirmed transactions to watch-only addresses</source>
<translation>观察地址(watch-only address)的未确认交易记录 </translation>
</message>
<message>
<source>Mined balance in watch-only addresses that has not yet matured</source>
<translation>观察地址(watch-only address)中尚未成熟(matured)的挖矿收入余额:</translation>
</message>
<message>
<source>Current total balance in watch-only addresses</source>
<translation>观察地址(watch-only address)中的当前总余额 </translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<source>Payment request error</source>
<translation>要求付款时发生错误</translation>
</message>
<message>
<source>Cannot start blast: click-to-pay handler</source>
<translation>无法启动 blast 协议的“一键支付”处理器</translation>
</message>
<message>
<source>URI handling</source>
<translation>URI 处理</translation>
</message>
<message>
<source>Payment request fetch URL is invalid: %1</source>
<translation>取得付款请求的 URL 无效: %1</translation>
</message>
<message>
<source>Invalid payment address %1</source>
<translation>无效的付款地址 %1</translation>
</message>
<message>
<source>URI cannot be parsed! This can be caused by an invalid BLAST address or malformed URI parameters.</source>
<translation>无法解析 URI!可能是 BLAST 地址无效或 URI 参数格式错误。</translation>
</message>
<message>
<source>Payment request file handling</source>
<translation>处理付款请求文件</translation>
</message>
<message>
<source>Payment request file cannot be read! This can be caused by an invalid payment request file.</source>
<translation>无法读取付款请求文件!可能是文件无效造成的。</translation>
</message>
<message>
<source>Payment request rejected</source>
<translation>付款请求已被拒绝</translation>
</message>
<message>
<source>Payment request network doesn't match client network.</source>
<translation>付款请求的网络类型跟客户端不符。</translation>
</message>
<message>
<source>Payment request expired.</source>
<translation>付款请求已过期。</translation>
</message>
<message>
<source>Payment acknowledged</source>
<translation>付款已确认</translation>
</message>
</context>
<context>
<name>PeerTableModel</name>
<message>
<source>User Agent</source>
<translation>用户代理</translation>
</message>
<message>
<source>Node/Service</source>
<translation>节点/服务</translation>
</message>
<message>
<source>Ping Time</source>
<translation>Ping 时间</translation>
</message>
</context>
<context>
<name>QObject</name>
<message>
<source>Amount</source>
<translation>金额</translation>
</message>
<message>
<source>Enter a BLAST address (e.g. %1)</source>
<translation>请输入 BLAST 地址 (例如 %1)</translation>
</message>
<message>
<source>%1 d</source>
<translation>%1 天</translation>
</message>
<message>
<source>%1 h</source>
<translation>%1 小时</translation>
</message>
<message>
<source>%1 m</source>
<translation>%1 分钟</translation>
</message>
<message>
<source>%1 s</source>
<translation>%1 秒</translation>
</message>
<message>
<source>None</source>
<translation>无</translation>
</message>
<message>
<source>N/A</source>
<translation>不可用</translation>
</message>
<message>
<source>%1 ms</source>
<translation>%1 毫秒</translation>
</message>
</context>
<context>
<name>QRImageWidget</name>
<message>
<source>&Save Image...</source>
<translation>保存图片(&S)...</translation>
</message>
<message>
<source>&Copy Image</source>
<translation>复制图片</translation>
</message>
<message>
<source>Save QR Code</source>
<translation>保存二维码</translation>
</message>
<message>
<source>PNG Image (*.png)</source>
<translation>PNG 图像(*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<source>N/A</source>
<translation>不可用</translation>
</message>
<message>
<source>Client version</source>
<translation>客户端版本</translation>
</message>
<message>
<source>&Information</source>
<translation>信息</translation>
</message>
<message>
<source>Debug window</source>
<translation>调试窗口</translation>
</message>
<message>
<source>General</source>
<translation>常规</translation>
</message>
<message>
<source>Using BerkeleyDB version</source>
<translation>使用的 BerkeleyDB 版本</translation>
</message>
<message>
<source>Datadir</source>
<translation>数据目录</translation>
</message>
<message>
<source>Startup time</source>
<translation>启动时间</translation>
</message>
<message>
<source>Network</source>
<translation>网络</translation>
</message>
<message>
<source>Name</source>
<translation>名称</translation>
</message>
<message>
<source>Number of connections</source>
<translation>连接数</translation>
</message>
<message>
<source>Block chain</source>
<translation>区块链</translation>
</message>
<message>
<source>Current number of blocks</source>
<translation>当前数据块数量</translation>
</message>
<message>
<source>Memory Pool</source>
<translation>内存池</translation>
</message>
<message>
<source>Current number of transactions</source>
<translation>当前交易数量</translation>
</message>
<message>
<source>Memory usage</source>
<translation>内存使用</translation>
</message>
<message>
<source>Received</source>
<translation>收到</translation>
</message>
<message>
<source>Sent</source>
<translation>发送</translation>
</message>
<message>
<source>&Peers</source>
<translation>同伴(&P)</translation>
</message>
<message>
<source>Banned peers</source>
<translation>节点黑名单</translation>
</message>
<message>
<source>Select a peer to view detailed information.</source>
<translation>选择节点查看详细信息。</translation>
</message>
<message>
<source>Whitelisted</source>
<translation>白名单</translation>
</message>
<message>
<source>Direction</source>
<translation>方向</translation>
</message>
<message>
<source>Version</source>
<translation>版本</translation>
</message>
<message>
<source>Starting Block</source>
<translation>起始区块</translation>
</message>
<message>
<source>Synced Headers</source>
<translation>已同步的区块头</translation>
</message>
<message>
<source>Synced Blocks</source>
<translation>已同步的区块</translation>
</message>
<message>
<source>User Agent</source>
<translation>用户代理</translation>
</message>
<message>
<source>Decrease font size</source>
<translation>缩小文字</translation>
</message>
<message>
<source>Increase font size</source>
<translation>放大文字</translation>
</message>
<message>
<source>Services</source>
<translation>服务</translation>
</message>
<message>
<source>Ban Score</source>
<translation>禁止得分</translation>
</message>
<message>
<source>Connection Time</source>
<translation>连接时间</translation>
</message>
<message>
<source>Last Send</source>
<translation>最后发送</translation>
</message>
<message>
<source>Last Receive</source>
<translation>最后接收</translation>
</message>
<message>
<source>Ping Time</source>
<translation>Ping 时间</translation>
</message>
<message>
<source>The duration of a currently outstanding ping.</source>
<translation>目前这一次 ping 已经过去的时间。</translation>
</message>
<message>
<source>Ping Wait</source>
<translation>Ping等待</translation>
</message>
<message>
<source>Time Offset</source>
<translation>时间偏移</translation>
</message>
<message>
<source>Last block time</source>
<translation>上一数据块时间</translation>
</message>
<message>
<source>&Open</source>
<translation>打开(&O)</translation>
</message>
<message>
<source>&Console</source>
<translation>控制台(&C)</translation>
</message>
<message>
<source>&Network Traffic</source>
<translation>网络流量(&N)</translation>
</message>
<message>
<source>&Clear</source>
<translation>清除(&C)</translation>
</message>
<message>
<source>Totals</source>
<translation>总数</translation>
</message>
<message>
<source>In:</source>
<translation>输入:</translation>
</message>
<message>
<source>Out:</source>
<translation>输出:</translation>
</message>
<message>
<source>Debug log file</source>
<translation>调试日志文件</translation>
</message>
<message>
<source>Clear console</source>
<translation>清空控制台</translation>
</message>
<message>
<source>&Disconnect Node</source>
<translation>(&D)断开节点连接</translation>
</message>
<message>
<source>Ban Node for</source>
<translation>禁止节点连接时长:</translation>
</message>
<message>
<source>1 &hour</source>
<translation>1 小时(&H)</translation>
</message>
<message>
<source>1 &day</source>
<translation>1 天(&D)</translation>
</message>
<message>
<source>1 &week</source>
<translation>1 周(&W)</translation>
</message>
<message>
<source>1 &year</source>
<translation>1 年(&Y)</translation>
</message>
<message>
<source>&Unban Node</source>
<translation>(&U)允许节点连接</translation>
</message>
<message>
<source>Welcome to the %1 RPC console.</source>
<translation>欢迎使用 %1 的 RPC 控制台。</translation>
</message>
<message>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>使用上下方向键浏览历史, <b>Ctrl-L</b>清除屏幕。</translation>
</message>
<message>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>使用 <b>help</b> 命令显示帮助信息。</translation>
</message>
<message>
<source>%1 B</source>
<translation>%1 字节</translation>
</message>
<message>
<source>%1 KB</source>
<translation>%1 KB</translation>
</message>
<message>
<source>%1 MB</source>
<translation>%1 MB</translation>
</message>
<message>
<source>%1 GB</source>
<translation>%1 GB</translation>
</message>
<message>
<source>(node id: %1)</source>
<translation>(节点ID: %1)</translation>
</message>
<message>
<source>via %1</source>
<translation>通过 %1</translation>
</message>
<message>
<source>never</source>
<translation>从未</translation>
</message>
<message>
<source>Inbound</source>
<translation>传入</translation>
</message>
<message>
<source>Outbound</source>
<translation>传出</translation>
</message>
<message>
<source>Yes</source>
<translation>是</translation>
</message>
<message>
<source>No</source>
<translation>否</translation>
</message>
<message>
<source>Unknown</source>
<translation>未知</translation>
</message>
</context>
<context>
<name>ReceiveCoinsDialog</name>
<message>
<source>&Amount:</source>
<translation>总额(&A):</translation>
</message>
<message>
<source>&Label:</source>
<translation>标签(&L):</translation>
</message>
<message>
<source>&Message:</source>
<translation>消息(&M):</translation>
</message>
<message>
<source>Reuse one of the previously used receiving addresses. Reusing addresses has security and privacy issues. Do not use this unless re-generating a payment request made before.</source>
<translation>重复使用以前用过的接收地址。重用地址有安全和隐私方面的隐患。除非是为重复生成同一项支付请求,否则请不要这样做。</translation>
</message>
<message>
<source>R&euse an existing receiving address (not recommended)</source>
<translation>重用现有的接收地址(不推荐)</translation>
</message>
<message>
<source>An optional message to attach to the payment request, which will be displayed when the request is opened. Note: The message will not be sent with the payment over the BLAST network.</source>
<translation>可在付款请求上附加一条消息,在打开请求时显示。注意:该消息不会随着付款通过 BLAST 网络发送。</translation>
</message>
<message>
<source>An optional label to associate with the new receiving address.</source>
<translation>可为新建的收款地址添加一个标签。</translation>
</message>
<message>
<source>Use this form to request payments. All fields are <b>optional</b>.</source>
<translation>使用此表单要求付款。所有字段都是<b>可选</b>。</translation>
</message>
<message>
<source>An optional amount to request. Leave this empty or zero to not request a specific amount.</source>
<translation>可选的请求金额。留空或填零为不要求具体金额。</translation>
</message>
<message>
<source>Clear all fields of the form.</source>
<translation>清除此表单的所有字段。</translation>
</message>
<message>
<source>Clear</source>
<translation>清除</translation>
</message>
<message>
<source>Requested payments history</source>
<translation>请求付款的历史</translation>
</message>
<message>
<source>&Request payment</source>
<translation>请求付款(&R)</translation>
</message>
<message>
<source>Show the selected request (does the same as double clicking an entry)</source>
<translation>显示选中的请求 (双击也可以显示)</translation>
</message>
<message>
<source>Show</source>
<translation>显示</translation>
</message>
<message>
<source>Remove the selected entries from the list</source>
<translation>从列表中移除选中的条目</translation>
</message>
<message>
<source>Remove</source>
<translation>移除</translation>
</message>
<message>
<source>Copy label</source>
<translation>复制标签</translation>
</message>
<message>
<source>Copy message</source>
<translation>复制消息</translation>
</message>
<message>
<source>Copy amount</source>
<translation>复制金额</translation>
</message>
</context>
<context>
<name>ReceiveRequestDialog</name>
<message>
<source>QR Code</source>
<translation>二维码</translation>
</message>
<message>
<source>Copy &URI</source>
<translation>复制 URI(&U)</translation>
</message>
<message>
<source>Copy &Address</source>
<translation>复制地址(&A)</translation>
</message>
<message>
<source>&Save Image...</source>
<translation>保存图片(&S)...</translation>
</message>
<message>
<source>URI</source>
<translation>URI</translation>
</message>
<message>
<source>Address</source>
<translation>地址</translation>
</message>
<message>
<source>Amount</source>
<translation>金额</translation>
</message>
<message>
<source>Label</source>
<translation>标签</translation>
</message>
<message>
<source>Message</source>
<translation>消息</translation>
</message>
<message>
<source>Error encoding URI into QR Code.</source>
<translation>把 URI 编码成二维码时发生错误。</translation>
</message>
</context>
<context>
<name>RecentRequestsTableModel</name>
<message>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<source>Label</source>
<translation>标签</translation>
</message>
<message>
<source>Message</source>
<translation>消息</translation>
</message>
<message>
<source>(no label)</source>
<translation>(无标签)</translation>
</message>
<message>
<source>(no message)</source>
<translation>(无消息)</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<source>Send Coins</source>
<translation>发送货币</translation>
</message>
<message>
<source>Coin Control Features</source>
<translation>交易源地址控制功能</translation>
</message>
<message>
<source>Inputs...</source>
<translation>输入...</translation>
</message>
<message>
<source>automatically selected</source>
<translation>自动选择</translation>
</message>
<message>
<source>Insufficient funds!</source>
<translation>金额不足!</translation>
</message>
<message>
<source>Quantity:</source>
<translation>总量:</translation>
</message>
<message>
<source>Bytes:</source>
<translation>字节:</translation>
</message>
<message>
<source>Amount:</source>
<translation>金额:</translation>
</message>
<message>
<source>Priority:</source>
<translation>优先级:</translation>
</message>
<message>
<source>Fee:</source>
<translation>费用:</translation>
</message>
<message>
<source>After Fee:</source>
<translation>加上交易费用后:</translation>
</message>
<message>
<source>Change:</source>
<translation>找零:</translation>
</message>
<message>
<source>If this is activated, but the change address is empty or invalid, change will be sent to a newly generated address.</source>
<translation>如果启用该选项,但找零地址为空或无效,找零资金将被发送到新生成的地址。</translation>
</message>
<message>
<source>Custom change address</source>
<translation>自定义零钱地址</translation>
</message>
<message>
<source>Transaction Fee:</source>
<translation>交易费用:</translation>
</message>
<message>
<source>Choose...</source>
<translation>选择... </translation>
</message>
<message>
<source>collapse fee-settings</source>
<translation>收起费用设置</translation>
</message>
<message>
<source>per kilobyte</source>
<translation>每千字节</translation>
</message>
<message>
<source>If the custom fee is set to 1000 satoshis and the transaction is only 250 bytes, then "per kilobyte" only pays 250 satoshis in fee, while "total at least" pays 1000 satoshis. For transactions bigger than a kilobyte both pay by kilobyte.</source>
<translation>如果自定义交易费设置为 1000聪而交易大小只有250字节,则“每千字节" 模式只支付250聪交易费, 而"最少"模式则支付1000聪。 大于1000字节的交易按每千字节付费。</translation>
</message>
<message>
<source>Hide</source>
<translation>隐藏</translation>
</message>
<message>
<source>total at least</source>
<translation>总额至少</translation>
</message>
<message>
<source>Paying only the minimum fee is just fine as long as there is less transaction volume than space in the blocks. But be aware that this can end up in a never confirming transaction once there is more demand for blast transactions than the network can process.</source>
<translation>只要交易量小于区块空间,只支付最低手续费也没有问题。但请注意,一旦 blast 交易的需求超出网络的处理能力,交易可能永远无法被确认。</translation>
</message>
<message>
<source>(read the tooltip)</source>
<translation>(请注意提示信息)</translation>
</message>
<message>
<source>Recommended:</source>
<translation>推荐:</translation>
</message>
<message>
<source>Custom:</source>
<translation>自定义:</translation>
</message>
<message>
<source>(Smart fee not initialized yet. This usually takes a few blocks...)</source>
<translation>(智能交易费用 尚未初始化。 需要再下载一些数据块...)</translation>
</message>
<message>
<source>Confirmation time:</source>
<translation>确认时间:</translation>
</message>
<message>
<source>normal</source>
<translation>一般</translation>
</message>
<message>
<source>fast</source>
<translation>快速</translation>
</message>
<message>
<source>Send to multiple recipients at once</source>
<translation>一次发送给多个接收者</translation>
</message>
<message>
<source>Add &Recipient</source>
<translation>添加收款人(&R)</translation>
</message>
<message>
<source>Clear all fields of the form.</source>
<translation>清除此表单的所有字段。</translation>
</message>
<message>
<source>Dust:</source>
<translation>小额:</translation>
</message>
<message>
<source>Clear &All</source>
<translation>清除所有(&A)</translation>
</message>
<message>
<source>Balance:</source>
<translation>余额:</translation>
</message>
<message>
<source>Confirm the send action</source>
<translation>确认发送货币</translation>
</message>
<message>
<source>S&end</source>
<translation>发送(&E)</translation>
</message>
<message>
<source>Copy quantity</source>
<translation>复制数目</translation>
</message>
<message>
<source>Copy amount</source>
<translation>复制金额</translation>
</message>
<message>
<source>Copy fee</source>
<translation>复制手续费</translation>
</message>
<message>
<source>Copy after fee</source>
<translation>复制计费后金额</translation>
</message>
<message>
<source>Copy bytes</source>
<translation>复制字节数</translation>
</message>
<message>
<source>Copy priority</source>
<translation>复制优先度</translation>
</message>
<message>
<source>Copy dust</source>
<translation>复制零散金额</translation>
</message>
<message>
<source>Copy change</source>
<translation>复制找零金额</translation>
</message>
<message>
<source>Total Amount %1</source>
<translation>总金额 %1</translation>
</message>
<message>
<source>or</source>
<translation>或</translation>
</message>
<message>
<source>Payment request expired.</source>
<translation>付款请求已过期。</translation>
</message>
<message>
<source>Warning: Invalid BLAST address</source>
<translation>警告:无效的 BLAST 地址</translation>
</message>
<message>
<source>(no label)</source>
<translation>(无标签)</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<source>A&mount:</source>
<translation>金额(&M):</translation>
</message>
<message>
<source>Pay &To:</source>
<translation>付给(&T):</translation>
</message>
<message>
<source>&Label:</source>
<translation>标签(&L):</translation>
</message>
<message>
<source>Choose previously used address</source>
<translation>选择以前用过的地址</translation>
</message>
<message>
<source>This is a normal payment.</source>
<translation>这是笔正常的支付。</translation>
</message>
<message>
<source>The BLAST address to send the payment to</source>
<translation>接收付款的 BLAST 地址</translation>
</message>
<message>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<source>Paste address from clipboard</source>
<translation>从剪贴板粘贴地址</translation>
</message>
<message>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<source>Remove this entry</source>
<translation>移除此项</translation>
</message>
<message>
<source>The fee will be deducted from the amount being sent. The recipient will receive less blasts than you enter in the amount field. If multiple recipients are selected, the fee is split equally.</source>
<translation>手续费将从发送金额中扣除,接收者收到的 blast 将比您在金额栏中输入的少。如果选择了多个接收者,手续费将平均分摊。</translation>
</message>
<message>
<source>S&ubtract fee from amount</source>
<translation>从金额中减去交易费(&U)</translation>
</message>
<message>
<source>Message:</source>
<translation>消息:</translation>
</message>
<message>
<source>This is an unauthenticated payment request.</source>
<translation>这是一个未经验证的支付请求。</translation>
</message>
<message>
<source>This is an authenticated payment request.</source>
<translation>这是一个已经验证的支付请求。</translation>
</message>
<message>
<source>Enter a label for this address to add it to the list of used addresses</source>
<translation>请为此地址输入一个标签以将它加入用过的地址列表</translation>
</message>
<message>
<source>A message that was attached to the blast: URI which will be stored with the transaction for your reference. Note: This message will not be sent over the BLAST network.</source>
<translation>附加在 blast: URI 上的消息,将与交易一起存储,以备您参考。注意:该消息不会通过 BLAST 网络发送。</translation>
</message>
<message>
<source>Pay To:</source>
<translation>支付给:</translation>
</message>
<message>
<source>Memo:</source>
<translation>便条:</translation>
</message>
</context>
<context>
<name>SendConfirmationDialog</name>
<message>
<source>Yes</source>
<translation>是</translation>
</message>
</context>
<context>
<name>ShutdownWindow</name>
<message>
<source>%1 is shutting down...</source>
<translation>正在关闭 %1 ...</translation>
</message>
<message>
<source>Do not shut down the computer until this window disappears.</source>
<translation>在此窗口消失前不要关闭计算机。</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<source>Signatures - Sign / Verify a Message</source>
<translation>签名 - 为消息签名/验证签名消息</translation>
</message>
<message>
<source>&Sign Message</source>
<translation>签名消息(&S)</translation>
</message>
<message>
<source>You can sign messages/agreements with your addresses to prove you can receive blasts sent to them. Be careful not to sign anything vague or random, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>您可以用自己的地址对消息或协议签名,以证明您能收到发送到该地址的 blast。请小心,不要对任何含糊或随机的内容签名,以免遭受钓鱼攻击,被骗取您的身份签名。只对您完全同意的详细声明签名。</translation>
</message>
<message>
<source>The BLAST address to sign the message with</source>
<translation>用来对消息签名的 BLAST 地址</translation>
</message>
<message>
<source>Choose previously used address</source>
<translation>选择以前用过的地址</translation>
</message>
<message>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<source>Paste address from clipboard</source>
<translation>从剪贴板粘贴地址</translation>
</message>
<message>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<source>Enter the message you want to sign here</source>
<translation>请在这里输入您想要签名的消息</translation>
</message>
<message>
<source>Signature</source>
<translation>签名</translation>
</message>
<message>
<source>Copy the current signature to the system clipboard</source>
<translation>复制当前签名至剪切板</translation>
</message>
<message>
<source>Sign the message to prove you own this BLAST address</source>
<translation>对消息签名,以证明您拥有该 BLAST 地址</translation>
</message>
<message>
<source>Sign &Message</source>
<translation>消息签名(&M)</translation>
</message>
<message>
<source>Reset all sign message fields</source>
<translation>清空所有签名消息栏</translation>
</message>
<message>
<source>Clear &All</source>
<translation>清除所有(&A)</translation>
</message>
<message>
<source>&Verify Message</source>
<translation>验证消息(&V)</translation>
</message>
<message>
<source>Enter the receiver's address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack. Note that this only proves the signing party receives with the address, it cannot prove sendership of any transaction!</source>
<translation>请在下面输入接收者地址、消息(确保换行符、空格符、制表符等完全相同)和签名以验证消息。请仔细核对签名信息,以提防中间人攻击。请注意,这只能证明签名方可以用该地址收款,并不能证明任何交易的发送方!</translation>
</message>
<message>
<source>The BLAST address the message was signed with</source>
<translation>签名该消息所用的 BLAST 地址</translation>
</message>
<message>
<source>Verify the message to ensure it was signed with the specified BLAST address</source>
<translation>验证消息,以确保它是用指定的 BLAST 地址签名的</translation>
</message>
<message>
<source>Verify &Message</source>
<translation>验证消息签名(&M)</translation>
</message>
<message>
<source>Reset all verify message fields</source>
<translation>清空所有验证消息栏</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<source>[testnet]</source>
<translation>[测试网络]</translation>
</message>
</context>
<context>
<name>TrafficGraphWidget</name>
<message>
<source>KB/s</source>
<translation>KB/s</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<source>Message</source>
<translation>消息</translation>
</message>
<message>
<source>Merchant</source>
<translation>商家</translation>
</message>
<message>
<source>Debug information</source>
<translation>调试信息</translation>
</message>
<message>
<source>Transaction</source>
<translation>交易</translation>
</message>
<message>
<source>Inputs</source>
<translation>输入</translation>
</message>
<message>
<source>Amount</source>
<translation>金额</translation>
</message>
<message>
<source>true</source>
<translation>是</translation>
</message>
<message>
<source>false</source>
<translation>否</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<source>This pane shows a detailed description of the transaction</source>
<translation>当前面板显示了交易的详细信息</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<source>Type</source>
<translation>种类</translation>
</message>
<message>
<source>Label</source>
<translation>标签</translation>
</message>
<message>
<source>Received with</source>
<translation>收款</translation>
</message>
<message>
<source>Sent to</source>
<translation>付款</translation>
</message>
<message>
<source>Mined</source>
<translation>挖矿所得</translation>
</message>
<message>
<source>(no label)</source>
<translation>(无标签)</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<source>All</source>
<translation>全部</translation>
</message>
<message>
<source>Today</source>
<translation>今天</translation>
</message>
<message>
<source>This week</source>
<translation>这星期</translation>
</message>
<message>
<source>This month</source>
<translation>这个月</translation>
</message>
<message>
<source>Last month</source>
<translation>上个月</translation>
</message>
<message>
<source>This year</source>
<translation>今年</translation>
</message>
<message>
<source>Range...</source>
<translation>指定范围...</translation>
</message>
<message>
<source>Received with</source>
<translation>收款</translation>
</message>
<message>
<source>Sent to</source>
<translation>付款</translation>
</message>
<message>
<source>To yourself</source>
<translation>给自己</translation>
</message>
<message>
<source>Mined</source>
<translation>挖矿所得</translation>
</message>
<message>
<source>Other</source>
<translation>其它</translation>
</message>
<message>
<source>Copy address</source>
<translation>复制地址</translation>
</message>
<message>
<source>Copy label</source>
<translation>复制标签</translation>
</message>
<message>
<source>Copy amount</source>
<translation>复制金额</translation>
</message>
<message>
<source>Copy transaction ID</source>
<translation>复制交易识别码</translation>
</message>
<message>
<source>Comma separated file (*.csv)</source>
<translation>逗号分隔文件 (*.csv)</translation>
</message>
<message>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<source>Type</source>
<translation>种类</translation>
</message>
<message>
<source>Label</source>
<translation>标签</translation>
</message>
<message>
<source>Address</source>
<translation>地址</translation>
</message>
<message>
<source>Exporting Failed</source>
<translation>导出失败</translation>
</message>
<message>
<source>to</source>
<translation>到</translation>
</message>
</context>
<context>
<name>UnitDisplayStatusBarControl</name>
<message>
<source>Unit to show amounts in. Click to select another unit.</source>
<translation>金额单位。单击选择别的单位。</translation>
</message>
</context>
<context>
<name>WalletFrame</name>
</context>
<context>
<name>WalletModel</name>
</context>
<context>
<name>WalletView</name>
<message>
<source>Backup Wallet</source>
<translation>备份钱包</translation>
</message>
<message>
<source>Backup Failed</source>
<translation>备份失败</translation>
</message>
<message>
<source>Backup Successful</source>
<translation>备份成功</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<source>Options:</source>
<translation>选项:</translation>
</message>
<message>
<source>Specify data directory</source>
<translation>指定数据目录</translation>
</message>
<message>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>连接一个节点并获取对端地址,然后断开连接</translation>
</message>
<message>
<source>Specify your own public address</source>
<translation>指定您的公共地址</translation>
</message>
<message>
<source>Accept command line and JSON-RPC commands</source>
<translation>接受命令行和 JSON-RPC 命令</translation>
</message>
<message>
<source>If <category> is not supplied or if <category> = 1, output all debugging information.</source>
<translation>如果<category>未提供或<category> = 1,输出所有调试信息。</translation>
</message>
<message>
<source>Prune configured below the minimum of %d MiB. Please use a higher number.</source>
<translation>修剪值被设置为低于最小值%d MiB,请使用更大的数值。</translation>
</message>
<message>
<source>Prune: last wallet synchronisation goes beyond pruned data. You need to -reindex (download the whole blockchain again in case of pruned node)</source>
<translation>修剪:钱包的最后同步进度已超出修剪后的数据。您需要使用 -reindex (对于已修剪的节点,将重新下载整个区块链)</translation>
</message>
<message>
<source>Reduce storage requirements by pruning (deleting) old blocks. This mode is incompatible with -txindex and -rescan. Warning: Reverting this setting requires re-downloading the entire blockchain. (default: 0 = disable pruning blocks, >%u = target size in MiB to use for block files)</source>
<translation>通过修剪(删除)旧数据块减少存储需求。此模式与 -txindex 和 -rescan不兼容。警告:还原此设置需要重新下载整个区块链。(默认: 0 = 禁用修剪数据块, >%u = 数据块文件目标大小,单位 MiB)</translation>
</message>
<message>
<source>Rescans are not possible in pruned mode. You will need to use -reindex which will download the whole blockchain again.</source>
<translation>无法在开启修剪的状态下重扫描,请使用 -reindex重新下载完整的区块链。</translation>
</message>
<message>
<source>Error: A fatal internal error occurred, see debug.log for details</source>
<translation>错误:发生了致命的内部错误,详情见 debug.log 文件</translation>
</message>
<message>
<source>Fee (in %s/kB) to add to transactions you send (default: %s)</source>
<translation>为付款交易添加交易费 (%s/kB) (默认: %s) </translation>
</message>
<message>
<source>Pruning blockstore...</source>
<translation>正在修剪区块存储...</translation>
</message>
<message>
<source>Run in the background as a daemon and accept commands</source>
<translation>在后台运行并接受命令</translation>
</message>
<message>
<source>Unable to start HTTP server. See debug log for details.</source>
<translation>无法启动HTTP服务,查看日志获取更多信息</translation>
</message>
<message>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>接受来自外部的连接 (缺省: 如果不带 -proxy or -connect 参数设置为1)</translation>
</message>
<message>
<source>BLAST Core</source>
<translation>BLAST Core</translation>
</message>
<message>
<source>The %s developers</source>
<translation>%s 开发人员</translation>
</message>
<message>
<source>-fallbackfee is set very high! This is the transaction fee you may pay when fee estimates are not available.</source>
<translation>-fallbackfee 交易费设置得很高!这是在费用估计不可用时你可能会支付的交易费。</translation>
</message>
<message>
<source>A fee rate (in %s/kB) that will be used when fee estimation has insufficient data (default: %s)</source>
<translation>费用估计数据不足时使用的费率 (%s/kB) (默认: %s)</translation>
</message>
<message>
<source>Accept relayed transactions received from whitelisted peers even when not relaying transactions (default: %d)</source>
<translation>即使本节点不转发交易,也接受来自白名单节点转发的交易 (默认: %d)</translation>
</message>
<message>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>绑定指定的IP地址开始监听。IPv6地址请使用[host]:port 格式</translation>
</message>
<message>
<source>Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup</source>
<translation>删除钱包的所有交易记录,且只有用 -rescan参数启动客户端才能重新取回交易记录 </translation>
</message>
<message>
<source>Distributed under the MIT software license, see the accompanying file COPYING or <http://www.opensource.org/licenses/mit-license.php>.</source>
<translation>Distributed under the MIT software license, see the accompanying file COPYING or <http://www.opensource.org/licenses/mit-license.php>.</translation>
</message>
<message>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>当钱包交易发生变化时执行命令 (命令行中的 %s 会被替换成交易ID)</translation>
</message>
<message>
<source>Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)</source>
<translation>设置脚本验证的线程数 (%u 到 %d, 0 = 自动, <0 = 保留指定数目的核心空闲, 默认值: %d)</translation>
</message>
<message>
<source>The block database contains a block which appears to be from the future. This may be due to your computer's date and time being set incorrectly. Only rebuild the block database if you are sure that your computer's date and time are correct</source>
<translation>区块数据库包含未来的交易,这可能是由本机错误的日期时间引起。若确认本机日期时间正确,请重新建立区块数据库。</translation>
</message>
<message>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation>这是测试用的预发布版本 - 请谨慎使用 - 不要用来挖矿,或者在正式商用环境下使用</translation>
</message>
<message>
<source>Use UPnP to map the listening port (default: 1 when listening and no -proxy)</source>
<translation>使用UPnP暴露本机监听端口(默认:1 当正在监听且不使用代理)</translation>
</message>
<message>
<source>Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.</source>
<translation>警告:网络似乎并不完全同意!有些矿工似乎遇到了问题。</translation>
</message>
<message>
<source>Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>警告:我们似乎没有与其他节点完全达成共识!您可能需要升级,或者其他节点可能需要升级。</translation>
</message>
<message>
<source>Whitelist peers connecting from the given netmask or IP address. Can be specified multiple times.</source>
<translation>节点白名单,网络掩码或IP址。可多次指定。</translation>
</message>
<message>
<source>-maxmempool must be at least %d MB</source>
<translation>-maxmempool 最小为%d MB</translation>
</message>
<message>
<source><category> can be:</source>
<translation><category> 可能是:</translation>
</message>
<message>
<source>Append comment to the user agent string</source>
<translation>为用户代理字符串附加说明</translation>
</message>
<message>
<source>Block creation options:</source>
<translation>数据块创建选项:</translation>
</message>
<message>
<source>Connect only to the specified node(s)</source>
<translation>仅连接到指定节点</translation>
</message>
<message>
<source>Connection options:</source>
<translation>连接选项:</translation>
</message>
<message>
<source>Copyright (C) %i-%i</source>
<translation>版权所有 (C) %i-%i</translation>
</message>
<message>
<source>Corrupted block database detected</source>
<translation>检测到区块数据库损坏</translation>
</message>
<message>
<source>Debugging/Testing options:</source>
<translation>调试/测试选项:</translation>
</message>
<message>
<source>Do not load the wallet and disable wallet RPC calls</source>
<translation>不加载钱包,并禁用钱包 RPC 调用</translation>
</message>
<message>
<source>Do you want to rebuild the block database now?</source>
<translation>你想现在就重建块数据库吗?</translation>
</message>
<message>
<source>Enable publish hash block in <address></source>
<translation>允许在<address>广播区块哈希</translation>
</message>
<message>
<source>Enable publish hash transaction in <address></source>
<translation>允许在<address>广播交易哈希</translation>
</message>
<message>
<source>Enable publish raw block in <address></source>
<translation>允许在<address>广播原始区块</translation>
</message>
<message>
<source>Enable publish raw transaction in <address></source>
<translation>允许在<address>广播原始交易</translation>
</message>
<message>
<source>Enable transaction replacement in the memory pool (default: %u)</source>
<translation>启用内存池中的交易替换 (默认: %u)</translation>
</message>
<message>
<source>Error initializing block database</source>
<translation>初始化数据块数据库出错</translation>
</message>
<message>
<source>Error initializing wallet database environment %s!</source>
<translation>初始化钱包数据库环境 %s 出错!</translation>
</message>
<message>
<source>Error loading %s</source>
<translation>载入 %s 时发生错误</translation>
</message>
<message>
<source>Error loading block database</source>
<translation>载入区块数据库出错</translation>
</message>
<message>
<source>Error opening block database</source>
<translation>打开区块数据库出错</translation>
</message>
<message>
<source>Error: Disk space is low!</source>
<translation>错误:磁盘剩余空间低!</translation>
</message>
<message>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>监听端口失败。请使用 -listen=0 参数。</translation>
</message>
<message>
<source>Importing...</source>
<translation>导入中...</translation>
</message>
<message>
<source>Incorrect or no genesis block found. Wrong datadir for network?</source>
<translation>创世区块不正确或未找到。是否指定了错误的网络数据目录?</translation>
</message>
<message>
<source>Invalid -onion address: '%s'</source>
<translation>无效的 -onion 地址:“%s”</translation>
</message>
<message>
<source>Invalid amount for -fallbackfee=<amount>: '%s'</source>
<translation>-fallbackfee=<amount> 的金额无效: '%s'</translation>
</message>
<message>
<source>Keep the transaction memory pool below <n> megabytes (default: %u)</source>
<translation>保持交易内存池大小低于<n>MB(默认:%u)</translation>
</message>
<message>
<source>Location of the auth cookie (default: data dir)</source>
<translation>认证Cookie的位置 (默认: data目录)</translation>
</message>
<message>
<source>Not enough file descriptors available.</source>
<translation>没有足够的文件描述符可用。</translation>
</message>
<message>
<source>Only connect to nodes in network <net> (ipv4, ipv6 or onion)</source>
<translation>只连接 <net>网络中的节点 (ipv4, ipv6 或 onion) </translation>
</message>
<message>
<source>Print version and exit</source>
<translation>打印版本信息并退出</translation>
</message>
<message>
<source>Prune cannot be configured with a negative value.</source>
<translation>修剪值不能设置为负数。</translation>
</message>
<message>
<source>Prune mode is incompatible with -txindex.</source>
<translation>修剪模式与 -txindex 不兼容。</translation>
</message>
<message>
<source>Set database cache size in megabytes (%d to %d, default: %d)</source>
<translation>设置以MB为单位的数据库缓存大小(%d 到 %d, 默认值: %d)</translation>
</message>
<message>
<source>Set maximum block size in bytes (default: %d)</source>
<translation>设置最大区块大小 (默认: %d,单位字节)</translation>
</message>
<message>
<source>Specify wallet file (within data directory)</source>
<translation>指定钱包文件(数据目录内)</translation>
</message>
<message>
<source>Starting network threads...</source>
<translation>正在启动网络线程...</translation>
</message>
<message>
<source>The source code is available from %s.</source>
<translation>源代码可以在 %s 获得。</translation>
</message>
<message>
<source>Unsupported argument -benchmark ignored, use -debug=bench.</source>
<translation>忽略不支持的选项 -benchmark,使用 -debug=bench</translation>
</message>
<message>
<source>Unsupported argument -debugnet ignored, use -debug=net.</source>
<translation>忽略不支持的选项 -debugnet,使用 -debug=net。</translation>
</message>
<message>
<source>Unsupported argument -tor found, use -onion.</source>
<translation>忽略不支持的选项 -tor,请使用 -onion。</translation>
</message>
<message>
<source>Use UPnP to map the listening port (default: %u)</source>
<translation>使用UPnp映射监听端口 (默认: %u) </translation>
</message>
<message>
<source>User Agent comment (%s) contains unsafe characters.</source>
<translation>用户代理评论(%s)包含不安全的字符。</translation>
</message>
<message>
<source>Verifying blocks...</source>
<translation>正在验证区块...</translation>
</message>
<message>
<source>Verifying wallet...</source>
<translation>正在验证钱包...</translation>
</message>
<message>
<source>Wallet %s resides outside data directory %s</source>
<translation>钱包 %s 位于数据目录 %s 之外</translation>
</message>
<message>
<source>Wallet options:</source>
<translation>钱包选项:</translation>
</message>
<message>
<source>Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times</source>
<translation>允许来自指定地址的 JSON-RPC 连接。 <ip>为单一IP (如: 1.2.3.4), 网络/掩码 (如: 1.2.3.4/255.255.255.0), 网络/CIDR (如: 1.2.3.4/24)。该选项可多次指定。</translation>
</message>
<message>
<source>Bind to given address and whitelist peers connecting to it. Use [host]:port notation for IPv6</source>
<translation>绑定到指定地址和连接的白名单节点。 IPv6使用 [主机]:端口 格式 </translation>
</message>
<message>
<source>Bind to given address to listen for JSON-RPC connections. Use [host]:port notation for IPv6. This option can be specified multiple times (default: bind to all interfaces)</source>
<translation>绑定到指定地址监听 JSON-RPC连接。 IPv6使用[主机]:端口 格式。该选项可多次指定 (默认: 绑定到所有接口) </translation>
</message>
<message>
<source>Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)</source>
<translation>创建系统默认权限的文件,而不是 umask 077 (只在关闭钱包功能时有效) </translation>
</message>
<message>
<source>Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)</source>
<translation>发现自己的 IP 地址(默认: 监听并且无 -externalip 或 -proxy 时为 1)</translation>
</message>
<message>
<source>Error: Listening for incoming connections failed (listen returned error %s)</source>
<translation>错误:监听外部连接失败 (监听返回错误 %s) </translation>
</message>
<message>
<source>Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)</source>
<translation>当收到相关提醒或者我们看到一个长分叉时执行命令(%s 将替换为消息)</translation>
</message>
<message>
<source>Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)</source>
<translation>小于此值的交易费 (%s/kB) 在转发、挖矿和创建交易时将被视为零费用 (默认: %s)</translation>
</message>
<message>
<source>If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)</source>
<translation>如果未设置交易费用,自动添加足够的交易费以确保交易在平均n个数据块内被确认 (默认: %u) </translation>
</message>
<message>
<source>Invalid amount for -maxtxfee=<amount>: '%s' (must be at least the minrelay fee of %s to prevent stuck transactions)</source>
<translation>-maxtxfee=<amount>: '%s' 的金额无效(交易费至少为 %s,以免交易滞留过久)</translation>
</message>
<message>
<source>Maximum size of data in data carrier transactions we relay and mine (default: %u)</source>
<translation>转发和挖矿时,数据运载交易中数据的最大字节数 (默认: %u)</translation>
</message>
<message>
<source>Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)</source>
<translation>节点地址不足时,通过 DNS 查询获取节点地址 (默认: 1,使用 -connect 时除外)</translation>
</message>
<message>
<source>Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)</source>
<translation>为每个代理连接随机化凭据。这将启用 Tor 流隔离 (默认: %u)</translation>
</message>
<message>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: %d)</source>
<translation>设置 高优先级/低交易费 交易的最大字节 (缺省: %d)</translation>
</message>
<message>
<source>The transaction amount is too small to send after the fee has been deducted</source>
<translation>扣除手续费后,要发送的交易金额太小</translation>
</message>
<message>
<source>This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit <https://www.openssl.org/> and cryptographic software written by Eric Young and UPnP software written by Thomas Bernard.</source>
<translation>This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit <https://www.openssl.org/> and cryptographic software written by Eric Young and UPnP software written by Thomas Bernard.</translation>
</message>
<message>
<source>Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway</source>
<translation>白名单节点不能被DoS banned ,且转发所有来自他们的交易(即便这些交易已经存在于mempool中),常用于网关 </translation>
</message>
<message>
<source>You need to rebuild the database using -reindex to go back to unpruned mode. This will redownload the entire blockchain</source>
<translation>您需要使用 -reindex 重新构建数据库以返回未修剪的模式。这将重新下载整个区块链</translation>
</message>
<message>
<source>(default: %u)</source>
<translation>(默认: %u)</translation>
</message>
<message>
<source>Accept public REST requests (default: %u)</source>
<translation>接受公共 REST 请求 (默认: %u)</translation>
</message>
<message>
<source>Automatically create Tor hidden service (default: %d)</source>
<translation>自动建立Tor隐藏服务 (默认:%d)</translation>
</message>
<message>
<source>Connect through SOCKS5 proxy</source>
<translation>通过 SOCKS5 代理连接</translation>
</message>
<message>
<source>Error reading from database, shutting down.</source>
<translation>读取数据库出错,关闭中。</translation>
</message>
<message>
<source>Imports blocks from external blk000??.dat file on startup</source>
<translation>启动时从其他来源的 blk000??.dat 文件导入区块</translation>
</message>
<message>
<source>Information</source>
<translation>信息</translation>
</message>
<message>
<source>Invalid amount for -paytxfee=<amount>: '%s' (must be at least %s)</source>
<translation>无效的金额 -paytxfee=<amount>: '%s' (必须至少为 %s)</translation>
</message>
<message>
<source>Invalid netmask specified in -whitelist: '%s'</source>
<translation>-whitelist: '%s' 指定的网络掩码无效</translation>
</message>
<message>
<source>Keep at most <n> unconnectable transactions in memory (default: %u)</source>
<translation>内存中最多保留 <n> 笔孤立的交易 (默认: %u) </translation>
</message>
<message>
<source>Need to specify a port with -whitebind: '%s'</source>
<translation>-whitebind: '%s' 需要指定一个端口</translation>
</message>
<message>
<source>Node relay options:</source>
<translation>节点中继选项:</translation>
</message>
<message>
<source>RPC server options:</source>
<translation>RPC 服务器选项:</translation>
</message>
<message>
<source>Reducing -maxconnections from %d to %d, because of system limitations.</source>
<translation>因为系统的限制,将 -maxconnections 参数从 %d 降到了 %d</translation>
</message>
<message>
<source>Rescan the block chain for missing wallet transactions on startup</source>
<translation>启动时重新扫描区块链,查找遗漏的钱包交易</translation>
</message>
<message>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>跟踪/调试信息输出到控制台,不输出到 debug.log 文件</translation>
</message>
<message>
<source>Send transactions as zero-fee transactions if possible (default: %u)</source>
<translation>发送时尽可能 不支付交易费用 (默认: %u) </translation>
</message>
<message>
<source>Show all debugging options (usage: --help -help-debug)</source>
<translation>显示所有调试选项 (用法: --help -help-debug)</translation>
</message>
<message>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>客户端启动时压缩debug.log文件(缺省:no-debug模式时为1)</translation>
</message>
<message>
<source>Signing transaction failed</source>
<translation>签署交易失败</translation>
</message>
<message>
<source>The transaction amount is too small to pay the fee</source>
<translation>交易金额太小,不足以支付交易费</translation>
</message>
<message>
<source>This is experimental software.</source>
<translation>这是实验性的软件。</translation>
</message>
<message>
<source>Tor control port password (default: empty)</source>
<translation>Tor 控制端口密码 (默认值: 空白)</translation>
</message>
<message>
<source>Tor control port to use if onion listening enabled (default: %s)</source>
<translation>开启监听 onion 连接时的 Tor 控制端口号 (默认值: %s)</translation>
</message>
<message>
<source>Transaction amount too small</source>
<translation>交易金额太小</translation>
</message>
<message>
<source>Transaction amounts must be positive</source>
<translation>交易金额必须为正数</translation>
</message>
<message>
<source>Transaction too large for fee policy</source>
<translation>交易太大,超出手续费策略的限制</translation>
</message>
<message>
<source>Transaction too large</source>
<translation>交易太大</translation>
</message>
<message>
<source>Unable to bind to %s on this computer (bind returned error %s)</source>
<translation>无法在此计算机上绑定 %s (绑定返回错误 %s)</translation>
</message>
<message>
<source>Upgrade wallet to latest format on startup</source>
<translation>程序启动时升级钱包到最新格式</translation>
</message>
<message>
<source>Username for JSON-RPC connections</source>
<translation>JSON-RPC 连接用户名</translation>
</message>
<message>
<source>Warning</source>
<translation>警告</translation>
</message>
<message>
<source>Warning: unknown new rules activated (versionbit %i)</source>
<translation>警告: 不明的交易规则被启用了(versionbit %i)</translation>
</message>
<message>
<source>Whether to operate in a blocks only mode (default: %u)</source>
<translation>是否以仅区块模式运行 (默认: %u)</translation>
</message>
<message>
<source>Zapping all transactions from wallet...</source>
<translation>正在清除钱包中的所有交易...</translation>
</message>
<message>
<source>ZeroMQ notification options:</source>
<translation>ZeroMQ 通知选项:</translation>
</message>
<message>
<source>Password for JSON-RPC connections</source>
<translation>JSON-RPC 连接密码</translation>
</message>
<message>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>当最佳数据块变化时执行命令 (命令行中的 %s 会被替换成数据块哈希值)</translation>
</message>
<message>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>使用 -addnode, -seednode 和 -connect 选项时允许查询DNS</translation>
</message>
<message>
<source>Loading addresses...</source>
<translation>正在加载地址簿...</translation>
</message>
<message>
<source>(1 = keep tx meta data e.g. account owner and payment request information, 2 = drop tx meta data)</source>
<translation>(1 = 保留 tx meta data , 如 account owner 和 payment request information, 2 = 不保留 tx meta data) </translation>
</message>
<message>
<source>-maxtxfee is set very high! Fees this large could be paid on a single transaction.</source>
<translation>参数 -maxtxfee 设定了很高的金额!这是你一次交易就有可能付出的最高手续费。</translation>
</message>
<message>
<source>-paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>参数 -paytxfee 设定了很高的金额!这是你交易付款时所要付的手续费。</translation>
</message>
<message>
<source>Do not keep transactions in the mempool longer than <n> hours (default: %u)</source>
<translation>不要让交易留在内存池中超过 <n> 个小时 (默认值: %u)</translation>
</message>
<message>
<source>Fees (in %s/kB) smaller than this are considered zero fee for transaction creation (default: %s)</source>
<translation>当产生交易时,如果每千字节 (kB) 的手续费比这个值 (单位是 %s) 低,就视为没支付手续费 (默认值: %s)</translation>
</message>
<message>
<source>How thorough the block verification of -checkblocks is (0-4, default: %u)</source>
<translation>数据块验证 严密级别 -checkblocks (0-4, 默认: %u) </translation>
</message>
<message>
<source>Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)</source>
<translation>维护一份完整的交易索引, 用于 getrawtransaction RPC调用 (默认: %u)</translation>
</message>
<message>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: %u)</source>
<translation>行为异常的节点在多少秒内禁止重新连接 (默认: %u)</translation>
</message>
<message>
<source>Output debugging information (default: %u, supplying <category> is optional)</source>
<translation>输出调试信息 (默认: %u, 提供 <category> 是可选项)</translation>
</message>
<message>
<source>Support filtering of blocks and transaction with bloom filters (default: %u)</source>
<translation>支持用 Bloom 过滤器来过滤区块和交易(默认值: %u)</translation>
</message>
<message>
<source>Total length of network version string (%i) exceeds maximum length (%i). Reduce the number or size of uacomments.</source>
<translation>网络版本字符串的总长度 (%i) 超过最大长度 (%i) 了。请减少 uacomment 参数的数目或长度。</translation>
</message>
<message>
<source>Tries to keep outbound traffic under the given target (in MiB per 24h), 0 = no limit (default: %d)</source>
<translation>尝试将出站流量保持在给定目标之下 (每24小时,单位 MiB),0 = 无限制 (默认: %d)</translation>
</message>
<message>
<source>Unsupported argument -socks found. Setting SOCKS version isn't possible anymore, only SOCKS5 proxies are supported.</source>
<translation>找到不再支持的 -socks 参数。现在只支持 SOCKS5 协议的代理服务器,因此不可以指定 SOCKS 协议版本。</translation>
</message>
<message>
<source>Unsupported argument -whitelistalwaysrelay ignored, use -whitelistrelay and/or -whitelistforcerelay.</source>
<translation>一个不被支持的参数 -whitelistalwaysrelay 被忽略了。请使用 -whitelistrelay 或者 -whitelistforcerelay.</translation>
</message>
<message>
<source>Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: %s)</source>
<translation>通过Tor隐藏服务连接节点时 使用不同的SOCKS5代理 (默认: %s)</translation>
</message>
<message>
<source>Username and hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcuser. This option can be specified multiple times</source>
<translation>JSON-RPC 连接要使用的用户名和散列密码。<userpw> 的格式是:<用户名>:<盐>$<散列值>。在 share/rpcuser 目录下有一个示范的 python 脚本。这个选项可以被多次指定。</translation>
</message>
<message>
<source>Warning: Unknown block versions being mined! It's possible unknown rules are in effect</source>
<translation>警告: 未知的区块版本被挖掘!未知规则可能已生效</translation>
</message>
<message>
<source>(default: %s)</source>
<translation>(默认: %s) </translation>
</message>
<message>
<source>Always query for peer addresses via DNS lookup (default: %u)</source>
<translation>始终通过 DNS 查询节点地址 (默认: %u)</translation>
</message>
<message>
<source>How many blocks to check at startup (default: %u, 0 = all)</source>
<translation>启动时检测多少个数据块(默认: %u, 0=所有)</translation>
</message>
<message>
<source>Include IP addresses in debug output (default: %u)</source>
<translation>在调试输出中包含IP地址 (默认: %u)</translation>
</message>
<message>
<source>Invalid -proxy address: '%s'</source>
<translation>无效的代理地址:%s</translation>
</message>
<message>
<source>Listen for JSON-RPC connections on <port> (default: %u or testnet: %u)</source>
<translation>使用 <port>端口监听 JSON-RPC 连接 (默认: %u ; testnet: %u) </translation>
</message>
<message>
<source>Listen for connections on <port> (default: %u or testnet: %u)</source>
<translation>使用端口 <port> 监听连接 (默认: %u ; testnet: %u) </translation>
</message>
<message>
<source>Maintain at most <n> connections to peers (default: %u)</source>
<translation>保留最多 <n> 条节点连接 (默认: %u) </translation>
</message>
<message>
<source>Make the wallet broadcast transactions</source>
<translation>让钱包广播交易</translation>
</message>
<message>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)</source>
<translation>每个连接的最大接收缓存,<n>*1000 字节 (默认: %u)</translation>
</message>
<message>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: %u)</source>
<translation>每个连接的最大发送缓存,<n>*1000 字节 (默认: %u)</translation>
</message>
<message>
<source>Prepend debug output with timestamp (default: %u)</source>
<translation>输出调试信息时,前面加上时间戳 (默认: %u)</translation>
</message>
<message>
<source>Relay and mine data carrier transactions (default: %u)</source>
<translation>转发并挖掘数据运载交易 (默认: %u)</translation>
</message>
<message>
<source>Relay non-P2SH multisig (default: %u)</source>
<translation>是否转发 非P2SH格式的多签名交易 (默认: %u) </translation>
</message>
<message>
<source>Set key pool size to <n> (default: %u)</source>
<translation>设置私钥池大小为 <n> (默认:%u) </translation>
</message>
<message>
<source>Set the number of threads to service RPC calls (default: %d)</source>
<translation>设置RPC服务线程数 (默认: %d) </translation>
</message>
<message>
<source>Specify configuration file (default: %s)</source>
<translation>指定配置文件 (默认: %s) </translation>
</message>
<message>
<source>Specify connection timeout in milliseconds (minimum: 1, default: %d)</source>
<translation>指定连接超时毫秒数 (最小: 1, 默认: %d) </translation>
</message>
<message>
<source>Specify pid file (default: %s)</source>
<translation>指定 pid 文件 (默认: %s) </translation>
</message>
<message>
<source>Spend unconfirmed change when sending transactions (default: %u)</source>
<translation>付款时允许使用未确认的零钱 (默认: %u) </translation>
</message>
<message>
<source>Threshold for disconnecting misbehaving peers (default: %u)</source>
<translation>断开 非礼节点的阀值 (默认: %u) </translation>
</message>
<message>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>-onlynet 指定的是未知网络:%s</translation>
</message>
<message>
<source>Insufficient funds</source>
<translation>金额不足</translation>
</message>
<message>
<source>Loading block index...</source>
<translation>正在加载区块索引...</translation>
</message>
<message>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>添加节点并与其保持连接</translation>
</message>
<message>
<source>Loading wallet...</source>
<translation>正在加载钱包...</translation>
</message>
<message>
<source>Cannot downgrade wallet</source>
<translation>无法降级钱包</translation>
</message>
<message>
<source>Cannot write default address</source>
<translation>无法写入默认地址</translation>
</message>
<message>
<source>Rescanning...</source>
<translation>正在重新扫描...</translation>
</message>
<message>
<source>Done loading</source>
<translation>加载完成</translation>
</message>
<message>
<source>Error</source>
<translation>错误</translation>
</message>
</context>
</TS> | </message> |
token.py | from pytezos import PyTezosClient
class Token(object):
def __init__(self, client: PyTezosClient):
self.client = client
def set_admin(self, contract_id, new_admin):
print(f"Setting fa2 admin on {contract_id} to {new_admin}")
call = self.set_admin_call(contract_id, new_admin)
res = call.autofill().sign().inject(_async=False)
print(f"Done {res[0]['hash']}")
def set_admin_call(self, contract_id, new_admin):
contract = self.client.contract(contract_id)
op = contract \
.set_admin(new_admin)
return op
def set_minter_call(self, contract_id, new_admin):
| contract = self.client.contract(contract_id)
op = contract \
.set_minter(new_admin)
return op |
|
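A minimal usage sketch for the Token helper above; the shell URL, key file, and the KT1/tz1 addresses are placeholders, not values taken from this file:
from pytezos import pytezos
client = pytezos.using(shell="https://mainnet.api.tez.ie", key="key.json")  # hypothetical endpoint and key file
token = Token(client)
token.set_admin("KT1...", "tz1...")  # placeholder contract id and new admin address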
AddedFile.ts | interface AddedFile {
name: string | }
export default AddedFile | /**
* `undefined` if file. `boolean` if dir
*/
hasIndexFile?: boolean |
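Two illustrative values for the interface above (names invented): per the doc comment, hasIndexFile stays undefined for plain files and is a boolean for directories.
const dir: AddedFile = { name: "src", hasIndexFile: true }
const file: AddedFile = { name: "index.ts" } // undefined: it's a file, not a dir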
merge.py | #!/usr/bin/python3
# Author: Dr. Christopher C. Hall, aka DrPlantabyte
# Copyright 2021 Christopher C. Hall
# Permission granted to use and redistribute this code in accordance with the Creative Commons (CC BY 4.0) License:
# https://creativecommons.org/licenses/by/4.0/
from subprocess import call, Popen, PIPE, STDOUT
import os, sys, re
from os import path
def main():
# check if in a merge
if not merge_in_progress():
# abort if there are uncommitted changes
changes = run('git', 'status', '-uall', '--porcelain', capture_stdout=True,
fail_msg='Cannot merge, current working directory is not in a git repository!')
## store the current branch name so we can switch back to it later
this_branch = run('git', 'symbolic-ref', '--short', '-q', 'HEAD',
capture_stdout=True).strip()
## with --porcelain, changes will be an empty string if there are no uncommitted changes
if len(changes.strip()) > 0:
## uncommitted changes detected, abort
print('Error: uncommitted changes detected! Commit first and then merge.')
exit(1)
# ask which branch/commit to merge from and to
run('git', 'fetch', '--all')
branch_list = [x for x in run('git', 'for-each-ref', '--format', '%(refname:short)', 'refs/heads/',
capture_stdout=True).replace('\r', '').split('\n') if len(x) > 0]
print('Currently on branch:',this_branch)
_, m_from = choose_from('Which branch do you want to merge from?', branch_list)
if len(m_from) == 0: m_from = this_branch
_, m_to = choose_from('Which branch do you want to merge into?', branch_list)
if len(m_to) == 0: m_to = this_branch
if m_from == m_to:
print('Error: From-branch and into-branch must be different')
exit(1)
## test that both branches exist
run('git', 'cat-file', '-e', m_from, fail_msg='Error: target "%s" does not exist' % m_from)
run('git', 'cat-file', '-e', m_to, fail_msg='Error: target "%s" does not exist' % m_to)
# test for merge conflicts
if no_merge_conflicts(m_from, m_to, this_branch):
print('No merge conflicts detected.')
# can do simple merge, ask user for confirmation
if confirm('Merge %s -> %s?' % (m_from, m_to)):
# do merge
## need to be in the into-branch and call merge on the from-branch
run('git', 'switch', m_to)
run('git', 'merge', m_from)
print('Done!')
exit(0)
else:
# merge conflicts exist
print('Merge conflicts detected. You will need to resolve them before you can merge.')
if not confirm('Start merge operation?'):
print('Merge canceled.')
exit(0)
# start merge and show conflicts
run('git', 'switch', m_to)
test_run('git', 'merge', '--no-commit', '--no-ff', m_from) # git merge will return an error code here, even on success
unresolved_files = list_unresolved()
print('Files with unresolved merge conflicts:')
for f in unresolved_files: print('\t', f, sep='')
# ask user if they would like to use the git merge tool
if not merge_tool_ui(this_branch):
print('Edit the files to resolve all conflicts, then re-run this command.')
# exit
exit(0)
else:
print('Merge operation in progress.')
# show unresolved files (git diff --name-only --diff-filter=U)
unresolved_files = list_unresolved()
if len(unresolved_files) > 0:
print('The following files are marked as unresolved:')
for f in unresolved_files: print('\t', f, sep='')
# ask if user wants to abort the merge
if confirm('Abort merge?'):
run('git', 'merge', '--abort')
run('git', 'clean', '-f')
print('Merge aborted.')
exit(0)
# if there are unresolved files, ask if user wants to run the merge tool
if len(unresolved_files) > 0: merge_tool_ui()
# Ask user if all conflicts have been resolved
if confirm('Have ALL merge conflicts been resolved and all changes tested?') and confirm('Confirm merge?'):
# If yes, complete merge and clean up (git clean -f)
merge_msg = ask_for_text('Merge commit message')
run('git', 'add', '--all')
run('git', 'commit', '-m', merge_msg)
run('git', 'clean', '-f')
print('Done!')
exit(0)
else:
# If no, exit script
print('Resolve any merge conflicts and re-run this command when you are ready to complete the merge or wish to abort.')
exit(0)
# Done
print('Done!')
def merge_tool_ui(revert_commit=None):
merge_tool = run('git', 'config', '--get', 'merge.tool', capture_stdout=True).replace('\r', '').replace('\n', '')
if confirm('Resolve conflicts using %s?' % merge_tool):
# run mergetool command
mt_success = test_run('git', 'mergetool', hide_output=False)
## Note: git mergetool will ask 'Was the merge successful [y/n]?' at the end if the merge tool program exits non-zero,
## and return non-zero if the user says 'no' (it marks all files as resolved and exits with code 0 on 'yes')
if not mt_success:
print('Edit the files to resolve all conflicts, then re-run this command.')
exit(0)
# ask if merge is done
if confirm('Ready to complete merge operation?'):
unresolved_files = list_unresolved()
if len(unresolved_files) > 0:
print('The following files are still marked as unresolved:')
for f in unresolved_files: print('\t', f, sep='')
if confirm('Mark all files as resolved?'):
run('git', 'add', '--all')
if confirm('Have all changes been tested? Confirm merge:'):
if revert_commit is not None:
merge_msg = 'merged commit %s into this branch' % revert_commit
else:
merge_msg = ask_for_text('Merge commit message')
run('git', 'add', '--all')
run('git', 'commit', '-m', merge_msg)
run('git', 'clean', '-f')
print('Done!')
exit(0)
# if not done, ask if user wants to abort the merge (git merge --abort)
if confirm('Abort merge?'):
run('git', 'merge', '--abort')
run('git', 'clean', '-f')
if revert_commit is not None: run('git', 'switch', revert_commit)
print('Merge aborted.')
exit(0)
return False
def merge_in_progress():
## return True if a merge is in progress, False if not
root_dir = run('git', 'rev-parse', '--show-toplevel', capture_stdout=True)
if root_dir.endswith('\n'): root_dir = root_dir[:-1]
if root_dir.endswith('\r'): root_dir = root_dir[:-1]
merge_head_file = path.join(root_dir, '.git', 'MERGE_HEAD')
if path.exists(merge_head_file):
with open(merge_head_file, 'r') as fin:
return len(fin.read().strip()) > 0
return False
def no_merge_conflicts(merge_from, merge_to, revert_to):
# git checkout fails if already on that branch, switch does not
run('git', 'switch', merge_to, capture_stdout=True)
can_merge = test_run('git', 'merge', '--no-commit', '--no-ff', merge_from, hide_output=False)
test_run('git', 'merge', '--abort') ## undo test merge
run('git', 'switch', revert_to, capture_stdout=True)
return can_merge
def list_unresolved():
return [x for x in run('git', 'diff', '--name-only', '--diff-filter=U',
capture_stdout=True).replace('\r', '').split('\n') if len(x) > 0]
# | return input()
def confirm(msg):
while True:
r = input('%s [y/n]: ' % msg).strip().lower()
if r == 'y' or r == 'yes':
return True
elif r == 'n' or r == 'no':
return False
else:
continue
def choose_from(msg, options_list):
while True:
try:
print(msg)
num = 1
for opt in options_list:
print('%s:\t%s' % (num, opt))
num += 1
r = input('Enter number: ')
i = int(r)-1
return i, options_list[i]
except ValueError:
print('Not a number, try again.')
except IndexError:
print('Not a valid option, try again.')
def run(command, *args, fail_msg=None, capture_stdout=False):
args = list(args) # convert tuple to list
if fail_msg is None:
fail_msg = "Error: non-zero exit code returned by %s %s" % (command, " ".join(args))
if not capture_stdout:
exit_code = call([command]+args)
ret_val = None
else:
p = Popen([command]+args, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
o, e = p.communicate()
if e is not None and len(e) > 0: print(e.decode('utf8'), file=sys.stderr)
ret_val = o.decode('utf8')
exit_code = p.returncode
if exit_code != 0:
print(fail_msg)
exit(1)
return ret_val
def test_run(command, *args, hide_output=True):
args = list(args)
if hide_output:
p = Popen([command] + args, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
exit_code = p.wait()
else:
exit_code = call([command] + args)
return exit_code == 0
#
if __name__ == '__main__':
main() | def ask_for_text(msg, **kwargs):
print("%s: " % msg, **kwargs) |
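A short sketch of the run() helper's two modes, using the same git commands the script itself issues; it assumes the working directory is inside a git repository:
branch = run('git', 'symbolic-ref', '--short', '-q', 'HEAD', capture_stdout=True).strip()  # output captured
run('git', 'fetch', '--all')  # output streamed to the terminal; any non-zero exit status terminates the script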
main.go | package main
import (
"flag"
"github.com/lysu/rtvk"
"strings"
"github.com/coreos/etcd/raft/raftpb"
)
func | () {
cluster := flag.String("cluster", "http://127.0.0.1:9021", "comma separated cluster peers")
id := flag.Int("id", 1, "node ID")
kvport := flag.Int("port", 9121, "key-value server port")
join := flag.Bool("join", false, "join an existing cluster")
flag.Parse()
proposeC := make(chan string)
defer close(proposeC)
confChangeC := make(chan raftpb.ConfChange)
defer close(confChangeC)
var storage *rtvk.Storage
getSnapshot := func() ([]byte, error) { return storage.GetSnapshot() }
commitC, errorC, snapshotterReady := rtvk.NewRaftNode(*id, strings.Split(*cluster, ","), *join, getSnapshot, proposeC, confChangeC)
kvs := rtvk.NewKVStore(<-snapshotterReady, proposeC, commitC, errorC)
rtvk.ServeHttpKVAPI(kvs, *kvport, confChangeC, errorC)
}
| main |
menmosd_routing.rs | //! Test blob routing.
mod util;
use std::time::Duration;
use anyhow::Result;
use interface::RoutingConfig;
use menmos_client::{Client, Meta};
use testing::fixtures::Menmos;
use util::stream_to_bytes;
#[tokio::test]
async fn | () -> Result<()> {
let cluster = Menmos::new().await?;
// Key doesn't exist in the beginning.
let response = cluster.client.get_routing_config().await?;
assert_eq!(response, None);
let cfg = RoutingConfig::new("some_field").with_route("a", "b");
cluster.client.set_routing_config(&cfg).await?;
cluster.flush().await?;
// Key exists afterwards.
let response = cluster.client.get_routing_config().await?;
assert_eq!(response, Some(cfg.clone()));
// Other user doesn't see the routing key.
cluster.add_user("john", "bingbong").await?;
let john_client = Client::new(&cluster.directory_url, "john", "bingbong").await?;
let response = john_client.get_routing_config().await?;
assert_eq!(response, None);
// Deleting the key works.
cluster.client.delete_routing_config().await?;
cluster.flush().await?;
let response = cluster.client.get_routing_config().await?;
assert_eq!(response, None);
cluster.stop_all().await?;
Ok(())
}
#[tokio::test]
async fn move_request_full_loop() -> Result<()> {
let mut cluster = Menmos::new().await?;
cluster.add_amphora("alpha").await?;
// We add a blob on amphora alpha.
let blob_id = cluster
.push_document(
"yeet yeet",
Meta::file("file1.txt").with_meta("some_file", "bing"),
)
.await?;
cluster.flush().await?;
// We verify the blob is there.
assert!(cluster
.root_directory
.as_ref()
.join("alpha-blobs")
.join(&blob_id)
.with_extension("blob")
.exists());
// Then we add a new storage node, send a move request to move the blob over there, and wait a bit.
cluster.add_amphora("beta").await?;
cluster
.client
.set_routing_config(&RoutingConfig::new("some_file").with_route("bing", "beta"))
.await?;
cluster.flush().await?;
tokio::time::sleep(Duration::from_secs(2)).await;
// We verify the blob has moved.
assert!(!cluster
.root_directory
.as_ref()
.join("alpha-blobs")
.join(&blob_id)
.with_extension("blob")
.exists());
assert!(cluster
.root_directory
.as_ref()
.join("beta-blobs")
.join(&blob_id)
.with_extension("blob")
.exists());
// And we verify we can still fetch the blob.
let file_stream = cluster.client.get_file(&blob_id).await?;
let file_bytes = stream_to_bytes(file_stream).await?;
let file_string = String::from_utf8_lossy(file_bytes.as_ref());
assert_eq!(file_string, "yeet yeet");
cluster.stop_all().await?;
Ok(())
}
| get_set_delete_routing_config |
run_training.py | from argparse import Namespace
import csv
from logging import Logger
import os
from pprint import pformat
from typing import List
import numpy as np
from tensorboardX import SummaryWriter
import torch
from tqdm import trange
import pickle
from torch.optim.lr_scheduler import ExponentialLR
from .evaluate import evaluate, evaluate_predictions
from .predict import predict, save_predictions
from .train import train
from chemprop.data import StandardScaler
from chemprop.data.utils import flip_data, get_class_sizes, get_data, get_task_names, split_data, split_loocv
from chemprop.models import build_model
from chemprop.nn_utils import param_count
from chemprop.utils import build_optimizer, build_lr_scheduler, get_loss_func, get_metric_func, load_checkpoint,\
makedirs, save_checkpoint
def | (args: Namespace, logger: Logger = None) -> List[float]:
"""
Trains a model and returns test scores on the model checkpoint with the highest validation score.
:param args: Arguments.
:param logger: Logger.
:return: A list of ensemble scores for each task.
"""
if logger is not None:
debug, info = logger.debug, logger.info
else:
debug = info = print
# Set GPU
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
# Print args
debug(pformat(vars(args)))
# Get data
debug('Loading data')
args.task_names = get_task_names(args.data_path, args.data_format)
data = get_data(path=args.data_path, args=args, logger=logger)
args.num_tasks = data.num_tasks()
args.features_size = data.features_size()
debug(f'Number of tasks = {args.num_tasks}')
# Split data
debug(f'Splitting data with seed {args.seed}')
if args.separate_test_path:
test_data = get_data(path=args.separate_test_path, args=args, features_path=args.separate_test_features_path, logger=logger)
if args.separate_val_path:
val_data = get_data(path=args.separate_val_path, args=args, features_path=args.separate_val_features_path, logger=logger)
if args.separate_val_path and args.separate_test_path:
train_data = data
elif args.separate_val_path:
train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2), seed=args.seed, args=args, logger=logger)
elif args.separate_test_path:
train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0), seed=args.seed, args=args, logger=logger)
elif args.split_type == 'loocv':
train_data, val_data, test_data = split_loocv(data=data, args=args, logger=logger)
else:
train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes, seed=args.seed, args=args, logger=logger)
if args.dataset_type == 'classification':
class_sizes = get_class_sizes(test_data)
debug('Class sizes in test set')
for i, task_class_sizes in enumerate(class_sizes):
debug(f'{args.task_names[i]} '
f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')
if not args.train_all and task_class_sizes == 0: # TODO: only works for just 1 property prediction task
debug('Moved to next epoch due to homogeneous targets in test set.')
return [float('nan')]
if args.save_smiles_splits:
with open(args.data_path, 'r') as f:
reader = csv.reader(f)
header = next(reader)
lines_by_smiles = {}
indices_by_smiles = {}
for i, line in enumerate(reader):
smiles = (line[0], line[1])
lines_by_smiles[smiles] = line
indices_by_smiles[smiles] = i
all_split_indices = []
for dataset, name in [(train_data, 'train'), (val_data, 'val'), (test_data, 'test')]:
with open(os.path.join(args.save_dir, name + '_smiles.csv'), 'w') as f:
writer = csv.writer(f)
writer.writerow(['smiles'])
for smiles in dataset.smiles():
writer.writerow([smiles])
with open(os.path.join(args.save_dir, name + '_full.csv'), 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for smiles in dataset.smiles():
writer.writerow(lines_by_smiles[smiles])
split_indices = []
for smiles in dataset.smiles():
split_indices.append(indices_by_smiles[smiles])
split_indices = sorted(split_indices)
all_split_indices.append(split_indices)
with open(os.path.join(args.save_dir, 'split_indices.pckl'), 'wb') as f:
pickle.dump(all_split_indices, f)
if args.symmetric:
train_data = flip_data(train_data)
if args.features_scaling:
drug_scaler, cmpd_scaler = train_data.normalize_features(replace_nan_token=0)
val_data.normalize_features(drug_scaler, cmpd_scaler)
test_data.normalize_features(drug_scaler, cmpd_scaler)
else:
drug_scaler, cmpd_scaler = None, None
args.train_data_size = len(train_data)
debug(f'Total size = {len(data):,} | '
f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')
# Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
if args.dataset_type == 'regression':
debug('Fitting scaler')
train_smiles, train_targets = train_data.smiles(), train_data.targets()
scaler = StandardScaler().fit(train_targets)
scaled_targets = scaler.transform(train_targets).tolist()
train_data.set_targets(scaled_targets)
else:
scaler = None
# Get loss and metric functions
loss_func = get_loss_func(args)
metric_func = get_metric_func(metric=args.metric)
# Set up test set evaluation
test_smiles, test_targets = test_data.smiles(), test_data.targets()
if args.dataset_type == 'multiclass':
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks, args.multiclass_num_classes))
else:
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))
# Train ensemble of models
for model_idx in range(args.ensemble_size):
# Tensorboard writer
save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
makedirs(save_dir)
try:
writer = SummaryWriter(log_dir=save_dir)
except:
writer = SummaryWriter(logdir=save_dir)
# Load/build model
if args.checkpoint_paths is not None:
debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')
model = load_checkpoint(args.checkpoint_paths[model_idx], current_args=args, logger=logger)
else:
debug(f'Building model {model_idx}')
model = build_model(args)
debug(model)
debug(f'Number of parameters = {param_count(model):,}')
if args.cuda:
debug('Moving model to cuda')
model = model.cuda()
# Ensure that model is saved in correct location for evaluation if 0 epochs
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)
# Optimizers
optimizer = build_optimizer(model, args)
# Learning rate schedulers
scheduler = build_lr_scheduler(optimizer, args)
# Run training
best_score = float('inf') if args.minimize_score else -float('inf')
best_epoch, n_iter = 0, 0
for epoch in trange(args.epochs):
debug(f'Epoch {epoch}')
n_iter = train(
model=model,
data=train_data,
loss_func=loss_func,
optimizer=optimizer,
scheduler=scheduler,
args=args,
n_iter=n_iter,
logger=logger,
writer=writer
)
if isinstance(scheduler, ExponentialLR):
scheduler.step()
val_scores, val_loss = evaluate(
model=model,
data=val_data,
loss_func=loss_func,
num_tasks=args.num_tasks,
metric_func=metric_func,
batch_size=args.batch_size,
dataset_type=args.dataset_type,
scaler=scaler,
logger=logger
)
# Average validation score
avg_val_score = np.nanmean(val_scores)
debug(f'Validation {args.metric} = {avg_val_score:.6f}')
writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)
debug(f'Validation loss = {val_loss:.6f}')
writer.add_scalar(f'validation_loss', val_loss, n_iter)
if args.show_individual_scores:
# Individual validation scores
for task_name, val_score in zip(args.task_names, val_scores):
debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')
writer.add_scalar(f'validation_{task_name}_{args.metric}', val_score, n_iter)
# Save model checkpoint if improved validation score
if (args.minimize_score and avg_val_score < best_score) or \
(not args.minimize_score and avg_val_score > best_score):
best_score, best_epoch = avg_val_score, epoch
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)
# Evaluate on test set using model with best validation score
info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
model = load_checkpoint(os.path.join(save_dir, 'model.pt'), cuda=args.cuda, logger=logger)
test_preds = predict(
model=model,
data=test_data,
batch_size=args.batch_size,
scaler=scaler
)
if args.save_preds:
val_preds = predict(model=model, data=val_data, batch_size=args.batch_size, scaler=scaler)
train_preds = predict(model=model, data=train_data, batch_size=args.batch_size, scaler=scaler)
save_predictions(save_dir, train_data, val_data, test_data, \
train_preds, val_preds, test_preds, args.task_names, scaler)
test_scores = evaluate_predictions(
preds=test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
if len(test_preds) != 0:
sum_test_preds += np.array(test_preds)
# Average test score
avg_test_score = np.nanmean(test_scores)
info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)
if args.show_individual_scores:
# Individual test scores
for task_name, test_score in zip(args.task_names, test_scores):
info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')
writer.add_scalar(f'test_{task_name}_{args.metric}', test_score, n_iter)
# Evaluate ensemble on test set
avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()
ensemble_scores = evaluate_predictions(
preds=avg_test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
# Average ensemble score
avg_ensemble_test_score = np.nanmean(ensemble_scores)
info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')
writer.add_scalar(f'ensemble_test_{args.metric}', avg_ensemble_test_score, 0)
# Individual ensemble scores
if args.show_individual_scores:
for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')
return ensemble_scores
| run_training |
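A hypothetical invocation sketch for run_training above. In upstream chemprop the Namespace comes from chemprop.parsing.parse_train_args; whether this fork keeps that entry point is an assumption, so treat the import as illustrative:
from chemprop.parsing import parse_train_args  # assumed entry point, as in upstream chemprop
args = parse_train_args()
ensemble_scores = run_training(args)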
actors_tf2_test.py | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for actors_tf2."""
from absl.testing import absltest
from acme import environment_loop
from acme import specs
from acme.agents import actors_tf2
from acme.testing import fakes
import dm_env
import numpy as np
import sonnet as snt
import tensorflow as tf
def _make_fake_env() -> dm_env.Environment:
env_spec = specs.EnvironmentSpec(
observations=specs.Array(shape=(10, 5), dtype=np.float32),
actions=specs.DiscreteArray(num_values=3),
rewards=specs.Array(shape=(), dtype=np.float32),
discounts=specs.BoundedArray(
shape=(), dtype=np.float32, minimum=0., maximum=1.),
)
return fakes.Environment(env_spec, episode_length=10) |
class ActorTest(absltest.TestCase):
def test_feedforward(self):
environment = _make_fake_env()
env_spec = specs.make_environment_spec(environment)
network = snt.Sequential([
snt.Flatten(),
snt.Linear(env_spec.actions.num_values),
lambda x: tf.argmax(x, axis=-1, output_type=env_spec.actions.dtype),
])
actor = actors_tf2.FeedForwardActor(network)
loop = environment_loop.EnvironmentLoop(environment, actor)
loop.run(20)
def test_recurrent(self):
environment = _make_fake_env()
env_spec = specs.make_environment_spec(environment)
network = snt.DeepRNN([
snt.Flatten(),
snt.Linear(env_spec.actions.num_values),
lambda x: tf.argmax(x, axis=-1, output_type=env_spec.actions.dtype),
])
actor = actors_tf2.RecurrentActor(network)
loop = environment_loop.EnvironmentLoop(environment, actor)
loop.run(20)
if __name__ == '__main__':
absltest.main() | |
calculate-condition.ts | import { createToken, Lexer, CstParser, CstNode, IToken } from 'chevrotain'
const Identifier = createToken({
name: 'Identifier'
, pattern: /[a-zA-Z0-9_-]+/
})
const And = createToken({ name: 'And', pattern: /and/ })
const Or = createToken({ name: 'Or', pattern: /or/ })
const Not = createToken({ name: 'Not', pattern: /not/ })
const Xor = createToken({ name: 'Xor', pattern: /xor/ })
const WhiteSpace = createToken({
name: 'WhiteSpace'
, pattern: /\s+/
, group: Lexer.SKIPPED
})
const LeftParenthesis = createToken({
name: 'LeftParenthesis'
, pattern: /\(/
})
const RightParenthesis = createToken({
name: 'RightParenthesis'
, pattern: /\)/
})
const allTokens = [
WhiteSpace
, And
, Or
, Not
, Xor
, LeftParenthesis
, RightParenthesis
, Identifier
]
const ConditionLexer = new Lexer(allTokens)
class ConditionParser extends CstParser {
constructor() {
super(allTokens)
this.performSelfAnalysis()
}
public expression = this.RULE('expression', () => {
this.SUBRULE(this.orExpression)
})
private orExpression = this.RULE('orExpression', () => {
this.SUBRULE(this.xorExpression, { LABEL: 'lhs' })
this.MANY(() => {
this.CONSUME(Or)
this.SUBRULE2(this.xorExpression, { LABEL: 'rhs' })
})
})
private xorExpression = this.RULE('xorExpression', () => {
this.SUBRULE(this.andExpression, { LABEL: 'lhs' })
this.MANY(() => {
this.CONSUME(Xor)
this.SUBRULE2(this.andExpression, { LABEL: 'rhs' })
})
})
private andExpression = this.RULE('andExpression', () => {
this.SUBRULE(this.atomicExpression, { LABEL: 'lhs' })
this.MANY(() => {
this.CONSUME(And)
this.SUBRULE2(this.atomicExpression, { LABEL: 'rhs' })
})
})
private atomicExpression = this.RULE('atomicExpression', () => {
this.OR([
{ ALT: () => this.SUBRULE(this.notExpression) }
, { ALT: () => this.SUBRULE(this.parenthesisExpression) }
, { ALT: () => this.CONSUME(Identifier) }
])
})
private notExpression = this.RULE('notExpression', () => {
this.CONSUME(Not)
this.SUBRULE(this.atomicExpression)
})
private parenthesisExpression = this.RULE('parenthesisExpression', () => {
this.CONSUME(LeftParenthesis)
this.SUBRULE(this.expression)
this.CONSUME(RightParenthesis)
})
}
const parser = new ConditionParser()
const BaseCstVisitor = parser.getBaseCstVisitorConstructor()
class ConditionInterpreter extends BaseCstVisitor {
constructor(private tags: string[]) {
super()
this.validateVisitor()
}
expression(ctx: { orExpression: CstNode[] }): boolean {
return this.visit(ctx.orExpression)
}
orExpression(ctx: { lhs: CstNode[]; rhs?: CstNode[] }): boolean {
const lhsResult = this.visit(ctx.lhs)
let result = lhsResult
if (ctx.rhs) {
ctx.rhs.forEach(rhs => {
const rhsResult = this.visit(rhs)
result = result || rhsResult
})
}
return result
}
xorExpression(ctx: { lhs: CstNode[]; rhs?: CstNode[] }): boolean {
const lhsResult = this.visit(ctx.lhs)
let result = lhsResult
if (ctx.rhs) {
ctx.rhs.forEach(rhs => {
const rhsResult = this.visit(rhs)
result = (result && !rhsResult) || (!result && rhsResult)
})
}
return result
}
andExpression(ctx: { lhs: CstNode[]; rhs?: CstNode[] }): boolean {
const lhsResult = this.visit(ctx.lhs)
let result = lhsResult
if (ctx.rhs) {
ctx.rhs.forEach(rhs => {
const rhsResult = this.visit(rhs)
result = result && rhsResult
}) | return result
}
atomicExpression(ctx: {
notExpression?: CstNode
parenthesisExpression?: CstNode
Identifier?: IToken[]
}): boolean {
if (ctx.notExpression) {
return this.visit(ctx.notExpression)
} else if (ctx.parenthesisExpression) {
return this.visit(ctx.parenthesisExpression)
} else if (ctx.Identifier) {
return this.tags.includes(ctx.Identifier[0].image)
}
throw new Error('Unknown atomic expression')
}
notExpression(ctx: { atomicExpression: CstNode }): boolean {
const lhsResult = this.visit(ctx.atomicExpression)
const result = !lhsResult
return result
}
parenthesisExpression(ctx: { expression: CstNode }): boolean {
return this.visit(ctx.expression)
}
}
export function calculateCondition(condition: string, tags: string[]): boolean {
if (/^\s*$/.test(condition)) return false
const lexingResult = ConditionLexer.tokenize(condition)
parser.input = lexingResult.tokens
const cst = parser.expression()
if (parser.errors.length > 0) {
throw parser.errors
}
return new ConditionInterpreter(tags).visit(cst)
} | }
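A usage sketch for the exported function above, following the grammar it defines; an identifier evaluates to true when it appears in the tags array:
calculateCondition('a and (b or not c)', ['a', 'b']) // => true
calculateCondition('a xor b', ['a', 'b'])            // => false
calculateCondition('', ['a'])                        // => false (blank conditions short-circuit)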
|
get_healthz_responses.go | // Code generated by go-swagger; DO NOT EDIT.
// Copyright 2017-2022 Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package restapi
| "fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/health/models"
)
// GetHealthzReader is a Reader for the GetHealthz structure.
type GetHealthzReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GetHealthzReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetHealthzOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 500:
result := NewGetHealthzFailed()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
}
}
// NewGetHealthzOK creates a GetHealthzOK with default headers values
func NewGetHealthzOK() *GetHealthzOK {
return &GetHealthzOK{}
}
/*GetHealthzOK handles this case with default header values.
Success
*/
type GetHealthzOK struct {
Payload *models.HealthResponse
}
func (o *GetHealthzOK) Error() string {
return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload)
}
func (o *GetHealthzOK) GetPayload() *models.HealthResponse {
return o.Payload
}
func (o *GetHealthzOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.HealthResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetHealthzFailed creates a GetHealthzFailed with default headers values
func NewGetHealthzFailed() *GetHealthzFailed {
return &GetHealthzFailed{}
}
/*GetHealthzFailed handles this case with default header values.
Failed to contact local Cilium daemon
*/
type GetHealthzFailed struct {
Payload models.Error
}
func (o *GetHealthzFailed) Error() string {
return fmt.Sprintf("[GET /healthz][%d] getHealthzFailed %+v", 500, o.Payload)
}
func (o *GetHealthzFailed) GetPayload() models.Error {
return o.Payload
}
func (o *GetHealthzFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
} | // This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import ( |
switch_inject.py | #!/usr/bin/python
#
# connect n nodes to a virtual switch and instantiate the emulation
# session; intended to run inside the CORE emulator, which provides
# the "coreemu" object in globals
from core import load_logging_config
from core.emulator.emudata import IpPrefixes
from core.enumerations import NodeTypes, EventTypes
load_logging_config()
| prefixes = IpPrefixes("10.83.0.0/16")
# create emulator instance for creating sessions and utility methods
coreemu = globals()["coreemu"]
session = coreemu.create_session()
# must be in configuration state for nodes to start, when using "node_add" below
session.set_state(EventTypes.CONFIGURATION_STATE)
# create switch network node
switch = session.add_node(_type=NodeTypes.SWITCH)
# create nodes
for _ in xrange(nodes):
node = session.add_node()
interface = prefixes.create_interface(node)
session.add_link(node.objid, switch.objid, interface_one=interface)
# instantiate session
session.instantiate()
if __name__ in {"__main__", "__builtin__"}:
example(2) | def example(nodes):
# ip generator for example |
index.ts | import { IndoorMap } from "./base/IndoorMap";
import './assets/css/indoor3D.css'; | // dataUrl: 'assets/data/testMapData.json',
selectable: true,
mapDiv: ""
});
indoorMap.setSelectListen(function (obj: any) {
console.log(obj);
})
console.log('start compile !') | // import { IndoorMap } from "./base/IndoorMap";
var indoorMap = new IndoorMap({
dataUrl: 'assets/data/mall.1.json', |
layout.js | //>>built
define(["dojo/_base/array","dojo/dom-class","dojo/dom-geometry","dojo/dom-style","dojo/_base/lang"],function(h,m,f,k,d){function n(a){return a.substring(0,1).toUpperCase()+a.substring(1)}function l(a,b){var c=a.resize?a.resize(b):f.setMarginBox(a.domNode,b);c?d.mixin(a,c):(d.mixin(a,f.getMarginBox(a.domNode)),d.mixin(a,b))}return{marginBox2contentBox:function(a,b){var c=k.getComputedStyle(a),g=f.getMarginExtents(a,c),d=f.getPadBorderExtents(a,c);return{l:k.toPixelValue(a,c.paddingLeft),t:k.toPixelValue(a, | c.paddingTop),w:b.w-(g.w+d.w),h:b.h-(g.h+d.h)}},layoutChildren:function(a,b,c,g,f){b=d.mixin({},b);m.add(a,"dijitLayoutContainer");c=h.filter(c,function(b){return"center"!=b._constraint&&"client"!=b.layoutAlign}).concat(h.filter(c,function(b){return"center"==b._constraint||"client"==b.layoutAlign}));h.forEach(c,function(a){var c=a.domNode,e=a._constraint||a.layoutAlign;if(!e)throw Error("No constraint setting for "+a.id);var d=c.style;d.left=b.l+"px";d.top=b.t+"px";d.position="absolute";m.add(c,"dijitAlign"+
n(e));c={};g&&g==a.id&&(c["top"==a._constraint||"bottom"==a._constraint?"h":"w"]=f);"top"==e||"bottom"==e?(c.w=b.w,l(a,c),b.h-=a.h,"top"==e?b.t+=a.h:d.top=b.t+b.h+"px"):"left"==e||"right"==e?(c.h=b.h,l(a,c),b.w-=a.w,"left"==e?b.l+=a.w:d.left=b.l+b.w+"px"):"client"!=e&&"center"!=e||l(a,b)})}}}); | |
redis_test.go | package cache_test
import (
"context"
"testing"
"github.com/Masterminds/semver/v3"
"github.com/mediocregopher/radix/v3"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
"github.com/xakep666/licensevalidator/internal/testutil"
"github.com/xakep666/licensevalidator/pkg/cache"
"github.com/xakep666/licensevalidator/pkg/validation"
)
type RedisCacheTestSuite struct {
suite.Suite
licenseResolverMock *validation.LicenseResolverMock
redisContainer testcontainers.Container
redisClient *radix.Pool
cache *cache.RedisCache
}
func (s *RedisCacheTestSuite) TestResolveLicense() {
module := validation.Module{
Name: "test-name",
Version: semver.MustParse("v1.0.0"),
}
license := validation.License{
Name: "MIT License",
SPDXID: "MIT",
}
s.licenseResolverMock.On("ResolveLicense", mock.Anything, module).Return(license, nil).Once()
actualLicense, err := s.cache.ResolveLicense(context.Background(), module)
if s.NoError(err) {
s.Equal(license, actualLicense)
}
// 2nd call should be in cache
actualLicense, err = s.cache.ResolveLicense(context.Background(), module)
if s.NoError(err) {
s.Equal(license, actualLicense)
}
}
func (s *RedisCacheTestSuite) TestHealth() {
s.NoError(s.cache.Check(context.Background()))
}
func (s *RedisCacheTestSuite) SetupSuite() {
var err error
s.redisContainer, err = testcontainers.GenericContainer(context.Background(), testcontainers.GenericContainerRequest{
ProviderType: testcontainers.ProviderDocker,
Started: true,
ContainerRequest: testcontainers.ContainerRequest{
Image: "redis:6-alpine",
WaitingFor: wait.ForListeningPort("6379/tcp"),
ExposedPorts: []string{"6379/tcp"},
},
})
s.Require().NoError(err)
s.redisContainer.FollowOutput(&testutil.TLogConsumer{T: s.T(), Prefix: "redis"})
s.Require().NoError(s.redisContainer.StartLogProducer(context.Background()))
redisEp, err := s.redisContainer.PortEndpoint(context.Background(), "6379/tcp", "")
s.Require().NoError(err)
s.redisClient, err = radix.NewPool("tcp", redisEp, 10)
s.Require().NoError(err)
}
func (s *RedisCacheTestSuite) SetupTest() {
s.licenseResolverMock = new(validation.LicenseResolverMock)
s.cache = &cache.RedisCache{
Backed: cache.Direct{
LicenseResolver: s.licenseResolverMock,
},
Client: s.redisClient,
}
}
func (s *RedisCacheTestSuite) TearDownTest() {
s.licenseResolverMock.AssertExpectations(s.T())
s.Require().NoError(s.redisClient.Do(radix.Cmd(nil, "FLUSHALL")))
}
func (s *RedisCacheTestSuite) TearDownSuite() {
s.Require().NoError(s.redisContainer.Terminate(context.Background()))
}
func | (t *testing.T) {
t.Parallel()
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
suite.Run(t, new(RedisCacheTestSuite))
}
| TestRedisCache_Suite |
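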
example.js | const utils = require("util");
function | () {
console.log("This is an old function!");
}
function newFunction() {
console.log("This is a new function!");
}
exports.oF = utils.deprecate(
oldFunction,
"Function is oldest. Use a newFuinction!"
);
exports.nF = newFunction;
| oldFunction |
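A quick usage sketch for the module above; it assumes the file is saved as example.js next to the caller:
const { oF, nF } = require('./example');
oF(); // logs the message and emits the deprecation warning once
nF(); // plain call, no warning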
custom-context-generator.js | /* @exclude */
/*
@license https://github.com/t2ym/thin-hook/blob/master/LICENSE.md
Copyright (c) 2020, Tetsuya Mori <[email protected]>. All rights reserved.
*/
/* @endexclude */
let hashSalt = '__hashSalt__';
let contexts = {};
hook.contextGenerators.hash = function generateHashContext(astPath) {
const hash = hook.utils.createHash('sha256');
let methodContext = hook.contextGenerators.method(astPath);
hash.update(hashSalt + methodContext);
let hashContext = hash.digest('hex');
contexts[hashContext] = methodContext;
return hashContext;
}
hook.contextGenerators.method2 = function generateMethodContext2(astPath) {
return astPath.map(([ path, node ], index) => node && node.type
? (node.id && node.id.name ? node.id.name : (node.key && node.key.name
? (node.kind === 'get' || node.kind === 'set' ? node.kind + ' ' : node.static ? 'static ' : '') + node.key.name : ''))
: index === 0 ? path : '').filter(p => p).join(',') +
(astPath[astPath.length - 1][1].range ? ':' + astPath[astPath.length - 1][1].range[0] + '-' + astPath[astPath.length - 1][1].range[1] : ''); | } |
|
useTransactionalState.ts | import { useState, useCallback } from "react";
export interface TransactionalState<T> {
committedValue: T;
uncommittedValue: T;
setValue: (value: T) => void;
setCommittedValue: (value: T) => void;
commit: () => void;
rollback: () => void;
}
export default function useTransactionalState<T>(
value: T
): TransactionalState<T> {
const [committedValue, setCommittedValue] = useState(value);
const [uncommittedValue, setUncommittedValue] = useState(value); | const setBoth = useCallback((value: T) => {
setCommittedValue(value);
setUncommittedValue(value);
}, []);
const commit = useCallback(() => {
setCommittedValue(uncommittedValue);
}, [uncommittedValue]);
const rollback = useCallback(() => {
setUncommittedValue(committedValue);
}, [committedValue]);
return {
committedValue,
uncommittedValue,
setValue: setUncommittedValue,
setCommittedValue: setBoth,
commit,
rollback,
};
} | |
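A usage sketch inside a hypothetical React form component: edits go to the draft value, commit keeps them, and rollback restores the last committed value.
const { uncommittedValue, setValue, commit, rollback } = useTransactionalState("");
// <input value={uncommittedValue} onChange={e => setValue(e.target.value)} />
// <button onClick={commit}>Save</button> <button onClick={rollback}>Cancel</button>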
train_eval.py | import os
import numpy as np
import torch
import torch.nn.functional as F
from lib.utils.bbox_transform import decode_bbox_target
from tools.kitti_object_eval_python.evaluate import evaluate as kitti_evaluate
from lib.config import cfg
import lib.utils.kitti_utils as kitti_utils
import lib.utils.iou3d.iou3d_utils as iou3d_utils
from datetime import datetime
from tensorboardX import SummaryWriter
import tqdm
np.random.seed(1024) # set the same seed
def save_kitti_format(sample_id, calib, bbox3d, kitti_output_dir, scores, img_shape):
corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)
img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)
img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape[1] - 1)
img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape[0] - 1)
img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape[1] - 1)
img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape[0] - 1)
img_boxes_w = img_boxes[:, 2] - img_boxes[:, 0]
img_boxes_h = img_boxes[:, 3] - img_boxes[:, 1]
box_valid_mask = np.logical_and(
img_boxes_w < img_shape[1] * 0.8, img_boxes_h < img_shape[0] * 0.8)
kitti_output_file = os.path.join(kitti_output_dir, '%06d.txt' % sample_id)
with open(kitti_output_file, 'w') as f:
for k in range(bbox3d.shape[0]):
if box_valid_mask[k] == 0:
continue
x, z, ry = bbox3d[k, 0], bbox3d[k, 2], bbox3d[k, 6]
beta = np.arctan2(z, x)
alpha = -np.sign(beta) * np.pi / 2 + beta + ry
print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' %
(cfg.CLASSES, alpha, img_boxes[k, 0], img_boxes[k, 1], img_boxes[k, 2], img_boxes[k, 3],
bbox3d[k, 3], bbox3d[k, 4], bbox3d[k,
5], bbox3d[k, 0], bbox3d[k, 1], bbox3d[k, 2],
bbox3d[k, 6], scores[k]), file=f)
def | (model, dataloader, epoch_id, result_dir):
# print("-----------------joint____________________________*******")
np.random.seed(666)
MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
mode = 'EVAL'
final_output_dir = os.path.join(result_dir, 'final_result', 'data')
os.makedirs(final_output_dir, exist_ok=True)
if True:
# print("------------save_result__________________*******")
roi_output_dir = os.path.join(result_dir, 'roi_result', 'data')
refine_output_dir = os.path.join(result_dir, 'refine_result', 'data')
rpn_output_dir = os.path.join(result_dir, 'rpn_result', 'data')
os.makedirs(rpn_output_dir, exist_ok=True)
os.makedirs(roi_output_dir, exist_ok=True)
os.makedirs(refine_output_dir, exist_ok=True)
model.eval()
thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
total_roi_recalled_bbox_list = [0] * 5
dataset = dataloader.dataset
cnt = final_total = total_cls_acc = total_cls_acc_refined = total_rpn_iou = 0
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')
for data in dataloader:
cnt += 1
calib = data['calib']
sample_id, pts_rect, pts_features, pts_input = \
data['sample_id'], data['pts_rect'], data['pts_features'], data['pts_input']
batch_size = len(sample_id)
inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
input_data = {'pts_input': inputs, 'calib': calib}
# model inference
ret_dict = model(input_data)
print(ret_dict.keys())
roi_scores_raw = ret_dict['roi_scores_raw'] # (B, M)
roi_boxes3d = ret_dict['rois'] # (B, M, 7)
seg_result = ret_dict['seg_result'].long() # (B, N)
rcnn_cls = ret_dict['rcnn_cls'].view(
batch_size, -1, ret_dict['rcnn_cls'].shape[1])
rcnn_reg = ret_dict['rcnn_reg'].view(
batch_size, -1, ret_dict['rcnn_reg'].shape[1]) # (B, M, C)
# bounding box regression
anchor_size = MEAN_SIZE
if cfg.RCNN.SIZE_RES_ON_ROI:
assert False
pred_boxes3d = decode_bbox_target(roi_boxes3d.view(-1, 7), rcnn_reg.view(-1, rcnn_reg.shape[-1]),
anchor_size=anchor_size,
loc_scope=cfg.RCNN.LOC_SCOPE,
loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,
num_head_bin=cfg.RCNN.NUM_HEAD_BIN,
get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,
loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,
get_ry_fine=True).view(batch_size, -1, 7)
# scoring
if rcnn_cls.shape[2] == 1:
raw_scores = rcnn_cls # (B, M, 1)
norm_scores = torch.sigmoid(raw_scores)
pred_classes = (norm_scores > cfg.RCNN.SCORE_THRESH).long()
else:
pred_classes = torch.argmax(rcnn_cls, dim=1).view(-1)
cls_norm_scores = F.softmax(rcnn_cls, dim=1)
raw_scores = rcnn_cls[:, pred_classes]
norm_scores = cls_norm_scores[:, pred_classes]
# evaluation
recalled_num = gt_num = rpn_iou = 0
if not False:
if not cfg.RPN.FIXED:
rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']
rpn_cls_label = torch.from_numpy(
rpn_cls_label).cuda(non_blocking=True).long()
gt_boxes3d = data['gt_boxes3d']
for k in range(batch_size):
# calculate recall
cur_gt_boxes3d = gt_boxes3d[k]
tmp_idx = cur_gt_boxes3d.__len__() - 1
while tmp_idx >= 0 and cur_gt_boxes3d[tmp_idx].sum() == 0:
tmp_idx -= 1
if tmp_idx >= 0:
cur_gt_boxes3d = cur_gt_boxes3d[:tmp_idx + 1]
cur_gt_boxes3d = torch.from_numpy(
cur_gt_boxes3d).cuda(non_blocking=True).float()
iou3d = iou3d_utils.boxes_iou3d_gpu(
pred_boxes3d[k], cur_gt_boxes3d)
gt_max_iou, _ = iou3d.max(dim=0)
refined_iou, _ = iou3d.max(dim=1)
for idx, thresh in enumerate(thresh_list):
total_recalled_bbox_list[idx] += (
gt_max_iou > thresh).sum().item()
recalled_num += (gt_max_iou > 0.7).sum().item()
gt_num += cur_gt_boxes3d.shape[0]
total_gt_bbox += cur_gt_boxes3d.shape[0]
# original recall
iou3d_in = iou3d_utils.boxes_iou3d_gpu(
roi_boxes3d[k], cur_gt_boxes3d)
gt_max_iou_in, _ = iou3d_in.max(dim=0)
for idx, thresh in enumerate(thresh_list):
total_roi_recalled_bbox_list[idx] += (
gt_max_iou_in > thresh).sum().item()
if not cfg.RPN.FIXED:
fg_mask = rpn_cls_label > 0
correct = ((seg_result == rpn_cls_label)
& fg_mask).sum().float()
union = fg_mask.sum().float() + (seg_result > 0).sum().float() - correct
rpn_iou = correct / torch.clamp(union, min=1.0)
total_rpn_iou += rpn_iou.item()
disp_dict = {
'mode': mode, 'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox)}
progress_bar.set_postfix(disp_dict)
progress_bar.update()
if True:
# save roi and refine results
roi_boxes3d_np = roi_boxes3d.cpu().numpy()
pred_boxes3d_np = pred_boxes3d.cpu().numpy()
roi_scores_raw_np = roi_scores_raw.cpu().numpy()
raw_scores_np = raw_scores.cpu().numpy()
rpn_cls_np = ret_dict['rpn_cls'].cpu().numpy()
rpn_xyz_np = ret_dict['backbone_xyz'].cpu().numpy()
seg_result_np = seg_result.cpu().numpy()
output_data = np.concatenate((rpn_xyz_np, rpn_cls_np.reshape(batch_size, -1, 1),
seg_result_np.reshape(batch_size, -1, 1)), axis=2)
for k in range(batch_size):
cur_sample_id = sample_id[k]
calib = dataset.get_calib(cur_sample_id)
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, roi_boxes3d_np[k], roi_output_dir,
roi_scores_raw_np[k], image_shape)
save_kitti_format(cur_sample_id, calib, pred_boxes3d_np[k], refine_output_dir,
raw_scores_np[k], image_shape)
output_file = os.path.join(
rpn_output_dir, '%06d.npy' % cur_sample_id)
np.save(output_file, output_data.astype(np.float32))
# scores thresh
inds = norm_scores > cfg.RCNN.SCORE_THRESH
for k in range(batch_size):
cur_inds = inds[k].view(-1)
if cur_inds.sum() == 0:
continue
pred_boxes3d_selected = pred_boxes3d[k, cur_inds]
raw_scores_selected = raw_scores[k, cur_inds]
norm_scores_selected = norm_scores[k, cur_inds]
# NMS thresh
# rotated nms
boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(
pred_boxes3d_selected)
keep_idx = iou3d_utils.nms_gpu(
boxes_bev_selected, raw_scores_selected, cfg.RCNN.NMS_THRESH).view(-1)
pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]
scores_selected = raw_scores_selected[keep_idx]
pred_boxes3d_selected, scores_selected = pred_boxes3d_selected.cpu(
).numpy(), scores_selected.cpu().numpy()
cur_sample_id = sample_id[k]
calib = dataset.get_calib(cur_sample_id)
final_total += pred_boxes3d_selected.shape[0]
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, pred_boxes3d_selected,
final_output_dir, scores_selected, image_shape)
progress_bar.close()
# dump empty files
split_file = os.path.join(dataset.imageset_dir,
'..', '..', 'ImageSets', dataset.split + '.txt')
split_file = os.path.abspath(split_file)
image_idx_list = [x.strip() for x in open(split_file).readlines()]
empty_cnt = 0
for k in range(image_idx_list.__len__()):
cur_file = os.path.join(final_output_dir, '%s.txt' % image_idx_list[k])
if not os.path.exists(cur_file):
with open(cur_file, 'w') as temp_f:
pass
empty_cnt += 1
ret_dict = {'empty_cnt': empty_cnt}
avg_rpn_iou = (total_rpn_iou / max(cnt, 1.0))
avg_cls_acc = (total_cls_acc / max(cnt, 1.0))
avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))
avg_det_num = (final_total / max(len(dataset), 1.0))
ret_dict['rpn_iou'] = avg_rpn_iou
ret_dict['rcnn_cls_acc'] = avg_cls_acc
ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined
ret_dict['rcnn_avg_num'] = avg_det_num
for idx, thresh in enumerate(thresh_list):
cur_roi_recall = total_roi_recalled_bbox_list[idx] / max(
total_gt_bbox, 1.0)
ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_roi_recall
for idx, thresh in enumerate(thresh_list):
cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall
if cfg.TEST.SPLIT != 'test':
name_to_class = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}
ap_result_str, ap_dict = kitti_evaluate(dataset.label_dir, final_output_dir, label_split_file=split_file,
current_class=name_to_class[cfg.CLASSES])
ret_dict.update(ap_dict)
return ap_result_str
| eval_one_epoch_joint |
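A hypothetical call sketch for the evaluation routine above; model, test_loader, and the result directory are produced by the surrounding pipeline, so the names here are placeholders:
model.cuda()
ap_result_str = eval_one_epoch_joint(model, test_loader, epoch_id=50, result_dir='output/eval')
print(ap_result_str)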
ServerExpressController.ts | /* istanbul ignore file: only one method, and not willing to test it right now*/
import { Request } from 'express';
import IServerUserSession from './IServerUserSession';
export default class ServerExpressController {
public static getInstance(): ServerExpressController {
if (!ServerExpressController.instance) {
ServerExpressController.instance = new ServerExpressController(); | private static instance: ServerExpressController = null;
private constructor() { }
public getStackContextFromReq(req: Request, session: IServerUserSession) {
return {
IS_CLIENT: true,
REFERER: req.headers.referer,
UID: session.uid,
SESSION: session,
CLIENT_TAB_ID: req.headers.client_tab_id
};
}
} | }
return ServerExpressController.instance;
}
|
auth.go | //go:generate mockgen --build_flags=--mod=mod -destination=mocks/mockauth_interface.go -package mocks . AuthInterface
package auth
import (
"github.com/mattermost/focalboard/server/model"
"github.com/mattermost/focalboard/server/services/config"
"github.com/mattermost/focalboard/server/services/permissions"
"github.com/mattermost/focalboard/server/services/store"
"github.com/mattermost/focalboard/server/utils"
"github.com/pkg/errors"
)
type AuthInterface interface {
GetSession(token string) (*model.Session, error)
IsValidReadToken(boardID string, readToken string) (bool, error)
DoesUserHaveTeamAccess(userID string, teamID string) bool
}
// Auth authenticates sessions.
type Auth struct {
config *config.Configuration
store store.Store
permissions permissions.PermissionsService
}
// New returns a new Auth.
func | (config *config.Configuration, store store.Store, permissions permissions.PermissionsService) *Auth {
return &Auth{config: config, store: store, permissions: permissions}
}
// GetSession Get a user active session and refresh the session if needed.
func (a *Auth) GetSession(token string) (*model.Session, error) {
if len(token) < 1 {
return nil, errors.New("no session token")
}
session, err := a.store.GetSession(token, a.config.SessionExpireTime)
if err != nil {
return nil, errors.Wrap(err, "unable to get the session for the token")
}
if session.UpdateAt < (utils.GetMillis() - utils.SecondsToMillis(a.config.SessionRefreshTime)) {
_ = a.store.RefreshSession(session)
}
return session, nil
}
// IsValidReadToken validates the read token for a board.
func (a *Auth) IsValidReadToken(boardID string, readToken string) (bool, error) {
sharing, err := a.store.GetSharing(boardID)
if model.IsErrNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
if sharing != nil && (sharing.ID == boardID && sharing.Enabled && sharing.Token == readToken) {
return true, nil
}
return false, nil
}
func (a *Auth) DoesUserHaveTeamAccess(userID string, teamID string) bool {
return a.permissions.HasPermissionToTeam(userID, teamID, model.PermissionViewTeam)
}
| New |
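A minimal wiring sketch for the Auth service above; cfg, st, and perms stand in for the configuration, store, and permissions services the real server builds elsewhere:
var a auth.AuthInterface = auth.New(cfg, st, perms)
session, err := a.GetSession(token)
if err == nil && a.DoesUserHaveTeamAccess(session.UserID, teamID) {
    // proceed with the request
}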
MGun.js | var RegionMStatic_W = 150; // mm (-iOx)
var RegionMStatic_H = 600; // mm
function PrintSuperFishXY_MS(x, y)
{
var el = document.getElementById("MagnetostaticTextArea");
el.value += "&po x=" + x.toPrecision(4) + "," + "y=" + (RegionEStatic_H*fScaleSuperFish - y).toPrecision(4) + " &\r\n";
}
function _moveToMS(x,y, sf_output = true) {
this.moveTo(x*fScaleGUI, y*fScaleGUI); // GUI
if (sf_output) PrintSuperFishXY_MS((x-iOx)*fScaleSuperFish, y*fScaleSuperFish); // SuperFish
}
function | (x,y, sf_output = true) {
this.lineTo(x*fScaleGUI, y*fScaleGUI); // GUI
if (sf_output) PrintSuperFishXY_MS((x-iOx)*fScaleSuperFish,y*fScaleSuperFish); // SuperFish
}
function DrawGunMagnetostatic()
{
var c = document.getElementById("FrontCanvas");
var ctx = c.getContext("2d");
ctx._moveTo = _moveToMS;
ctx._lineTo = _lineToMS;
var el = document.getElementById("MagnetostaticTextArea");
/* #region GUI params */
var Lens1DistToGun = parseFloat(document.getElementById("Lens1DistToGun").value);
var Lens1InnerR = parseFloat(document.getElementById("Lens1InnerR").value);
var Lens1Thickness = parseFloat(document.getElementById("Lens1Thickness").value);
var Lens1Height = parseFloat(document.getElementById("Lens1Height").value);
var Lens1TotCurrent = parseFloat(document.getElementById("Lens1TotCurrent").value);
var Lens1CoreThickness = parseFloat(document.getElementById("Lens1CoreThickness").value);
var Lens2DistToGun = parseFloat(document.getElementById("Lens2DistToGun").value);
var Lens2InnerR = parseFloat(document.getElementById("Lens2InnerR").value);
var Lens2Thickness = parseFloat(document.getElementById("Lens2Thickness").value);
var Lens2Height = parseFloat(document.getElementById("Lens2Height").value);
var Lens2TotCurrent = parseFloat(document.getElementById("Lens2TotCurrent").value);
var Lens2CoreThickness = parseFloat(document.getElementById("Lens2CoreThickness").value);
/* #endregion */
/* #region Print Superfish 1st region params */
el.value = "Magnetostatic problem (Focusing coils)\n";
el.value += "\n";
el.value += "® kprob=0, ! Poisson or Pandira problem\n";
el.value += "xjfact=1.0, ! Magnetostatic problem (current scaler)\n";
el.value += "dx=0.08, ! Mesh interval\n";
el.value += "icylin=1, ! Cylindrical symmetry\n";
el.value += "mode=-1, ! FIXGAM is the default value of the reluctivity for materials with MAT = 2 and higher\n";
el.value += "nbsup=0, ! Dirichlet boundary condition at upper edge\n";
el.value += "nbslo=0, ! Dirichlet boundary condition at lower edge\n";
el.value += "nbsrt=0, ! Dirichlet boundary condition at right edge\n";
el.value += "nbslf=0 & ! Dirichlet boundary condition at left edge\n";
el.value += "\n\n";
/* #endregion */
var iStart = iOy+EGUNHeight; // mm exact height where egun ends
/* #region Superfish 1st region */
el.value += "! Region 1\n";
PrintSuperFishXY_MS(0, 0);
PrintSuperFishXY_MS((RegionMStatic_W-iOx)*fScaleSuperFish, 0);
PrintSuperFishXY_MS((RegionMStatic_W-iOx)*fScaleSuperFish, RegionMStatic_H*fScaleSuperFish);
PrintSuperFishXY_MS(0, RegionMStatic_H*fScaleSuperFish);
PrintSuperFishXY_MS(0, 0);
el.value += "\n\n";
// ctx.strokeStyle = "black";
// ctx.lineWidth = 1;
// ctx.beginPath();
// ctx._moveTo(iOx, 0);
// ctx._lineTo(RegionMStatic_W, 0);
// ctx._lineTo(RegionMStatic_W, RegionMStatic_H);
// ctx._lineTo(iOx, RegionMStatic_H);
// ctx._lineTo(iOx, 0);
// el.value += "\n\n";
// ctx.stroke();
/* #endregion */
/* #region Magnetic Shield */
// // RIGHT
// el.value += "! Magnetic Shield\n";
// el.value += "® mat=2";
// el.value += " &\n";
// ctx.beginPath();
// ctx._moveTo(iOx + 15, iStart);
// ctx._lineTo(iOx + 15 + 2, iStart);
// ctx._lineTo(iOx + 15 + 2, iStart + 9);
// ctx._lineTo(iOx + 15, iStart + 9);
// ctx._lineTo(iOx + 15, iStart);
// ctx.stroke();
// // LEFT
// ctx.beginPath();
// ctx._moveTo(iOx - 15, iStart, false);
// ctx._lineTo(iOx - 15 - 2, iStart, false);
// ctx._lineTo(iOx - 15 - 2, iStart + 9, false);
// ctx._lineTo(iOx - 15, iStart + 9, false);
// ctx._lineTo(iOx - 15, iStart, false);
// ctx.fillStyle = "LightBlue";
// ctx.fill();
// ctx.stroke();
/* #endregion */
/* #region Lens #1 */
// RIGHT
el.value += "\n! Focusing Lens 1\n";
el.value += "® mat=1,cur=";
el.value += Lens1TotCurrent.toFixed(1);
el.value += " &\n";
ctx.beginPath();
ctx._moveTo(iOx + Lens1InnerR, iStart + Lens1DistToGun - Lens1Height/2.0);
ctx._lineTo(iOx + Lens1InnerR + Lens1Thickness, iStart + Lens1DistToGun - Lens1Height/2.0);
ctx._lineTo(iOx + Lens1InnerR + Lens1Thickness, iStart + Lens1DistToGun + Lens1Height/2.0);
ctx._lineTo(iOx + Lens1InnerR, iStart + Lens1DistToGun + Lens1Height/2.0);
ctx._lineTo(iOx + Lens1InnerR, iStart + Lens1DistToGun - Lens1Height/2.0);
ctx.stroke();
// LEFT
ctx.beginPath();
ctx._moveTo(iOx - Lens1InnerR, iStart + Lens1DistToGun - Lens1Height/2.0, false);
ctx._lineTo(iOx - Lens1InnerR - Lens1Thickness, iStart + Lens1DistToGun - Lens1Height/2.0, false);
ctx._lineTo(iOx - Lens1InnerR - Lens1Thickness, iStart + Lens1DistToGun + Lens1Height/2.0, false);
ctx._lineTo(iOx - Lens1InnerR, iStart + Lens1DistToGun + Lens1Height/2.0, false);
ctx._lineTo(iOx - Lens1InnerR, iStart + Lens1DistToGun - Lens1Height/2.0, false);
ctx.fillStyle = "YellowGreen";
ctx.fill();
ctx.stroke();
/* #endregion */
/* #region Lens #1 Core*/
if (Lens1CoreThickness != 0)
{
el.value += "\n! Focusing Lens 1 Core\n";
el.value += "® mat=2";
el.value += " &\n";
var Lens1Outter = Lens1InnerR + Lens1Thickness;
const skeletonThickness = 3; // thickness of a plastic frame
// RIGHT
ctx.beginPath();
ctx._moveTo(iOx + Lens1InnerR - skeletonThickness, iStart + Lens1DistToGun - Lens1Height/2.0 - 2);
ctx._lineTo(iOx + Lens1Outter + 5, iStart + Lens1DistToGun - Lens1Height/2.0 - 2);
ctx._lineTo(iOx + Lens1Outter + 5, iStart + Lens1DistToGun + Lens1Height/2.0 + 2);
ctx._lineTo(iOx + Lens1InnerR - skeletonThickness, iStart + Lens1DistToGun + Lens1Height/2.0 + 2);
ctx._lineTo(iOx + Lens1InnerR - skeletonThickness, iStart + Lens1DistToGun + Lens1Height/2.0 + Lens1CoreThickness + 2);
ctx._lineTo(iOx + Lens1Outter + Lens1CoreThickness + 5, iStart + Lens1DistToGun + Lens1Height/2.0 + Lens1CoreThickness + 2);
ctx._lineTo(iOx + Lens1Outter + Lens1CoreThickness + 5, iStart + Lens1DistToGun - Lens1Height/2.0 - Lens1CoreThickness - 2);
ctx._lineTo(iOx + Lens1InnerR - skeletonThickness, iStart + Lens1DistToGun - Lens1Height/2.0 - Lens1CoreThickness - 2);
ctx._lineTo(iOx + Lens1InnerR - skeletonThickness, iStart + Lens1DistToGun - Lens1Height/2.0 - 2);
ctx.stroke();
// LEFT
ctx.beginPath();
ctx._moveTo(iOx - Lens1InnerR + skeletonThickness, iStart + Lens1DistToGun - Lens1Height/2.0 - 2, false);
ctx._lineTo(iOx - Lens1Outter - 5, iStart + Lens1DistToGun - Lens1Height/2.0 - 2, false);
ctx._lineTo(iOx - Lens1Outter - 5, iStart + Lens1DistToGun + Lens1Height/2.0 + 2, false);
ctx._lineTo(iOx - Lens1InnerR + skeletonThickness, iStart + Lens1DistToGun + Lens1Height/2.0 + 2, false);
ctx._lineTo(iOx - Lens1InnerR + skeletonThickness, iStart + Lens1DistToGun + Lens1Height/2.0 + Lens1CoreThickness + 2, false);
ctx._lineTo(iOx - Lens1Outter - Lens1CoreThickness - 5, iStart + Lens1DistToGun + Lens1Height/2.0 + Lens1CoreThickness + 2, false);
ctx._lineTo(iOx - Lens1Outter - Lens1CoreThickness - 5, iStart + Lens1DistToGun - Lens1Height/2.0 - Lens1CoreThickness - 2, false);
ctx._lineTo(iOx - Lens1InnerR + skeletonThickness, iStart + Lens1DistToGun - Lens1Height/2.0 - Lens1CoreThickness - 2, false);
ctx._lineTo(iOx - Lens1InnerR + skeletonThickness, iStart + Lens1DistToGun - Lens1Height/2.0 - 2, false);
ctx.fillStyle = "Lavender";
ctx.fill();
ctx.stroke();
}
/* #endregion */
/* #region Lens #2 */
if (Lens2TotCurrent > 0)
{
// RIGHT
el.value += "\n! Focusing Lens 2\n";
el.value += "® mat=1,cur=";
el.value += Lens2TotCurrent.toFixed(1);
el.value += " &\n";
ctx.beginPath();
ctx._moveTo(iOx + Lens2InnerR, iStart + Lens2DistToGun - Lens2Height/2.0);
ctx._lineTo(iOx + Lens2InnerR + Lens2Thickness, iStart + Lens2DistToGun - Lens2Height/2.0);
ctx._lineTo(iOx + Lens2InnerR + Lens2Thickness, iStart + Lens2DistToGun + Lens2Height/2.0);
ctx._lineTo(iOx + Lens2InnerR, iStart + Lens2DistToGun + Lens2Height/2.0);
ctx._lineTo(iOx + Lens2InnerR, iStart + Lens2DistToGun - Lens2Height/2.0);
ctx.stroke();
// LEFT
ctx.beginPath();
ctx._moveTo(iOx - Lens2InnerR, iStart + Lens2DistToGun - Lens2Height/2.0, false);
ctx._lineTo(iOx - Lens2InnerR - Lens2Thickness, iStart + Lens2DistToGun - Lens2Height/2.0, false);
ctx._lineTo(iOx - Lens2InnerR - Lens2Thickness, iStart + Lens2DistToGun + Lens2Height/2.0, false);
ctx._lineTo(iOx - Lens2InnerR, iStart + Lens2DistToGun + Lens2Height/2.0, false);
ctx._lineTo(iOx - Lens2InnerR, iStart + Lens2DistToGun - Lens2Height/2.0, false);
ctx.fillStyle = "YellowGreen";
ctx.fill();
ctx.stroke();
/* #endregion */
/* #region Lens #2 Core*/
if (Lens2CoreThickness != 0)
{
// RIGHT
el.value += "\n! Focusing Lens 2 Core\n";
el.value += "® mat=2";
el.value += " &\n";
var Lens2Outter = Lens2InnerR + Lens2Thickness;
ctx.beginPath();
ctx._moveTo(iOx + Lens2InnerR - Lens2CoreThickness, iStart + Lens2DistToGun - Lens2Height/2.0 - Lens2CoreThickness);
ctx._lineTo(iOx + Lens2InnerR - Lens2CoreThickness, iStart + Lens2DistToGun - Lens2Height/2.0);
ctx._lineTo(iOx + Lens2Outter, iStart + Lens2DistToGun - Lens2Height/2.0);
ctx._lineTo(iOx + Lens2Outter, iStart + Lens2DistToGun + Lens2Height/2.0);
ctx._lineTo(iOx + Lens2InnerR - Lens2CoreThickness, iStart + Lens2DistToGun + Lens2Height/2.0);
ctx._lineTo(iOx + Lens2InnerR - Lens2CoreThickness, iStart + Lens2DistToGun + Lens2Height/2.0 + Lens2CoreThickness);
ctx._lineTo(iOx + Lens2Outter + Lens2CoreThickness, iStart + Lens2DistToGun + Lens2Height/2.0 + Lens2CoreThickness);
ctx._lineTo(iOx + Lens2Outter + Lens2CoreThickness, iStart + Lens2DistToGun - Lens2Height/2.0 - Lens2CoreThickness);
ctx._lineTo(iOx + Lens2InnerR - Lens2CoreThickness, iStart + Lens2DistToGun - Lens2Height/2.0 - Lens2CoreThickness);
ctx.stroke();
// LEFT
ctx.beginPath();
ctx._moveTo(iOx - Lens2InnerR + Lens2CoreThickness, iStart + Lens2DistToGun - Lens2Height/2.0 - Lens2CoreThickness, false);
ctx._lineTo(iOx - Lens2InnerR + Lens2CoreThickness, iStart + Lens2DistToGun - Lens2Height/2.0, false);
ctx._lineTo(iOx - Lens2Outter, iStart + Lens2DistToGun - Lens2Height/2.0, false);
ctx._lineTo(iOx - Lens2Outter, iStart + Lens2DistToGun + Lens2Height/2.0, false);
ctx._lineTo(iOx - Lens2InnerR + Lens2CoreThickness, iStart + Lens2DistToGun + Lens2Height/2.0, false);
ctx._lineTo(iOx - Lens2InnerR + Lens2CoreThickness, iStart + Lens2DistToGun + Lens2Height/2.0 + Lens2CoreThickness, false);
ctx._lineTo(iOx - Lens2Outter - Lens2CoreThickness, iStart + Lens2DistToGun + Lens2Height/2.0 + Lens2CoreThickness, false);
ctx._lineTo(iOx - Lens2Outter - Lens2CoreThickness, iStart + Lens2DistToGun - Lens2Height/2.0 - Lens2CoreThickness, false);
ctx._lineTo(iOx - Lens2InnerR + Lens2CoreThickness, iStart + Lens2DistToGun - Lens2Height/2.0 - Lens2CoreThickness, false);
ctx.fillStyle = "Lavender";
ctx.fill();
ctx.stroke();
}
}
/* #endregion */
/* #region Beam Guide */
// RIGHT
el.value += "\n! Beam guide\n";
el.value += "® mat=6 mtid=6";
el.value += " &\n";
ctx.beginPath();
ctx._moveTo(iOx + 15, iStart);
ctx._lineTo(iOx + 22, iStart);
ctx._lineTo(iOx + 22, iStart + 9);
ctx._lineTo(iOx + 24, iStart + 9);
ctx._lineTo(iOx + 24, iStart + 125);
ctx._lineTo(iOx + 27, iStart + 136);
ctx._lineTo(iOx + 50, iStart + 136);
ctx._lineTo(iOx + 50, iStart + 125);
ctx._lineTo(iOx + 55, iStart + 125);
ctx._lineTo(iOx + 55, iStart + 145);
ctx._lineTo(iOx + 31, iStart + 145);
ctx._lineTo(iOx + 31, iStart + 149);
ctx._lineTo(iOx + 20, iStart + 149);
ctx._lineTo(iOx + 15, iStart + 130);
ctx._lineTo(iOx + 15, iStart);
ctx.stroke();
el.value += "\n&mt mtid=6 mu=2 &";
// LEFT
ctx.beginPath();
ctx._moveTo(iOx - 15, iStart, false);
ctx._lineTo(iOx - 22, iStart, false);
ctx._lineTo(iOx - 22, iStart + 9, false);
ctx._lineTo(iOx - 24, iStart + 9, false);
ctx._lineTo(iOx - 24, iStart + 125, false);
ctx._lineTo(iOx - 27, iStart + 136, false);
ctx._lineTo(iOx - 50, iStart + 136, false);
ctx._lineTo(iOx - 50, iStart + 125, false);
ctx._lineTo(iOx - 55, iStart + 125, false);
ctx._lineTo(iOx - 55, iStart + 145, false);
ctx._lineTo(iOx - 31, iStart + 145, false);
ctx._lineTo(iOx - 31, iStart + 149, false);
ctx._lineTo(iOx - 20, iStart + 149, false);
ctx._lineTo(iOx - 15, iStart + 130, false);
ctx._lineTo(iOx - 15, iStart, false);
ctx.fillStyle = "Lavender";
ctx.fill();
ctx.stroke();
/* #endregion */
}
| _lineToMS |
dynamic.py | # pylint: disable=C0103,R0902,R0904,R0914
"""
All dynamic control cards are defined in this file. This includes:
* FREQ
* FREQ1
* FREQ2 (not implemented)
* FREQ3
* FREQ4
* FREQ5 (not implemented)
* NLPCI
* NLPARM
* TSTEP
* TSTEPNL
All cards are BaseCard objects.
"""
from math import log, exp, ceil
import numpy as np
from numpy import unique, hstack
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.cards.base_card import BaseCard
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double, double_or_blank,
string_or_blank, blank, fields, components_or_blank
)
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
class DELAY(BaseCard):
type = 'DELAY'
def __init__(self, sid, nodes, components, delays, comment=''):
"""
+-------+-----+-----------+-----+--------+------+-----+--------+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+===========+=====+========+======+=====+========+=====+
| DELAY | SID | POINT ID1 | C1 | T1 | P2 | C2 | T2 | |
+-------+-----+-----------+-----+--------+------+-----+--------+-----+
"""
if comment:
self.comment = comment
#: Identification number of DELAY entry. (Integer > 0)
self.sid = sid
#: Grid, extra, or scalar point identification number. (Integer > 0)
self.nodes = nodes
#: Component number. (Integers 1 through 6 for grid points; zero or blank for extra
#: or scalar points)
self.components = components
#: Time delay (tau) for designated point Pi and component Ci. (Real)
self.delays = delays
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a DELAY card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
nodes = [integer(card, 2, 'node')]
components = [integer(card, 3, 'components')]
delays = [double_or_blank(card, 4, 'delay')]
assert components[0] in [0, 1, 2, 3, 4, 5, 6], components
if card.field(5):
nodes.append(integer(card, 5, 'node'))
components.append(integer(card, 6, 'components'))
delays.append(double_or_blank(card, 7, 'delay'))
assert components[1] in [0, 1, 2, 3, 4, 5, 6], components
return DELAY(sid, nodes, components, delays, comment=comment)
def add(self, delay):
assert self.sid == delay.sid, 'sid=%s delay.sid=%s' % (self.sid, delay.sid)
if delay.comment:
if hasattr(self, '_comment'):
self._comment += delay.comment
else:
self._comment = delay.comment
self.nodes += delay.nodes
self.components += delay.components
self.delays += delay.delays
def get_delay_at_freq(self, freq):
return self.nodes, self.components, self.delays
#def cross_reference(self, model: BDF) -> None:
#"""
#Cross links the card so referenced cards can be extracted directly
#Parameters
#----------
#model : BDF()
#the BDF object
#"""
#msg = ', which is required by DELAY sid=%s' % self.sid
#self.nodes_ref = model.Node(self.node_ids, msg=msg)
#@property
#def node_id1(self):
#if isinstance(self.nodes[0], integer_types):
#return self.nodes[0]
#return self.nodes_ref[0].nid
#@property
#def node_id2(self):
#if isinstance(self.nodes[1], integer_types):
#return self.nodes[1]
#return self.nodes_ref[1].nid
@property
def node_ids(self):
node_ids = [self.node_id1]
if len(self.components) == 2:
node_ids.append(self.node_id2)
return node_ids
def raw_fields(self):
list_fields = ['DELAY', self.sid]
for nid, comp, delay in zip(self.node_ids, self.components, self.delays):
if isinstance(nid, integer_types):
nidi = nid
else:
nidi = nid.nid
list_fields += [nidi, comp, delay]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
msg = self.comment
node_ids = self.node_ids
if size == 8:
for nid, comp, delay in zip(node_ids, self.components, self.delays):
msg += print_card_8(['DELAY', self.sid, nid, comp, delay])
else:
for nid, comp, delay in zip(node_ids, self.components, self.delays):
msg += print_card_16(['DELAY', self.sid, nid, comp, delay])
return msg
class DPHASE(BaseCard):
type = 'DPHASE'
def __init__(self, sid, nodes, components, phase_leads, comment=''):
"""
+--------+-----+-----------+-----+------+------+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+=====+===========+=====+======+======+=====+=====+=====+
| DPHASE | SID | POINT ID1 | C1 | TH1 | P2 | C2 | TH2 | |
+--------+-----+-----------+-----+------+------+-----+-----+-----+
"""
if comment:
self.comment = comment
self.sid = sid
self.nodes = nodes
self.components = components
self.phase_leads = phase_leads
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a DPHASE card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
nodes = [integer(card, 2, 'node')]
components = [integer(card, 3, 'components')]
phase_leads = [double_or_blank(card, 4, 'phase_lead')]
assert components[0] in [0, 1, 2, 3, 4, 5, 6], components
if card.field(5):
nodes.append(integer(card, 5, 'node'))
components.append(integer(card, 6, 'components'))
phase_leads.append(double_or_blank(card, 7, 'phase_lead'))
assert components[1] in [0, 1, 2, 3, 4, 5, 6], components
return DPHASE(sid, nodes, components, phase_leads, comment=comment)
def add(self, dphase):
assert self.sid == dphase.sid, 'sid=%s dphase.sid=%s' % (self.sid, dphase.sid)
if dphase.comment:
if hasattr(self, '_comment'):
self._comment += dphase.comment
else:
self._comment = dphase.comment
self.nodes += dphase.nodes
self.components += dphase.components
self.phase_leads += dphase.phase_leads
#def cross_reference(self, model: BDF) -> None:
#"""
#Cross links the card so referenced cards can be extracted directly
#Parameters
#----------
#model : BDF()
#the BDF object
#"""
#msg = ', which is required by DPHASE sid=%s' % self.sid
#self.nodes_ref = model.Nodes(self.node_ids, msg=msg)
#@property
#def node_id1(self):
#if isinstance(self.nodes[0], integer_types):
#return self.nodes[0]
#return self.nodes_ref[0].nid
#@property
#def node_id2(self):
#if isinstance(self.nodes[1], integer_types):
#return self.nodes[1]
#return self.nodes_ref[1].nid
@property
def node_ids(self):
node_ids = [self.node_id1]
if len(self.components) == 2:
node_ids.append(self.node_id2)
return node_ids
def raw_fields(self):
list_fields = ['DPHASE', self.sid]
for nid, comp, delay in zip(self.nodes, self.components, self.phase_leads):
if isinstance(nid, integer_types):
nidi = nid
else:
nidi = nid.nid
list_fields += [nidi, comp, delay]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
msg = self.comment
node_ids = self.node_ids
if size == 8:
for nid, comp, delay in zip(node_ids, self.components, self.phase_leads):
msg += print_card_8(['DPHASE', self.sid, nid, comp, delay])
else:
for nid, comp, delay in zip(node_ids, self.components, self.phase_leads):
msg += print_card_16(['DPHASE', self.sid, nid, comp, delay])
return msg
class FREQ(BaseCard):
"""
Defines a set of frequencies to be used in the solution of frequency
response problems.
+------+-----+-----+-----+------+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+======+=====+=====+=====+======+=====+=====+=====+=====+
| FREQ | SID | F1 | F2 | etc. | | | | |
+------+-----+-----+-----+------+-----+-----+-----+-----+
"""
type = 'FREQ'
def __init__(self, sid, freqs, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.freqs = np.unique(freqs)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a FREQ card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
freqs = fields(double, card, 'freq', i=2, j=len(card))
return FREQ(sid, freqs, comment=comment)
def get_freqs(self):
return self.freqs
def add_frequencies(self, freqs):
"""
Combines the frequencies from 1 FREQx object with another.
All FREQi entries with the same frequency set identification numbers
will be used. Duplicate frequencies will be ignored.
Parameters
----------
freqs : ???
the frequencies for a FREQx object
"""
#print("self.freqs = ",self.freqs)
#print("freqs = ",freqs)
self.freqs = unique(hstack([self.freqs, freqs]))
def add_frequency_object(self, freq):
"""
:param freq: a FREQx object
.. seealso:: :func:`add_frequencies`
"""
self.add_frequencies(freq.freqs)
def raw_fields(self):
list_fields = ['FREQ', self.sid] + list(self.freqs)
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class FREQ1(FREQ):
"""
Defines a set of frequencies to be used in the solution of frequency
response problems by specification of a starting frequency, frequency
increment, and the number of increments desired.
+-------+-----+-----+-----+-----+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+=====+=====+=====+=====+=====+=====+=====+
| FREQ1 | SID | F1 | DF | NDF | | | | |
+-------+-----+-----+-----+-----+-----+-----+-----+-----+
.. note:: this card rewrites as a FREQ card
"""
type = 'FREQ1'
def __init__(self, sid, f1, df, ndf, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.f1 = f1
self.df = df
self.ndf = ndf
freqs = []
for i in range(ndf):
freqs.append(f1 + i * df)
self.freqs = unique(freqs)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a FREQ1 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
f1 = double_or_blank(card, 2, 'f1', 0.0)
df = double(card, 3, 'df')
ndf = integer_or_blank(card, 4, 'ndf', 1)
assert len(card) <= 5, 'len(FREQ1 card) = %i\ncard=%s' % (len(card), card)
return FREQ1(sid, f1, df, ndf, comment=comment)
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class FREQ2(FREQ):
"""
Defines a set of frequencies to be used in the solution of frequency
response problems by specification of a starting frequency, final
frequency, and the number of logarithmic increments desired.
+-------+-----+-----+-----+-----+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+=====+=====+=====+=====+=====+=====+=====+
| FREQ2 | SID | F1 | F2 | NDF | | | | |
+-------+-----+-----+-----+-----+-----+-----+-----+-----+
.. note:: this card rewrites as a FREQ card
"""
type = 'FREQ2'
def __init__(self, sid, f1, f2, ndf=1, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.f1 = f1
self.f2 = f2
self.ndf = ndf
d = 1. / ndf * log(f2 / f1)
freqs = []
for i in range(ndf):
freqs.append(f1 * exp(i * d)) # 0 based index
self.freqs = np.unique(freqs)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a FREQ2 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
f1 = double(card, 2, 'f1') # default=0.0 ?
f2 = double(card, 3, 'f2')
ndf = integer_or_blank(card, 4, 'nf', 1)
assert len(card) <= 5, 'len(FREQ2 card) = %i\ncard=%s' % (len(card), card)
return FREQ2(sid, f1, f2, ndf, comment=comment)
#return FREQ(sid, freqs, comment=comment)
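# Illustrative sketch (an assumption, not part of pyNastran's API): the FREQ2
# expansion above is equivalent to this standalone helper, reusing log/exp
# from the module-level math import; it assumes f1 > 0 and ndf >= 1.
def _freq2_points_sketch(f1, f2, ndf):
    """Return the ndf log-spaced frequencies that FREQ2 generates."""
    d = log(f2 / f1) / ndf
    return [f1 * exp(i * d) for i in range(ndf)]
# e.g. _freq2_points_sketch(10., 1000., 2) -> [10.0, 100.0]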
class FREQ3(FREQ):
"""
+-------+-----+------+-------+--------+-----+---------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+=======+=====+======+=======+========+=====+=========+
| FREQ3 | SID | F1 | F2 | TYPE | NEF | CLUSTER |
+-------+-----+------+-------+--------+-----+---------+
| FREQ3 | 6 | 20.0 | 200.0 | LINEAR | 10 | 2.0 |
+-------+-----+------+-------+--------+-----+---------+
"""
type = 'FREQ3'
def __init__(self, sid, f1, f2=None, Type='LINEAR', nef=10, cluster=1.0, comment=''):
if comment:
self.comment = comment
if f2 is None:
f2 = f1
self.sid = sid
self.f1 = f1
self.f2 = f2
self.Type = Type
self.nef = nef
self.cluster = cluster
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
f1 = double(card, 2, 'f1')
f2 = double_or_blank(card, 3, 'f2', f1)
Type = string_or_blank(card, 4, 'Type', 'LINEAR')
nef = integer_or_blank(card, 5, 'nef', 10)
cluster = double_or_blank(card, 6, 'cluster', 1.0)
return FREQ3(sid, f1, f2, Type, nef, cluster, comment=comment)
def raw_fields(self):
return ['FREQ3', self.sid, self.f1, self.f2, self.Type, self.nef, self.cluster]
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class FREQ4(FREQ):
"""
Defines a set of frequencies used in the solution of modal frequency
response problems by specifying the amount of 'spread' around each natural
frequency and the number of equally spaced excitation frequencies within
the spread.
+-------+-----+-----+-----+------+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+=====+=====+======+=====+=====+=====+=====+
| FREQ4 | SID | F1 | F2 | FSPD | NFM | | | |
+-------+-----+-----+-----+------+-----+-----+-----+-----+
.. note:: this card rewrites as a FREQ card
.. todo:: not done...
"""
type = 'FREQ4'
def __init__(self, sid, f1, f2, fspread, nfm, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.f1 = f1
self.f2 = f2
self.fspread = fspread
self.nfm = nfm
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a FREQ4 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
f1 = double_or_blank(card, 2, 'f1', 0.0)
f2 = double_or_blank(card, 3, 'f2', 1.e20)
fspread = double_or_blank(card, 4, 'fspd', 0.1)
nfm = integer_or_blank(card, 5, 'nfm', 3)
assert len(card) <= 6, 'len(FREQ4 card) = %i\ncard=%s' % (len(card), card)
return FREQ4(sid, f1, f2, fspread, nfm, comment=comment)
def raw_fields(self):
list_fields = ['FREQ4', self.sid, self.f1, self.f2, self.fspread,
self.nfm]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
#class FREQ5(FREQ):
#type = 'FREQ5'
#def __init__(self, card=None, data=None, comment=''):
#if comment:
# self.comment = comment
#raise NotImplementedError()
#def write_card(self, size: int=8, is_double: bool=False) -> str:
#card = self.repr_fields()
#if size == 8:
#return self.comment + print_card_8(card)
#return self.comment + print_card_16(card)
class NLPARM(BaseCard):
"""
Defines a set of parameters for nonlinear static analysis iteration
strategy.
+--------+--------+------+------+---------+-------+---------+---------+--------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+========+======+======+=========+=======+=========+=========+========+
| NLPARM | ID | NINC | DT | KMETHOD | KSTEP | MAXITER | CONV | INTOUT |
+--------+--------+------+------+---------+-------+---------+---------+--------+
| | ESPU | EPSP | EPSW | MAXDIV | MAXQN | MAXLS | FSTRESS | LSTOL |
+--------+--------+------+------+---------+-------+---------+---------+--------+
| | MAXBIS | | | | MAXR | | RTOLB | |
+--------+--------+------+------+---------+-------+---------+---------+--------+
"""
type = 'NLPARM'
def __init__(self, nlparm_id, ninc=10, dt=0.0, kmethod='AUTO', kstep=5,
max_iter=25, conv='PW', int_out='NO',
eps_u=0.01, eps_p=0.01, eps_w=0.01, max_div=3, max_qn=None, max_ls=4,
fstress=0.2, ls_tol=0.5, max_bisect=5, max_r=20., rtol_b=20., comment=''):
if comment:
self.comment = comment
self.nlparm_id = nlparm_id
self.ninc = ninc
self.dt = dt
self.kmethod = kmethod
self.kstep = kstep
self.max_iter = max_iter
self.conv = conv
self.int_out = int_out
# line 2
self.eps_p = eps_p
self.eps_u = eps_u
self.eps_w = eps_w
self.max_div = max_div
self.max_qn = max_qn
self.max_ls = max_ls
self.fstress = fstress
self.ls_tol = ls_tol
# line 3
self.max_bisect = max_bisect
self.max_r = max_r
self.rtol_b = rtol_b
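# default mirrors add_card below: max_qn = 0 for the PFNT method, otherwise max_iter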
if self.max_qn is None:
if kmethod == 'PFNT':
self.max_qn = 0
else:
self.max_qn = max_iter
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a NLPARM card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
nlparm_id = integer(card, 1, 'nlparm_id')
ninc = integer_or_blank(card, 2, 'ninc', 10)
dt = double_or_blank(card, 3, 'dt', 0.0)
kmethod = string_or_blank(card, 4, 'kmethod', 'AUTO')
kstep = integer_or_blank(card, 5, 'kstep', 5)
max_iter = integer_or_blank(card, 6, 'max_iter', 25)
conv = string_or_blank(card, 7, 'conv', 'PW')
int_out = string_or_blank(card, 8, 'intOut', 'NO')
# line 2
eps_u = double_or_blank(card, 9, 'eps_u', 0.01)
eps_p = double_or_blank(card, 10, 'eps_p', 0.01)
eps_w = double_or_blank(card, 11, 'eps_w', 0.01)
max_div = integer_or_blank(card, 12, 'max_div', 3)
if kmethod == 'PFNT':
max_qn = integer_or_blank(card, 13, 'max_qn', 0)
else:
max_qn = integer_or_blank(card, 13, 'max_qn', max_iter)
max_ls = integer_or_blank(card, 14, 'max_ls', 4)
fstress = double_or_blank(card, 15, 'fstress', 0.2)
ls_tol = double_or_blank(card, 16, 'ls_tol', 0.5)
# line 3
max_bisect = integer_or_blank(card, 17, 'max_bisect', 5)
max_r = double_or_blank(card, 21, 'max_r', 20.)
rtol_b = double_or_blank(card, 23, 'rtol_b', 20.)
assert len(card) <= 24, 'len(NLPARM card) = %i\ncard=%s' % (len(card), card)
return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,
int_out, eps_u, eps_p, eps_w, max_div,
max_qn, max_ls, fstress,
ls_tol, max_bisect, max_r,
rtol_b, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a NLPARM card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv, int_out, eps_u, eps_p,
eps_w, max_div, max_qn, max_ls, fstress, ls_tol, max_bisect, max_r,
rtol_b) = data
if kmethod == 1:
kmethod = 'AUTO'
elif kmethod == 2:
kmethod = 'ITER'
elif kmethod == 4:
kmethod = 'SEMI'
elif kmethod == 3:
kmethod = 'ADAPT'
else:
msg = 'nlparm_id=%s kmethod=%r data=%s' % (nlparm_id, kmethod, data)
raise NotImplementedError(msg)
if conv == 1:
conv = 'W'
elif conv == 2:
conv = 'P'
elif conv == 3:
conv = 'PW'
elif conv == 4:
conv = 'U'
elif conv == 5:
conv = 'UW'
elif conv == 6:
conv = 'UP'
elif conv == 7:
conv = 'UPW'
else:
msg = 'nlparm_id=%s conv=%r data=%s' % (nlparm_id, conv, data)
raise NotImplementedError(msg)
if int_out == 0:
int_out = 'NO'
elif int_out == 1:
int_out = 'YES'
elif int_out == 2:
int_out = 'ALL'
else:
msg = 'nlparm_id=%s int_out=%r data=%s' % (nlparm_id, int_out, data)
raise NotImplementedError(msg)
return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,
int_out, eps_u, eps_p, eps_w, max_div,
max_qn, max_ls, fstress,
ls_tol, max_bisect, max_r,
rtol_b, comment=comment)
def raw_fields(self):
list_fields = ['NLPARM', self.nlparm_id, self.ninc, self.dt, self.kmethod,
self.kstep, self.max_iter, self.conv, self.int_out, self.eps_u,
self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,
self.fstress, self.ls_tol, self.max_bisect, None, None, None,
self.max_r, None, self.rtol_b]
return list_fields
def repr_fields(self):
|
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card) # having trouble with double precision...
return self.comment + print_card_16(card)
class NLPCI(BaseCard):
type = 'NLPCI'
def __init__(self, nlpci_id, Type='CRIS', minalr=0.25, maxalr=4.,
scale=0., desiter=12, mxinc=20, comment=''):
if comment:
self.comment = comment
self.nlpci_id = nlpci_id
self.Type = Type
self.minalr = minalr
self.maxalr = maxalr
self.scale = scale
self.desiter = desiter
self.mxinc = mxinc
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a NLPCI card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
nlpci_id = integer(card, 1, 'nlpci_id')
Type = string_or_blank(card, 2, 'Type', 'CRIS')
minalr = double_or_blank(card, 3, 'minalr', 0.25)
maxalr = double_or_blank(card, 4, 'maxalr', 4.0)
scale = double_or_blank(card, 5, 'scale', 0.0)
blank(card, 6, 'blank')
desiter = integer_or_blank(card, 7, 'desiter', 12)
mxinc = integer_or_blank(card, 8, 'mxinc', 20)
return NLPCI(nlpci_id, Type=Type, minalr=minalr, maxalr=maxalr,
scale=scale, desiter=desiter, mxinc=mxinc, comment=comment)
def raw_fields(self):
list_fields = ['NLPCI', self.nlpci_id, self.Type, self.minalr,
self.maxalr, self.scale, None, self.desiter, self.mxinc]
return list_fields
def repr_fields(self):
#minalr = set_blank_if_default(self.minalr, 0.25)
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class TF(BaseCard):
"""
Defines a dynamic transfer function of the form:
(B0 + B1*p + B2*p^2)*ud + sum(A0_i + A1_i*p + A2_i*p^2)*ui = 0
+----+-----+-----+------+------+------+--------+----+----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+====+=====+=====+======+======+======+========+====+====+
| TF | SID | GD | CD | B0 | B1 | B2 | | |
+----+-----+-----+------+------+------+--------+----+----+
| | G_1 | C_1 | A0_1 | A1_1 | A2_1 | etc. | | |
+----+-----+-----+------+------+------+--------+----+----+
"""
type = 'TF'
def __init__(self, sid, nid0, c, b0, b1, b2, nids, components, a, comment=''):
if comment:
self.comment = comment
self.sid = sid
self.nid0 = nid0
self.c = c
self.b0 = b0
self.b1 = b1
self.b2 = b2
self.nids = nids
self.components = components
self.a = a
def validate(self):
pass
#assert len(self.grids1) > 0, 'ngrids1=%s\n%s' % (len(self.grids1), str(self))
#def cross_reference(self, model: BDF) -> None:
#pass
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a TF card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
nid0 = integer(card, 2, 'nid0')
# component 0 means an SPOINT/EPOINT
c = components_or_blank(card, 3, 'components_0', 0)
b0 = double_or_blank(card, 4, 'b0', 0.)
b1 = double_or_blank(card, 5, 'b1', 0.)
b2 = double_or_blank(card, 6, 'b2', 0.)
nfields = len(card) - 9
nrows = nfields // 8
if nfields % 8 > 0:
nrows += 1
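# each 8-field continuation row carries one (grid, component, A0, A1, A2) quintuple in its first five fields; the trailing three fields are blank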
nids = []
components = []
a = []
for irow in range(nrows):
j = irow * 8 + 9
#ifield = irow + 1
nid = integer(card, j, 'grid_%i' % (irow + 1))
component = components_or_blank(card, j + 1, 'components_%i' % (irow + 1), 0)
a0 = double_or_blank(card, j + 2, 'a0_%i' % (irow + 1), 0.)
a1 = double_or_blank(card, j + 3, 'a1_%i' % (irow + 1), 0.)
a2 = double_or_blank(card, j + 4, 'a2_%i' % (irow + 1), 0.)
nids.append(nid)
components.append(component)
a.append([a0, a1, a2])
return TF(sid, nid0, c, b0, b1, b2, nids, components, a,
comment=comment)
def raw_fields(self):
list_fields = ['TF', self.sid, self.nid0, self.c, self.b0, self.b1, self.b2, None, None]
for grid, c, (a0, a1, a2) in zip(self.nids, self.components, self.a):
list_fields += [grid, c, a0, a1, a2, None, None, None]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
# double precision?
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class TSTEP(BaseCard):
"""
Transient Time Step
Defines time step intervals at which a solution will be generated and
output in transient analysis.
+-------+------+------+------+------+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+======+======+======+======+=====+=====+=====+=====+
| TSTEP | SID | N1 | DT1 | NO1 | | | | |
+-------+------+------+------+------+-----+-----+-----+-----+
| | | N2 | DT2 | NO2 | | | | |
+-------+------+------+------+------+-----+-----+-----+-----+
| | | etc. | | | | | | |
+-------+------+------+------+------+-----+-----+-----+-----+
+-------+------+------+------+------+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+======+======+======+======+=====+=====+=====+=====+
| TSTEP | 101 | 9000 | .001 | 9000 | | | | |
+-------+------+------+------+------+-----+-----+-----+-----+
| | | 1000 | .001 | 1 | | | | |
+-------+------+------+------+------+-----+-----+-----+-----+
"""
type = 'TSTEP'
def __init__(self, sid, N, DT, NO, comment=''):
"""
Creates a TSTEP card
Parameters
----------
sid : int
the time step id
N : List[int/None]
???
DT : List[float/None]
???
NO : List[int/None]
???
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
self.sid = sid
#: Number of time steps of value DTi. (Integer > 1)
self.N = N
#: Time increment (float)
self.DT = DT
#: Skip factor for output. Every NOi-th step will be saved for output (default=1)
self.NO = NO
def validate(self):
assert len(self.N) == len(self.DT), 'N=%s DT=%s' % (self.N, self.DT)
assert len(self.N) == len(self.NO), 'N=%s NO=%s' % (self.N, self.NO)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a TSTEP card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
N = []
DT = []
NO = []
nrows = int(ceil((len(card) - 1.) / 8.))
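# each 8-field row contributes one (Ni, DTi, NOi) triple from its first three data fields; blanks default to N=1, dt=0., NO=1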
for i in range(nrows):
n = 8 * i + 1
ni = integer_or_blank(card, n + 1, 'N' + str(i), 1)
dt = double_or_blank(card, n + 2, 'dt' + str(i), 0.)
no = integer_or_blank(card, n + 3, 'NO' + str(i), 1)
N.append(ni)
DT.append(dt)
NO.append(no)
return TSTEP(sid, N, DT, NO, comment=comment)
def raw_fields(self):
list_fields = ['TSTEP', self.sid]
for (N, dt, no) in zip(self.N, self.DT, self.NO):
list_fields += [N, dt, no, None, None, None, None, None]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class TSTEPNL(BaseCard):
"""
Defines parametric controls and data for nonlinear transient structural or
heat transfer analysis. TSTEPNL is intended for SOLs 129, 159, and 600.
Parameters for Nonlinear Transient Analysis.
+---------+--------+--------+-------+--------+--------+-------+---------+------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=========+========+========+=======+========+========+=======+=========+======+
| TSTEPNL | ID | NDT | DT | NO | METHOD | KSTEP | MAXITER | CONV |
+---------+--------+--------+-------+--------+--------+-------+---------+------+
| | ESPU | EPSP | EPSW | MAXDIV | MAXQN | MAXLS | FSTRESS | |
+---------+--------+--------+-------+--------+--------+-------+---------+------+
| | MAXBIS | ADJUST | MSTEP | RB | MAXR | UTOL | RTOLB | |
+---------+--------+--------+-------+--------+--------+-------+---------+------+
method = None for NX, but apparently TSTEP as well, which is not in the QRG
"""
type = 'TSTEPNL'
allowed_methods = ['AUTO', 'ITER', 'ADAPT', 'SEMI', 'FNT', 'PFNT', # MSC
'TSTEP'] # NX
def __init__(self, sid, ndt, dt, no, method='ADAPT', kstep=None,
max_iter=10, conv='PW', eps_u=1.e-2, eps_p=1.e-3,
eps_w=1.e-6, max_div=2, max_qn=10, max_ls=2,
fstress=0.2, max_bisect=5, adjust=5, mstep=None,
rb=0.6, max_r=32., utol=0.1, rtol_b=20.,
min_iter=None, comment=''):
"""
Creates a TSTEPNL card
Parameters
----------
sid : int
the time step id
ndt : ???
???
dt : ???
???
no : ???
???
eps_u : float; default=1.e-2
???
eps_p : float; default=1.e-3
???
eps_w : float; default=1.e-6
???
max_div : int; default=2
???
max_qn : int; default=10
???
max_ls : int; default=2
???
fstress : float; default=0.2
???
max_bisect : int; default=5
???
adjust : int; default=5
???
mstep : int; default=None
???
rb : float; default=0.6
???
max_r : float; default=32.
???
utol : float; default=0.1
???
rtol_b : float; default=20.
???
min_iter : int; default=None
not listed in all QRGs
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
# line 1
self.sid = sid
self.ndt = ndt
self.dt = dt
self.no = no
self.method = method
self.kstep = kstep
self.max_iter = max_iter
self.conv = conv
self.eps_u = eps_u
self.eps_p = eps_p
self.eps_w = eps_w
self.max_div = max_div
self.max_qn = max_qn
self.max_ls = max_ls
self.fstress = fstress
# line 3
self.max_bisect = max_bisect
self.adjust = adjust
self.mstep = mstep
self.rb = rb
self.max_r = max_r
self.utol = utol
self.rtol_b = rtol_b
self.min_iter = min_iter
assert self.ndt >= 3
assert self.dt > 0.
def validate(self):
if self.method not in self.allowed_methods:
msg = 'method=%r allowed_methods=[%s]' % (
self.method, ', '.join(self.allowed_methods))
raise ValueError(msg)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a TSTEPNL card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
ndt = integer(card, 2, 'ndt')
dt = double(card, 3, 'dt')
no = integer_or_blank(card, 4, 'no', 1)
#: .. note:: not listed in all QRGs
method = string_or_blank(card, 5, 'method', 'ADAPT')
if method == 'ADAPT':
kstep = integer_or_blank(card, 6, 'kStep', 2)
elif method == 'ITER':
kstep = integer_or_blank(card, 6, 'kStep', 10)
elif method in ['AUTO', 'TSTEP', 'SEMI']:
kstep = None
#kstep = blank(card, 6, 'kStep') #: .. todo:: not blank
else:
msg = 'invalid TSTEPNL Method. method=%r; allowed_methods=[%s]' % (
method, ', '.join(cls.allowed_methods))
raise RuntimeError(msg)
max_iter = integer_or_blank(card, 7, 'maxIter', 10)
conv = string_or_blank(card, 8, 'conv', 'PW')
# line 2
eps_u = double_or_blank(card, 9, 'epsU', 1.E-2)
eps_p = double_or_blank(card, 10, 'epsP', 1.E-3)
eps_w = double_or_blank(card, 11, 'epsW', 1.E-6)
max_div = integer_or_blank(card, 12, 'maxDiv', 2)
max_qn = integer_or_blank(card, 13, 'maxQn', 10)
max_ls = integer_or_blank(card, 14, 'MaxLs', 2)
fstress = double_or_blank(card, 15, 'fStress', 0.2)
# line 3
max_bisect = integer_or_blank(card, 17, 'maxBisect', 5)
adjust = integer_or_blank(card, 18, 'adjust', 5)
mstep = integer_or_blank(card, 19, 'mStep')
rb = double_or_blank(card, 20, 'rb', 0.6)
max_r = double_or_blank(card, 21, 'maxR', 32.)
utol = double_or_blank(card, 22, 'uTol', 0.1)
rtol_b = double_or_blank(card, 23, 'rTolB', 20.)
# not listed in all QRGs
min_iter = integer_or_blank(card, 24, 'minIter')
assert len(card) <= 25, 'len(TSTEPNL card) = %i\ncard=%s' % (len(card), card)
return TSTEPNL(
sid, ndt, dt, no, method, kstep, max_iter, conv,
eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,
max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,
comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a TSTEPNL card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
(sid, ndt, dt, no, method, kstep, max_iter, conv, eps_u, eps_p, eps_w,
max_div, max_qn, max_ls, fstress, max_bisect,
adjust, mstep, rb, max_r, utol, rtol_b) = data
if method == 1:
method = 'AUTO'
elif method == 3:
method = 'ADAPT'
else:
raise NotImplementedError('tstepnl=%s method=%r data=%s' % (sid, method, data))
if conv == 3:
conv = 'PW'
elif conv == 4:
conv = 'U'
#elif conv == 3:
#conv = 'ADAPT'
else:
raise NotImplementedError('tstepnl=%s conv=%r data=%s' % (sid, conv, data))
min_iter = None # not listed in DMAP 2005
return TSTEPNL(
sid, ndt, dt, no, method, kstep, max_iter, conv,
eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,
max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,
comment=comment)
#self.sid = sid
#self.ndt = ndt
#self.dt = dt
#self.no = no
#self.method = method
#self.kStep = kStep
#self.maxIter = maxIter
#self.conv = conv
## line 2
#self.epsU = epsU
#self.epsP = epsP
#self.epsW = epsW
#self.maxDiv = maxDiv
#self.maxQn = maxQn
#self.MaxLs = maxLs
#self.fStress = fStress
## line 3
#self.maxBisect = maxBisect
#self.adjust = adjust
#self.mStep = mStep
#self.rb = rb
#self.maxR = maxR
#self.uTol = uTol
#self.rTolB = rTolB
def raw_fields(self):
list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, self.no,
self.method, self.kstep, self.max_iter, self.conv, self.eps_u,
self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,
self.fstress, None, self.max_bisect, self.adjust, self.mstep,
self.rb, self.max_r, self.utol, self.rtol_b, self.min_iter]
return list_fields
def repr_fields(self):
#no = set_blank_if_default(self.no,1)
no = self.no
method = set_blank_if_default(self.method, 'ADAPT')
kstep = self.kstep
#if self.method == 'ADAPT':
#kStep = set_blank_if_default(self.kStep, 2)
#elif self.method == 'ITER':
#kStep = set_blank_if_default(self.kStep, 10)
#else:
#msg = 'invalid TSTEPNL Method. method=|%s|' %(self.method)
#raise RuntimeError(msg)
#maxIter = set_blank_if_default(self.maxIter, 10)
conv = set_blank_if_default(self.conv, 'PW')
eps_u = set_blank_if_default(self.eps_u, 1e-2)
eps_p = set_blank_if_default(self.eps_p, 1e-3)
eps_w = set_blank_if_default(self.eps_w, 1e-6)
max_div = set_blank_if_default(self.max_div, 2)
max_qn = set_blank_if_default(self.max_qn, 10)
max_ls = set_blank_if_default(self.max_ls, 2)
fstress = set_blank_if_default(self.fstress, 0.2)
max_bisect = set_blank_if_default(self.max_bisect, 5)
adjust = set_blank_if_default(self.adjust, 5)
rb = set_blank_if_default(self.rb, 0.6)
max_r = set_blank_if_default(self.max_r, 32.)
utol = set_blank_if_default(self.utol, 0.1)
rtol_b = set_blank_if_default(self.rtol_b, 20.)
list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, no, method,
kstep, self.max_iter, conv, eps_u, eps_p, eps_w, max_div, max_qn,
max_ls, fstress, None, max_bisect, adjust, self.mstep, rb,
max_r, utol, rtol_b, self.min_iter]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
| ninc = set_blank_if_default(self.ninc, 10)
dt = set_blank_if_default(self.dt, 0.0)
kmethod = set_blank_if_default(self.kmethod, 'AUTO')
kstep = set_blank_if_default(self.kstep, 5)
max_iter = set_blank_if_default(self.max_iter, 25)
conv = set_blank_if_default(self.conv, 'PW')
int_out = set_blank_if_default(self.int_out, 'NO')
eps_u = set_blank_if_default(self.eps_u, 0.01)
eps_p = set_blank_if_default(self.eps_p, 0.01)
eps_w = set_blank_if_default(self.eps_w, 0.01)
max_div = set_blank_if_default(self.max_div, 3)
max_qn = set_blank_if_default(self.max_qn, self.max_iter)
max_ls = set_blank_if_default(self.max_ls, 4)
fstress = set_blank_if_default(self.fstress, 0.2)
ls_tol = set_blank_if_default(self.ls_tol, 0.5)
max_bisect = set_blank_if_default(self.max_bisect, 5)
max_r = set_blank_if_default(self.max_r, 20.)
rtol_b = set_blank_if_default(self.rtol_b, 20.)
list_fields = ['NLPARM', self.nlparm_id, ninc, dt, kmethod, kstep, max_iter,
conv, int_out, eps_u, eps_p, eps_w, max_div, max_qn, max_ls,
fstress, ls_tol, max_bisect, None, None, None, max_r, None,
rtol_b]
return list_fields |
navicontrol.py | # created by atom
import math
import numpy as np
from selfdrive.config import Conversions as CV
from selfdrive.car.hyundai.values import Buttons
from common.numpy_fast import clip, interp
from cereal import log
import cereal.messaging as messaging
from common.params import Params
import common.log as trace1
import common.MoveAvg as mvAvg
LaneChangeState = log.LateralPlan.LaneChangeState
class NaviControl():
def __init__(self, p=None):
self.p = p
self.sm = messaging.SubMaster(['liveNaviData', 'lateralPlan', 'radarState', 'controlsState', 'liveMapData'])
self.btn_cnt = 0
self.seq_command = 0
self.target_speed = 0
self.set_point = 0
self.wait_timer2 = 0
self.wait_timer3 = 0
self.moveAvg = mvAvg.MoveAvg()
self.gasPressed_old = 0
self.map_spdlimit_offset = int(Params().get("OpkrSpeedLimitOffset", encoding="utf8"))
self.map_spdlimit_offset_option = int(Params().get("OpkrSpeedLimitOffsetOption", encoding="utf8"))
self.safetycam_decel_dist_gain = int(Params().get("SafetyCamDecelDistGain", encoding="utf8"))
self.map_speed_block = False
self.map_speed_dist = 0
self.map_speed = 0
self.onSpeedControl = False
self.curvSpeedControl = False
self.ctrl_speed = 0
self.vision_curv_speed = [int(Params().get("VCurvSpeed30", encoding="utf8")), int(Params().get("VCurvSpeed50", encoding="utf8")),
int(Params().get("VCurvSpeed70", encoding="utf8")), int(Params().get("VCurvSpeed90", encoding="utf8"))]
self.osm_curv_speed_offset = int(Params().get("OCurvOffset", encoding="utf8"))
self.osm_wait_timer = 0
self.stock_navi_info_enabled = Params().get_bool("StockNaviSpeedEnabled")
self.osm_speedlimit_enabled = Params().get_bool("OSMSpeedLimitEnable")
self.speedlimit_decel_off = Params().get_bool("SpeedLimitDecelOff")
self.curv_decel_option = int(Params().get("CurvDecelOption", encoding="utf8"))
self.na_timer = 0
self.t_interval = 7
def update_lateralPlan(self):
self.sm.update(0)
path_plan = self.sm['lateralPlan']
return path_plan
def button_status(self, CS):
if not CS.cruise_active or CS.cruise_buttons != Buttons.NONE:
self.wait_timer2 = 80
elif self.wait_timer2:
self.wait_timer2 -= 1
else:
return 1
return 0
# button accel/decel control
def switch(self, seq_cmd):
self.case_name = "case_" + str(seq_cmd)
self.case_func = getattr(self, self.case_name, self.case_default)
return self.case_func()
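# dispatch seq_command N to the matching case_N method; unknown values fall back to case_default, which resets the sequence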
def reset_btn(self):
if self.seq_command != 3:
self.seq_command = 0
def case_default(self):
self.seq_command = 0
return None
def case_0(self):
self.btn_cnt = 0
self.target_speed = self.set_point
delta_speed = round(self.target_speed - self.VSetDis)
if delta_speed > 0:
self.seq_command = 1 # go to case_1
elif delta_speed < 0:
self.seq_command = 2 # go to case_2
return None
def case_1(self): # acc
btn_signal = Buttons.RES_ACCEL
self.btn_cnt += 1
if self.target_speed == self.VSetDis:
self.btn_cnt = 0
self.seq_command = 3 # go to case_3
elif self.btn_cnt > 5:
self.btn_cnt = 0
self.seq_command = 3 # go to case_3
return btn_signal
def case_2(self): # dec
btn_signal = Buttons.SET_DECEL
self.btn_cnt += 1
if self.target_speed == self.VSetDis:
self.btn_cnt = 0
self.seq_command = 3 # go to case_3
elif self.btn_cnt > 5:
self.btn_cnt = 0
self.seq_command = 3 # go to case_3
return btn_signal
def case_3(self): # button-off hold time. If cruise speed-control errors occur, try tweaking the value below: a larger value makes speed changes slower, a smaller one makes them faster.
btn_signal = None # Buttons.NONE
self.btn_cnt += 1
#if self.btn_cnt == 1:
# btn_signal = Buttons.NONE
if self.btn_cnt > self.t_interval: # wait a fixed interval after the button press (required)
self.seq_command = 0 # go to case_0 (run the next command)
return btn_signal
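# State machine summary: case_0 compares the target with the displayed set speed and branches to case_1 (RES_ACCEL) or case_2 (SET_DECEL); those hold the button until the set speed matches or 5 ticks elapse, then case_3 releases it for t_interval ticks before case_0 runs again.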
def ascc_button_control(self, CS, set_speed):
self.set_point = max(20 if CS.is_set_speed_in_mph else 30, set_speed)
self.curr_speed = CS.out.vEgo * CV.MS_TO_KPH
self.VSetDis = round(CS.VSetDis)
btn_signal = self.switch(self.seq_command)
return btn_signal
def get_navi_speed(self, sm, CS, cruiseState_speed):
cruise_set_speed_kph = cruiseState_speed
v_ego_kph = CS.out.vEgo * CV.MS_TO_KPH
v_ego_mph = CS.out.vEgo * CV.MS_TO_MPH
self.liveNaviData = sm['liveNaviData']
# speedLimit = self.liveNaviData.speedLimit
# speedLimitDistance = self.liveNaviData.speedLimitDistance #speedLimitDistance
# safetySign = self.liveNaviData.safetySign
#mapValid = self.liveNaviData.mapValid
#trafficType = self.liveNaviData.trafficType
#if not mapValid or trafficType == 0:
# return cruise_set_speed_kph
if not self.speedlimit_decel_off:
if CS.map_enabled and self.liveNaviData.safetySign == 124: # speed bump ahead: decelerate progressively toward a 30 km/h limit, scaled with the current speed
cruise_set_speed_kph = interp(v_ego_kph, [40, 60, 80], [30, 35, 40])
self.onSpeedControl = True
elif int(self.sm['liveMapData'].speedLimit) > 19 and self.osm_speedlimit_enabled and not self.sm['controlsState'].osmOffSpdLimit: # osm speedlimit
if self.stock_navi_info_enabled and CS.safety_sign > 19:
spdTarget = min(self.sm['liveMapData'].speedLimit, CS.safety_sign)
else:
spdTarget = self.sm['liveMapData'].speedLimit
if self.map_spdlimit_offset_option == 0:
cruise_set_speed_kph = spdTarget + round(spdTarget*0.01*self.map_spdlimit_offset)
else:
cruise_set_speed_kph = spdTarget + self.map_spdlimit_offset
if cruise_set_speed_kph+1 < v_ego_mph and CS.is_set_speed_in_mph and not CS.out.gasPressed:
self.onSpeedControl = True
elif cruise_set_speed_kph+1 < v_ego_kph and not CS.is_set_speed_in_mph and not CS.out.gasPressed:
self.onSpeedControl = True
else:
self.onSpeedControl = False
elif CS.map_enabled and self.liveNaviData.speedLimit > 19: # mappy speedlimit
self.map_speed_dist = max(0, self.liveNaviData.speedLimitDistance - 30)
self.map_speed = self.liveNaviData.speedLimit
if self.map_speed_dist > 1250:
self.map_speed_block = True
else:
self.map_speed_block = False
cam_distance_calc = 0
cam_distance_calc = interp(self.map_speed, [30, 60, 110], [2.6, 3.1, 3.9])
consider_speed = interp((v_ego_kph - self.map_speed), [0, 50], [1, 2])
min_control_dist = interp(self.map_speed, [30, 110], [40, 250])
final_cam_decel_start_dist = cam_distance_calc*consider_speed*v_ego_kph * (1 + self.safetycam_decel_dist_gain*0.01)
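# Worked example: approaching a 60 km/h camera at 100 km/h with zero gain gives cam_distance_calc = 3.1 and consider_speed = interp(40, [0, 50], [1, 2]) = 1.8, so deceleration starts about 3.1 * 1.8 * 100 = 558 m out.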
if self.map_speed_dist < final_cam_decel_start_dist:
spdTarget = self.map_speed
elif self.map_speed_dist >= final_cam_decel_start_dist and self.map_speed_block:
spdTarget = self.map_speed
elif self.map_speed_dist < min_control_dist:
spdTarget = self.map_speed
elif self.onSpeedControl and self.map_speed > 19:
spdTarget = self.map_speed
else:
return cruise_set_speed_kph
if self.map_spdlimit_offset_option == 0:
cruise_set_speed_kph = spdTarget + round(spdTarget*0.01*self.map_spdlimit_offset)
else:
cruise_set_speed_kph = spdTarget + self.map_spdlimit_offset
if cruise_set_speed_kph+1 < v_ego_mph and CS.is_set_speed_in_mph and not CS.out.gasPressed:
self.onSpeedControl = True
elif cruise_set_speed_kph+1 < v_ego_kph and not CS.is_set_speed_in_mph and not CS.out.gasPressed:
self.onSpeedControl = True
else:
self.onSpeedControl = False
elif CS.safety_sign > 19 and self.stock_navi_info_enabled: # car stock navi speedlimit
self.map_speed_dist = max(0, CS.safety_dist - 30)
self.map_speed = CS.safety_sign
if CS.safety_block_remain_dist < 255: | cam_distance_calc = interp(self.map_speed, [30, 60, 110], [2.5, 3.0, 3.8])
consider_speed = interp((v_ego_kph - (self.map_speed * (CV.MPH_TO_KPH if CS.is_set_speed_in_mph else 1))), [0, 50], [1, 2])
min_control_dist = interp(self.map_speed, [30, 110], [40, 250])
final_cam_decel_start_dist = cam_distance_calc*consider_speed*v_ego_kph * (1 + self.safetycam_decel_dist_gain*0.01)
if self.map_speed_dist < final_cam_decel_start_dist:
spdTarget = self.map_speed
elif self.map_speed_dist >= final_cam_decel_start_dist and self.map_speed_block:
spdTarget = self.map_speed
elif self.map_speed_dist < min_control_dist:
spdTarget = self.map_speed
elif self.onSpeedControl and self.map_speed > 19:
spdTarget = self.map_speed
else:
self.onSpeedControl = False
return cruise_set_speed_kph
if self.map_spdlimit_offset_option == 0:
cruise_set_speed_kph = spdTarget + round(spdTarget*0.01*self.map_spdlimit_offset)
else:
cruise_set_speed_kph = spdTarget + self.map_spdlimit_offset
if cruise_set_speed_kph+1 < v_ego_mph and CS.is_set_speed_in_mph and not CS.out.gasPressed:
self.onSpeedControl = True
elif cruise_set_speed_kph+1 < v_ego_kph and not CS.is_set_speed_in_mph and not CS.out.gasPressed:
self.onSpeedControl = True
else:
self.onSpeedControl = False
else:
spdTarget = cruise_set_speed_kph
self.onSpeedControl = False
self.map_speed = 0
self.map_speed_dist = 0
self.map_speed_block = False
else:
spdTarget = cruise_set_speed_kph
self.onSpeedControl = False
self.map_speed = 0
self.map_speed_dist = 0
self.map_speed_block = False
# elif speedLimitDistance >= 50:
# if speedLimit <= 60:
# spdTarget = interp(speedLimitDistance, [50, 600], [ speedLimit, speedLimit + 50 ])
# else:
# spdTarget = interp(speedLimitDistance, [150, 900], [ speedLimit, speedLimit + 30 ])
# else:
# spdTarget = speedLimit
# if v_ego_kph < speedLimit:
# v_ego_kph = speedLimit
# print('cruise_set_speed_kph={}'.format(cruise_set_speed_kph))
return cruise_set_speed_kph
def auto_speed_control(self, CS, navi_speed, path_plan):
modelSpeed = path_plan.modelSpeed
min_control_speed = 20 if CS.is_set_speed_in_mph else 30
self.lead_0 = self.sm['radarState'].leadOne
self.lead_1 = self.sm['radarState'].leadTwo
if CS.driverAcc_time:
self.t_interval = 10 if CS.is_set_speed_in_mph else 7
return min(CS.clu_Vanz + (2 if CS.is_set_speed_in_mph else 3), navi_speed)
# elif self.gasPressed_old:
# clu_Vanz = CS.clu_Vanz
# ctrl_speed = max(min_control_speed, ctrl_speed, clu_Vanz)
# CS.set_cruise_speed(ctrl_speed)
elif CS.CP.resSpeed > 19:
self.t_interval = 10 if CS.is_set_speed_in_mph else 7
res_speed = max(min_control_speed, CS.CP.resSpeed)
return min(res_speed, navi_speed)
elif CS.cruise_set_mode in [1,2,4]:
if self.lead_0.status and CS.CP.vFuture >= (min_control_speed-(4 if CS.is_set_speed_in_mph else 7)):
dRel = int(self.lead_0.dRel)
vRel = int(self.lead_0.vRel * (CV.MS_TO_MPH if CS.is_set_speed_in_mph else CV.MS_TO_KPH))
if vRel >= (-2 if CS.is_set_speed_in_mph else -4):
var_speed = min(CS.CP.vFuture + max(0, dRel*0.2+vRel), navi_speed)
ttime = 100 if CS.is_set_speed_in_mph else 70
self.t_interval = int(interp(dRel, [15, 50], [7, ttime])) if not (self.onSpeedControl or self.curvSpeedControl) else 10 if CS.is_set_speed_in_mph else 7
else:
var_speed = min(CS.CP.vFuture, navi_speed)
self.t_interval = 10 if CS.is_set_speed_in_mph else 7
elif self.lead_0.status and CS.CP.vFuture < min_control_speed:
var_speed = min(CS.CP.vFuture, navi_speed)
self.t_interval = 10 if CS.is_set_speed_in_mph else 7
else:
var_speed = navi_speed
ttime = 70 if CS.is_set_speed_in_mph else 50
self.t_interval = ttime if not (self.onSpeedControl or self.curvSpeedControl) else 10 if CS.is_set_speed_in_mph else 7
else:
var_speed = navi_speed
ttime = 70 if CS.is_set_speed_in_mph else 50
self.t_interval = ttime if not ((self.onSpeedControl or self.curvSpeedControl) and self.sm['controlsState'].osmOffSpdLimit) else 10 if CS.is_set_speed_in_mph else 7
if CS.cruise_set_mode in [1,3,4] and self.curv_decel_option in [1,2]:
if CS.out.vEgo * CV.MS_TO_KPH > 40 and modelSpeed < 90 and path_plan.laneChangeState == LaneChangeState.off and \
not (CS.out.leftBlinker or CS.out.rightBlinker) and not abs(CS.out.steeringTorque) > 170:
v_curv_speed = int(interp(modelSpeed, [30, 50, 70, 90], self.vision_curv_speed)/5)*5
v_curv_speed = min(var_speed, v_curv_speed) # curve speed ratio
else:
v_curv_speed = 255
else:
v_curv_speed = 255
if CS.cruise_set_mode in [1,3,4] and self.curv_decel_option in [1,3]:
if self.sm['liveMapData'].turnSpeedLimitEndDistance > 30:
o_curv_speed = self.sm['liveMapData'].turnSpeedLimit * (1 + (self.osm_curv_speed_offset*0.01))
self.osm_wait_timer += 1 if modelSpeed > 90 else 0
if self.osm_wait_timer > 100:
o_curv_speed = 255
else:
o_curv_speed = 255
self.osm_wait_timer = 0
else:
o_curv_speed = 255
self.osm_wait_timer = 0
# self.gasPressed_old = CS.gasPressed
if var_speed > round(min(v_curv_speed, o_curv_speed)):
v_ego_kph = CS.out.vEgo * CV.MS_TO_KPH
v_ego_mph = CS.out.vEgo * CV.MS_TO_MPH
if round(min(v_curv_speed, o_curv_speed))+1 < v_ego_kph and not CS.out.gasPressed:
self.curvSpeedControl = True
else:
self.curvSpeedControl = False
else:
self.curvSpeedControl = False
return round(min(var_speed, v_curv_speed, o_curv_speed))
def update(self, CS, path_plan):
self.na_timer += 1
if self.na_timer > 100:
self.na_timer = 0
self.speedlimit_decel_off = Params().get_bool("SpeedLimitDecelOff")
btn_signal = None
if not self.button_status(CS):  # after the driver presses a button, wait a set amount of time
pass
elif CS.cruise_active:
cruiseState_speed = self.sm['controlsState'].vCruise
kph_set_vEgo = self.get_navi_speed(self.sm, CS, cruiseState_speed) # camspeed
if self.osm_speedlimit_enabled:
navi_speed = kph_set_vEgo
else:
navi_speed = min(cruiseState_speed, kph_set_vEgo)
if CS.cruise_set_mode != 5:
self.ctrl_speed = self.auto_speed_control(CS, navi_speed, path_plan) # lead, curve speed
else:
self.ctrl_speed = navi_speed # navi speed
# print('self.ctrl_speed={} cruiseState_speed={}'.format(self.ctrl_speed, cruiseState_speed))
btn_signal = self.ascc_button_control(CS, self.ctrl_speed)
return btn_signal
hade_context.go | package gin
import (
"context"
)
func (ctx *Context) BaseContext() context.Context {
return ctx.Request.Context()
}
func (ctx *Context) Make(key string) (interface{}, error) {
return ctx.container.Make(key)
}
func (ctx *Context) MustMake(key string) interface{} {
return ctx.container.MustMake(key)
}
func (ctx *Context) MakeNew(key string, params []interface{}) (interface{}, error) {
return ctx.container.MakeNew(key, params)
}
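// Example (illustrative sketch): resolving a service from the framework container
// inside a handler. The key "hade:demo" and the DemoService type are hypothetical
// names for illustration, not defined in this file.
//
// func DemoHandler(ctx *Context) {
//     demo := ctx.MustMake("hade:demo").(DemoService)
//     demo.DoSomething()
// }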
etiquette_cli.py | import argparse
import os
import re
import sys
from voussoirkit import betterhelp
from voussoirkit import interactive
from voussoirkit import pathclass
from voussoirkit import pipeable
from voussoirkit import spinal
from voussoirkit import stringtools
from voussoirkit import vlogging
import etiquette
# HELPERS ##########################################################################################
def export_symlinks_albums(albums, destination, dry_run):
album_directory_names = etiquette.helpers.decollide_names(albums, lambda a: a.display_name)
for (album, directory_name) in album_directory_names.items():
associated_directories = album.get_associated_directories()
if len(associated_directories) == 1:
album_dir = associated_directories.pop()
directory_name = etiquette.helpers.remove_path_badchars(directory_name)
symlink_dir = destination.with_child(directory_name)
if dry_run:
yield symlink_dir
continue
if not album_dir.exists:
continue
if symlink_dir.exists:
yield symlink_dir
continue
print(album, symlink_dir)
os.symlink(src=album_dir, dst=symlink_dir)
yield symlink_dir
def export_symlinks_photos(photos, destination, dry_run):
photo_filenames = etiquette.helpers.decollide_names(photos, lambda p: p.basename)
for (photo, filename) in photo_filenames.items():
symlink_path = destination.with_child(filename)
if dry_run:
yield symlink_path
continue
if not photo.real_path.exists:
continue
if symlink_path.exists:
yield symlink_path
continue
print(symlink_path.absolute_path)
os.symlink(src=photo.real_path, dst=symlink_path)
yield symlink_path
def get_photos_by_glob(pattern):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
pattern = pathclass.normalize_sep(pattern)
if pattern == '**':
return search_in_cwd(yield_photos=True, yield_albums=False)
cwd = pathclass.cwd()
(folder, pattern) = os.path.split(pattern)
if folder:
folder = cwd.join(folder)
else:
folder = cwd
files = [f for f in folder.glob(pattern) if f.is_file]
for file in files:
try:
photo = photodb.get_photo_by_path(file)
yield photo
except etiquette.exceptions.NoSuchPhoto:
pass
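# For example, get_photos_by_glob('photos/*.jpg') yields the Photo objects whose
# files match the glob relative to the cwd; files not present in the database
# are silently skipped via the NoSuchPhoto handler above.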
def get_photos_by_globs(patterns):
for pattern in patterns:
yield from get_photos_by_glob(pattern)
def get_photos_from_args(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
photos = []
if args.photo_id_args:
photos.extend(photodb.get_photos_by_id(args.photo_id_args))
if args.photo_search_args:
photos.extend(search_by_argparse(args.photo_search_args, yield_photos=True))
return photos
def get_albums_from_args(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
albums = []
if args.album_id_args:
albums.extend(photodb.get_albums_by_id(args.album_id_args))
if args.album_search_args:
albums.extend(search_by_argparse(args.album_search_args, yield_albums=True))
return albums
def search_in_cwd(**kwargs):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
cwd = pathclass.cwd()
return photodb.search(
within_directory=cwd,
**kwargs,
)
def search_by_argparse(args, yield_albums=False, yield_photos=False):
return search_in_cwd(
area=args.area,
width=args.width,
height=args.height,
ratio=args.ratio,
bytes=args.bytes,
duration=args.duration,
author=args.author,
created=args.created,
extension=args.extension,
extension_not=args.extension_not,
filename=args.filename,
has_tags=args.has_tags,
has_thumbnail=args.has_thumbnail,
is_searchhidden=args.is_searchhidden,
sha256=args.sha256,
mimetype=args.mimetype,
tag_musts=args.tag_musts,
tag_mays=args.tag_mays,
tag_forbids=args.tag_forbids,
tag_expression=args.tag_expression,
limit=args.limit,
offset=args.offset,
orderby=args.orderby,
yield_albums=yield_albums,
yield_photos=yield_photos,
)
# ARGPARSE #########################################################################################
def add_remove_tag_argparse(args, action):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
tag = photodb.get_tag(name=args.tag_name)
if args.any_id_args:
photos = get_photos_from_args(args)
elif args.globs:
photos = get_photos_by_globs(args.globs)
else:
photos = search_in_cwd(yield_photos=True, yield_albums=False)
for photo in photos:
if action == 'add':
photo.add_tag(tag)
elif action == 'remove':
photo.remove_tag(tag)
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def delete_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
need_commit = False
if args.photo_id_args or args.photo_search_args:
photos = get_photos_from_args(args)
for photo in photos:
photo.delete(delete_file=args.delete_file)
need_commit = True
if args.album_id_args or args.album_search_args:
albums = get_albums_from_args(args)
for album in albums:
album.delete()
need_commit = True
if not need_commit:
return 0
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def digest_directory_argparse(args):
directories = pipeable.input(args.directory, strip=True, skip_blank=True)
directories = [pathclass.Path(d) for d in directories]
for directory in directories:
directory.assert_is_directory()
photodb = etiquette.photodb.PhotoDB.closest_photodb()
need_commit = False
for directory in directories:
digest = photodb.digest_directory(
directory,
exclude_directories=args.exclude_directories,
exclude_filenames=args.exclude_filenames,
glob_directories=args.glob_directories,
glob_filenames=args.glob_filenames,
hash_kwargs={'bytes_per_second': args.hash_bytes_per_second},
make_albums=args.make_albums,
new_photo_ratelimit=args.ratelimit,
recurse=args.recurse,
yield_albums=True,
yield_photos=True,
)
for result in digest:
# print(result)
need_commit = True
if not need_commit:
return 0
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def easybake_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
for eb_string in args.eb_strings:
notes = photodb.easybake(eb_string)
for (action, tagname) in notes:
print(action, tagname)
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def export_symlinks_argparse(args):
destination = pathclass.Path(args.destination)
destination.makedirs(exist_ok=True)
total_paths = set()
if args.album_id_args or args.album_search_args:
albums = get_albums_from_args(args)
export = export_symlinks_albums(
albums,
destination,
dry_run=args.dry_run,
)
total_paths.update(export)
if args.photo_id_args or args.photo_search_args:
photos = get_photos_from_args(args)
export = export_symlinks_photos(
photos,
destination,
dry_run=args.dry_run,
)
total_paths.update(export)
if not args.prune or args.dry_run:
return 0
symlinks = spinal.walk(destination, yield_directories=True, yield_files=True)
symlinks = set(path for path in symlinks if path.is_link)
symlinks = symlinks.difference(total_paths)
for old_symlink in symlinks:
print(f'Pruning {old_symlink}.')
os.remove(old_symlink)
if not old_symlink.parent.listdir():
os.rmdir(old_symlink.parent)
checkdirs = set(spinal.walk(destination, yield_directories=True, yield_files=False))
while checkdirs:
check = checkdirs.pop()
if check not in destination:
continue
if len(check.listdir()) == 0:
os.rmdir(check)
checkdirs.add(check.parent)
return 0
def generate_thumbnail_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
if args.photo_id_args or args.photo_search_args:
photos = get_photos_from_args(args)
else:
photos = search_in_cwd(yield_photos=True, yield_albums=False)
need_commit = False
try:
for photo in photos:
photo.generate_thumbnail()
need_commit = True
except KeyboardInterrupt:
pass
if not need_commit:
return 0
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def init_argparse(args):
photodb = etiquette.photodb.PhotoDB(create=True)
photodb.commit()
return 0
def purge_deleted_files_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
if args.photo_id_args or args.photo_search_args:
photos = get_photos_from_args(args)
else:
photos = search_in_cwd(yield_photos=True, yield_albums=False)
need_commit = False
for deleted in photodb.purge_deleted_files(photos):
need_commit = True
print(deleted)
if not need_commit:
return 0
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def purge_empty_albums_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
# We do not check args.album_search_args because currently it is not
# possible for search results to find empty albums on account of the fact
# that albums are only yielded when they contain some result photo.
if args.album_id_args:
albums = get_albums_from_args(args)
else:
albums = photodb.get_albums_within_directory(pathclass.cwd())
need_commit = False
for deleted in photodb.purge_empty_albums(albums):
need_commit = True
print(deleted)
if not need_commit:
return 0
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def reload_metadata_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
if args.photo_id_args or args.photo_search_args:
photos = get_photos_from_args(args)
else:
photos = search_in_cwd(yield_photos=True, yield_albums=False)
hash_kwargs = {
'bytes_per_second': args.hash_bytes_per_second,
'callback_progress': spinal.callback_progress_v1,
}
need_commit = False
try:
for photo in photos:
if not photo.real_path.is_file:
continue
need_reload = (
args.force or
photo.mtime != photo.real_path.stat.st_mtime or
photo.bytes != photo.real_path.stat.st_size
)
if not need_reload:
continue
photo.reload_metadata(hash_kwargs=hash_kwargs)
need_commit = True
except KeyboardInterrupt:
pass
if not need_commit:
return 0
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def relocate_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
photo = photodb.get_photo(args.photo_id)
photo.relocate(args.filepath)
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def search_argparse(args):
photos = search_by_argparse(args, yield_photos=True)
for photo in photos:
print(photo.real_path.absolute_path)
return 0
def show_associated_directories_argparse(args):
if args.album_id_args or args.album_search_args:
albums = get_albums_from_args(args)
else:
albums = search_in_cwd(yield_photos=False, yield_albums=True)
for album in albums:
directories = album.get_associated_directories()
if not directories:
continue
directories = [f'"{d.absolute_path}"' for d in directories]
directories = ' '.join(directories)
print(f'{album} | {directories}')
return 0
def set_unset_searchhidden_argparse(args, searchhidden):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
if args.photo_search_args:
args.photo_search_args.is_searchhidden = not searchhidden
if args.album_search_args:
args.album_search_args.is_searchhidden = not searchhidden
if args.any_id_args:
photos = get_photos_from_args(args)
albums = get_albums_from_args(args)
photos.extend(photo for album in albums for photo in album.walk_photos())
else:
photos = search_in_cwd(yield_photos=True, yield_albums=False)
for photo in photos:
print(photo)
photo.set_searchhidden(searchhidden)
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def tag_breplace_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
renames = []
tag_names = photodb.get_all_tag_names()
all_names = tag_names.union(photodb.get_all_synonyms())
for tag_name in tag_names:
if args.regex:
new_name = re.sub(args.replace_from, args.replace_to, tag_name)
else:
new_name = tag_name.replace(args.replace_from, args.replace_to)
new_name = photodb.normalize_tagname(new_name)
if new_name == tag_name:
continue
if new_name in all_names:
raise etiquette.exceptions.TagExists(new_name)
if args.set_synonym:
printline = f'{tag_name} -> {new_name}+{tag_name}'
else:
printline = f'{tag_name} -> {new_name}'
renames.append((tag_name, new_name, printline))
if not args.autoyes:
for (tag_name, new_name, printline) in renames:
print(printline)
if not interactive.getpermission('Ok?', must_pick=True):
return 0
for (tag_name, new_name, printline) in renames:
print(printline)
tag = photodb.get_tag(tag_name)
tag.rename(new_name)
if args.set_synonym:
tag.add_synonym(tag_name)
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def tag_list_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
tags = photodb.get_all_tag_names()
synonyms = photodb.get_all_synonyms()
keys = sorted(tags.union(synonyms.keys()))
for key in keys:
if key in synonyms:
print(f'{key}={synonyms[key]}')
else:
print(key)
return 0
DOCSTRING = '''
Etiquette CLI
=============
This is the command-line interface for Etiquette, so that you can automate your
database and integrate it into other scripts.
The following commands are available:
{add_tag}
{remove_tag}
{delete}
{digest}
{easybake}
{export_symlinks}
{generate_thumbnail}
{init}
{purge_deleted_files}
{purge_empty_albums}
{reload_metadata}
{relocate}
{search}
{show_associated_directories}
{set_searchhidden}
{unset_searchhidden}
{tag_breplace}
{tag_list}
You can add --yes to avoid the "Commit?" prompt on commands that modify the db.
TO SEE DETAILS ON EACH COMMAND, RUN
> etiquette_cli.py <command> --help
'''
SUB_DOCSTRINGS = dict(
add_tag='''
add_tag:
Add a tag to photos by a filename glob or by search results.
> etiquette_cli.py add_tag tag_name glob_patterns
> etiquette_cli.py add_tag tag_name --search searchargs
Examples:
> etiquette_cli.py add_tag wallpaper wall*.jpg wall*.png
> etiquette_cli.py add_tag author.author_voussoir --search --tag-forbids author
See etiquette_cli.py search --help for more info about searchargs.
''',
remove_tag='''
remove_tag:
Remove a tag from photos by a filename glob or by search results.
> etiquette_cli.py remove_tag tag_name glob_patterns
> etiquette_cli.py remove_tag tag_name --search searchargs
Examples:
> etiquette_cli.py remove_tag watchlist spongebob*.mp4
> etiquette_cli.py remove_tag watchlist --search --tag-musts directed_by_michael_bay
See etiquette_cli.py search --help for more info about searchargs.
''',
delete='''
delete:
Remove photos or albums from the database.
flags:
--delete_file:
Delete the file from disk after committing.
Your config.json file's recycle_instead_of_delete will influence this.
Without this flag, photos are removed from the db but remain on disk.
> etiquette_cli.py delete --photos id id id
> etiquette_cli.py delete --search searchargs
> etiquette_cli.py delete --albums id id id
> etiquette_cli.py delete --album-search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
digest='''
digest:
Digest a directory, adding new files as Photos into the database.
> etiquette_cli.py digest directory <flags>
flags:
--exclude_directories A B C:
Any directories matching any pattern of A, B, C... will be skipped.
These patterns may be absolute paths like 'D:\\temp', plain names like
'thumbnails' or glob patterns like 'build_*'.
--exclude_filenames A B C:
Any filenames matching any pattern of A, B, C... will be skipped.
These patterns may be absolute paths like 'D:\\somewhere\\config.json',
plain names like 'thumbs.db' or glob patterns like '*.temp'.
--glob_directories A B C:
Only directories matching any pattern of A, B, C... will be digested.
These patterns may be plain names or glob patterns like '2021*'
--glob_filenames A B C:
Only filenames matching any pattern of A, B, C... will be digested.
These patterns may be plain names or glob patterns like '*.jpg'
--no_albums:
Do not create any albums. By default, albums are created and nested to
match the directory structure.
--ratelimit X:
Limit the ingest of new Photos to only one per X seconds. This can be
used to reduce system load or to make sure that two photos don't get the
same `created` timestamp.
--no_recurse:
Do not recurse into subdirectories. Only create Photos from files in
the current directory.
Examples:
> etiquette_cli.py digest media --ratelimit 1
> etiquette_cli.py digest photos --no-recurse --no-albums --ratelimit 0.25
> etiquette_cli.py digest . --glob-filenames *.jpg --exclude-filenames thumb*
''',
easybake='''
easybake:
Create and manipulate tags by easybake strings.
> etiquette_cli.py easybake eb_string
''',
export_symlinks='''
export_symlinks:
Search for photos or albums, then create symlinks pointing to the results.
THIS IS STILL A BIT EXPERIMENTAL.
This can be used to gather up search results for the purpose of further
uploading, transferring, etc. with other applications.
Symlinks point to files (if result is a photo) or directories (if result is
an album with an associated directory).
Albums are limited to only one associated directory since the output
symlink can't point to two places at once.
> etiquette_cli.py export_symlinks --destination directory --search searchargs
> etiquette_cli.py export_symlinks --destination directory --album-search searchargs
flags:
--destination X:
A path to a directory into which the symlinks will be placed.
--dry:
Print the results without actually creating the symlinks.
--prune:
In the destination directory, any existing symlinks whose target no
longer exists will be deleted.
See etiquette_cli.py search --help for more info about searchargs.
''',
generate_thumbnail='''
generate_thumbnail:
Generate thumbnails for photos.
With no args, all files under the cwd will be thumbnailed.
Or, you can pass specific photo ids or searchargs.
> etiquette_cli.py generate_thumbnail
> etiquette_cli.py generate_thumbnail --photos id id id
> etiquette_cli.py generate_thumbnail --search searchargs
Examples:
> etiquette_cli.py generate_thumbnail --search --has-thumbnail no
See etiquette_cli.py search --help for more info about searchargs.
''',
init='''
init:
Create a new Etiquette database in the current directory.
> etiquette_cli.py init
''',
purge_deleted_files='''
purge_deleted_files:
Delete any Photo objects whose file no longer exists on disk.
> etiquette_cli.py purge_deleted_files
> etiquette_cli.py purge_deleted_files --photos id id id
> etiquette_cli.py purge_deleted_files --search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
purge_empty_albums='''
purge_empty_albums:
Delete any albums which have no child albums or photos.
Consider running purge_deleted_files first, so that albums containing
deleted files will get cleared out and then caught by this function.
With no args, all albums will be checked.
Or you can pass specific album ids. (searchargs is not available since
albums only appear in search results when a matching photo is found, and
we're looking for albums with no photos!)
> etiquette_cli.py purge_empty_albums
> etiquette_cli.py purge_empty_albums --albums id id id
''',
reload_metadata='''
reload_metadata:
Reload photos' metadata by reading the files from disk.
With no args, all files under the cwd will be reloaded.
Or, you can pass specific photo ids or searchargs.
> etiquette_cli.py reload_metadata
> etiquette_cli.py reload_metadata --photos id id id
> etiquette_cli.py reload_metadata --search searchargs
flags:
--force:
By default, we will skip any files that have the same mtime and byte
size as before. You can pass --force to always reload.
--hash_bytes_per_second X:
A string like "10mb" to limit the speed of file hashing for the purpose
of reducing system load.
See etiquette_cli.py search --help for more info about searchargs.
''',
relocate='''
relocate:
Change a photo's filepath. Used for updating photos that have been changed
by external tools.
> etiquette_cli.py relocate photo_id filepath
''',
search='''
search:
Search for photos and albums with complex operators.
> etiquette_cli.py search searchargs
> etiquette_cli.py search --album-search searchargs
Searchargs:
--area X-Y:
Photo/video width*height between X and Y.
--width X-Y:
Photo/video width between X and Y.
--height X-Y:
Photo/video height between X and Y.
--ratio X-Y:
Photo/video aspect ratio between X and Y.
--bytes X-Y:
File size in bytes between X and Y.
--duration X-Y:
Media duration between X and Y seconds.
--author X:
Photo authored by user with username X.
--created X-Y:
Photo creation date between X and Y unix timestamp.
--extension A,B,C:
Photo with any extension of A, B, C...
--extension_not A,B,C:
Photo without any extension of A, B, C...
--filename X:
Search terms for Photo's filename.
--has_tags yes/no/null:
If yes, Photo must have at least one tag.
If no, Photo must have no tags.
If null, doesn't matter.
--has_thumbnail yes/no/null:
--is_searchhidden yes/no/null:
--mimetype A,B,C:
Photo with any mimetype of A, B, C...
--sha256 A,B,C:
Photo with any sha256 of A, B, C...
--tag_musts A,B,C:
Photo must have all tags A and B and C...
--tag_mays A,B,C:
Photo must have at least one tag of A, B, C...
--tag_forbids A,B,C:
Photo must not have any tags of A, B, C...
--tag_expression X:
Complex expression string to match tags.
--limit X:
Limit results to first X items.
--offset X:
Skip the first X items.
--orderby X-Y:
Order the results by property X in direction Y. E.g. created-desc or
bytes-asc.
''',
show_associated_directories='''
show_associated_directories:
Show the associated directories for albums.
> etiquette_cli.py show_associated_directories
> etiquette_cli.py show_associated_directories --albums id id id
> etiquette_cli.py show_associated_directories --album-search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
set_searchhidden='''
set_searchhidden:
Mark photos as searchhidden.
> etiquette_cli.py set_searchhidden --photos id id id
> etiquette_cli.py set_searchhidden --search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
unset_searchhidden='''
unset_searchhidden:
Unmark photos as searchhidden.
> etiquette_cli.py unset_searchhidden --photos id id id
> etiquette_cli.py unset_searchhidden --search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
tag_breplace='''
tag_breplace:
For all tags in the database, use find-and-replace to rename the tags.
> etiquette_cli.py tag_breplace replace_from replace_to
''',
tag_list='''
tag_list:
Show all tags in the database.
> etiquette_cli.py tag_list
''',
)
DOCSTRING = betterhelp.add_previews(DOCSTRING, SUB_DOCSTRINGS)
@vlogging.main_decorator
def main(argv):
parser = argparse.ArgumentParser(description=__doc__)
subparsers = parser.add_subparsers()
primary_args = []
photo_id_args = []
photo_search_args = []
album_id_args = []
album_search_args = []
mode = primary_args
for arg in argv:
if arg in {'--search', '--photo_search', '--photo-search'}:
mode = photo_search_args
elif arg in {'--album_search', '--album-search'}:
mode = album_search_args
elif arg == '--photos':
mode = photo_id_args
elif arg == '--albums':
mode = album_id_args
else:
mode.append(arg)
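# For example (illustrative invocation), the argv
#   delete --photos 1 2 --search --has-tags no
# is partitioned by the loop above into primary_args=['delete'],
# photo_id_args=['1', '2'], and photo_search_args=['--has-tags', 'no'].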
p_add_tag = subparsers.add_parser('add_tag', aliases=['add-tag'])
p_add_tag.add_argument('tag_name')
p_add_tag.add_argument('globs', nargs='*')
p_add_tag.add_argument('--yes', dest='autoyes', action='store_true')
p_add_tag.set_defaults(func=lambda args: add_remove_tag_argparse(args, action='add'))
p_remove_tag = subparsers.add_parser('remove_tag', aliases=['remove-tag'])
p_remove_tag.add_argument('tag_name')
p_remove_tag.add_argument('globs', nargs='*')
p_remove_tag.add_argument('--yes', dest='autoyes', action='store_true')
p_remove_tag.set_defaults(func=lambda args: add_remove_tag_argparse(args, action='remove'))
p_delete = subparsers.add_parser('delete')
p_delete.add_argument('--delete_file', '--delete-file', action='store_true')
p_delete.add_argument('--yes', dest='autoyes', action='store_true')
p_delete.set_defaults(func=delete_argparse)
p_digest = subparsers.add_parser('digest', aliases=['digest_directory', 'digest-directory'])
p_digest.add_argument('directory')
p_digest.add_argument('--exclude_directories', '--exclude-directories', nargs='+', default=None)
p_digest.add_argument('--exclude_filenames', '--exclude-filenames', nargs='+', default=None)
p_digest.add_argument('--glob_directories', '--glob-directories', nargs='+', default=None)
p_digest.add_argument('--glob_filenames', '--glob-filenames', nargs='+', default=None)
p_digest.add_argument('--no_albums', '--no-albums', dest='make_albums', action='store_false', default=True)
p_digest.add_argument('--ratelimit', dest='ratelimit', type=float, default=0.2)
p_digest.add_argument('--no_recurse', '--no-recurse', dest='recurse', action='store_false', default=True)
p_digest.add_argument('--hash_bytes_per_second', '--hash-bytes-per-second', default=None)
p_digest.add_argument('--yes', dest='autoyes', action='store_true')
p_digest.set_defaults(func=digest_directory_argparse)
p_easybake = subparsers.add_parser('easybake')
p_easybake.add_argument('eb_strings', nargs='+')
p_easybake.add_argument('--yes', dest='autoyes', action='store_true')
p_easybake.set_defaults(func=easybake_argparse)
p_export_symlinks = subparsers.add_parser('export_symlinks', aliases=['export-symlinks'])
p_export_symlinks.add_argument('--destination', dest='destination', required=True)
p_export_symlinks.add_argument('--dry', dest='dry_run', action='store_true')
p_export_symlinks.add_argument('--prune', dest='prune', action='store_true')
p_export_symlinks.set_defaults(func=export_symlinks_argparse)
p_generate_thumbnail = subparsers.add_parser('generate_thumbnail', aliases=['generate-thumbnail'])
p_generate_thumbnail.add_argument('--yes', dest='autoyes', action='store_true')
p_generate_thumbnail.set_defaults(func=generate_thumbnail_argparse)
p_init = subparsers.add_parser('init', aliases=['create'])
p_init.set_defaults(func=init_argparse)
p_purge_deleted_files = subparsers.add_parser('purge_deleted_files', aliases=['purge-deleted-files'])
p_purge_deleted_files.add_argument('--yes', dest='autoyes', action='store_true')
p_purge_deleted_files.set_defaults(func=purge_deleted_files_argparse)
p_purge_empty_albums = subparsers.add_parser('purge_empty_albums', aliases=['purge-empty-albums'])
p_purge_empty_albums.add_argument('--yes', dest='autoyes', action='store_true')
p_purge_empty_albums.set_defaults(func=purge_empty_albums_argparse)
p_reload_metadata = subparsers.add_parser('reload_metadata', aliases=['reload-metadata'])
p_reload_metadata.add_argument('--hash_bytes_per_second', '--hash-bytes-per-second', default=None)
p_reload_metadata.add_argument('--force', action='store_true')
p_reload_metadata.add_argument('--yes', dest='autoyes', action='store_true')
p_reload_metadata.set_defaults(func=reload_metadata_argparse)
p_relocate = subparsers.add_parser('relocate')
p_relocate.add_argument('photo_id')
p_relocate.add_argument('filepath')
p_relocate.add_argument('--yes', dest='autoyes', action='store_true')
p_relocate.set_defaults(func=relocate_argparse)
p_search = subparsers.add_parser('search')
p_search.add_argument('--area', dest='area', default=None)
p_search.add_argument('--width', dest='width', default=None)
p_search.add_argument('--height', dest='height', default=None)
p_search.add_argument('--ratio', dest='ratio', default=None)
p_search.add_argument('--bytes', dest='bytes', default=None)
p_search.add_argument('--duration', dest='duration', default=None)
p_search.add_argument('--author', dest='author', default=None)
p_search.add_argument('--created', dest='created', default=None)
p_search.add_argument('--extension', dest='extension', default=None)
p_search.add_argument('--extension_not', '--extension-not', dest='extension_not', default=None)
p_search.add_argument('--filename', dest='filename', default=None)
p_search.add_argument('--has_tags', '--has-tags', dest='has_tags', default=None)
p_search.add_argument('--has_thumbnail', '--has-thumbnail', dest='has_thumbnail', default=None)
p_search.add_argument('--is_searchhidden', '--is-searchhidden', dest='is_searchhidden', default=False)
p_search.add_argument('--sha256', default=None)
p_search.add_argument('--mimetype', dest='mimetype', default=None)
p_search.add_argument('--tag_musts', '--tag-musts', dest='tag_musts', default=None)
p_search.add_argument('--tag_mays', '--tag-mays', dest='tag_mays', default=None)
p_search.add_argument('--tag_forbids', '--tag-forbids', dest='tag_forbids', default=None)
p_search.add_argument('--tag_expression', '--tag-expression', dest='tag_expression', default=None)
p_search.add_argument('--limit', dest='limit', default=None)
p_search.add_argument('--offset', dest='offset', default=None)
p_search.add_argument('--orderby', dest='orderby', default='basename-ASC')
# p_search.add_argument('--yield_albums', '--yield-albums', dest='yield_albums', default=None)
p_search.set_defaults(func=search_argparse)
p_show_associated_directories = subparsers.add_parser('show_associated_directories', aliases=['show-associated-directories'])
p_show_associated_directories.set_defaults(func=show_associated_directories_argparse)
p_set_searchhidden = subparsers.add_parser('set_searchhidden', aliases=['set-searchhidden'])
p_set_searchhidden.add_argument('--yes', dest='autoyes', action='store_true')
p_set_searchhidden.set_defaults(func=lambda args: set_unset_searchhidden_argparse(args, searchhidden=True))
p_unset_searchhidden = subparsers.add_parser('unset_searchhidden', aliases=['unset-searchhidden'])
p_unset_searchhidden.add_argument('--yes', dest='autoyes', action='store_true')
p_unset_searchhidden.set_defaults(func=lambda args: set_unset_searchhidden_argparse(args, searchhidden=False))
p_tag_breplace = subparsers.add_parser('tag_breplace', aliases=['tag-breplace'])
p_tag_breplace.add_argument('replace_from')
p_tag_breplace.add_argument('replace_to')
p_tag_breplace.add_argument('--set_synonym', '--set-synonym', dest='set_synonym', action='store_true')
p_tag_breplace.add_argument('--regex', dest='regex', action='store_true')
p_tag_breplace.add_argument('--yes', dest='autoyes', action='store_true')
p_tag_breplace.set_defaults(func=tag_breplace_argparse)
p_tag_list = subparsers.add_parser('tag_list', aliases=['tag-list'])
p_tag_list.set_defaults(func=tag_list_argparse)
##
def postprocessor(args):
args.photo_search_args = p_search.parse_args(photo_search_args) if photo_search_args else None
args.album_search_args = p_search.parse_args(album_search_args) if album_search_args else None
args.photo_id_args = [id for arg in photo_id_args for id in stringtools.comma_space_split(arg)]
args.album_id_args = [id for arg in album_id_args for id in stringtools.comma_space_split(arg)]
args.any_id_args = bool(
args.photo_search_args or
args.album_search_args or
args.photo_id_args or
args.album_id_args
)
return args
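# Note: comma_space_split lets id lists be given space- or comma-separated,
# e.g. "--photos 1,2 3" yields photo_id_args=['1', '2', '3'] (illustrative;
# the exact splitting behavior comes from voussoirkit.stringtools).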
try:
return betterhelp.subparser_main(
primary_args,
parser,
main_docstring=DOCSTRING,
sub_docstrings=SUB_DOCSTRINGS,
args_postprocessor=postprocessor,
)
except etiquette.exceptions.NoClosestPhotoDB as exc:
pipeable.stderr(exc.error_message)
pipeable.stderr('Try `etiquette_cli.py init` to create the database.')
return 1
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
| reload_metadata_argparse |
TestConcurrentCrashWithSignal.py | import unittest2
from lldbsuite.test.decorators import *
from lldbsuite.test.concurrent_base import ConcurrentEventsBase
from lldbsuite.test.lldbtest import TestBase
@skipIfWindows
class ConcurrentCrashWithSignal(ConcurrentEventsBase):
mydir = ConcurrentEventsBase.compute_mydir(__file__)
# Atomic sequences are not supported yet for MIPS in LLDB.
@skipIf(triple='^mips')
def test(self):
""" Test a thread that crashes while another thread generates a signal."""
self.build(dictionary=self.getBuildFlags())
self.do_thread_actions(num_crash_threads=1, num_signal_threads=1)
session.py | import os
import logging
import pandas as pd
from datetime import date
from shioaji import Shioaji
class Session(Shioaji):
def __init__(self, simulation: bool = False, timeout: int = 10000) -> None:
"""
Args:
simulation:
timeout:
Notes: Test account IDs range from `PAPIUSER01` to `PAPIUSER08`;
the password is `2222`.
"""
_person_id = f"PAPIUSER05" \
if simulation else os.environ['SINOTRADE_ID']
_passwd = "2222" \
if simulation else os.environ['SINOTRADE_PASSWD']
super(Session, self).__init__(simulation=simulation)
self.login(
person_id=_person_id,
passwd=_passwd,
contracts_cb=lambda security_type: logging.info(f"{repr(security_type)} fetch done."),
contracts_timeout=timeout
)
def __del__(self) -> None:
self.logout()
logging.info("session closed.")
@property
def positions(self) -> pd.DataFrame:
return pd.DataFrame(
self.list_positions(self.stock_account)
)
def profit_loss(self, begin_date: date, end_date: date) -> pd.DataFrame:
return pd.DataFrame(self.list_profit_loss(
self.stock_account,
begin_date=begin_date.strftime('%Y-%m-%d'),
end_date=end_date.strftime('%Y-%m-%d')
))
@property
def settlements(self) -> pd.DataFrame:
return pd.DataFrame(
self.list_settlements(self.stock_account)
)
@property
def balance(self) -> pd.DataFrame:
return pd.DataFrame(
self.account_balance()
)
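# Example usage (sketch): with simulation=True no real credentials are needed;
# for live trading, SINOTRADE_ID and SINOTRADE_PASSWD must be set in the
# environment before constructing the session.
#
# session = Session(simulation=True)
# print(session.positions)
# print(session.profit_loss(date(2021, 1, 1), date(2021, 1, 31)))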
features-routing.module.ts | import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { FeaturesPage } from './features.page';
const routes: Routes = [
{
path: '',
component: FeaturesPage
}
];
@NgModule({
imports: [RouterModule.forChild(routes)],
exports: [RouterModule],
})
export class FeaturesPageRoutingModule {}
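// Example (sketch): a parent router would typically lazy-load this page, e.g.
// { path: 'features', loadChildren: () => import('./features/features.module').then(m => m.FeaturesPageModule) }
// (the module path and the FeaturesPageModule name are assumptions, not shown in this file).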
customizeLogger.py | import logging
import sys
from pathlib import Path
import ujson
from loguru import logger
from app.extension.logging.interceptHandler import InterceptHandler
class CustomizeLogger:
@classmethod
def make_logger(cls):
config_path = "logging_config.json"
config = cls.load_logging_config(config_path)
logging_config = config["logger"]
_logger = cls.customize_logging(
filepath=logging_config["path"] + logging_config["filename"],
level=logging_config["level"],
retention=logging_config["retention"],
rotation=logging_config["rotation"],
_format=logging_config["format"],
)
return _logger
@classmethod
def customize_logging(
cls,
filepath: Path,
level: str,
rotation: str,
retention: str,
_format: str,
):
logger.remove()
logger.add(
sys.stdout,
enqueue=True,
backtrace=True,
level=level.upper(),
format=_format,
)
logger.add(
sink=str(filepath),
rotation=rotation,
retention=retention,
enqueue=True,
backtrace=True,
level=level.upper(),
format=_format,
)
# noinspection PyArgumentList
# logging.basicConfig(handlers=[InterceptHandler()], level=0)
logging.getLogger("uvicorn.access").handlers = [InterceptHandler()]
for _log in ["fastapi", "uvicorn", "uvicorn.error"]:
_logger = logging.getLogger(_log)
_logger.handlers = [InterceptHandler()]
return logger.bind(request_id=None, method=None)
@classmethod
def load_logging_config(cls, config_path: str) -> dict:
"""
:param config_path:
:type config_path:
:return: logging config
:rtype: dict
"""
conf_path = config_path
with open(conf_path, "r") as config_file:
conf = ujson.load(config_file)
return conf
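# Example logging_config.json consumed above (illustrative values; only the
# "logger" section and these keys are read by make_logger):
# {
#     "logger": {
#         "path": "./logs/",
#         "filename": "app.log",
#         "level": "info",
#         "rotation": "20 MB",
#         "retention": "10 days",
#         "format": "{time} | {level} | {message}"
#     }
# }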
remoteproxy.py | import os, time, sys, traceback, weakref
import numpy as np
import threading
try:
import __builtin__ as builtins
import cPickle as pickle
except ImportError:
import builtins
import pickle
# color printing for debugging
from ..util import cprint
class ClosedError(Exception):
"""Raised when an event handler receives a request to close the connection
or discovers that the connection has been closed."""
pass
class NoResultError(Exception):
"""Raised when a request for the return value of a remote call fails
because the call has not yet returned."""
pass
class RemoteEventHandler(object):
"""
This class handles communication between two processes. One instance is present on
each process and listens for communication from the other process. This enables
(amongst other things) ObjectProxy instances to look up their attributes and call
their methods.
This class is responsible for carrying out actions on behalf of the remote process.
Each instance holds one end of a Connection which allows python
objects to be passed between processes.
For the most common operations, see _import(), close(), and transfer()
To handle and respond to incoming requests, RemoteEventHandler requires that its
processRequests method is called repeatedly (this is usually handled by the Process
classes defined in multiprocess.processes).
"""
handlers = {} ## maps {process ID : handler}. This allows unpickler to determine which process
## an object proxy belongs to
def __init__(self, connection, name, pid, debug=False):
self.debug = debug
self.conn = connection
self.name = name
self.results = {} ## reqId: (status, result); cache of request results received from the remote process
## status is either 'result' or 'error'
## if 'error', then result will be (exception, formatted exception)
## where exception may be None if it could not be passed through the Connection.
self.resultLock = threading.RLock()
self.proxies = {} ## maps {weakref(proxy): proxyId}; used to inform the remote process when a proxy has been deleted.
self.proxyLock = threading.RLock()
## attributes that affect the behavior of the proxy.
## See ObjectProxy._setProxyOptions for description
self.proxyOptions = {
'callSync': 'sync', ## 'sync', 'async', 'off'
'timeout': 10, ## float
'returnType': 'auto', ## 'proxy', 'value', 'auto'
'autoProxy': False, ## bool
'deferGetattr': False, ## True, False
'noProxyTypes': [ type(None), str, int, float, tuple, list, dict, LocalObjectProxy, ObjectProxy ],
}
if int(sys.version[0]) < 3:
self.proxyOptions['noProxyTypes'].append(unicode)
else:
self.proxyOptions['noProxyTypes'].append(bytes)
self.optsLock = threading.RLock()
self.nextRequestId = 0
self.exited = False
# Mutexes to help prevent issues when multiple threads access the same RemoteEventHandler
self.processLock = threading.RLock()
self.sendLock = threading.RLock()
RemoteEventHandler.handlers[pid] = self ## register this handler as the one communicating with pid
@classmethod
def getHandler(cls, pid):
try:
return cls.handlers[pid]
except:
print(pid, cls.handlers)
raise
def debugMsg(self, msg, *args):
if not self.debug:
return
cprint.cout(self.debug, "[%d] %s\n" % (os.getpid(), str(msg)%args), -1)
def getProxyOption(self, opt):
with self.optsLock:
return self.proxyOptions[opt]
def setProxyOptions(self, **kwds):
"""
Set the default behavior options for object proxies.
See ObjectProxy._setProxyOptions for more info.
"""
with self.optsLock:
self.proxyOptions.update(kwds)
def processRequests(self):
"""Process all pending requests from the pipe, return
after no more events are immediately available. (non-blocking)
Returns the number of events processed.
"""
with self.processLock:
if self.exited:
self.debugMsg(' processRequests: exited already; raise ClosedError.')
raise ClosedError()
numProcessed = 0
while self.conn.poll():
#try:
#poll = self.conn.poll()
#if not poll:
#break
#except IOError: # this can happen if the remote process dies.
## might it also happen in other circumstances?
#raise ClosedError()
try:
self.handleRequest()
numProcessed += 1
except ClosedError:
self.debugMsg('processRequests: got ClosedError from handleRequest; setting exited=True.')
self.exited = True
raise
#except IOError as err: ## let handleRequest take care of this.
#self.debugMsg(' got IOError from handleRequest; try again.')
#if err.errno == 4: ## interrupted system call; try again
#continue
#else:
#raise
except:
print("Error in process %s" % self.name)
sys.excepthook(*sys.exc_info())
if numProcessed > 0:
self.debugMsg('processRequests: finished %d requests', numProcessed)
return numProcessed
def handleRequest(self):
"""Handle a single request from the remote process.
Blocks until a request is available."""
result = None
while True:
try:
## args, kwds are double-pickled to ensure this recv() call never fails
cmd, reqId, nByteMsgs, optStr = self.conn.recv()
break
except EOFError:
self.debugMsg(' handleRequest: got EOFError from recv; raise ClosedError.')
## remote process has shut down; end event loop
raise ClosedError()
except IOError as err:
if err.errno == 4: ## interrupted system call; try again
self.debugMsg(' handleRequest: got IOError 4 from recv; try again.')
continue
else:
self.debugMsg(' handleRequest: got IOError %d from recv (%s); raise ClosedError.', err.errno, err.strerror)
raise ClosedError()
self.debugMsg(" handleRequest: received %s %s", cmd, reqId)
## read byte messages following the main request
byteData = []
if nByteMsgs > 0:
self.debugMsg(" handleRequest: reading %d byte messages", nByteMsgs)
for i in range(nByteMsgs):
while True:
try:
byteData.append(self.conn.recv_bytes())
break
except EOFError:
self.debugMsg(" handleRequest: got EOF while reading byte messages; raise ClosedError.")
raise ClosedError()
except IOError as err:
if err.errno == 4:
self.debugMsg(" handleRequest: got IOError 4 while reading byte messages; try again.")
continue
else:
self.debugMsg(" handleRequest: got IOError while reading byte messages; raise ClosedError.")
raise ClosedError()
try:
if cmd == 'result' or cmd == 'error':
resultId = reqId
reqId = None ## prevents attempt to return information from this request
## (this is already a return from a previous request)
opts = pickle.loads(optStr)
self.debugMsg(" handleRequest: id=%s opts=%s", reqId, opts)
#print os.getpid(), "received request:", cmd, reqId, opts
returnType = opts.get('returnType', 'auto')
if cmd == 'result':
with self.resultLock:
self.results[resultId] = ('result', opts['result'])
elif cmd == 'error':
with self.resultLock:
self.results[resultId] = ('error', (opts['exception'], opts['excString']))
elif cmd == 'getObjAttr':
result = getattr(opts['obj'], opts['attr'])
elif cmd == 'callObj':
obj = opts['obj']
fnargs = opts['args']
fnkwds = opts['kwds']
## If arrays were sent as byte messages, they must be re-inserted into the
## arguments
if len(byteData) > 0:
for i,arg in enumerate(fnargs):
if isinstance(arg, tuple) and len(arg) > 0 and arg[0] == '__byte_message__':
ind = arg[1]
dtype, shape = arg[2]
fnargs[i] = np.fromstring(byteData[ind], dtype=dtype).reshape(shape)
for k,arg in fnkwds.items():
if isinstance(arg, tuple) and len(arg) > 0 and arg[0] == '__byte_message__':
ind = arg[1]
dtype, shape = arg[2]
fnkwds[k] = np.fromstring(byteData[ind], dtype=dtype).reshape(shape)
if len(fnkwds) == 0: ## need to do this because some functions do not allow keyword arguments.
try:
result = obj(*fnargs)
except:
print("Failed to call object %s: %d, %s" % (obj, len(fnargs), fnargs[1:]))
raise
else:
result = obj(*fnargs, **fnkwds)
elif cmd == 'getObjValue':
result = opts['obj'] ## has already been unpickled into its local value
returnType = 'value'
elif cmd == 'transfer':
result = opts['obj']
returnType = 'proxy'
elif cmd == 'transferArray':
## read array data from next message:
result = np.fromstring(byteData[0], dtype=opts['dtype']).reshape(opts['shape'])
returnType = 'proxy'
elif cmd == 'import':
name = opts['module']
fromlist = opts.get('fromlist', [])
mod = builtins.__import__(name, fromlist=fromlist)
if len(fromlist) == 0:
parts = name.lstrip('.').split('.')
result = mod
for part in parts[1:]:
result = getattr(result, part)
else:
result = map(mod.__getattr__, fromlist)
elif cmd == 'del':
LocalObjectProxy.releaseProxyId(opts['proxyId'])
#del self.proxiedObjects[opts['objId']]
elif cmd == 'close':
if reqId is not None:
result = True
returnType = 'value'
exc = None
except:
exc = sys.exc_info()
if reqId is not None:
if exc is None:
self.debugMsg(" handleRequest: sending return value for %d: %s", reqId, result)
#print "returnValue:", returnValue, result
if returnType == 'auto':
with self.optsLock:
noProxyTypes = self.proxyOptions['noProxyTypes']
result = self.autoProxy(result, noProxyTypes)
elif returnType == 'proxy':
result = LocalObjectProxy(result)
try:
self.replyResult(reqId, result)
except:
sys.excepthook(*sys.exc_info())
self.replyError(reqId, *sys.exc_info())
else:
self.debugMsg(" handleRequest: returning exception for %d", reqId)
self.replyError(reqId, *exc)
elif exc is not None:
sys.excepthook(*exc)
if cmd == 'close':
if opts.get('noCleanup', False) is True:
os._exit(0) ## exit immediately, do not pass GO, do not collect $200.
## (more importantly, do not call any code that would
## normally be invoked at exit)
else:
raise ClosedError()
def replyResult(self, reqId, result):
self.send(request='result', reqId=reqId, callSync='off', opts=dict(result=result))
def replyError(self, reqId, *exc):
print("error: %s %s %s" % (self.name, str(reqId), str(exc[1])))
excStr = traceback.format_exception(*exc)
try:
self.send(request='error', reqId=reqId, callSync='off', opts=dict(exception=exc[1], excString=excStr))
except:
self.send(request='error', reqId=reqId, callSync='off', opts=dict(exception=None, excString=excStr))
def send(self, request, opts=None, reqId=None, callSync='sync', timeout=10, returnType=None, byteData=None, **kwds):
"""Send a request or return packet to the remote process.
Generally it is not necessary to call this method directly; it is for internal use.
(The docstring has information that is nevertheless useful to the programmer
as it describes the internal protocol used to communicate between processes)
============== ====================================================================
**Arguments:**
request String describing the type of request being sent (see below)
reqId Integer uniquely linking a result back to the request that generated
it. (most requests leave this blank)
callSync 'sync': return the actual result of the request
'async': return a Request object which can be used to look up the
result later
'off': return no result
timeout Time in seconds to wait for a response when callSync=='sync'
opts Extra arguments sent to the remote process that determine the way
the request will be handled (see below)
returnType 'proxy', 'value', or 'auto'
byteData If specified, this is a list of objects to be sent as byte messages
to the remote process.
This is used to send large arrays without the cost of pickling.
============== ====================================================================
Description of request strings and options allowed for each:
============= ============= ========================================================
request option description
------------- ------------- --------------------------------------------------------
getObjAttr Request the remote process return (proxy to) an
attribute of an object.
obj reference to object whose attribute should be
returned
attr string name of attribute to return
returnValue bool or 'auto' indicating whether to return a proxy or
the actual value.
callObj Request the remote process call a function or
method. If a request ID is given, then the call's
return value will be sent back (or information
about the error that occurred while running the
function)
obj the (reference to) object to call
args tuple of arguments to pass to callable
kwds dict of keyword arguments to pass to callable
returnValue bool or 'auto' indicating whether to return a proxy or
the actual value.
getObjValue Request the remote process return the value of
a proxied object (must be picklable)
obj reference to object whose value should be returned
transfer Copy an object to the remote process and request
it return a proxy for the new object.
obj The object to transfer.
import Request the remote process import new symbols
and return proxy(ies) to the imported objects
module the string name of the module to import
fromlist optional list of string names to import from module
del Inform the remote process that a proxy has been
released (thus the remote process may be able to
release the original object)
proxyId id of proxy which is no longer referenced by
remote host
close Instruct the remote process to stop its event loop
and exit. Optionally, this request may return a
confirmation.
result Inform the remote process that its request has
been processed
result return value of a request
error Inform the remote process that its request failed
exception the Exception that was raised (or None if the
exception could not be pickled)
excString string-formatted version of the exception and
traceback
============= =====================================================================
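Example (illustrative): a synchronous call travels over the connection as
the 4-tuple ('callObj', reqId, nByteMsgs, optStr) followed by nByteMsgs
raw byte messages; the peer later replies with ('result', reqId, 0, optStr)
where optStr unpickles to {'result': value}.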
"""
if self.exited:
self.debugMsg(' send: exited already; raise ClosedError.')
raise ClosedError()
with self.sendLock:
#if len(kwds) > 0:
#print "Warning: send() ignored args:", kwds
if opts is None:
opts = {}
assert callSync in ['off', 'sync', 'async'], 'callSync must be one of "off", "sync", or "async" (got %r)' % callSync
if reqId is None:
if callSync != 'off': ## requested return value; use the next available request ID
reqId = self.nextRequestId
self.nextRequestId += 1
else:
## If requestId is provided, this _must_ be a response to a previously received request.
assert request in ['result', 'error']
if returnType is not None:
opts['returnType'] = returnType
#print os.getpid(), "send request:", request, reqId, opts
## double-pickle args to ensure that at least status and request ID get through
try:
optStr = pickle.dumps(opts)
except:
print("==== Error pickling this object: ====")
print(opts)
print("=======================================")
raise
nByteMsgs = 0
if byteData is not None:
nByteMsgs = len(byteData)
## Send primary request
request = (request, reqId, nByteMsgs, optStr)
self.debugMsg('send request: cmd=%s nByteMsgs=%d id=%s opts=%s', request[0], nByteMsgs, reqId, opts)
self.conn.send(request)
## follow up by sending byte messages
if byteData is not None:
for obj in byteData: ## Remote process _must_ be prepared to read the same number of byte messages!
self.conn.send_bytes(obj)
self.debugMsg(' sent %d byte messages', len(byteData))
self.debugMsg(' call sync: %s', callSync)
if callSync == 'off':
return
req = Request(self, reqId, description=str(request), timeout=timeout)
if callSync == 'async':
return req
if callSync == 'sync':
return req.result()
def close(self, callSync='off', noCleanup=False, **kwds):
try:
self.send(request='close', opts=dict(noCleanup=noCleanup), callSync=callSync, **kwds)
self.exited = True
except ClosedError:
pass
def getResult(self, reqId):
## raises NoResultError if the result is not available yet
#print self.results.keys(), os.getpid()
with self.resultLock:
haveResult = reqId in self.results
if not haveResult:
try:
self.processRequests()
except ClosedError: ## even if remote connection has closed, we may have
## received new data during this call to processRequests()
pass
with self.resultLock:
if reqId not in self.results:
raise NoResultError()
status, result = self.results.pop(reqId)
if status == 'result':
return result
elif status == 'error':
#print ''.join(result)
exc, excStr = result
if exc is not None:
print("===== Remote process raised exception on request: =====")
print(''.join(excStr))
print("===== Local Traceback to request follows: =====")
raise exc
else:
print(''.join(excStr))
raise Exception("Error getting result. See above for exception from remote process.")
else:
raise Exception("Internal error.")
def _import(self, mod, **kwds):
"""
Request the remote process import a module (or symbols from a module)
and return the proxied results. Uses built-in __import__() function, but
adds a bit more processing:
_import('module') => returns module
_import('module.submodule') => returns submodule
(note this differs from behavior of __import__)
_import('module', fromlist=[name1, name2, ...]) => returns [module.name1, module.name2, ...]
(this also differs from behavior of __import__)
"""
return self.send(request='import', callSync='sync', opts=dict(module=mod), **kwds)
def getObjAttr(self, obj, attr, **kwds):
return self.send(request='getObjAttr', opts=dict(obj=obj, attr=attr), **kwds)
def getObjValue(self, obj, **kwds):
return self.send(request='getObjValue', opts=dict(obj=obj), **kwds)
def callObj(self, obj, args, kwds, **opts):
opts = opts.copy()
args = list(args)
## Decide whether to send arguments by value or by proxy
with self.optsLock:
noProxyTypes = opts.pop('noProxyTypes', None)
if noProxyTypes is None:
noProxyTypes = self.proxyOptions['noProxyTypes']
autoProxy = opts.pop('autoProxy', self.proxyOptions['autoProxy'])
if autoProxy is True:
args = [self.autoProxy(v, noProxyTypes) for v in args]
for k, v in kwds.items():
opts[k] = self.autoProxy(v, noProxyTypes)
byteMsgs = []
## If there are arrays in the arguments, send those as byte messages.
## We do this because pickling arrays is too expensive.
for i,arg in enumerate(args):
if arg.__class__ == np.ndarray:
args[i] = ("__byte_message__", len(byteMsgs), (arg.dtype, arg.shape))
byteMsgs.append(arg)
for k,v in kwds.items():
if v.__class__ == np.ndarray:
kwds[k] = ("__byte_message__", len(byteMsgs), (v.dtype, v.shape))
byteMsgs.append(v)
return self.send(request='callObj', opts=dict(obj=obj, args=args, kwds=kwds), byteData=byteMsgs, **opts)
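# Example (sketch): when a proxy call receives a large ndarray argument, the
# array is shipped as a raw byte message instead of being pickled; the remote
# handleRequest rebuilds it with np.fromstring(...).reshape(shape).
#     arr = np.zeros((1000, 1000))
#     remoteFn(arr)   # arr is replaced by ('__byte_message__', index, (dtype, shape))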
def registerProxy(self, proxy):
with self.proxyLock:
ref = weakref.ref(proxy, self.deleteProxy)
self.proxies[ref] = proxy._proxyId
def deleteProxy(self, ref):
if self.send is None:
# this can happen during shutdown
return
with self.proxyLock:
proxyId = self.proxies.pop(ref)
try:
self.send(request='del', opts=dict(proxyId=proxyId), callSync='off')
except ClosedError: ## if remote process has closed down, there is no need to send delete requests anymore
pass
def transfer(self, obj, **kwds):
"""
Transfer an object by value to the remote host (the object must be picklable)
and return a proxy for the new remote object.
"""
if obj.__class__ is np.ndarray:
opts = {'dtype': obj.dtype, 'shape': obj.shape}
return self.send(request='transferArray', opts=opts, byteData=[obj], **kwds)
else:
return self.send(request='transfer', opts=dict(obj=obj), **kwds)
def autoProxy(self, obj, noProxyTypes):
## Return object wrapped in LocalObjectProxy _unless_ its type is in noProxyTypes.
for typ in noProxyTypes:
if isinstance(obj, typ):
return obj
return LocalObjectProxy(obj)
class Request(object):
"""
Request objects are returned when calling an ObjectProxy in asynchronous mode
or if a synchronous call has timed out. Use hasResult() to ask whether
the result of the call has been returned yet. Use result() to get
the returned value.
"""
def __init__(self, process, reqId, description=None, timeout=10):
self.proc = process
self.description = description
self.reqId = reqId
self.gotResult = False
self._result = None
self.timeout = timeout
def result(self, block=True, timeout=None):
"""
Return the result for this request.
If block is True, wait until the result has arrived or *timeout* seconds passes.
If the timeout is reached, raise NoResultError. (use timeout=None to disable)
If block is False, raise NoResultError immediately if the result has not arrived yet.
If the process's connection has closed before the result arrives, raise ClosedError.
"""
if self.gotResult:
return self._result
if timeout is None:
timeout = self.timeout
if block:
start = time.time()
while not self.hasResult():
if self.proc.exited:
raise ClosedError()
time.sleep(0.005)
if timeout >= 0 and time.time() - start > timeout:
print("Request timed out: %s" % self.description)
import traceback
traceback.print_stack()
raise NoResultError()
return self._result
else:
self._result = self.proc.getResult(self.reqId) ## raises NoResultError if result is not available yet
self.gotResult = True
return self._result
def hasResult(self):
"""Returns True if the result for this request has arrived."""
try:
self.result(block=False)
except NoResultError:
pass
return self.gotResult
class LocalObjectProxy(object):
"""
Used for wrapping local objects to ensure that they are send by proxy to a remote host.
Note that 'proxy' is just a shorter alias for LocalObjectProxy.
For example::
data = [1,2,3,4,5]
remotePlot.plot(data) ## by default, lists are pickled and sent by value
remotePlot.plot(proxy(data)) ## force the object to be sent by proxy
"""
nextProxyId = 0
proxiedObjects = {} ## maps {proxyId: object}
@classmethod
def registerObject(cls, obj):
## assign it a unique ID so we can keep a reference to the local object
pid = cls.nextProxyId
cls.nextProxyId += 1
cls.proxiedObjects[pid] = obj
#print "register:", cls.proxiedObjects
return pid
@classmethod
def lookupProxyId(cls, pid):
return cls.proxiedObjects[pid]
@classmethod
def releaseProxyId(cls, pid):
del cls.proxiedObjects[pid]
#print "release:", cls.proxiedObjects
def __init__(self, obj, **opts):
"""
Create a 'local' proxy object that, when sent to a remote host,
will appear as a normal ObjectProxy to *obj*.
Any extra keyword arguments are passed to proxy._setProxyOptions()
on the remote side.
"""
self.processId = os.getpid()
#self.objectId = id(obj)
self.typeStr = repr(obj)
#self.handler = handler
self.obj = obj
self.opts = opts
def __reduce__(self):
## a proxy is being pickled and sent to a remote process.
## every time this happens, a new proxy will be generated in the remote process,
        ## so we register a new ID each time so we can track when each proxy is released.
pid = LocalObjectProxy.registerObject(self.obj)
return (unpickleObjectProxy, (self.processId, pid, self.typeStr, None, self.opts))
## alias
proxy = LocalObjectProxy
def unpickleObjectProxy(processId, proxyId, typeStr, attributes=None, opts=None):
if processId == os.getpid():
obj = LocalObjectProxy.lookupProxyId(proxyId)
if attributes is not None:
for attr in attributes:
obj = getattr(obj, attr)
return obj
else:
proxy = ObjectProxy(processId, proxyId=proxyId, typeStr=typeStr)
if opts is not None:
proxy._setProxyOptions(**opts)
return proxy
class ObjectProxy(object):
"""
Proxy to an object stored by the remote process. Proxies are created
by calling Process._import(), Process.transfer(), or by requesting/calling
attributes on existing proxy objects.
For the most part, this object can be used exactly as if it
were a local object::
rsys = proc._import('sys') # returns proxy to sys module on remote process
rsys.stdout # proxy to remote sys.stdout
rsys.stdout.write # proxy to remote sys.stdout.write
rsys.stdout.write('hello') # calls sys.stdout.write('hello') on remote machine
# and returns the result (None)
When calling a proxy to a remote function, the call can be made synchronous
(result of call is returned immediately), asynchronous (result is returned later),
or return can be disabled entirely::
ros = proc._import('os')
## synchronous call; result is returned immediately
pid = ros.getpid()
## asynchronous call
request = ros.getpid(_callSync='async')
while not request.hasResult():
time.sleep(0.01)
pid = request.result()
## disable return when we know it isn't needed
rsys.stdout.write('hello', _callSync='off')
Additionally, values returned from a remote function call are automatically
returned either by value (must be picklable) or by proxy.
This behavior can be forced::
rnp = proc._import('numpy')
arrProxy = rnp.array([1,2,3,4], _returnType='proxy')
arrValue = rnp.array([1,2,3,4], _returnType='value')
The default callSync and returnType behaviors (as well as others) can be set
for each proxy individually using ObjectProxy._setProxyOptions() or globally using
proc.setProxyOptions().
"""
def __init__(self, processId, proxyId, typeStr='', parent=None):
object.__init__(self)
## can't set attributes directly because setattr is overridden.
self.__dict__['_processId'] = processId
self.__dict__['_typeStr'] = typeStr
self.__dict__['_proxyId'] = proxyId
self.__dict__['_attributes'] = ()
## attributes that affect the behavior of the proxy.
## in all cases, a value of None causes the proxy to ask
## its parent event handler to make the decision
self.__dict__['_proxyOptions'] = {
'callSync': None, ## 'sync', 'async', None
'timeout': None, ## float, None
'returnType': None, ## 'proxy', 'value', 'auto', None
'deferGetattr': None, ## True, False, None
'noProxyTypes': None, ## list of types to send by value instead of by proxy
'autoProxy': None,
}
self.__dict__['_handler'] = RemoteEventHandler.getHandler(processId)
self.__dict__['_handler'].registerProxy(self) ## handler will watch proxy; inform remote process when the proxy is deleted.
def _setProxyOptions(self, **kwds):
"""
Change the behavior of this proxy. For all options, a value of None
will cause the proxy to instead use the default behavior defined
by its parent Process.
Options are:
============= =============================================================
callSync 'sync', 'async', 'off', or None.
If 'async', then calling methods will return a Request object
which can be used to inquire later about the result of the
method call.
If 'sync', then calling a method
will block until the remote process has returned its result
or the timeout has elapsed (in this case, a Request object
is returned instead).
If 'off', then the remote process is instructed _not_ to
reply and the method call will return None immediately.
returnType 'auto', 'proxy', 'value', or None.
If 'proxy', then the value returned when calling a method
will be a proxy to the object on the remote process.
If 'value', then attempt to pickle the returned object and
send it back.
If 'auto', then the decision is made by consulting the
'noProxyTypes' option.
autoProxy bool or None. If True, arguments to __call__ are
automatically converted to proxy unless their type is
listed in noProxyTypes (see below). If False, arguments
are left untouched. Use proxy(obj) to manually convert
arguments before sending.
timeout float or None. Length of time to wait during synchronous
requests before returning a Request object instead.
deferGetattr True, False, or None.
If False, all attribute requests will be sent to the remote
process immediately and will block until a response is
received (or timeout has elapsed).
If True, requesting an attribute from the proxy returns a
new proxy immediately. The remote process is _not_ contacted
to make this request. This is faster, but it is possible to
request an attribute that does not exist on the proxied
object. In this case, AttributeError will not be raised
until an attempt is made to look up the attribute on the
remote process.
noProxyTypes List of object types that should _not_ be proxied when
sent to the remote process.
============= =============================================================
"""
for k in kwds:
if k not in self._proxyOptions:
raise KeyError("Unrecognized proxy option '%s'" % k)
self._proxyOptions.update(kwds)
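    # Example sketch: make calls through an existing proxy `rnp` asynchronous
    # and force results back by value:
    #   rnp._setProxyOptions(callSync='async', returnType='value')
    #   req = rnp.array([1, 2, 3])   # returns a Request immediately
    #   arr = req.result(timeout=5)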
    def _getValue(self):
"""
Return the value of the proxied object
(the remote object must be picklable)
"""
return self._handler.getObjValue(self)
def _getProxyOption(self, opt):
val = self._proxyOptions[opt]
if val is None:
return self._handler.getProxyOption(opt)
return val
def _getProxyOptions(self):
return dict([(k, self._getProxyOption(k)) for k in self._proxyOptions])
def __reduce__(self):
return (unpickleObjectProxy, (self._processId, self._proxyId, self._typeStr, self._attributes))
def __repr__(self):
#objRepr = self.__getattr__('__repr__')(callSync='value')
return "<ObjectProxy for process %d, object 0x%x: %s >" % (self._processId, self._proxyId, self._typeStr)
def __getattr__(self, attr, **kwds):
"""
Calls __getattr__ on the remote object and returns the attribute
by value or by proxy depending on the options set (see
ObjectProxy._setProxyOptions and RemoteEventHandler.setProxyOptions)
If the option 'deferGetattr' is True for this proxy, then a new proxy object
is returned _without_ asking the remote object whether the named attribute exists.
This can save time when making multiple chained attribute requests,
but may also defer a possible AttributeError until later, making
them more difficult to debug.
"""
opts = self._getProxyOptions()
for k in opts:
if '_'+k in kwds:
opts[k] = kwds.pop('_'+k)
if opts['deferGetattr'] is True:
return self._deferredAttr(attr)
else:
#opts = self._getProxyOptions()
return self._handler.getObjAttr(self, attr, **opts)
def _deferredAttr(self, attr):
return DeferredObjectProxy(self, attr)
def __call__(self, *args, **kwds):
"""
Attempts to call the proxied object from the remote process.
Accepts extra keyword arguments:
_callSync 'off', 'sync', or 'async'
_returnType 'value', 'proxy', or 'auto'
If the remote call raises an exception on the remote process,
it will be re-raised on the local process.
"""
opts = self._getProxyOptions()
for k in opts:
if '_'+k in kwds:
opts[k] = kwds.pop('_'+k)
return self._handler.callObj(obj=self, args=args, kwds=kwds, **opts)
## Explicitly proxy special methods. Is there a better way to do this??
def _getSpecialAttr(self, attr):
## this just gives us an easy way to change the behavior of the special methods
return self._deferredAttr(attr)
def __getitem__(self, *args):
return self._getSpecialAttr('__getitem__')(*args)
def __setitem__(self, *args):
return self._getSpecialAttr('__setitem__')(*args, _callSync='off')
def __setattr__(self, *args):
return self._getSpecialAttr('__setattr__')(*args, _callSync='off')
def __str__(self, *args):
return self._getSpecialAttr('__str__')(*args, _returnType='value')
def __len__(self, *args):
return self._getSpecialAttr('__len__')(*args)
def __add__(self, *args):
return self._getSpecialAttr('__add__')(*args)
def __sub__(self, *args):
return self._getSpecialAttr('__sub__')(*args)
def __div__(self, *args):
return self._getSpecialAttr('__div__')(*args)
def __truediv__(self, *args):
return self._getSpecialAttr('__truediv__')(*args)
def __floordiv__(self, *args):
return self._getSpecialAttr('__floordiv__')(*args)
def __mul__(self, *args):
return self._getSpecialAttr('__mul__')(*args)
def __pow__(self, *args):
return self._getSpecialAttr('__pow__')(*args)
def __iadd__(self, *args):
return self._getSpecialAttr('__iadd__')(*args, _callSync='off')
def __isub__(self, *args):
return self._getSpecialAttr('__isub__')(*args, _callSync='off')
def __idiv__(self, *args):
return self._getSpecialAttr('__idiv__')(*args, _callSync='off')
def __itruediv__(self, *args):
return self._getSpecialAttr('__itruediv__')(*args, _callSync='off')
def __ifloordiv__(self, *args):
return self._getSpecialAttr('__ifloordiv__')(*args, _callSync='off')
def __imul__(self, *args):
return self._getSpecialAttr('__imul__')(*args, _callSync='off')
def __ipow__(self, *args):
return self._getSpecialAttr('__ipow__')(*args, _callSync='off')
def __rshift__(self, *args):
return self._getSpecialAttr('__rshift__')(*args)
def __lshift__(self, *args):
return self._getSpecialAttr('__lshift__')(*args)
def __irshift__(self, *args):
return self._getSpecialAttr('__irshift__')(*args, _callSync='off')
def __ilshift__(self, *args):
return self._getSpecialAttr('__ilshift__')(*args, _callSync='off')
def __eq__(self, *args):
return self._getSpecialAttr('__eq__')(*args)
def __ne__(self, *args):
return self._getSpecialAttr('__ne__')(*args)
def __lt__(self, *args):
return self._getSpecialAttr('__lt__')(*args)
def __gt__(self, *args):
return self._getSpecialAttr('__gt__')(*args)
def __le__(self, *args):
return self._getSpecialAttr('__le__')(*args)
def __ge__(self, *args):
return self._getSpecialAttr('__ge__')(*args)
def __and__(self, *args):
return self._getSpecialAttr('__and__')(*args)
def __or__(self, *args):
return self._getSpecialAttr('__or__')(*args)
def __xor__(self, *args):
return self._getSpecialAttr('__xor__')(*args)
def __iand__(self, *args):
return self._getSpecialAttr('__iand__')(*args, _callSync='off')
def __ior__(self, *args):
return self._getSpecialAttr('__ior__')(*args, _callSync='off')
def __ixor__(self, *args):
return self._getSpecialAttr('__ixor__')(*args, _callSync='off')
def __mod__(self, *args):
return self._getSpecialAttr('__mod__')(*args)
def __radd__(self, *args):
return self._getSpecialAttr('__radd__')(*args)
def __rsub__(self, *args):
return self._getSpecialAttr('__rsub__')(*args)
def __rdiv__(self, *args):
return self._getSpecialAttr('__rdiv__')(*args)
def __rfloordiv__(self, *args):
return self._getSpecialAttr('__rfloordiv__')(*args)
def __rtruediv__(self, *args):
return self._getSpecialAttr('__rtruediv__')(*args)
def __rmul__(self, *args):
return self._getSpecialAttr('__rmul__')(*args)
def __rpow__(self, *args):
return self._getSpecialAttr('__rpow__')(*args)
def __rrshift__(self, *args):
return self._getSpecialAttr('__rrshift__')(*args)
def __rlshift__(self, *args):
return self._getSpecialAttr('__rlshift__')(*args)
def __rand__(self, *args):
return self._getSpecialAttr('__rand__')(*args)
def __ror__(self, *args):
return self._getSpecialAttr('__ror__')(*args)
    def __rxor__(self, *args):
        return self._getSpecialAttr('__rxor__')(*args)
def __rmod__(self, *args):
return self._getSpecialAttr('__rmod__')(*args)
def __hash__(self):
## Required for python3 since __eq__ is defined.
return id(self)
class DeferredObjectProxy(ObjectProxy):
"""
This class represents an attribute (or sub-attribute) of a proxied object.
It is used to speed up attribute requests. Take the following scenario::
rsys = proc._import('sys')
rsys.stdout.write('hello')
For this simple example, a total of 4 synchronous requests are made to
the remote process:
1) import sys
2) getattr(sys, 'stdout')
3) getattr(stdout, 'write')
4) write('hello')
This takes a lot longer than running the equivalent code locally. To
speed things up, we can 'defer' the two attribute lookups so they are
    only carried out when necessary::
rsys = proc._import('sys')
rsys._setProxyOptions(deferGetattr=True)
rsys.stdout.write('hello')
    This example makes only two requests to the remote process; the two
    attribute lookups return DeferredObjectProxy instances immediately without
    contacting the remote process. When the call to write() is made, all
    attribute requests are processed at the same time.
Note that if the attributes requested do not exist on the remote object,
making the call to write() will raise an AttributeError.
"""
def __init__(self, parentProxy, attribute):
## can't set attributes directly because setattr is overridden.
for k in ['_processId', '_typeStr', '_proxyId', '_handler']:
self.__dict__[k] = getattr(parentProxy, k)
self.__dict__['_parent'] = parentProxy ## make sure parent stays alive
self.__dict__['_attributes'] = parentProxy._attributes + (attribute,)
self.__dict__['_proxyOptions'] = parentProxy._proxyOptions.copy()
def __repr__(self):
return ObjectProxy.__repr__(self) + '.' + '.'.join(self._attributes)
def _undefer(self):
"""
Return a non-deferred ObjectProxy referencing the same object
"""
        return self._parent.__getattr__(self._attributes[-1], _deferGetattr=False)
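    # Usage sketch (assumes a running remote process `proc`): with deferred
    # getattr enabled, attribute access returns DeferredObjectProxy objects and
    # _undefer() forces a real remote lookup:
    #   rsys = proc._import('sys')
    #   rsys._setProxyOptions(deferGetattr=True)
    #   stdout_proxy = rsys.stdout            # no remote call made yet
    #   real_proxy = stdout_proxy._undefer()  # contacts the remote process now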
rolebinding.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
api "k8s.io/client-go/pkg/api"
v1 "k8s.io/client-go/pkg/api/v1"
v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1"
rest "k8s.io/client-go/rest"
)
// RoleBindingsGetter has a method to return a RoleBindingInterface.
// A group's client should implement this interface.
type RoleBindingsGetter interface {
RoleBindings(namespace string) RoleBindingInterface
}
// RoleBindingInterface has methods to work with RoleBinding resources.
type RoleBindingInterface interface {
Create(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error)
Update(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions meta_v1.ListOptions) error
Get(name string, options meta_v1.GetOptions) (*v1beta1.RoleBinding, error)
List(opts meta_v1.ListOptions) (*v1beta1.RoleBindingList, error)
Watch(opts meta_v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error)
RoleBindingExpansion
}
// roleBindings implements RoleBindingInterface
type roleBindings struct {
client rest.Interface
ns string
}
// newRoleBindings returns a RoleBindings
func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings {
	return &roleBindings{
		client: c.RESTClient(),
		ns:     namespace,
	}
}
// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any.
func (c *roleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
result = &v1beta1.RoleBinding{}
err = c.client.Post().
Namespace(c.ns).
Resource("rolebindings").
Body(roleBinding).
Do().
Into(result)
return
}
// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
func (c *roleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
result = &v1beta1.RoleBinding{}
err = c.client.Put().
Namespace(c.ns).
Resource("rolebindings").
Name(roleBinding.Name).
Body(roleBinding).
Do().
Into(result)
return
}
// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs.
func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("rolebindings").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("rolebindings").
VersionedParams(&listOptions, api.ParameterCodec).
Body(options).
Do().
Error()
}
// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
func (c *roleBindings) Get(name string, options meta_v1.GetOptions) (result *v1beta1.RoleBinding, err error) {
result = &v1beta1.RoleBinding{}
err = c.client.Get().
Namespace(c.ns).
Resource("rolebindings").
Name(name).
VersionedParams(&options, api.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
func (c *roleBindings) List(opts meta_v1.ListOptions) (result *v1beta1.RoleBindingList, err error) {
result = &v1beta1.RoleBindingList{}
err = c.client.Get().
Namespace(c.ns).
Resource("rolebindings").
VersionedParams(&opts, api.ParameterCodec).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested roleBindings.
func (c *roleBindings) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
return c.client.Get().
Prefix("watch").
Namespace(c.ns).
Resource("rolebindings").
VersionedParams(&opts, api.ParameterCodec).
Watch()
}
// Patch applies the patch and returns the patched roleBinding.
func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) {
result = &v1beta1.RoleBinding{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("rolebindings").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
	return
}
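// Usage sketch (assumes a configured *RbacV1beta1Client named c; the binding
// name is illustrative):
//   rb, err := c.RoleBindings("default").Get("my-binding", meta_v1.GetOptions{})
//   if err != nil {
//       // handle lookup failure
//   }
//   _ = rb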
vite.config.ts | import path from 'path';
import { defineConfig } from 'vite';
import vue from '@vitejs/plugin-vue';
// https://vitejs.dev/config/
export default defineConfig({
plugins: [vue()],
resolve: {
alias: {
'@src': path.resolve(__dirname, 'src'),
},
},
});
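// With the '@src' alias above, application code can import from src without
// relative paths (sketch; the component path is hypothetical):
//   import MyComponent from '@src/components/MyComponent.vue';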
raster.rs | // Copyright 2018 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(test)]
extern crate test;
extern crate font_rs;
use test::Bencher;
use font_rs::raster::*;
use font_rs::geom::Point;
fn draw_shape(r: &mut Raster, s: f32) {
r.draw_line(&Point{x:s*10.0, y:s*10.5}, &Point{x: s*20.0, y: s*150.0});
r.draw_line(&Point{x:s*20.0, y:s*150.0}, &Point{x: s*50.0, y: s*139.0});
r.draw_quad(&Point{x:s*50.0, y:s*139.0}, &Point{x: s*100.0, y: s*60.0}, &Point{x: s*10.0, y: s*10.5});
}
#[bench]
fn empty200(b: &mut Bencher) {
b.iter(|| {
let w = 200;
let h = 200;
let r = Raster::new(w, h);
r.get_bitmap()
})
}
#[bench]
fn render200(b: &mut Bencher) {
b.iter(|| {
let w = 200;
let h = 200;
let mut r = Raster::new(w, h);
draw_shape(&mut r, 1.0);
r.get_bitmap()
})
}
#[bench]
fn prep200(b: &mut Bencher) {
b.iter(|| {
let w = 200;
let h = 200;
let mut r = Raster::new(w, h);
draw_shape(&mut r, 1.0);
})
}
#[bench]
fn prep400(b: &mut Bencher) {
b.iter(|| {
let w = 400;
let h = 400;
let mut r = Raster::new(w, h);
draw_shape(&mut r, 2.0);
})
}
#[bench]
fn render400(b: &mut Bencher) {
b.iter(|| {
let w = 400;
let h = 400;
let mut r = Raster::new(w, h);
draw_shape(&mut r, 2.0);
r.get_bitmap()
})
}
#[bench]
fn empty400(b: &mut Bencher) {
b.iter(|| {
let w = 400;
let h = 400;
let r = Raster::new(w, h);
r.get_bitmap()
})
}
#[bench]
fn alloc400(b: &mut Bencher) {
b.iter(|| vec![0.0; 400 * 400 + 1])
}
#[bench]
fn alloc200(b: &mut Bencher) {
    b.iter(|| vec![0.0; 200 * 200 + 1])
}
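// These benchmarks require a nightly toolchain because of #![feature(test)];
// run them with, for example:
//   cargo +nightly bench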
metrics.js | import { getMetrics } from 'lib/queries';
import { methodNotAllowed, ok, unauthorized } from 'lib/response';
import { allowQuery } from 'lib/auth';
export default async (req, res) => {
	if (req.method === 'GET') {
		if (!(await allowQuery(req))) {
			return unauthorized(res);
		}
		const { id, start_at, end_at, url } = req.query;
		const websiteId = +id;
		const startDate = new Date(+start_at);
		const endDate = new Date(+end_at);
		const metrics = await getMetrics(websiteId, startDate, endDate, url);
		const stats = Object.keys(metrics[0]).reduce((obj, key) => {
			obj[key] = Number(metrics[0][key]) || 0;
			return obj;
		}, {});
		return ok(res, stats);
	}
	return methodNotAllowed(res);
};
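// Example request shape (sketch; the actual mount path depends on the app's
// routing): GET /api/metrics?id=1&start_at=1609459200000&end_at=1612137600000
// start_at and end_at are epoch milliseconds, coerced via new Date(+value).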
debug.rs | use std::ptr::null_mut;
pub type DebugCallbackFn = Box<dyn FnMut(DebugCallbackInfo, &str)>;
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq, Hash)]
pub enum Source {
Api = rgl::DEBUG_SOURCE_API,
WindowSystem = rgl::DEBUG_SOURCE_WINDOW_SYSTEM,
ShaderCompiler = rgl::DEBUG_SOURCE_SHADER_COMPILER,
ThirdParty = rgl::DEBUG_SOURCE_THIRD_PARTY,
Application = rgl::DEBUG_SOURCE_APPLICATION,
Other = rgl::DEBUG_SOURCE_OTHER,
}
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq, Hash)]
pub enum MessageType {
Error = rgl::DEBUG_TYPE_ERROR,
DeprecatedBehavior = rgl::DEBUG_TYPE_DEPRECATED_BEHAVIOR,
UndefinedBehavior = rgl::DEBUG_TYPE_UNDEFINED_BEHAVIOR,
Portability = rgl::DEBUG_TYPE_PORTABILITY,
Performance = rgl::DEBUG_TYPE_PERFORMANCE,
Marker = rgl::DEBUG_TYPE_MARKER,
PushGroup = rgl::DEBUG_TYPE_PUSH_GROUP,
PopGroup = rgl::DEBUG_TYPE_POP_GROUP,
Other = rgl::DEBUG_TYPE_OTHER,
}
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq, Hash)]
pub enum Severity {
Notification = rgl::DEBUG_SEVERITY_NOTIFICATION,
Low = rgl::DEBUG_SEVERITY_LOW,
Medium = rgl::DEBUG_SEVERITY_MEDIUM,
High = rgl::DEBUG_SEVERITY_HIGH,
}
#[derive(Debug, Clone, PartialEq)]
pub struct DebugCallbackInfo {
pub source: Source,
pub message_type: MessageType,
pub severity: Severity,
pub id: u32,
}
static mut USER_CALLBACK: Option<DebugCallbackFn> = None;
extern "system" fn internal_debug_callback(
source: u32,
ty: u32,
id: u32,
severity: u32,
_length: i32,
message: *const i8,
_: *mut std::os::raw::c_void,
) {
if let Some(func) = unsafe { &mut USER_CALLBACK } {
let (source, message_type, severity): (Source, MessageType, Severity) = unsafe {
use std::mem::transmute;
(transmute(source), transmute(ty), transmute(severity)) // todo bounds checking
};
let info = DebugCallbackInfo {
source,
message_type,
severity,
id,
};
let message = unsafe {
String::from_utf8(std::ffi::CStr::from_ptr(message).to_bytes().to_vec()).unwrap()
};
func(info, &message);
}
}
pub fn debug_message_callback(func: Option<DebugCallbackFn>) {
unsafe {
if func.is_some() {
rgl::DebugMessageCallback(Some(internal_debug_callback), null_mut());
} else {
rgl::DebugMessageCallback(None, null_mut());
}
USER_CALLBACK = func;
}
}
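// Usage sketch (requires a current OpenGL context with debug output enabled):
//   debug_message_callback(Some(Box::new(|info, message| {
//       eprintln!("[{:?}/{:?}] {}: {}", info.source, info.severity, info.id, message);
//   })));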
test_DaskJob.py | from tethys_sdk.testing import TethysTestCase
from tethys_compute.models.dask.dask_scheduler import Scheduler, DaskScheduler
from tethys_compute.models.dask.dask_job import DaskJob
from django.contrib.auth.models import User
import dask
from unittest import mock
import time
@dask.delayed
def inc(x):
return x + 1
@dask.delayed
def double(x):
return x + 2
@dask.delayed
def add(x, y):
time.sleep(2)
return x + y
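# These delayed functions only build a lazy task graph; nothing executes until
# compute() is called, e.g. (sketch):
#   total = add(inc(1), double(2))  # no work happens here
#   total.compute()                 # runs the graph -> 6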
class DaskJobTest(TethysTestCase):
def set_up(self):
self.user = User.objects.create_user('tethys_super', '[email protected]', 'pass')
self.scheduler = DaskScheduler(
name='test_dask_scheduler',
host='127.0.0.1:8000',
timeout=10,
heartbeat_interval=5,
dashboard='test_dashboard',
)
self.scheduler.save()
def tear_down(self):
self.scheduler.delete()
@mock.patch('tethys_compute.models.dask.dask_job.Client')
def test_client_prop_with_invalid_scheduler(self, mock_client):
mock_client.return_value = 'test_client'
djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=None)
# Execute
ret = djob.client
# Check result
self.assertEqual('test_client', ret)
mock_client.assert_called()
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
def test_client_prop_with_valid_scheduler(self, mock_client):
mock_client.return_value = 'test_client'
dask_scheduler = Scheduler.objects.get_subclass(name='test_dask_scheduler')
djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=dask_scheduler)
# Execute
ret = djob.client
# Check result
self.assertEqual('test_client', ret)
mock_client.assert_called_with(address='127.0.0.1:8000', heartbeat_interval=5, timeout=10)
@mock.patch('tethys_compute.models.dask.dask_job.Client')
def test_client_no_scheduler_prop(self, mock_client):
mock_client.return_value = 'test_default_client'
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label')
# Execute
ret = djob.client
# Check result
self.assertEqual('test_default_client', ret)
mock_client.assert_called_with()
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
@mock.patch('tethys_compute.models.dask.dask_job.Future')
def test_future_prop(self, mock_future, mock_client):
mock_client_ret = mock.MagicMock()
mock_client.return_value = mock_client_ret
mock_client_ret.submit.return_value = mock.MagicMock(key='test_key')
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# Get Scheduler Client from DaskJob using client property
client = djob.client
        # Use this Client to run a random function with a future handler
future = client.submit(inc, 1)
# Get the key from future handler and assign it to DaskJob key to keep track of this inc function
djob.key = future.key
# Use DaskJob future property to get back the inc function
ret = djob.future
# Check result
mock_future.assert_called_with(key='test_key', client=mock_client_ret)
self.assertEqual(mock_future(), ret)
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
def test_future_prop_no_key(self, mock_client):
mock_client_ret = mock.MagicMock()
mock_client.return_value = mock_client_ret
mock_client_ret.submit.return_value = mock.MagicMock(key='test_key')
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# Get Scheduler Client from DaskJob using client property
client = djob.client
# Use this Client to run inc function with a future handler
client.submit(inc, 1)
# Use DaskJob future property to get back the inc function
ret = djob.future
# Check result
self.assertIsNone(ret)
@mock.patch('tethys_compute.models.dask.dask_job.log')
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
@mock.patch('tethys_compute.models.dask.dask_job.Future')
def test_future_prop_exception(self, mock_future, mock_client, mock_log):
mock_client_ret = mock.MagicMock()
mock_client.return_value = mock_client_ret
mock_client_ret.submit.return_value = mock.MagicMock(key='test_key')
mock_future.side_effect = Exception('exception in creating future')
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# Get Scheduler Client from DaskJob using client property
client = djob.client
# Use this Client to run inc function with a future handler
future = client.submit(inc, 1)
# Get the key from future handler and assign it to DaskJob key to keep track of this inc function
djob.key = future.key
# Use DaskJob future property to get back the inc function
ret = djob.future
# Check result
self.assertIsNone(ret)
mock_log.exception.assert_called_with('Dask Future Init Error')
@mock.patch('tethys_compute.models.dask.dask_job.fire_and_forget')
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
def test_execute_delayed(self, mock_client, mock_save, mock_ff):
mock_client_ret = mock.MagicMock()
mock_client.return_value = mock_client_ret
mock_future = mock.MagicMock(key='test_key')
mock_client_ret.compute.return_value = mock_future
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# Delayed option
delayed = dask.delayed(inc)(1)
# _Execute
djob._execute(delayed)
# Check result
mock_client_ret.compute.assert_called_with(delayed)
self.assertEqual('test_key', djob.key)
mock_save.assert_called()
mock_ff.assert_called_with(mock_future)
@mock.patch('tethys_compute.models.dask.dask_job.isinstance')
@mock.patch('tethys_compute.models.dask.dask_job.fire_and_forget')
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
def test_execute_future(self, mock_client, mock_save, mock_ff, mock_isinstance):
mock_client.return_value = mock.MagicMock()
mock_isinstance.side_effect = [True, False]
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler
)
# get client from DaskJob
client = djob.client
# Future option
future = client.submit(inc, 2)
# _Execute
djob._execute(future)
# Check result
self.assertEqual(future.key, djob.key)
mock_save.assert_called()
mock_ff.assert_called_with(future)
def test_execute_not_future_delayed(self):
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler
)
# _Execute
self.assertRaises(ValueError, djob._execute, 1)
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_update_status(self, mock_future, mock_save, mock_client):
mock_future.status = 'finished'
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler
)
# call the function
djob._update_status()
# check the results
mock_client.close.assert_called()
mock_save.assert_called()
def test_update_status_with_no_future(self):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# check the results
self.assertIsNone(djob._update_status())
@mock.patch('tethys_compute.models.dask.dask_job.log')
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_update_status_exception(self, mock_future, mock_save, mock_log):
# Invalid status key
mock_future.status = 'foo'
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler
)
# call the function
djob._update_status()
# check the results
mock_log.error.assert_called_with('Unknown Dask Status: "foo"')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
def test_process_result_with_failed_lock(self, mock_re_lock, mock_apl):
mock_apl.return_value = False
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler,
_process_results_function='test_function')
# call the function
self.assertIsNone(djob._process_results())
# check the result
mock_re_lock.assert_not_called()
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future',
new_callable=mock.PropertyMock(return_value=None))
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
def test_process_result_no_future(self, mock_apl, _):
mock_apl.return_value = True
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler,
_process_results_function='test_function')
# call the function
self.assertIsNone(djob._process_results())
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future',
new_callable=mock.PropertyMock())
def test_process_result_forget(self, _, mock_client):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler, forget=True)
# call the function
ret = djob._process_results()
# check the result
mock_client.close.assert_called()
self.assertIsNone(ret)
@mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
def test_process_result_with_result_function(self, mock_re_lock, mock_apl, mock_client, mock_future, mock_tfe):
fake_key = 'sum_faef'
mock_function_extractor = mock.MagicMock()
mock_function = mock.MagicMock(return_value='foo')
mock_function_extractor.valid = True
mock_function_extractor.function = mock_function
mock_tfe.return_value = mock_function_extractor
mock_apl.return_value = True
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler,
_process_results_function='test_function'
)
djob.key = fake_key
# call the function
djob._process_results()
# check the result
mock_client.close.assert_called()
mock_client.gather.assert_called_with(mock_future)
mock_function.assert_called_with(mock_client.gather())
mock_client.set_metadata.assert_called_with(fake_key, False)
self.assertEqual('', djob.key)
mock_re_lock.assert_called()
@mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.log')
def test_process_result_with_client_gather_exception(self, mock_logger, mock_re_lock, mock_apl, mock_client,
mock_future, mock_tfe):
mock_function_extractor = mock.MagicMock()
mock_function = mock.MagicMock(return_value='foo')
mock_function_extractor.valid = True
mock_function_extractor.function = mock_function
mock_tfe.return_value = mock_function_extractor
mock_apl.return_value = True
gather_exception = Exception('Fake exception')
mock_client.gather.side_effect = gather_exception
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler,
_process_results_function='test_function'
)
# call the function
djob._process_results()
# check the result
mock_client.gather.assert_called_with(mock_future)
mock_logger.warning.assert_called()
mock_function.assert_called_with(gather_exception)
mock_re_lock.assert_called()
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_job.log')
@mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
def test_process_result_with_result_function_with_exception(self, mock_re_lock, mock_apl, _, mock_client,
mock_tfe, mock_log, mock_save):
mock_function_extractor = mock.MagicMock()
mock_function = mock.MagicMock()
mock_function.side_effect = Exception
mock_function_extractor.valid = True
mock_function_extractor.function = mock_function
mock_tfe.return_value = mock_function_extractor
mock_apl.return_value = True
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler,
_process_results_function='test_function')
# call the function
djob._process_results()
# check the result
mock_log.exception.assert_called_with('Process Results Function Error')
self.assertEqual('ERR', djob._status)
mock_save.assert_called()
mock_re_lock.assert_called()
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_stop(self, mock_future):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# call the stop function
djob.stop()
# Check result
mock_future.cancel.assert_called()
def test_pause(self):
djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=self.scheduler)
        # Execute and check result
self.assertRaises(NotImplementedError, djob.pause)
def test_resume(self):
djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=self.scheduler)
        # Execute and check result
self.assertRaises(NotImplementedError, djob.resume)
def test_result(self):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
        # stored as a string because the result is serialized to a string when saved to the database
djob.result = 'serialized_results'
# call the function
ret = djob.result
# Check result
self.assertEqual('serialized_results', ret)
def test_result_none(self):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
djob.result = None
# call the function
ret = djob.result
# Check result
self.assertIsNone(ret)
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
    def test_done(self, mock_future):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# call the done function
ret = djob.done()
# Check result
mock_future.done.assert_called()
self.assertEqual(mock_future.done(), ret)
def test_done_with_no_future(self):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# Check result
self.assertIsNone(djob.done())
def test_update_status_interval_prop(self):
from datetime import timedelta
# Create DaskJob
djob = DaskJob(name='test_daskjob', user=self.user, label='label')
djob.save()
ret = DaskJob.objects.get(name='test_daskjob').update_status_interval
# Check result
self.assertIsInstance(ret, timedelta)
self.assertEqual(timedelta(0, 0), ret)
djob.delete()
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_retry(self, mock_future):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# call the done function
djob.retry()
# Check result
mock_future.retry.assert_called()
def test_retry_no_future(self):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# call the done function
self.assertIsNone(djob.retry())
@mock.patch('tethys_compute.models.dask.dask_job.log')
def test_fail_acquire_pr_lock(self, mock_log):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
djob.extended_properties['processing_results'] = True
self.assertFalse(djob._acquire_pr_lock())
mock_log.warning.assert_called_with('Unable to aquire lock. Processing results already occurring. Skipping...')
@mock.patch('django.db.models.base.Model.save')
def test_fail_release_pr_lock(self, mock_save):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
djob.extended_properties['processing_results'] = True
djob._release_pr_lock()
self.assertFalse(djob.extended_properties['processing_results'])
        mock_save.assert_called()
taskDetail.tsx | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { useRef, useState } from 'react';
import { history } from 'umi';
import { message, Dropdown, Menu, Tooltip, Button, Space, Modal } from 'antd';
import CopyToClipboard from 'react-copy-to-clipboard';
import { Utils } from '@dtinsight/dt-utils';
import { formatDateTime } from '@/utils';
import type { IActionRef } from '@/components/sketch';
import Sketch from '@/components/sketch';
import type { TASK_STATUS } from '@/constant';
import { JOB_STAGE_ENUM } from '@/constant';
import type { ColumnsType } from 'antd/lib/table';
import { SyncOutlined, CopyOutlined, DownOutlined, CloseCircleOutlined } from '@ant-design/icons';
import ViewDetail from '../../components/viewDetail';
import Api from '../../api';
import { taskStatusText } from '@/utils/enums';
const JOB_STAGE_OPTIONS = [
{
label: '已存储',
value: JOB_STAGE_ENUM.Saved,
},
{
label: '队列中',
value: JOB_STAGE_ENUM.Queueing,
},
{
label: '等待重试',
value: JOB_STAGE_ENUM.WaitTry,
},
{
label: '等待资源',
value: JOB_STAGE_ENUM.WaitResource,
},
{
label: '运行中',
value: JOB_STAGE_ENUM.Running,
},
];
interface IQueueTaskProps {
computeType: 0;
engineType: string;
execStartTime: number;
generateTime: number;
gmtCreate: number;
gmtModified: number;
id: number;
isFailover: number;
jobId: string;
jobInfo: any;
jobName: string;
jobPriority: number;
jobResource: string;
nodeAddress: string;
stage: JOB_STAGE_ENUM;
status: TASK_STATUS;
tenantName: string;
waitReason: string | null;
waitTime: string;
}
interface IFormFieldProps {
radioValue: number;
}
export default () => {
const sketchRef = useRef<IActionRef>(null);
const [taskDetailVisible, setVisible] = useState(false);
const [details, setDetails] = useState<string>('');
const handleRequestSearch = (
values: IFormFieldProps,
{
current,
pageSize,
}: {
current: number;
pageSize: number;
},
) => {
const { node, jobResource } = history.location.query || {};
return Api.getViewDetail({
nodeAddress: node as string,
pageSize,
currentPage: current,
stage: values.radioValue,
jobResource: jobResource as string,
}).then((res) => {
if (res.code === 1) {
return {
total: res.data.totalCount,
data: res.data.data,
};
}
});
};
const requestKillJob = (jobIdList: string[]) => {
const { node = '', jobResource } = history.location.query || {};
return new Promise<void>((resolve) => {
Api.killTasks({
stage: sketchRef.current?.form.getFieldValue('radioValue'),
jobIdList,
jobResource: jobResource as string,
nodeAddress: node as string,
}).then((res) => {
if (res.code === 1) {
resolve();
}
});
});
};
const handleRefresh = () => {
sketchRef.current?.submit();
};
	// Kill the selected tasks
	const handleKillSelect = () => {
		const selected = (sketchRef.current?.selectedRowKeys || []) as string[];
if (!selected || selected.length <= 0) {
message.error('您没有选择任何任务!');
return false;
}
Modal.confirm({
title: '杀死选中任务',
okText: '杀死选中任务',
okButtonProps: {
danger: true,
},
cancelText: '取消',
width: '460px',
icon: <CloseCircleOutlined />,
content: (
<span style={{ color: '#ff5f5c' }}>本操作将杀死列表(非跨分页)中的选中任务</span>
),
async onOk() {
return new Promise<void>((resolve) => {
requestKillJob(selected).then(() => {
message.success('操作成功');
sketchRef.current?.submit();
resolve();
});
});
},
});
};
const handleKillAll = () => {
Modal.confirm({
title: '杀死全部任务',
okText: '杀死全部任务',
okButtonProps: {
danger: true,
},
cancelText: '取消',
width: '460px',
icon: <CloseCircleOutlined />,
content: (
<span style={{ color: '#ff5f5c' }}>
本操作将杀死列表(跨分页)中的全部任务,不仅是当前页
<br />
杀死运行中的任务需要较长时间
</span>
),
async onOk() {
return new Promise<void>((resolve) => {
requestKillJob([]).then(() => {
					// Killing all tasks is asynchronous and takes effect with a delay, so defer the data refresh
setTimeout(() => {
message.success('操作成功');
sketchRef.current?.submit();
resolve();
}, 1000);
});
});
},
});
};
	// View details
const viewDetails = (record: IQueueTaskProps) => {
setDetails(JSON.stringify(record, null, 2));
setVisible(true);
};
const showTaskParams = (record: IQueueTaskProps) => {
setDetails(record?.jobInfo?.taskParams ?? '');
setVisible(true);
};
	// Pin to top
const stickTask = (record: IQueueTaskProps, msg: string) => {
const { jobResource } = history.location.query || {};
Api.stickJob({
jobId: record.jobId,
jobResource: jobResource as string,
}).then((res) => {
if (res.code === 1) {
message.success(`${msg}成功`);
sketchRef.current?.submit();
}
});
};
const killTask = (record: IQueueTaskProps) => {
Modal.confirm({
title: '杀任务',
cancelText: '取消',
icon: <CloseCircleOutlined />,
content: '是否要杀死此任务?',
async onOk() {
return new Promise<void>((resolve) => {
requestKillJob([record.jobId]).then(() => {
message.success('操作成功');
sketchRef.current?.submit();
resolve();
});
});
},
});
};
const handleCloseViewModal = () => {
setVisible(false);
};
const columns: ColumnsType<IQueueTaskProps> = [
{
title: '任务名称',
dataIndex: 'jobName',
fixed: 'left',
width: 280,
},
{
title: '任务ID',
dataIndex: 'jobId',
render(_, record) {
return (
<span>
{record.jobId}
<CopyToClipboard
text={record.jobId}
onCopy={() => message.success('复制成功')}
>
<Tooltip placement="right" title="复制">
<CopyOutlined
className="copy-hover"
style={{
cursor: 'pointer',
fontSize: '13px',
}}
/>
</Tooltip>
</CopyToClipboard>
</span>
);
},
},
{
title: '状态',
dataIndex: 'status',
render(text) {
return taskStatusText(text);
},
},
{
title: '节点',
dataIndex: 'nodeAddress',
},
{
title: '已等待',
dataIndex: 'waitTime',
},
{
title: '等待原因',
dataIndex: 'waitReason',
width: 300,
render(_, record) {
return (
<Tooltip title={record.waitReason} placement="top">
{Utils.textOverflowExchange(record.waitReason ?? '-', 20)}
</Tooltip>
);
},
},
{
title: '提交时间',
dataIndex: 'generateTime',
render(text) {
return formatDateTime(text);
},
},
{
title: '租户',
dataIndex: 'tenantName',
},
{
title: '操作',
dataIndex: 'deal',
fixed: 'right',
width: 250,
render: (_, record) => {
const isSaved = record.stage === JOB_STAGE_ENUM.Saved;
const isQueueing = record.stage === JOB_STAGE_ENUM.Queueing;
const insert = isSaved ? '插入队列头' : null;
const stickTxt = isQueueing ? '置顶' : insert;
return (
<Space split={<span style={{ color: '#3F87FF' }}>|</span>}>
<a onClick={() => viewDetails(record)}>查看详情</a>
<a onClick={() => killTask(record)}>杀任务</a>
{stickTxt && <a onClick={() => stickTask(record, stickTxt)}>{stickTxt}</a>}
<a onClick={() => showTaskParams(record)}>任务参数</a>
</Space>
);
},
},
];
return (
<>
<Sketch<IQueueTaskProps, IFormFieldProps>
actionRef={sketchRef}
className="dt-queue-detail"
header={[
{
name: 'radioGroup',
props: {
formItemProps: {
name: 'radioValue',
initialValue: Number(history.location.query!.jobStage),
},
slotProps: {
options: JOB_STAGE_OPTIONS,
},
},
},
]}
extra={
<Tooltip title="刷新数据">
<Button className="dt-refresh">
<SyncOutlined onClick={() => handleRefresh()} />
</Button>
</Tooltip>
}
tableProps={{
rowKey: 'jobId',
}}
tableFooter={
<Dropdown.Button
key="kill"
type="primary"
onClick={handleKillSelect}
overlay={
<Menu onClick={() => handleKillAll()}>
<Menu.Item key="1">杀死全部任务</Menu.Item>
</Menu>
}
trigger={['click']}
icon={<DownOutlined />}
>
杀死选中任务
</Dropdown.Button>
}
request={handleRequestSearch}
columns={columns}
/>
<ViewDetail
visible={taskDetailVisible}
onCancel={handleCloseViewModal}
resource={details}
/>
</>
);
};
test_taxon.py | import pytest
from app.routers import taxons
from fastapi.testclient import TestClient
TEST_JSON = {"gbif_id": 15, "canonical_name": "test", "rank": "class"}
TEST_JSON_0 = {
"gbif_id": 0,
"canonical_name": "Canis Lupus Familiaris",
"rank": "subspecies",
}
client = TestClient(taxons.router)
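# TestClient drives the router through an in-process ASGI interface, so these
# tests exercise the endpoints without starting a live server.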
def test_read_taxon():
response = client.get(
"/taxon/0",
)
assert response.status_code == 200
assert response.json() == TEST_JSON_0
def test_post_taxon():
response = client.post("/taxon", json=TEST_JSON)
assert response.status_code == 200
assert response.json() == TEST_JSON
def test_existing_species():
with pytest.raises(Exception) as e:
response = client.post("/taxon", json=TEST_JSON_0)
assert response.status_code == 409
    assert response.json() == {"msg": "species already exists"}
text_widget.rs | #[derive(Serialize)]
pub struct TextWidget {}
impl realm::Widget for TextWidget {
    fn realm_id(&self) -> &'static str {
        "Widgets.BWidgets.TextWidget"
}
}
impl TextWidget {
pub fn new(_req: &realm::Request) -> TextWidget {
TextWidget {}
}
}
lib.rs | // Copyright 2018 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The time oracle service for Exonum.
//!
//! See [the Exonum documentation][docs:time] for a high-level overview of the service,
//! in particular, its design rationale and the proof of correctness.
//!
//! [docs:time]: https://exonum.com/doc/advanced/time
#![deny(
missing_debug_implementations,
missing_docs,
unsafe_code,
bare_trait_objects
)]
#[macro_use]
extern crate failure;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate exonum_derive;
/// Node API.
pub mod api;
/// Protobuf generated structs.
pub mod proto;
/// Database schema.
pub mod schema;
/// System time provider.
pub mod time_provider;
/// Node transactions.
pub mod transactions;
use exonum::{
api::ServiceApiBuilder,
blockchain::{Service, ServiceContext, Transaction, TransactionSet},
crypto::Hash,
helpers::fabric::{Context, ServiceFactory},
messages::RawTransaction,
storage::{Fork, Snapshot},
};
use serde_json::Value;
use crate::{
schema::TimeSchema,
time_provider::{SystemTimeProvider, TimeProvider},
transactions::*,
};
/// Time service id.
pub const SERVICE_ID: u16 = 4;
/// Time service name.
pub const SERVICE_NAME: &str = "exonum_time";
/// Define the service.
#[derive(Debug)]
pub struct TimeService {
/// Current time.
time: Box<dyn TimeProvider>,
}
impl Default for TimeService {
fn default() -> TimeService {
TimeService {
time: Box::new(SystemTimeProvider) as Box<dyn TimeProvider>,
}
}
}
impl TimeService {
/// Create a new `TimeService`.
pub fn new() -> TimeService {
TimeService::default()
}
/// Create a new `TimeService` with time provider `T`.
pub fn with_provider<T: Into<Box<dyn TimeProvider>>>(time_provider: T) -> TimeService {
TimeService {
time: time_provider.into(),
}
}
}
impl Service for TimeService {
fn service_name(&self) -> &str {
SERVICE_NAME
}
fn state_hash(&self, snapshot: &dyn Snapshot) -> Vec<Hash> {
let schema = TimeSchema::new(snapshot);
schema.state_hash()
}
fn service_id(&self) -> u16 {
SERVICE_ID
}
fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<dyn Transaction>, failure::Error> {
TimeTransactions::tx_from_raw(raw).map(Into::into)
}
fn initialize(&self, _fork: &mut Fork) -> Value {
Value::Null
}
/// Creates transaction after commit of the block.
fn after_commit(&self, context: &ServiceContext) {
// The transaction must be created by the validator.
if context.validator_id().is_none() {
return;
}
context.broadcast_transaction(TxTime::new(self.time.current_time()));
}
fn wire_api(&self, builder: &mut ServiceApiBuilder) {
api::PublicApi::wire(builder);
api::PrivateApi::wire(builder);
}
}
/// A time service creator for the `NodeBuilder`.
#[derive(Debug)]
pub struct TimeServiceFactory;
impl ServiceFactory for TimeServiceFactory {
fn service_name(&self) -> &str {
SERVICE_NAME
}
fn make_service(&mut self, _: &Context) -> Box<dyn Service> {
Box::new(TimeService::new())
}
}
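// Usage sketch: the factory is handed to a node builder at startup (assuming
// exonum's fabric helpers are in scope):
//   let node = NodeBuilder::new().with_service(Box::new(TimeServiceFactory));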
ad-service.service.ts | import { Injectable } from '@angular/core';
@Injectable()
export class AdServiceService {
constructor() { }
}
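// Being @Injectable, the service can be injected into a component through
// Angular's DI (sketch):
//   constructor(private adService: AdServiceService) { }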
serviceprofile.go | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha2
import (
v1alpha2 "github.com/linkerd/linkerd2/controller/gen/apis/serviceprofile/v1alpha2"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// ServiceProfileLister helps list ServiceProfiles.
// All objects returned here must be treated as read-only.
type ServiceProfileLister interface {
// List lists all ServiceProfiles in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha2.ServiceProfile, err error)
// ServiceProfiles returns an object that can list and get ServiceProfiles.
ServiceProfiles(namespace string) ServiceProfileNamespaceLister
ServiceProfileListerExpansion
}
// serviceProfileLister implements the ServiceProfileLister interface.
type serviceProfileLister struct {
indexer cache.Indexer
}
// NewServiceProfileLister returns a new ServiceProfileLister.
func NewServiceProfileLister(indexer cache.Indexer) ServiceProfileLister {
return &serviceProfileLister{indexer: indexer}
}
// List lists all ServiceProfiles in the indexer.
func (s *serviceProfileLister) List(selector labels.Selector) (ret []*v1alpha2.ServiceProfile, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha2.ServiceProfile))
})
return ret, err
}
// ServiceProfiles returns an object that can list and get ServiceProfiles.
func (s *serviceProfileLister) ServiceProfiles(namespace string) ServiceProfileNamespaceLister {
return serviceProfileNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// ServiceProfileNamespaceLister helps list and get ServiceProfiles.
// All objects returned here must be treated as read-only.
type ServiceProfileNamespaceLister interface {
// List lists all ServiceProfiles in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha2.ServiceProfile, err error)
// Get retrieves the ServiceProfile from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1alpha2.ServiceProfile, error)
ServiceProfileNamespaceListerExpansion
}
// serviceProfileNamespaceLister implements the ServiceProfileNamespaceLister
// interface.
type serviceProfileNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all ServiceProfiles in the indexer for a given namespace.
func (s serviceProfileNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ServiceProfile, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha2.ServiceProfile))
})
return ret, err
}
// Get retrieves the ServiceProfile from the indexer for a given namespace and name.
func (s serviceProfileNamespaceLister) Get(name string) (*v1alpha2.ServiceProfile, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
	return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha2.Resource("serviceprofile"), name)
}
return obj.(*v1alpha2.ServiceProfile), nil
}
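// listProfilesExample is an illustrative helper, not part of the generated
// code: it shows the typical pattern of building the lister from an
// informer-backed indexer and querying one namespace with a label selector.
// The "default" namespace is a placeholder; returned objects are read-only.
func listProfilesExample(indexer cache.Indexer) ([]*v1alpha2.ServiceProfile, error) {
	lister := NewServiceProfileLister(indexer)
	return lister.ServiceProfiles("default").List(labels.Everything())
}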
# test_block_device.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo.serialization import jsonutils
from nova import block_device
from nova import context
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
from nova.volume import encryptors
class TestDriverBlockDevice(test.NoDBTestCase):
driver_classes = {
'swap': driver_block_device.DriverSwapBlockDevice,
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'snapshot': driver_block_device.DriverSnapshotBlockDevice,
'image': driver_block_device.DriverImageBlockDevice,
'blank': driver_block_device.DriverBlankBlockDevice
}
swap_bdm = block_device.BlockDeviceDict(
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'disk_bus': 'scsi',
'volume_size': 2,
'boot_index': -1})
swap_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2,
'disk_bus': 'scsi'}
swap_legacy_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2}
ephemeral_bdm = block_device.BlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
'boot_index': -1})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
'disk_bus': 'scsi'}
ephemeral_legacy_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'virtual_name': 'ephemeral0',
'num': 0}
volume_bdm = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': '{"fake": "connection_info"}',
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0}
volume_legacy_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False}
snapshot_bdm = block_device.BlockDeviceDict(
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
snapshot_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
snapshot_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
image_bdm = block_device.BlockDeviceDict(
{'id': 5, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 1,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'image_id': 'fake-image-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
image_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
image_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
blank_bdm = block_device.BlockDeviceDict(
{'id': 6, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'blank',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
blank_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
blank_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = self.mox.CreateMock(cinder.API)
self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
self.context = context.RequestContext('fake_user',
'fake_project')
def test_no_device_raises(self):
for name, cls in self.driver_classes.items():
self.assertRaises(driver_block_device._NotTransformable,
cls, {'no_device': True})
def _test_driver_device(self, name):
db_bdm = getattr(self, "%s_bdm" % name)
test_bdm = self.driver_classes[name](db_bdm)
self.assertThat(test_bdm, matchers.DictMatches(
getattr(self, "%s_driver_bdm" % name)))
for k, v in db_bdm.iteritems():
field_val = getattr(test_bdm._bdm_obj, k)
if isinstance(field_val, bool):
v = bool(v)
self.assertEqual(field_val, v)
self.assertThat(test_bdm.legacy(),
matchers.DictMatches(
getattr(self, "%s_legacy_driver_bdm" % name)))
# Test passthru attributes
for passthru in test_bdm._proxy_as_attr:
self.assertEqual(getattr(test_bdm, passthru),
getattr(test_bdm._bdm_obj, passthru))
# Make sure that all others raise _invalidType
for other_name, cls in self.driver_classes.iteritems():
if other_name == name:
continue
self.assertRaises(driver_block_device._InvalidType,
cls,
getattr(self, '%s_bdm' % name))
# Test the save method
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
test_bdm.save(self.context)
for fld, alias in test_bdm._update_on_save.iteritems():
self.assertEqual(test_bdm[alias or fld],
getattr(test_bdm._bdm_obj, fld))
save_mock.assert_called_once_with(self.context)
# Test the save method with no context passed
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
test_bdm.save()
save_mock.assert_called_once_with()
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
def test_driver_swap_block_device(self):
self._test_driver_device("swap")
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
def test_driver_ephemeral_default_size(self):
self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm['connection_info'],
jsonutils.loads(test_bdm._bdm_obj.connection_info))
self.assertEqual(test_bdm._bdm_obj.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
def test_driver_snapshot_block_device(self):
self._test_driver_device("snapshot")
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
def test_driver_image_block_device(self):
self._test_driver_device('image')
test_bdm = self.driver_classes['image'](
self.image_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('image')
bdm = self.image_bdm.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['image'], bdm)
def test_driver_blank_block_device(self):
self._test_driver_device('blank')
test_bdm = self.driver_classes['blank'](
self.blank_bdm)
self.assertEqual(6, test_bdm._bdm_obj.id)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.assertEqual(3, test_bdm.volume_size)
def _test_volume_attach(self, driver_bdm, bdm_dict,
fake_volume, check_attach=True,
fail_check_attach=False, driver_attach=False,
fail_driver_attach=False, volume_attach=True,
access_mode='rw'):
elevated_context = self.context.elevated()
self.stubs.Set(self.context, 'elevated',
lambda: elevated_context)
self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'access_mode': access_mode}}
expected_conn_info = {'data': {'access_mode': access_mode},
'serial': fake_volume['id']}
enc_data = {'fake': 'enc_data'}
self.volume_api.get(self.context,
fake_volume['id']).AndReturn(fake_volume)
if check_attach:
if not fail_check_attach:
self.volume_api.check_attach(self.context, fake_volume,
instance=instance).AndReturn(None)
else:
self.volume_api.check_attach(self.context, fake_volume,
instance=instance).AndRaise(
test.TestingException)
return instance, expected_conn_info
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(connection_info)
if driver_attach:
    encryptors.get_encryption_metadata(
        elevated_context, self.volume_api, fake_volume['id'],
        connection_info).AndReturn(enc_data)
    if not fail_driver_attach:
        self.virt_driver.attach_volume(
            elevated_context, expected_conn_info, instance,
            bdm_dict['device_name'],
            disk_bus=bdm_dict['disk_bus'],
            device_type=bdm_dict['device_type'],
            encryption=enc_data).AndReturn(None)
    else:
        self.virt_driver.attach_volume(
            elevated_context, expected_conn_info, instance,
            bdm_dict['device_name'],
            disk_bus=bdm_dict['disk_bus'],
            device_type=bdm_dict['device_type'],
            encryption=enc_data).AndRaise(test.TestingException)
        self.volume_api.terminate_connection(
            elevated_context, fake_volume['id'],
            expected_conn_info).AndReturn(None)
        return instance, expected_conn_info
if volume_attach:
self.volume_api.attach(elevated_context, fake_volume['id'],
'fake_uuid', bdm_dict['device_name'],
mode=access_mode).AndReturn(None)
driver_bdm._bdm_obj.save(self.context).AndReturn(None)
return instance, expected_conn_info
def test_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_ro(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, access_mode='ro')
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_check_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver)
def test_volume_no_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
driver_attach=False)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_check_attach=False, do_driver_attach=False)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_no_check_driver_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
driver_attach=True)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_check_attach=False, do_driver_attach=True)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_driver_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, driver_attach=True,
fail_driver_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=True)
def test_refresh_connection(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
'serial': 'fake-volume-id-2'}
self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
self.context, test_bdm.volume_id,
connector).AndReturn(connection_info)
test_bdm._bdm_obj.save(self.context).AndReturn(None)
self.mox.ReplayAll()
test_bdm.refresh_connection_info(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_snapshot_attach_no_volume(self):
no_volume_snapshot = self.snapshot_bdm.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3,
'', '', snapshot).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_snapshot, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
# Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, do_check_attach=True
).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume(self):
no_volume_image = self.image_bdm.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['image'](no_volume_image)
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.create(self.context, 1,
'', '', image_id=image['id']).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_image, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_volume(self):
test_bdm = self.driver_classes['image'](
self.image_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
# Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, do_check_attach=True
).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_blank_attach_volume(self):
no_blank_volume = self.blank_bdm.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['blank'](no_blank_volume)
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': 'fake-uuid'})
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': 'fake-uuid-blank-vol'}
with contextlib.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(volume_class, 'attach')
) as (vol_create, vol_attach):
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
vol_create.assert_called_once_with(self.context,
test_bdm.volume_size,
'fake-uuid-blank-vol',
'')
vol_attach.assert_called_once_with(self.context, instance,
self.volume_api,
self.virt_driver,
do_check_attach=True)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_convert_block_devices(self):
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'],
[self.volume_bdm, self.ephemeral_bdm])
self.assertEqual(converted, [self.volume_driver_bdm])
def test_legacy_block_devices(self):
test_snapshot = self.driver_classes['snapshot'](
self.snapshot_bdm)
block_device_mapping = [test_snapshot, test_snapshot]
legacy_bdm = driver_block_device.legacy_block_devices(
block_device_mapping)
self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
self.snapshot_legacy_driver_bdm])
# Test that the ephemerals work as expected
test_ephemerals = [self.driver_classes['ephemeral'](
self.ephemeral_bdm) for _ in xrange(2)]
expected = [self.ephemeral_legacy_driver_bdm.copy()
for _ in xrange(2)]
expected[0]['virtual_name'] = 'ephemeral0'
expected[0]['num'] = 0
expected[1]['virtual_name'] = 'ephemeral1'
expected[1]['num'] = 1
legacy_ephemerals = driver_block_device.legacy_block_devices(
test_ephemerals)
self.assertEqual(expected, legacy_ephemerals)
def test_get_swap(self):
swap = [self.swap_driver_bdm]
legacy_swap = [self.swap_legacy_driver_bdm]
no_swap = [self.volume_driver_bdm]
self.assertEqual(swap[0], driver_block_device.get_swap(swap))
self.assertEqual(legacy_swap[0],
driver_block_device.get_swap(legacy_swap))
self.assertIsNone(driver_block_device.get_swap(no_swap))
self.assertIsNone(driver_block_device.get_swap([]))
def test_is_implemented(self):
for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
self.ephemeral_bdm, self.snapshot_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
local_image = self.image_bdm.copy()
local_image['destination_type'] = 'local'
self.assertFalse(driver_block_device.is_implemented(local_image))
def test_is_block_device_mapping(self):
test_swap = self.driver_classes['swap'](self.swap_bdm)
test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
test_image = self.driver_classes['image'](self.image_bdm)
test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
test_volume = self.driver_classes['volume'](self.volume_bdm)
test_blank = self.driver_classes['blank'](self.blank_bdm)
for bdm in (test_image, test_snapshot, test_volume, test_blank):
self.assertTrue(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
for bdm in (test_swap, test_ephemeral):
self.assertFalse(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
// HomePage.dto.ts
export class HomePageDto {
link: string;
constructor(isUrl: boolean) {
if (isUrl) {
this.link = "https://www.trendyol.com";
} else {
this.link = "ty://?Page=Home";
}
}
public getDeeplink(): string {
return "ty://?Page=Home";
}
}
// service.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/cloud/automl/v1/service.proto
package automl
import (
context "context"
fmt "fmt"
math "math"
proto "github.com/golang/protobuf/proto"
_ "google.golang.org/genproto/googleapis/api/annotations"
longrunning "google.golang.org/genproto/googleapis/longrunning"
field_mask "google.golang.org/genproto/protobuf/field_mask"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Request message for
// [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset].
type CreateDatasetRequest struct {
// The resource name of the project to create the dataset for.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// The dataset to create.
Dataset *Dataset `protobuf:"bytes,2,opt,name=dataset,proto3" json:"dataset,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateDatasetRequest) Reset() { *m = CreateDatasetRequest{} }
func (m *CreateDatasetRequest) String() string { return proto.CompactTextString(m) }
func (*CreateDatasetRequest) ProtoMessage() {}
func (*CreateDatasetRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{0}
}
func (m *CreateDatasetRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateDatasetRequest.Unmarshal(m, b)
}
func (m *CreateDatasetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateDatasetRequest.Marshal(b, m, deterministic)
}
func (m *CreateDatasetRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateDatasetRequest.Merge(m, src)
}
func (m *CreateDatasetRequest) XXX_Size() int {
return xxx_messageInfo_CreateDatasetRequest.Size(m)
}
func (m *CreateDatasetRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreateDatasetRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CreateDatasetRequest proto.InternalMessageInfo
func (m *CreateDatasetRequest) GetParent() string {
if m != nil {
return m.Parent
}
return ""
}
func (m *CreateDatasetRequest) GetDataset() *Dataset {
if m != nil {
return m.Dataset
}
return nil
}
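// exampleCreateDatasetRequest is illustrative only, not generated code: it
// sketches a minimal request, assuming the Dataset message (defined elsewhere
// in this package) exposes a DisplayName field; the parent path is a
// placeholder.
func exampleCreateDatasetRequest() *CreateDatasetRequest {
	return &CreateDatasetRequest{
		Parent:  "projects/my-project/locations/us-central1",
		Dataset: &Dataset{DisplayName: "my_dataset"},
	}
}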
// Request message for
// [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset].
type GetDatasetRequest struct {
// The resource name of the dataset to retrieve.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetDatasetRequest) Reset() { *m = GetDatasetRequest{} }
func (m *GetDatasetRequest) String() string { return proto.CompactTextString(m) }
func (*GetDatasetRequest) ProtoMessage() {}
func (*GetDatasetRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{1}
}
func (m *GetDatasetRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetDatasetRequest.Unmarshal(m, b)
}
func (m *GetDatasetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetDatasetRequest.Marshal(b, m, deterministic)
}
func (m *GetDatasetRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetDatasetRequest.Merge(m, src)
}
func (m *GetDatasetRequest) XXX_Size() int {
return xxx_messageInfo_GetDatasetRequest.Size(m)
}
func (m *GetDatasetRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetDatasetRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetDatasetRequest proto.InternalMessageInfo
func (m *GetDatasetRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Request message for
// [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
type ListDatasetsRequest struct {
// The resource name of the project from which to list datasets.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// An expression for filtering the results of the request.
//
// * `dataset_metadata` - for existence of the case (e.g.
// image_classification_dataset_metadata:*).
// Some examples of using the filter are:
//
// * `translation_dataset_metadata:*` --> The dataset has
// translation_dataset_metadata.
Filter string `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"`
// Requested page size. Server may return fewer results than requested.
// If unspecified, server will pick a default size.
PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// A token identifying a page of results for the server to return
// Typically obtained via
// [ListDatasetsResponse.next_page_token][google.cloud.automl.v1.ListDatasetsResponse.next_page_token]
// of the previous
// [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets] call.
PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListDatasetsRequest) Reset() { *m = ListDatasetsRequest{} }
func (m *ListDatasetsRequest) String() string { return proto.CompactTextString(m) }
func (*ListDatasetsRequest) ProtoMessage() {}
func (*ListDatasetsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{2}
}
func (m *ListDatasetsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListDatasetsRequest.Unmarshal(m, b)
}
func (m *ListDatasetsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListDatasetsRequest.Marshal(b, m, deterministic)
}
func (m *ListDatasetsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListDatasetsRequest.Merge(m, src)
}
func (m *ListDatasetsRequest) XXX_Size() int {
return xxx_messageInfo_ListDatasetsRequest.Size(m)
}
func (m *ListDatasetsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListDatasetsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ListDatasetsRequest proto.InternalMessageInfo
func (m *ListDatasetsRequest) GetParent() string {
if m != nil {
return m.Parent
}
return ""
}
func (m *ListDatasetsRequest) GetFilter() string {
if m != nil {
return m.Filter
}
return ""
}
func (m *ListDatasetsRequest) GetPageSize() int32 {
if m != nil {
return m.PageSize
}
return 0
}
func (m *ListDatasetsRequest) GetPageToken() string {
if m != nil {
return m.PageToken
}
return ""
}
// Response message for
// [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
type ListDatasetsResponse struct {
// The datasets read.
Datasets []*Dataset `protobuf:"bytes,1,rep,name=datasets,proto3" json:"datasets,omitempty"`
// A token to retrieve next page of results.
// Pass to
// [ListDatasetsRequest.page_token][google.cloud.automl.v1.ListDatasetsRequest.page_token]
// to obtain that page.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListDatasetsResponse) Reset() { *m = ListDatasetsResponse{} }
func (m *ListDatasetsResponse) String() string { return proto.CompactTextString(m) }
func (*ListDatasetsResponse) ProtoMessage() {}
func (*ListDatasetsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{3}
}
func (m *ListDatasetsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListDatasetsResponse.Unmarshal(m, b)
}
func (m *ListDatasetsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListDatasetsResponse.Marshal(b, m, deterministic)
}
func (m *ListDatasetsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListDatasetsResponse.Merge(m, src)
}
func (m *ListDatasetsResponse) XXX_Size() int {
return xxx_messageInfo_ListDatasetsResponse.Size(m)
}
func (m *ListDatasetsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListDatasetsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ListDatasetsResponse proto.InternalMessageInfo
func (m *ListDatasetsResponse) GetDatasets() []*Dataset {
if m != nil {
return m.Datasets
}
return nil
}
func (m *ListDatasetsResponse) GetNextPageToken() string {
if m != nil {
return m.NextPageToken
}
return ""
}
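// listAllDatasets is an illustrative helper, not generated code: it shows the
// pagination contract, where NextPageToken from one response feeds PageToken
// of the next request until it comes back empty. AutoMlClient is the gRPC
// client interface generated later in this file.
func listAllDatasets(ctx context.Context, client AutoMlClient, parent string) ([]*Dataset, error) {
	var all []*Dataset
	req := &ListDatasetsRequest{Parent: parent, PageSize: 100}
	for {
		resp, err := client.ListDatasets(ctx, req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.GetDatasets()...)
		if resp.GetNextPageToken() == "" {
			return all, nil
		}
		req.PageToken = resp.GetNextPageToken()
	}
}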
// Request message for
// [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset]
type UpdateDatasetRequest struct {
// The dataset which replaces the resource on the server.
Dataset *Dataset `protobuf:"bytes,1,opt,name=dataset,proto3" json:"dataset,omitempty"`
// Required. The update mask applies to the resource.
UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *UpdateDatasetRequest) Reset() { *m = UpdateDatasetRequest{} }
func (m *UpdateDatasetRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateDatasetRequest) ProtoMessage() {}
func (*UpdateDatasetRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{4}
}
func (m *UpdateDatasetRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UpdateDatasetRequest.Unmarshal(m, b)
}
func (m *UpdateDatasetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UpdateDatasetRequest.Marshal(b, m, deterministic)
}
func (m *UpdateDatasetRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_UpdateDatasetRequest.Merge(m, src)
}
func (m *UpdateDatasetRequest) XXX_Size() int {
return xxx_messageInfo_UpdateDatasetRequest.Size(m)
}
func (m *UpdateDatasetRequest) XXX_DiscardUnknown() {
xxx_messageInfo_UpdateDatasetRequest.DiscardUnknown(m)
}
var xxx_messageInfo_UpdateDatasetRequest proto.InternalMessageInfo
func (m *UpdateDatasetRequest) GetDataset() *Dataset {
if m != nil {
return m.Dataset
}
return nil
}
func (m *UpdateDatasetRequest) GetUpdateMask() *field_mask.FieldMask {
if m != nil {
return m.UpdateMask
}
return nil
}
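// exampleUpdateDatasetRequest is illustrative only: the FieldMask paths use
// the proto field names of the fields being replaced, so only display_name
// changes here. The resource name is a placeholder.
func exampleUpdateDatasetRequest() *UpdateDatasetRequest {
	return &UpdateDatasetRequest{
		Dataset: &Dataset{
			Name:        "projects/my-project/locations/us-central1/datasets/my-dataset-id",
			DisplayName: "renamed_dataset",
		},
		UpdateMask: &field_mask.FieldMask{Paths: []string{"display_name"}},
	}
}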
// Request message for
// [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset].
type DeleteDatasetRequest struct {
// The resource name of the dataset to delete.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DeleteDatasetRequest) Reset() { *m = DeleteDatasetRequest{} }
func (m *DeleteDatasetRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteDatasetRequest) ProtoMessage() {}
func (*DeleteDatasetRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{5}
}
func (m *DeleteDatasetRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeleteDatasetRequest.Unmarshal(m, b)
}
func (m *DeleteDatasetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DeleteDatasetRequest.Marshal(b, m, deterministic)
}
func (m *DeleteDatasetRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeleteDatasetRequest.Merge(m, src)
}
func (m *DeleteDatasetRequest) XXX_Size() int {
return xxx_messageInfo_DeleteDatasetRequest.Size(m)
}
func (m *DeleteDatasetRequest) XXX_DiscardUnknown() {
xxx_messageInfo_DeleteDatasetRequest.DiscardUnknown(m)
}
var xxx_messageInfo_DeleteDatasetRequest proto.InternalMessageInfo
func (m *DeleteDatasetRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Request message for
// [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData].
type ImportDataRequest struct {
// Required. Dataset name. Dataset must already exist. All imported
// annotations and examples will be added.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Required. The desired input location and its domain specific semantics,
// if any.
InputConfig *InputConfig `protobuf:"bytes,3,opt,name=input_config,json=inputConfig,proto3" json:"input_config,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ImportDataRequest) Reset() { *m = ImportDataRequest{} }
func (m *ImportDataRequest) String() string { return proto.CompactTextString(m) }
func (*ImportDataRequest) ProtoMessage() {}
func (*ImportDataRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{6}
}
func (m *ImportDataRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ImportDataRequest.Unmarshal(m, b)
}
func (m *ImportDataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ImportDataRequest.Marshal(b, m, deterministic)
}
func (m *ImportDataRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ImportDataRequest.Merge(m, src)
}
func (m *ImportDataRequest) XXX_Size() int {
return xxx_messageInfo_ImportDataRequest.Size(m)
}
func (m *ImportDataRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ImportDataRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ImportDataRequest proto.InternalMessageInfo
func (m *ImportDataRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ImportDataRequest) GetInputConfig() *InputConfig {
if m != nil {
return m.InputConfig
}
return nil
}
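// exampleImportDataRequest is illustrative only: it assumes InputConfig (from
// this package's io messages) follows the standard generated oneof pattern,
// with an InputConfig_GcsSource wrapper around a GcsSource listing input
// URIs. The dataset name and bucket URI are placeholders.
func exampleImportDataRequest() *ImportDataRequest {
	return &ImportDataRequest{
		Name: "projects/my-project/locations/us-central1/datasets/my-dataset-id",
		InputConfig: &InputConfig{
			Source: &InputConfig_GcsSource{
				GcsSource: &GcsSource{InputUris: []string{"gs://my-bucket/import.csv"}},
			},
		},
	}
}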
// Request message for
// [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData].
type ExportDataRequest struct {
// Required. The resource name of the dataset.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Required. The desired output location.
OutputConfig *OutputConfig `protobuf:"bytes,3,opt,name=output_config,json=outputConfig,proto3" json:"output_config,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ExportDataRequest) Reset() { *m = ExportDataRequest{} }
func (m *ExportDataRequest) String() string { return proto.CompactTextString(m) }
func (*ExportDataRequest) ProtoMessage() {}
func (*ExportDataRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{7}
}
func (m *ExportDataRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExportDataRequest.Unmarshal(m, b)
}
func (m *ExportDataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExportDataRequest.Marshal(b, m, deterministic)
}
func (m *ExportDataRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportDataRequest.Merge(m, src)
}
func (m *ExportDataRequest) XXX_Size() int {
return xxx_messageInfo_ExportDataRequest.Size(m)
}
func (m *ExportDataRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ExportDataRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ExportDataRequest proto.InternalMessageInfo
func (m *ExportDataRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ExportDataRequest) GetOutputConfig() *OutputConfig {
if m != nil {
return m.OutputConfig
}
return nil
}
// Request message for
// [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel].
type CreateModelRequest struct {
// Resource name of the parent project where the model is being created.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// The model to create.
Model *Model `protobuf:"bytes,4,opt,name=model,proto3" json:"model,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateModelRequest) Reset() { *m = CreateModelRequest{} }
func (m *CreateModelRequest) String() string { return proto.CompactTextString(m) }
func (*CreateModelRequest) ProtoMessage() {}
func (*CreateModelRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{8}
}
func (m *CreateModelRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateModelRequest.Unmarshal(m, b)
}
func (m *CreateModelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateModelRequest.Marshal(b, m, deterministic)
}
func (m *CreateModelRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateModelRequest.Merge(m, src)
}
func (m *CreateModelRequest) XXX_Size() int {
return xxx_messageInfo_CreateModelRequest.Size(m)
}
func (m *CreateModelRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreateModelRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CreateModelRequest proto.InternalMessageInfo
func (m *CreateModelRequest) GetParent() string {
if m != nil {
return m.Parent
}
return ""
}
func (m *CreateModelRequest) GetModel() *Model {
if m != nil {
return m.Model
}
return nil
}
// Request message for
// [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel].
type GetModelRequest struct {
// Resource name of the model.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetModelRequest) Reset() { *m = GetModelRequest{} }
func (m *GetModelRequest) String() string { return proto.CompactTextString(m) }
func (*GetModelRequest) ProtoMessage() {}
func (*GetModelRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{9}
}
func (m *GetModelRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetModelRequest.Unmarshal(m, b)
}
func (m *GetModelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetModelRequest.Marshal(b, m, deterministic)
}
func (m *GetModelRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetModelRequest.Merge(m, src)
}
func (m *GetModelRequest) XXX_Size() int {
return xxx_messageInfo_GetModelRequest.Size(m)
}
func (m *GetModelRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetModelRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetModelRequest proto.InternalMessageInfo
func (m *GetModelRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Request message for
// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
type ListModelsRequest struct {
// Resource name of the project, from which to list the models.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// An expression for filtering the results of the request.
//
// * `model_metadata` - for existence of the case (e.g.
// video_classification_model_metadata:*).
// * `dataset_id` - for = or !=. Some examples of using the filter are:
//
// * `image_classification_model_metadata:*` --> The model has
// image_classification_model_metadata.
// * `dataset_id=5` --> The model was created from a dataset with ID 5.
Filter string `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"`
// Requested page size.
PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// A token identifying a page of results for the server to return
// Typically obtained via
// [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token]
// of the previous
// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels] call.
PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListModelsRequest) Reset() { *m = ListModelsRequest{} }
func (m *ListModelsRequest) String() string { return proto.CompactTextString(m) }
func (*ListModelsRequest) ProtoMessage() {}
func (*ListModelsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{10}
}
func (m *ListModelsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListModelsRequest.Unmarshal(m, b)
}
func (m *ListModelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListModelsRequest.Marshal(b, m, deterministic)
}
func (m *ListModelsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListModelsRequest.Merge(m, src)
}
func (m *ListModelsRequest) XXX_Size() int {
return xxx_messageInfo_ListModelsRequest.Size(m)
}
func (m *ListModelsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListModelsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ListModelsRequest proto.InternalMessageInfo
func (m *ListModelsRequest) GetParent() string {
if m != nil {
return m.Parent
}
return ""
}
func (m *ListModelsRequest) GetFilter() string {
if m != nil {
return m.Filter
}
return ""
}
func (m *ListModelsRequest) GetPageSize() int32 {
if m != nil {
return m.PageSize
}
return 0
}
func (m *ListModelsRequest) GetPageToken() string {
if m != nil {
return m.PageToken
}
return ""
}
// Response message for
// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
type ListModelsResponse struct {
// List of models in the requested page.
Model []*Model `protobuf:"bytes,1,rep,name=model,proto3" json:"model,omitempty"`
// A token to retrieve next page of results.
// Pass to
// [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token]
// to obtain that page.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListModelsResponse) Reset() { *m = ListModelsResponse{} }
func (m *ListModelsResponse) String() string { return proto.CompactTextString(m) }
func (*ListModelsResponse) ProtoMessage() {}
func (*ListModelsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{11}
}
func (m *ListModelsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListModelsResponse.Unmarshal(m, b)
}
func (m *ListModelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListModelsResponse.Marshal(b, m, deterministic)
}
func (m *ListModelsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListModelsResponse.Merge(m, src)
}
func (m *ListModelsResponse) XXX_Size() int {
return xxx_messageInfo_ListModelsResponse.Size(m)
}
func (m *ListModelsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListModelsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ListModelsResponse proto.InternalMessageInfo
func (m *ListModelsResponse) GetModel() []*Model {
if m != nil {
return m.Model
}
return nil
}
func (m *ListModelsResponse) GetNextPageToken() string {
if m != nil {
return m.NextPageToken
}
return ""
}
// Request message for
// [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel].
type DeleteModelRequest struct {
// Resource name of the model being deleted.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DeleteModelRequest) Reset() { *m = DeleteModelRequest{} }
func (m *DeleteModelRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteModelRequest) ProtoMessage() {}
func (*DeleteModelRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{12}
}
func (m *DeleteModelRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeleteModelRequest.Unmarshal(m, b)
}
func (m *DeleteModelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DeleteModelRequest.Marshal(b, m, deterministic)
}
func (m *DeleteModelRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeleteModelRequest.Merge(m, src)
}
func (m *DeleteModelRequest) XXX_Size() int {
return xxx_messageInfo_DeleteModelRequest.Size(m)
}
func (m *DeleteModelRequest) XXX_DiscardUnknown() {
xxx_messageInfo_DeleteModelRequest.DiscardUnknown(m)
}
var xxx_messageInfo_DeleteModelRequest proto.InternalMessageInfo
func (m *DeleteModelRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Request message for
// [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel]
type UpdateModelRequest struct {
// The model which replaces the resource on the server.
Model *Model `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// Required. The update mask applies to the resource.
UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *UpdateModelRequest) Reset() { *m = UpdateModelRequest{} }
func (m *UpdateModelRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateModelRequest) ProtoMessage() {}
func (*UpdateModelRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{13}
}
func (m *UpdateModelRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UpdateModelRequest.Unmarshal(m, b)
}
func (m *UpdateModelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UpdateModelRequest.Marshal(b, m, deterministic)
}
func (m *UpdateModelRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_UpdateModelRequest.Merge(m, src)
}
func (m *UpdateModelRequest) XXX_Size() int {
return xxx_messageInfo_UpdateModelRequest.Size(m)
}
func (m *UpdateModelRequest) XXX_DiscardUnknown() {
xxx_messageInfo_UpdateModelRequest.DiscardUnknown(m)
}
var xxx_messageInfo_UpdateModelRequest proto.InternalMessageInfo
func (m *UpdateModelRequest) GetModel() *Model {
if m != nil {
return m.Model
}
return nil
}
func (m *UpdateModelRequest) GetUpdateMask() *field_mask.FieldMask {
if m != nil {
return m.UpdateMask
}
return nil
}
// Request message for
// [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation].
type GetModelEvaluationRequest struct {
// Resource name for the model evaluation.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetModelEvaluationRequest) Reset() { *m = GetModelEvaluationRequest{} }
func (m *GetModelEvaluationRequest) String() string { return proto.CompactTextString(m) }
func (*GetModelEvaluationRequest) ProtoMessage() {}
func (*GetModelEvaluationRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{14}
}
func (m *GetModelEvaluationRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetModelEvaluationRequest.Unmarshal(m, b)
}
func (m *GetModelEvaluationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetModelEvaluationRequest.Marshal(b, m, deterministic)
}
func (m *GetModelEvaluationRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetModelEvaluationRequest.Merge(m, src)
}
func (m *GetModelEvaluationRequest) XXX_Size() int {
return xxx_messageInfo_GetModelEvaluationRequest.Size(m)
}
func (m *GetModelEvaluationRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetModelEvaluationRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetModelEvaluationRequest proto.InternalMessageInfo
func (m *GetModelEvaluationRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Request message for
// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
type ListModelEvaluationsRequest struct {
// Resource name of the model to list the model evaluations for.
// If modelId is set as "-", this will list model evaluations from across all
// models of the parent location.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// An expression for filtering the results of the request.
//
// * `annotation_spec_id` - for =, != or existence. See example below for
// the last.
//
// Some examples of using the filter are:
//
// * `annotation_spec_id!=4` --> The model evaluation was done for
// annotation spec with ID different than 4.
// * `NOT annotation_spec_id:*` --> The model evaluation was done for
// aggregate of all annotation specs.
Filter string `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"`
// Requested page size.
PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// A token identifying a page of results for the server to return.
// Typically obtained via
// [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1.ListModelEvaluationsResponse.next_page_token]
// of the previous
// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]
// call.
PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListModelEvaluationsRequest) Reset() { *m = ListModelEvaluationsRequest{} }
func (m *ListModelEvaluationsRequest) String() string { return proto.CompactTextString(m) }
func (*ListModelEvaluationsRequest) ProtoMessage() {}
func (*ListModelEvaluationsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{15}
}
func (m *ListModelEvaluationsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListModelEvaluationsRequest.Unmarshal(m, b)
}
func (m *ListModelEvaluationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListModelEvaluationsRequest.Marshal(b, m, deterministic)
}
func (m *ListModelEvaluationsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListModelEvaluationsRequest.Merge(m, src)
}
func (m *ListModelEvaluationsRequest) XXX_Size() int {
return xxx_messageInfo_ListModelEvaluationsRequest.Size(m)
}
func (m *ListModelEvaluationsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListModelEvaluationsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ListModelEvaluationsRequest proto.InternalMessageInfo
func (m *ListModelEvaluationsRequest) GetParent() string {
if m != nil {
return m.Parent
}
return ""
}
func (m *ListModelEvaluationsRequest) GetFilter() string {
if m != nil {
return m.Filter
}
return ""
}
func (m *ListModelEvaluationsRequest) GetPageSize() int32 {
if m != nil {
return m.PageSize
}
return 0
}
func (m *ListModelEvaluationsRequest) GetPageToken() string {
if m != nil {
return m.PageToken
}
return ""
}
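// exampleListModelEvaluationsRequest is illustrative only: per the comments
// above, a parent whose model ID is "-" lists evaluations across all models,
// and Filter narrows the results. Both values are placeholders.
func exampleListModelEvaluationsRequest() *ListModelEvaluationsRequest {
	return &ListModelEvaluationsRequest{
		Parent:   "projects/my-project/locations/us-central1/models/-",
		Filter:   "annotation_spec_id:*",
		PageSize: 50,
	}
}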
// Response message for
// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
type ListModelEvaluationsResponse struct {
// List of model evaluations in the requested page.
ModelEvaluation []*ModelEvaluation `protobuf:"bytes,1,rep,name=model_evaluation,json=modelEvaluation,proto3" json:"model_evaluation,omitempty"`
// A token to retrieve next page of results.
// Pass to the
// [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1.ListModelEvaluationsRequest.page_token]
// field of a new
// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]
// request to obtain that page.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListModelEvaluationsResponse) Reset() { *m = ListModelEvaluationsResponse{} }
func (m *ListModelEvaluationsResponse) String() string { return proto.CompactTextString(m) }
func (*ListModelEvaluationsResponse) ProtoMessage() {}
func (*ListModelEvaluationsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_d14ac936eda1c3a8, []int{16}
}
func (m *ListModelEvaluationsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListModelEvaluationsResponse.Unmarshal(m, b)
}
func (m *ListModelEvaluationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListModelEvaluationsResponse.Marshal(b, m, deterministic)
}
func (m *ListModelEvaluationsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListModelEvaluationsResponse.Merge(m, src)
}
func (m *ListModelEvaluationsResponse) XXX_Size() int {
return xxx_messageInfo_ListModelEvaluationsResponse.Size(m)
}
func (m *ListModelEvaluationsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListModelEvaluationsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ListModelEvaluationsResponse proto.InternalMessageInfo
func (m *ListModelEvaluationsResponse) GetModelEvaluation() []*ModelEvaluation {
if m != nil {
return m.ModelEvaluation
}
return nil
}
func (m *ListModelEvaluationsResponse) GetNextPageToken() string {
if m != nil {
return m.NextPageToken
}
return ""
}
func init() {
proto.RegisterType((*CreateDatasetRequest)(nil), "google.cloud.automl.v1.CreateDatasetRequest")
proto.RegisterType((*GetDatasetRequest)(nil), "google.cloud.automl.v1.GetDatasetRequest")
proto.RegisterType((*ListDatasetsRequest)(nil), "google.cloud.automl.v1.ListDatasetsRequest")
proto.RegisterType((*ListDatasetsResponse)(nil), "google.cloud.automl.v1.ListDatasetsResponse")
proto.RegisterType((*UpdateDatasetRequest)(nil), "google.cloud.automl.v1.UpdateDatasetRequest")
proto.RegisterType((*DeleteDatasetRequest)(nil), "google.cloud.automl.v1.DeleteDatasetRequest")
proto.RegisterType((*ImportDataRequest)(nil), "google.cloud.automl.v1.ImportDataRequest")
proto.RegisterType((*ExportDataRequest)(nil), "google.cloud.automl.v1.ExportDataRequest")
proto.RegisterType((*CreateModelRequest)(nil), "google.cloud.automl.v1.CreateModelRequest")
proto.RegisterType((*GetModelRequest)(nil), "google.cloud.automl.v1.GetModelRequest")
proto.RegisterType((*ListModelsRequest)(nil), "google.cloud.automl.v1.ListModelsRequest")
proto.RegisterType((*ListModelsResponse)(nil), "google.cloud.automl.v1.ListModelsResponse")
proto.RegisterType((*DeleteModelRequest)(nil), "google.cloud.automl.v1.DeleteModelRequest")
proto.RegisterType((*UpdateModelRequest)(nil), "google.cloud.automl.v1.UpdateModelRequest")
proto.RegisterType((*GetModelEvaluationRequest)(nil), "google.cloud.automl.v1.GetModelEvaluationRequest")
proto.RegisterType((*ListModelEvaluationsRequest)(nil), "google.cloud.automl.v1.ListModelEvaluationsRequest")
proto.RegisterType((*ListModelEvaluationsResponse)(nil), "google.cloud.automl.v1.ListModelEvaluationsResponse")
}
func init() {
proto.RegisterFile("google/cloud/automl/v1/service.proto", fileDescriptor_d14ac936eda1c3a8)
}
var fileDescriptor_d14ac936eda1c3a8 = []byte{
// 1219 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x58, 0x41, 0x6f, 0xdc, 0x44,
0x1b, 0xd6, 0xa4, 0x6d, 0xbe, 0xf4, 0xdd, 0x44, 0xf9, 0x32, 0x84, 0xb0, 0x75, 0x5b, 0x35, 0x72,
0x0b, 0x09, 0x26, 0xb5, 0xd9, 0x4d, 0xa4, 0x2a, 0x4e, 0xa3, 0x2a, 0x49, 0xd3, 0x28, 0x52, 0xa3,
0x46, 0x0b, 0xe4, 0x80, 0x22, 0xad, 0xdc, 0xdd, 0xc9, 0xd6, 0xc4, 0xeb, 0x71, 0xed, 0x71, 0x12,
0x5a, 0x55, 0x08, 0x84, 0x2a, 0x21, 0xa1, 0x1e, 0xe8, 0x01, 0x51, 0x0e, 0x9c, 0xb8, 0xf0, 0x07,
0xfa, 0x17, 0x50, 0x8f, 0xf0, 0x17, 0x38, 0xf1, 0x2b, 0x90, 0x67, 0xc6, 0xbb, 0xde, 0xf5, 0x7a,
0xed, 0x15, 0x42, 0xbd, 0xd9, 0x9e, 0x67, 0xe6, 0x7d, 0xe6, 0x79, 0xdf, 0x99, 0xe7, 0xdd, 0x85,
0x1b, 0x2d, 0x4a, 0x5b, 0x0e, 0x31, 0x1a, 0x0e, 0x0d, 0x9b, 0x86, 0x15, 0x32, 0xda, 0x76, 0x8c,
0x93, 0x8a, 0x11, 0x10, 0xff, 0xc4, 0x6e, 0x10, 0xdd, 0xf3, 0x29, 0xa3, 0x78, 0x4e, 0xa0, 0x74,
0x8e, 0xd2, 0x05, 0x4a, 0x3f, 0xa9, 0x28, 0x57, 0xe4, 0x6c, 0xcb, 0xb3, 0x0d, 0xcb, 0x75, 0x29, | 0xd3, 0x62, 0x56, 0x40, 0xe2, 0x65, 0xaf, 0x65, 0xa0, 0x6c, 0x2a, 0x01, 0x6a, 0x06, 0xa0, 0x4d,
0x9b, 0xc4, 0x91, 0x98, 0x9b, 0xc3, 0x30, 0x75, 0x72, 0x62, 0x39, 0x21, 0x67, 0x28, 0xe1, 0x0b,
0x19, 0x70, 0xea, 0x11, 0xbf, 0x47, 0x8c, 0xeb, 0x12, 0xe8, 0x50, 0xb7, 0xe5, 0x87, 0xae, 0x6b,
0xbb, 0xad, 0x34, 0x68, 0x5e, 0x82, 0xf8, 0xdb, 0xc3, 0xf0, 0xc8, 0x38, 0xb2, 0x89, 0xd3, 0xac,
0xb7, 0xad, 0xe0, 0x58, 0x20, 0x54, 0x1b, 0x66, 0xb7, 0x7c, 0x62, 0x31, 0x72, 0x57, 0x6c, 0xbd,
0x46, 0x1e, 0x87, 0x24, 0x60, 0x78, 0x0e, 0xc6, 0x3d, 0xcb, 0x27, 0x2e, 0x2b, 0xa3, 0x79, 0xb4,
0x78, 0xb1, 0x26, 0xdf, 0xf0, 0x2a, 0xfc, 0x4f, 0x8a, 0x54, 0x1e, 0x9b, 0x47, 0x8b, 0xa5, 0xea,
0x35, 0x7d, 0x70, 0x2e, 0xf5, 0x78, 0xc1, 0x18, 0xaf, 0x2e, 0xc0, 0xcc, 0x0e, 0x61, 0x7d, 0x71,
0x30, 0x9c, 0x77, 0xad, 0x36, 0x91, 0x51, 0xf8, 0xb3, 0xfa, 0x35, 0x82, 0x77, 0xee, 0xdb, 0x41,
0x0c, 0x0d, 0xf2, 0x38, 0xcd, 0xc1, 0xf8, 0x91, 0xed, 0x30, 0xe2, 0x97, 0xcf, 0x89, 0xef, 0xe2,
0x0d, 0x5f, 0x86, 0x8b, 0x9e, 0xd5, 0x22, 0xf5, 0xc0, 0x7e, 0x42, 0xca, 0xe7, 0xe7, 0xd1, 0xe2,
0x85, 0xda, 0x44, 0xf4, 0xe1, 0x13, 0xfb, 0x09, 0xc1, 0x57, 0x01, 0xf8, 0x20, 0xa3, 0xc7, 0xc4,
0x2d, 0x8f, 0xf3, 0x89, 0x1c, 0xfe, 0x69, 0xf4, 0x41, 0x7d, 0x0a, 0xb3, 0xbd, 0x14, 0x02, 0x8f,
0xba, 0x01, 0xc1, 0x6b, 0x30, 0x21, 0xf7, 0x13, 0x94, 0xd1, 0xfc, 0xb9, 0x22, 0x02, 0x74, 0x26,
0xe0, 0x0f, 0x60, 0xda, 0x25, 0x67, 0xac, 0x9e, 0x08, 0x3c, 0xc6, 0x03, 0x4f, 0x45, 0x9f, 0xf7,
0x3b, 0xc1, 0x5f, 0x20, 0x98, 0xfd, 0xcc, 0x6b, 0xa6, 0xb3, 0x92, 0x50, 0x1f, 0x8d, 0xa6, 0x3e,
0x5e, 0x83, 0x52, 0xc8, 0x97, 0xe4, 0xd9, 0x97, 0xc9, 0x53, 0xe2, 0xe9, 0x71, 0x81, 0xe8, 0xf7,
0xa2, 0x02, 0xd9, 0xb3, 0x82, 0xe3, 0x1a, 0x08, 0x78, 0xf4, 0xac, 0x6a, 0x30, 0x7b, 0x97, 0x38,
0x24, 0xc5, 0x67, 0x50, 0xf6, 0x28, 0xcc, 0xec, 0xb6, 0x3d, 0xea, 0x73, 0xed, 0x86, 0x00, 0xf1,
0x3d, 0x98, 0xb4, 0x5d, 0x2f, 0x64, 0xf5, 0x06, 0x75, 0x8f, 0xec, 0x16, 0x4f, 0x5e, 0xa9, 0x7a,
0x3d, 0x6b, 0x47, 0xbb, 0x11, 0x76, 0x8b, 0x43, 0x6b, 0x25, 0xbb, 0xfb, 0xa2, 0xfa, 0x30, 0xb3,
0x7d, 0x56, 0x24, 0xe0, 0x2e, 0x4c, 0xd1, 0x90, 0xa5, 0x22, 0xde, 0xc8, 0x8a, 0xf8, 0x80, 0x83,
0x65, 0xc8, 0x49, 0x9a, 0x78, 0x53, 0x2d, 0xc0, 0xe2, 0xd8, 0xec, 0x45, 0xc7, 0x38, 0xaf, 0x40,
0x97, 0xe1, 0x02, 0x3f, 0xee, 0xbc, 0x08, 0x4b, 0xd5, 0xab, 0x59, 0x01, 0xc5, 0x62, 0x02, 0xab,
0xbe, 0x0f, 0xd3, 0x3b, 0x84, 0xf5, 0xac, 0x3f, 0x48, 0xee, 0xaf, 0x60, 0x26, 0x2a, 0x54, 0x8e,
0x7b, 0x2b, 0x27, 0xe5, 0x31, 0xe0, 0x24, 0x01, 0x79, 0x4e, 0x3a, 0x5b, 0x16, 0x87, 0xa4, 0xd0,
0x96, 0x0b, 0x9f, 0x8f, 0x45, 0xc0, 0xa2, 0x1c, 0x73, 0xd5, 0x79, 0x8e, 0x00, 0x8b, 0x93, 0xd4,
0x03, 0x4d, 0xb0, 0x2b, 0x9c, 0x90, 0x7f, 0x77, 0x82, 0x0c, 0xb8, 0x14, 0x67, 0x73, 0xbb, 0x73,
0xe7, 0x0f, 0x63, 0xfe, 0x1d, 0x82, 0xcb, 0x1d, 0x5d, 0xbb, 0x53, 0xde, 0x4a, 0x8a, 0x5f, 0x21,
0xb8, 0x32, 0x98, 0x8b, 0xcc, 0x76, 0x0d, 0xfe, 0xdf, 0xef, 0x67, 0x32, 0xf1, 0x0b, 0x43, 0xa5,
0x4d, 0x48, 0x31, 0xdd, 0xee, 0xfd, 0x50, 0xb4, 0x18, 0xaa, 0xdf, 0x62, 0x18, 0xdf, 0x08, 0x19,
0xdd, 0x73, 0xf0, 0xcf, 0x08, 0xa6, 0x7a, 0xdc, 0x0c, 0x2f, 0x65, 0x85, 0x1f, 0x64, 0x7a, 0x4a,
0xa7, 0x0e, 0x12, 0xa6, 0xaa, 0x3f, 0x88, 0x4d, 0x55, 0x5d, 0xff, 0xe6, 0xcf, 0xbf, 0x5e, 0x8e,
0xdd, 0x52, 0x97, 0x22, 0x43, 0x7e, 0x2a, 0xf4, 0x5e, 0xf7, 0x7c, 0xfa, 0x05, 0x69, 0xb0, 0xc0,
0xd0, 0x0c, 0x87, 0x36, 0x84, 0x2e, 0x86, 0xf6, 0x2c, 0x6e, 0x25, 0x02, 0xb3, 0x73, 0x03, 0xbf,
0x40, 0x00, 0x5d, 0x03, 0xc4, 0x1f, 0x66, 0x51, 0x4b, 0x99, 0xa4, 0x92, 0x77, 0xcb, 0xab, 0x2b,
0x9c, 0x99, 0x8e, 0x05, 0xb3, 0xa8, 0x7e, 0x32, 0x78, 0x75, 0x68, 0x19, 0xda, 0x33, 0xfc, 0x0b,
0x82, 0xc9, 0xa4, 0xc9, 0xe1, 0x8f, 0xb2, 0xe2, 0x0c, 0x70, 0x63, 0x65, 0xa9, 0x18, 0x58, 0x54,
0x48, 0x1f, 0xc3, 0x82, 0xda, 0xe1, 0x5f, 0x11, 0x4c, 0xf5, 0x18, 0x61, 0x76, 0x42, 0x07, 0xf9,
0x65, 0xbe, 0x70, 0xdb, 0x9c, 0xd6, 0x9d, 0xea, 0x0a, 0xa7, 0x15, 0xb7, 0x7f, 0x05, 0x05, 0xec,
0xa6, 0xf6, 0x07, 0x04, 0x53, 0x3d, 0x06, 0x99, 0xcd, 0x73, 0x90, 0x8f, 0xe6, 0x15, 0x9e, 0x14,
0x4f, 0x1b, 0x2d, 0xbd, 0xaf, 0x10, 0x40, 0xd7, 0x89, 0xb3, 0xeb, 0x2d, 0xe5, 0xd6, 0x79, 0x74,
0x36, 0x39, 0x9d, 0xdb, 0xea, 0xad, 0x51, 0xe8, 0x98, 0x76, 0x27, 0x8c, 0x89, 0x34, 0x4e, 0xae,
0xeb, 0xda, 0xd9, 0xe4, 0x52, 0xce, 0xfe, 0xdf, 0x90, 0x23, 0x67, 0x49, 0x72, 0x2f, 0x11, 0x94,
0x12, 0xf6, 0x8e, 0xb5, 0xe1, 0xb7, 0x48, 0xd2, 0x5a, 0xf2, 0xe8, 0xad, 0x72, 0x7a, 0xcb, 0xaa,
0x56, 0xe4, 0x1c, 0xf0, 0x2b, 0x31, 0x30, 0xa5, 0xff, 0x3c, 0x47, 0x30, 0x11, 0x7b, 0x08, 0x5e,
0x18, 0x72, 0x7b, 0x0c, 0xe6, 0x33, 0xf0, 0x02, 0x56, 0xab, 0x9c, 0xcf, 0x12, 0xd6, 0x72, 0xe5,
0x12, 0x64, 0xa2, 0xc2, 0xfa, 0x11, 0x01, 0x74, 0x2d, 0x3f, 0x3b, 0x77, 0xa9, 0xbe, 0x44, 0xd1,
0x8a, 0x40, 0xe5, 0x8d, 0xd1, 0xcb, 0xac, 0x90, 0x52, 0xf8, 0x7b, 0x04, 0xa5, 0x44, 0x67, 0x90,
0x9d, 0xb8, 0x74, 0xfb, 0x90, 0x97, 0x38, 0x49, 0x47, 0x1b, 0x45, 0xa8, 0x9f, 0x10, 0x94, 0x12,
0xdd, 0x47, 0x36, 0x9d, 0x74, 0x8b, 0x92, 0x97, 0xb7, 0x3b, 0x9c, 0xce, 0x6a, 0xf5, 0x63, 0x4e,
0x47, 0xfc, 0xe0, 0x2c, 0x44, 0x2a, 0xae, 0xa6, 0xd7, 0x08, 0x70, 0xba, 0x23, 0xc1, 0x95, 0xbc,
0xba, 0x4a, 0x75, 0x2f, 0x4a, 0x51, 0x8b, 0x8f, 0x2f, 0x5b, 0xbc, 0x5e, 0x58, 0x42, 0xa3, 0xaf,
0x29, 0xe0, 0xaa, 0xfe, 0x8e, 0xc4, 0x6f, 0xb3, 0xfe, 0x6e, 0x04, 0x2f, 0xe7, 0x56, 0x57, 0xba,
0x8f, 0x52, 0x56, 0x46, 0x9b, 0x24, 0x8b, 0xb3, 0x77, 0x2b, 0x43, 0x8b, 0xb3, 0x2b, 0x7d, 0x6a,
0x37, 0xca, 0xee, 0x9b, 0x8d, 0x77, 0x65, 0x44, 0xc1, 0xc2, 0xf2, 0xec, 0x40, 0x6f, 0xd0, 0xf6,
0x1f, 0x1b, 0xfa, 0x23, 0xc6, 0xbc, 0xc0, 0x34, 0x8c, 0xd3, 0xd3, 0xd3, 0xbe, 0x41, 0xc3, 0x0a,
0xd9, 0x23, 0xf1, 0x0f, 0xc1, 0x4d, 0xcf, 0xb1, 0xd8, 0x11, 0xf5, 0xdb, 0x9b, 0xaf, 0x11, 0x28,
0x0d, 0xda, 0xce, 0xd8, 0xcd, 0x66, 0x49, 0xb4, 0x48, 0xfb, 0x51, 0x97, 0xba, 0x8f, 0x3e, 0xbf,
0x2d, 0x61, 0x2d, 0xea, 0x58, 0x6e, 0x4b, 0xa7, 0x7e, 0xcb, 0x68, 0x11, 0x97, 0xf7, 0xb0, 0x46,
0x37, 0x58, 0xff, 0xbf, 0x10, 0x6b, 0xe2, 0xe9, 0xb7, 0xb1, 0xb9, 0x1d, 0x31, 0x7d, 0x8b, 0x47,
0xe1, 0x4b, 0xdf, 0xd7, 0x0f, 0x2a, 0x6f, 0xe2, 0x81, 0x43, 0x3e, 0x70, 0x28, 0x06, 0x0e, 0x0f,
0x2a, 0x7f, 0x8f, 0x5d, 0x12, 0x03, 0xa6, 0xc9, 0x47, 0x4c, 0x53, 0x0c, 0x99, 0xe6, 0x41, 0xe5,
0xe1, 0x38, 0x0f, 0xbb, 0xfc, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xea, 0x29, 0x44, 0x8d, 0x3b,
0x12, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// AutoMlClient is the client API for AutoMl service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type AutoMlClient interface {
// Creates a dataset.
CreateDataset(ctx context.Context, in *CreateDatasetRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Gets a dataset.
GetDataset(ctx context.Context, in *GetDatasetRequest, opts ...grpc.CallOption) (*Dataset, error)
// Lists datasets in a project.
ListDatasets(ctx context.Context, in *ListDatasetsRequest, opts ...grpc.CallOption) (*ListDatasetsResponse, error)
// Updates a dataset.
UpdateDataset(ctx context.Context, in *UpdateDatasetRequest, opts ...grpc.CallOption) (*Dataset, error)
// Deletes a dataset and all of its contents.
// Returns empty response in the
// [response][google.longrunning.Operation.response] field when it completes,
// and `delete_details` in the
// [metadata][google.longrunning.Operation.metadata] field.
DeleteDataset(ctx context.Context, in *DeleteDatasetRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Imports data into a dataset.
ImportData(ctx context.Context, in *ImportDataRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Exports dataset's data to the provided output location.
// Returns an empty response in the
// [response][google.longrunning.Operation.response] field when it completes.
ExportData(ctx context.Context, in *ExportDataRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Creates a model.
// Returns a Model in the [response][google.longrunning.Operation.response]
// field when it completes.
// When you create a model, several model evaluations are created for it:
// a global evaluation, and one evaluation for each annotation spec.
CreateModel(ctx context.Context, in *CreateModelRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Gets a model.
GetModel(ctx context.Context, in *GetModelRequest, opts ...grpc.CallOption) (*Model, error)
// Lists models.
ListModels(ctx context.Context, in *ListModelsRequest, opts ...grpc.CallOption) (*ListModelsResponse, error)
// Deletes a model.
// Returns `google.protobuf.Empty` in the
// [response][google.longrunning.Operation.response] field when it completes,
// and `delete_details` in the
// [metadata][google.longrunning.Operation.metadata] field.
DeleteModel(ctx context.Context, in *DeleteModelRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
// Updates a model.
UpdateModel(ctx context.Context, in *UpdateModelRequest, opts ...grpc.CallOption) (*Model, error)
// Gets a model evaluation.
GetModelEvaluation(ctx context.Context, in *GetModelEvaluationRequest, opts ...grpc.CallOption) (*ModelEvaluation, error)
// Lists model evaluations.
ListModelEvaluations(ctx context.Context, in *ListModelEvaluationsRequest, opts ...grpc.CallOption) (*ListModelEvaluationsResponse, error)
}
type autoMlClient struct {
cc *grpc.ClientConn
}
func NewAutoMlClient(cc *grpc.ClientConn) AutoMlClient {
return &autoMlClient{cc}
}
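// Illustrative usage sketch (not part of the generated code); a production
// client needs TLS credentials and auth, and the resource name below is
// hypothetical:
//
//	conn, err := grpc.Dial("automl.googleapis.com:443", grpc.WithInsecure())
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer conn.Close()
//	client := NewAutoMlClient(conn)
//	ds, err := client.GetDataset(context.Background(), &GetDatasetRequest{
//		Name: "projects/my-project/locations/us-central1/datasets/d123",
//	})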
func (c *autoMlClient) CreateDataset(ctx context.Context, in *CreateDatasetRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/CreateDataset", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) GetDataset(ctx context.Context, in *GetDatasetRequest, opts ...grpc.CallOption) (*Dataset, error) {
out := new(Dataset)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/GetDataset", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) ListDatasets(ctx context.Context, in *ListDatasetsRequest, opts ...grpc.CallOption) (*ListDatasetsResponse, error) {
out := new(ListDatasetsResponse)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/ListDatasets", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) UpdateDataset(ctx context.Context, in *UpdateDatasetRequest, opts ...grpc.CallOption) (*Dataset, error) {
out := new(Dataset)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/UpdateDataset", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) DeleteDataset(ctx context.Context, in *DeleteDatasetRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/DeleteDataset", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) ImportData(ctx context.Context, in *ImportDataRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/ImportData", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) ExportData(ctx context.Context, in *ExportDataRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/ExportData", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) CreateModel(ctx context.Context, in *CreateModelRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/CreateModel", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) GetModel(ctx context.Context, in *GetModelRequest, opts ...grpc.CallOption) (*Model, error) {
out := new(Model)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/GetModel", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) ListModels(ctx context.Context, in *ListModelsRequest, opts ...grpc.CallOption) (*ListModelsResponse, error) {
out := new(ListModelsResponse)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/ListModels", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) DeleteModel(ctx context.Context, in *DeleteModelRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/DeleteModel", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) UpdateModel(ctx context.Context, in *UpdateModelRequest, opts ...grpc.CallOption) (*Model, error) {
out := new(Model)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/UpdateModel", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) GetModelEvaluation(ctx context.Context, in *GetModelEvaluationRequest, opts ...grpc.CallOption) (*ModelEvaluation, error) {
out := new(ModelEvaluation)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/GetModelEvaluation", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *autoMlClient) ListModelEvaluations(ctx context.Context, in *ListModelEvaluationsRequest, opts ...grpc.CallOption) (*ListModelEvaluationsResponse, error) {
out := new(ListModelEvaluationsResponse)
err := c.cc.Invoke(ctx, "/google.cloud.automl.v1.AutoMl/ListModelEvaluations", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// AutoMlServer is the server API for AutoMl service.
type AutoMlServer interface {
// Creates a dataset.
CreateDataset(context.Context, *CreateDatasetRequest) (*longrunning.Operation, error)
// Gets a dataset.
GetDataset(context.Context, *GetDatasetRequest) (*Dataset, error)
// Lists datasets in a project.
ListDatasets(context.Context, *ListDatasetsRequest) (*ListDatasetsResponse, error)
// Updates a dataset.
UpdateDataset(context.Context, *UpdateDatasetRequest) (*Dataset, error)
// Deletes a dataset and all of its contents.
// Returns empty response in the
// [response][google.longrunning.Operation.response] field when it completes,
// and `delete_details` in the
// [metadata][google.longrunning.Operation.metadata] field.
DeleteDataset(context.Context, *DeleteDatasetRequest) (*longrunning.Operation, error)
// Imports data into a dataset.
ImportData(context.Context, *ImportDataRequest) (*longrunning.Operation, error)
// Exports dataset's data to the provided output location.
// Returns an empty response in the
// [response][google.longrunning.Operation.response] field when it completes.
ExportData(context.Context, *ExportDataRequest) (*longrunning.Operation, error)
// Creates a model.
// Returns a Model in the [response][google.longrunning.Operation.response]
// field when it completes.
// When you create a model, several model evaluations are created for it:
// a global evaluation, and one evaluation for each annotation spec.
CreateModel(context.Context, *CreateModelRequest) (*longrunning.Operation, error)
// Gets a model.
GetModel(context.Context, *GetModelRequest) (*Model, error)
// Lists models.
ListModels(context.Context, *ListModelsRequest) (*ListModelsResponse, error)
// Deletes a model.
// Returns `google.protobuf.Empty` in the
// [response][google.longrunning.Operation.response] field when it completes,
// and `delete_details` in the
// [metadata][google.longrunning.Operation.metadata] field.
DeleteModel(context.Context, *DeleteModelRequest) (*longrunning.Operation, error)
// Updates a model.
UpdateModel(context.Context, *UpdateModelRequest) (*Model, error)
// Gets a model evaluation.
GetModelEvaluation(context.Context, *GetModelEvaluationRequest) (*ModelEvaluation, error)
// Lists model evaluations.
ListModelEvaluations(context.Context, *ListModelEvaluationsRequest) (*ListModelEvaluationsResponse, error)
}
// UnimplementedAutoMlServer can be embedded to have forward compatible implementations.
type UnimplementedAutoMlServer struct {
}
func (*UnimplementedAutoMlServer) CreateDataset(ctx context.Context, req *CreateDatasetRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateDataset not implemented")
}
func (*UnimplementedAutoMlServer) GetDataset(ctx context.Context, req *GetDatasetRequest) (*Dataset, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetDataset not implemented")
}
func (*UnimplementedAutoMlServer) ListDatasets(ctx context.Context, req *ListDatasetsRequest) (*ListDatasetsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListDatasets not implemented")
}
func (*UnimplementedAutoMlServer) UpdateDataset(ctx context.Context, req *UpdateDatasetRequest) (*Dataset, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateDataset not implemented")
}
func (*UnimplementedAutoMlServer) DeleteDataset(ctx context.Context, req *DeleteDatasetRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteDataset not implemented")
}
func (*UnimplementedAutoMlServer) ImportData(ctx context.Context, req *ImportDataRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method ImportData not implemented")
}
func (*UnimplementedAutoMlServer) ExportData(ctx context.Context, req *ExportDataRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method ExportData not implemented")
}
func (*UnimplementedAutoMlServer) CreateModel(ctx context.Context, req *CreateModelRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateModel not implemented")
}
func (*UnimplementedAutoMlServer) GetModel(ctx context.Context, req *GetModelRequest) (*Model, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetModel not implemented")
}
func (*UnimplementedAutoMlServer) ListModels(ctx context.Context, req *ListModelsRequest) (*ListModelsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListModels not implemented")
}
func (*UnimplementedAutoMlServer) DeleteModel(ctx context.Context, req *DeleteModelRequest) (*longrunning.Operation, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteModel not implemented")
}
func (*UnimplementedAutoMlServer) UpdateModel(ctx context.Context, req *UpdateModelRequest) (*Model, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateModel not implemented")
}
func (*UnimplementedAutoMlServer) GetModelEvaluation(ctx context.Context, req *GetModelEvaluationRequest) (*ModelEvaluation, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetModelEvaluation not implemented")
}
func (*UnimplementedAutoMlServer) ListModelEvaluations(ctx context.Context, req *ListModelEvaluationsRequest) (*ListModelEvaluationsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListModelEvaluations not implemented")
}
func RegisterAutoMlServer(s *grpc.Server, srv AutoMlServer) {
s.RegisterService(&_AutoMl_serviceDesc, srv)
}
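// Illustrative wiring (not part of the generated code): embedding
// UnimplementedAutoMlServer gives forward-compatible defaults for any
// methods a concrete server does not override:
//
//	type myAutoMlServer struct{ UnimplementedAutoMlServer }
//
//	srv := grpc.NewServer()
//	RegisterAutoMlServer(srv, &myAutoMlServer{})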
func _AutoMl_CreateDataset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateDatasetRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).CreateDataset(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/CreateDataset",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).CreateDataset(ctx, req.(*CreateDatasetRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_GetDataset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetDatasetRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).GetDataset(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/GetDataset",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).GetDataset(ctx, req.(*GetDatasetRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_ListDatasets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListDatasetsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).ListDatasets(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/ListDatasets",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).ListDatasets(ctx, req.(*ListDatasetsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_UpdateDataset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateDatasetRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).UpdateDataset(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/UpdateDataset",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).UpdateDataset(ctx, req.(*UpdateDatasetRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_DeleteDataset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteDatasetRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).DeleteDataset(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/DeleteDataset",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).DeleteDataset(ctx, req.(*DeleteDatasetRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_ImportData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ImportDataRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).ImportData(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/ImportData",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).ImportData(ctx, req.(*ImportDataRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_ExportData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExportDataRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).ExportData(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/ExportData",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).ExportData(ctx, req.(*ExportDataRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_CreateModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateModelRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).CreateModel(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/CreateModel",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).CreateModel(ctx, req.(*CreateModelRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_GetModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetModelRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).GetModel(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/GetModel",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).GetModel(ctx, req.(*GetModelRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_ListModels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListModelsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).ListModels(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/ListModels",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).ListModels(ctx, req.(*ListModelsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_DeleteModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteModelRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).DeleteModel(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/DeleteModel",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).DeleteModel(ctx, req.(*DeleteModelRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_UpdateModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateModelRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).UpdateModel(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/UpdateModel",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).UpdateModel(ctx, req.(*UpdateModelRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_GetModelEvaluation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetModelEvaluationRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).GetModelEvaluation(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/GetModelEvaluation",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).GetModelEvaluation(ctx, req.(*GetModelEvaluationRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AutoMl_ListModelEvaluations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListModelEvaluationsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AutoMlServer).ListModelEvaluations(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.automl.v1.AutoMl/ListModelEvaluations",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AutoMlServer).ListModelEvaluations(ctx, req.(*ListModelEvaluationsRequest))
}
return interceptor(ctx, in, info, handler)
}
var _AutoMl_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.automl.v1.AutoMl",
HandlerType: (*AutoMlServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateDataset",
Handler: _AutoMl_CreateDataset_Handler,
},
{
MethodName: "GetDataset",
Handler: _AutoMl_GetDataset_Handler,
},
{
MethodName: "ListDatasets",
Handler: _AutoMl_ListDatasets_Handler,
},
{
MethodName: "UpdateDataset",
Handler: _AutoMl_UpdateDataset_Handler,
},
{
MethodName: "DeleteDataset",
Handler: _AutoMl_DeleteDataset_Handler,
},
{
MethodName: "ImportData",
Handler: _AutoMl_ImportData_Handler,
},
{
MethodName: "ExportData",
Handler: _AutoMl_ExportData_Handler,
},
{
MethodName: "CreateModel",
Handler: _AutoMl_CreateModel_Handler,
},
{
MethodName: "GetModel",
Handler: _AutoMl_GetModel_Handler,
},
{
MethodName: "ListModels",
Handler: _AutoMl_ListModels_Handler,
},
{
MethodName: "DeleteModel",
Handler: _AutoMl_DeleteModel_Handler,
},
{
MethodName: "UpdateModel",
Handler: _AutoMl_UpdateModel_Handler,
},
{
MethodName: "GetModelEvaluation",
Handler: _AutoMl_GetModelEvaluation_Handler,
},
{
MethodName: "ListModelEvaluations",
Handler: _AutoMl_ListModelEvaluations_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/cloud/automl/v1/service.proto",
} | 0xb3, 0x98, 0x4d, 0xdd, 0x40, 0xcc, 0x52, 0xde, 0x4b, 0x8c, 0x36, 0x1c, 0x9b, 0xb8, 0x4c, 0x0e,
0x18, 0x19, 0x41, 0xbb, 0x4b, 0xd4, 0x3d, 0xeb, 0x4b, 0x87, 0x5a, 0x4d, 0x39, 0x21, 0x8b, 0x65, |
testBasics.py | import unittest
from sampleproject_tests import mayaTest
from mayatdd.mayatest import insideMaya
if insideMaya:
from maya import cmds
@mayaTest
class Test(unittest.TestCase):
def | (self):
'''
do something with maya.cmds to prove we're actually running this test in Maya.
'''
print "running in maya!"
cmds.sphere()
| testMinimal |
model.py | """YOLO_v3 Model Defined in Keras."""
from functools import wraps
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
import keras
from keras import backend as K
from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from ..yolo3.utils import compose
@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
"""Wrapper to set Darknet parameters for Convolution2D."""
darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
darknet_conv_kwargs.update(kwargs)
return Conv2D(*args, **darknet_conv_kwargs)
def DarknetConv2D_BN_Leaky(*args, **kwargs):
"""Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetConv2D(*args, **no_bias_kwargs),
BatchNormalization(),
LeakyReLU(alpha=0.1))
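# Note: `compose` (imported from ..yolo3.utils) is assumed to chain callables
# left to right, i.e. compose(f, g)(x) == g(f(x)), so the block above applies
# the convolution first, then BatchNormalization, then LeakyReLU.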
def resblock_body(x, num_filters, num_blocks):
'''A series of resblocks starting with a downsampling Convolution2D'''
# Darknet uses left and top padding instead of 'same' mode
x = ZeroPadding2D(((1,0),(1,0)))(x)
x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x)
for i in range(num_blocks):
y = compose(
DarknetConv2D_BN_Leaky(num_filters//2, (1,1)),
DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x)
x = Add()([x,y])
return x
def darknet_body(x):
'''Darknet body having 52 Convolution2D layers'''
x = DarknetConv2D_BN_Leaky(32, (3,3))(x)
x = resblock_body(x, 64, 1)
x = resblock_body(x, 128, 2)
x = resblock_body(x, 256, 8)
x = resblock_body(x, 512, 8)
x = resblock_body(x, 1024, 4)
return x
def make_last_layers(x, num_filters, out_filters):
|
def yolo_body(inputs, num_anchors, num_classes):
"""Create YOLO_V3 model CNN body in Keras."""
darknet = Model(inputs, darknet_body(inputs))
x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))
x = compose(
DarknetConv2D_BN_Leaky(256, (1,1)),
UpSampling2D(2))(x)
x = Concatenate()([x,darknet.layers[152].output])
x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))
x = compose(
DarknetConv2D_BN_Leaky(128, (1,1)),
UpSampling2D(2))(x)
x = Concatenate()([x,darknet.layers[92].output])
x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))
return Model(inputs, [y1,y2,y3])
def tiny_yolo_body(inputs, num_anchors, num_classes):
'''Create Tiny YOLO_v3 model CNN body in Keras.'''
x1 = compose(
DarknetConv2D_BN_Leaky(16, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(32, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(64, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(128, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(256, (3,3)))(inputs)
x2 = compose(
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(512, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='same'),
DarknetConv2D_BN_Leaky(1024, (3,3)),
DarknetConv2D_BN_Leaky(256, (1,1)))(x1)
y1 = compose(
DarknetConv2D_BN_Leaky(512, (3,3)),
DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x2)
x2 = compose(
DarknetConv2D_BN_Leaky(128, (1,1)),
UpSampling2D(2))(x2)
y2 = compose(
Concatenate(),
DarknetConv2D_BN_Leaky(256, (3,3)),
DarknetConv2D(num_anchors*(num_classes+5), (1,1)))([x2,x1])
return Model(inputs, [y1,y2])
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
"""Convert final layer features to bounding box parameters."""
num_anchors = len(anchors)
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
grid_shape = K.shape(feats)[1:3] # height, width
grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])
grid = K.cast(grid, K.dtype(feats))
feats = K.reshape(
feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
# Adjust predictions to each spatial grid point and anchor size.
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:])
if calc_loss:
return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
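# Worked example (illustrative): for a 416x416 input, the stride-32 output is
# a 13x13 grid; with 3 anchors and 80 classes, `feats` is reshaped to
# (batch, 13, 13, 3, 85). box_xy and box_wh come out normalized to the input
# image, while box_confidence and box_class_probs are sigmoid activations.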
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
'''Get corrected boxes'''
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
input_shape = K.cast(input_shape, K.dtype(box_yx))
image_shape = K.cast(image_shape, K.dtype(box_yx))
new_shape = K.round(image_shape * K.min(input_shape/image_shape))
offset = (input_shape-new_shape)/2./input_shape
scale = input_shape/new_shape
box_yx = (box_yx - offset) * scale
box_hw *= scale
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = K.concatenate([
box_mins[..., 0:1], # y_min
box_mins[..., 1:2], # x_min
box_maxes[..., 0:1], # y_max
box_maxes[..., 1:2] # x_max
])
# Scale boxes back to original image shape.
boxes *= K.concatenate([image_shape, image_shape])
return boxes
def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
'''Process Conv layer output'''
box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,
anchors, num_classes, input_shape)
boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
boxes = K.reshape(boxes, [-1, 4])
box_scores = box_confidence * box_class_probs
box_scores = K.reshape(box_scores, [-1, num_classes])
return boxes, box_scores
def yolo_eval(yolo_outputs,
anchors,
num_classes,
image_shape,
max_boxes=20,
score_threshold=.6,
iou_threshold=.5):
"""Evaluate YOLO model on given input and return filtered boxes."""
num_layers = len(yolo_outputs)
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting
input_shape = K.shape(yolo_outputs[0])[1:3] * 32
boxes = []
box_scores = []
for l in range(num_layers):
_boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
boxes.append(_boxes)
box_scores.append(_box_scores)
boxes = K.concatenate(boxes, axis=0)
box_scores = K.concatenate(box_scores, axis=0)
mask = box_scores >= score_threshold
max_boxes_tensor = K.constant(max_boxes, dtype='int32')
boxes_ = []
scores_ = []
classes_ = []
for c in range(num_classes):
# TODO: use keras backend instead of tf.
class_boxes = tf.boolean_mask(boxes, mask[:, c])
class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
nms_index = tf.image.non_max_suppression(
class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
class_boxes = K.gather(class_boxes, nms_index)
class_box_scores = K.gather(class_box_scores, nms_index)
classes = K.ones_like(class_box_scores, 'int32') * c
boxes_.append(class_boxes)
scores_.append(class_box_scores)
classes_.append(classes)
boxes_ = K.concatenate(boxes_, axis=0)
scores_ = K.concatenate(scores_, axis=0)
classes_ = K.concatenate(classes_, axis=0)
return boxes_, scores_, classes_
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
'''Preprocess true boxes to training input format
Parameters
----------
true_boxes: array, shape=(m, T, 5)
Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
input_shape: array-like, hw, multiples of 32
anchors: array, shape=(N, 2), wh
num_classes: integer
Returns
-------
y_true: list of array, shape like yolo_outputs, xywh are relative values
'''
assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'
num_layers = len(anchors)//3 # default setting
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array(input_shape, dtype='int32')
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]
true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]
m = true_boxes.shape[0]
grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]
y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),
dtype='float32') for l in range(num_layers)]
# Expand dim to apply broadcasting.
anchors = np.expand_dims(anchors, 0)
anchor_maxes = anchors / 2.
anchor_mins = -anchor_maxes
valid_mask = boxes_wh[..., 0]>0
for b in range(m):
# Discard zero rows.
wh = boxes_wh[b, valid_mask[b]]
if len(wh)==0: continue
# Expand dim to apply broadcasting.
wh = np.expand_dims(wh, -2)
box_maxes = wh / 2.
box_mins = -box_maxes
intersect_mins = np.maximum(box_mins, anchor_mins)
intersect_maxes = np.minimum(box_maxes, anchor_maxes)
intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
# Find best anchor for each true box
best_anchor = np.argmax(iou, axis=-1)
for t, n in enumerate(best_anchor):
for l in range(num_layers):
if n in anchor_mask[l]:
i = np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')
k = anchor_mask[l].index(n)
c = true_boxes[b,t, 4].astype('int32')
y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4]
y_true[l][b, j, i, k, 4] = 1
y_true[l][b, j, i, k, 5+c] = 1
return y_true
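# Minimal usage sketch (illustrative shapes; the standard 9 YOLOv3 anchors):
#   anchors = np.array([[10,13],[16,30],[33,23],[30,61],[62,45],
#                       [59,119],[116,90],[156,198],[373,326]])
#   true_boxes = np.zeros((8, 20, 5), dtype='float32')  # batch of 8, up to 20 boxes
#   y_true = preprocess_true_boxes(true_boxes, (416, 416), anchors, num_classes=80)
#   # -> arrays shaped (8,13,13,3,85), (8,26,26,3,85), (8,52,52,3,85)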
def box_iou(b1, b2):
'''Return iou tensor
Parameters
----------
b1: tensor, shape=(i1,...,iN, 4), xywh
b2: tensor, shape=(j, 4), xywh
Returns
-------
iou: tensor, shape=(i1,...,iN, j)
'''
# Expand dim to apply broadcasting.
b1 = K.expand_dims(b1, -2)
b1_xy = b1[..., :2]
b1_wh = b1[..., 2:4]
b1_wh_half = b1_wh/2.
b1_mins = b1_xy - b1_wh_half
b1_maxes = b1_xy + b1_wh_half
# Expand dim to apply broadcasting.
b2 = K.expand_dims(b2, 0)
b2_xy = b2[..., :2]
b2_wh = b2[..., 2:4]
b2_wh_half = b2_wh/2.
b2_mins = b2_xy - b2_wh_half
b2_maxes = b2_xy + b2_wh_half
intersect_mins = K.maximum(b1_mins, b2_mins)
intersect_maxes = K.minimum(b1_maxes, b2_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b1_area = b1_wh[..., 0] * b1_wh[..., 1]
b2_area = b2_wh[..., 0] * b2_wh[..., 1]
iou = intersect_area / (b1_area + b2_area - intersect_area)
return iou
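# Sanity example (illustrative): identical boxes give IoU 1, e.g. in xywh terms
#   box_iou(K.constant([[0.5, 0.5, 1.0, 1.0]]), K.constant([[0.5, 0.5, 1.0, 1.0]]))
# evaluates to [[1.]].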
def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):
'''Return yolo_loss tensor
Parameters
----------
yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
y_true: list of array, the output of preprocess_true_boxes
anchors: array, shape=(N, 2), wh
num_classes: integer
ignore_thresh: float, the iou threshold whether to ignore object confidence loss
Returns
-------
loss: tensor, shape=(1,)
'''
num_layers = len(anchors)//3 # default setting
yolo_outputs = args[:num_layers]
y_true = args[num_layers:]
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]
loss = 0
m = K.shape(yolo_outputs[0])[0] # batch size, tensor
mf = K.cast(m, K.dtype(yolo_outputs[0]))
for l in range(num_layers):
object_mask = y_true[l][..., 4:5]
true_class_probs = y_true[l][..., 5:]
grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)
pred_box = K.concatenate([pred_xy, pred_wh])
# Darknet raw box to calculate loss.
raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid
raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])
raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf
box_loss_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]
# Find ignore mask, iterate over each of batch.
ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
object_mask_bool = K.cast(object_mask, 'bool')
def loop_body(b, ignore_mask):
true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])
iou = box_iou(pred_box[b], true_box)
best_iou = K.max(iou, axis=-1)
ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))
return b+1, ignore_mask
_, ignore_mask = control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])
ignore_mask = ignore_mask.stack()
ignore_mask = K.expand_dims(ignore_mask, -1)
# K.binary_crossentropy is helpful to avoid exp overflow.
xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)
wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])
confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \
(1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask
class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)
xy_loss = K.sum(xy_loss) / mf
wh_loss = K.sum(wh_loss) / mf
confidence_loss = K.sum(confidence_loss) / mf
class_loss = K.sum(class_loss) / mf
loss += xy_loss + wh_loss + confidence_loss + class_loss
if print_loss:
loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ')
return loss
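# Typical wiring (illustrative, mirroring common keras-yolo3 training scripts;
# `model_body` and `y_true` are assumed to exist):
#   from keras.layers import Lambda
#   model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
#                       arguments={'anchors': anchors, 'num_classes': num_classes,
#                                  'ignore_thresh': 0.5})(model_body.output + y_true)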
| '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''
x = compose(
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
y = compose(
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D(out_filters, (1,1)))(x)
return x, y |
config.py | from __future__ import print_function
import functools
import json
import os
import sys
import warnings
from fnmatch import fnmatch
from os.path import expanduser
from typing import Any
import six
from pathlib2 import Path
from ..utilities.pyhocon import ConfigTree, ConfigFactory
from pyparsing import (
ParseFatalException,
ParseException,
RecursiveGrammarException,
ParseSyntaxException,
)
from six.moves.urllib.parse import urlparse
from .bucket_config import S3BucketConfig
from .defs import (
Environment,
DEFAULT_CONFIG_FOLDER,
LOCAL_CONFIG_PATHS,
ENV_CONFIG_PATHS,
LOCAL_CONFIG_FILES,
LOCAL_CONFIG_FILE_OVERRIDE_VAR,
ENV_CONFIG_PATH_OVERRIDE_VAR,
)
from .defs import is_config_file
from .entry import Entry, NotSet
from .errors import ConfigurationError
from .log import initialize as initialize_log, logger
from .utils import get_options
try:
from typing import Text
except ImportError:
# windows conda-less hack
Text = Any
log = logger(__file__)
class | (Entry):
logger = None
def __init__(self, config, *keys, **kwargs):
# type: (Config, Text, Any) -> None
super(ConfigEntry, self).__init__(*keys, **kwargs)
self.config = config
def _get(self, key):
# type: (Text) -> Any
return self.config.get(key, NotSet)
def error(self, message):
# type: (Text) -> None
log.error(message.capitalize())
class Config(object):
"""
Represents a server configuration.
If watch=True, will watch configuration folders for changes and reload itself.
NOTE: will not watch folders that were created after initialization.
"""
# used in place of None in Config.get as default value because None is a valid value
_MISSING = object()
def __init__(
self,
config_folder=None,
env=None,
verbose=True,
relative_to=None,
app=None,
is_server=False,
**_
):
self._app = app
self._verbose = verbose
self._folder_name = config_folder or DEFAULT_CONFIG_FOLDER
self._roots = []
self._config = ConfigTree()
self._env = env or os.environ.get("TRAINS_ENV", Environment.default)
self.config_paths = set()
self.is_server = is_server
if self._verbose:
print("Config env:%s" % str(self._env))
if not self._env:
raise ValueError(
"Missing environment in either init of environment variable"
)
if self._env not in get_options(Environment):
raise ValueError("Invalid environment %s" % env)
if relative_to is not None:
self.load_relative_to(relative_to)
@property
def root(self):
return self.roots[0] if self.roots else None
@property
def roots(self):
return self._roots
@roots.setter
def roots(self, value):
self._roots = value
@property
def env(self):
return self._env
def logger(self, path=None):
return logger(path)
def load_relative_to(self, *module_paths):
def normalize(p):
return Path(os.path.abspath(str(p))).with_name(self._folder_name)
self.roots = list(map(normalize, module_paths))
self.reload()
def _reload(self):
env = self._env
config = self._config.copy()
if self.is_server:
env_config_paths = ENV_CONFIG_PATHS
else:
env_config_paths = []
env_config_path_override = os.environ.get(ENV_CONFIG_PATH_OVERRIDE_VAR)
if env_config_path_override:
env_config_paths = [expanduser(env_config_path_override)]
# merge configuration from root and other environment config paths
if self.roots or env_config_paths:
config = functools.reduce(
lambda cfg, path: ConfigTree.merge_configs(
cfg,
self._read_recursive_for_env(path, env, verbose=self._verbose),
copy_trees=True,
),
self.roots + env_config_paths,
config,
)
# merge configuration from local configuration paths
if LOCAL_CONFIG_PATHS:
config = functools.reduce(
lambda cfg, path: ConfigTree.merge_configs(
cfg, self._read_recursive(path, verbose=self._verbose), copy_trees=True
),
LOCAL_CONFIG_PATHS,
config,
)
local_config_files = LOCAL_CONFIG_FILES
local_config_override = os.environ.get(LOCAL_CONFIG_FILE_OVERRIDE_VAR)
if local_config_override:
local_config_files = [expanduser(local_config_override)]
# merge configuration from local configuration files
if local_config_files:
config = functools.reduce(
lambda cfg, file_path: ConfigTree.merge_configs(
cfg,
self._read_single_file(file_path, verbose=self._verbose),
copy_trees=True,
),
local_config_files,
config,
)
config["env"] = env
return config
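# Net effect of the merges above (assuming pyhocon-style merge_configs, where
# the second argument wins): later sources override earlier ones, so precedence
# from lowest to highest is root/env config paths, then LOCAL_CONFIG_PATHS,
# then local config files (or the LOCAL_CONFIG_FILE_OVERRIDE_VAR file).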
def replace(self, config):
self._config = config
def reload(self):
self.replace(self._reload())
def initialize_logging(self):
logging_config = self._config.get("logging", None)
if not logging_config:
return False
# handle incomplete file handlers
deleted = []
handlers = logging_config.get("handlers", {})
for name, handler in list(handlers.items()):
cls = handler.get("class", None)
is_file = cls and "FileHandler" in cls
if cls is None or (is_file and "filename" not in handler):
deleted.append(name)
del handlers[name]
elif is_file:
file = Path(handler.get("filename"))
if not file.is_file():
file.parent.mkdir(parents=True, exist_ok=True)
file.touch()
# remove dependency in deleted handlers
root_logger = logging_config.get("root", None)
loggers = list(logging_config.get("loggers", {}).values()) + (
[root_logger] if root_logger else []
)
for logger in loggers:
handlers = logger.get("handlers", None)
if not handlers:
continue
logger["handlers"] = [h for h in handlers if h not in deleted]
extra = None
if self._app:
extra = {"app": self._app}
initialize_log(logging_config, extra=extra)
return True
def __getitem__(self, key):
return self._config[key]
def get(self, key, default=_MISSING):
value = self._config.get(key, default)
# value can only be _MISSING when no default was supplied
if value is self._MISSING:
raise KeyError(
"Unable to find value for key '{}' and default value was not provided.".format(
key
)
)
return value
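# Illustrative behavior (hypothetical key): config.get("api.host") raises
# KeyError when the key is absent, while config.get("api.host", None)
# returns None instead of raising.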
def to_dict(self):
return self._config.as_plain_ordered_dict()
def as_json(self):
return json.dumps(self.to_dict(), indent=2)
def _read_recursive_for_env(self, root_path_str, env, verbose=True):
root_path = Path(root_path_str)
if root_path.exists():
default_config = self._read_recursive(
root_path / Environment.default, verbose=verbose
)
if (root_path / env) != (root_path / Environment.default):
env_config = self._read_recursive(
root_path / env, verbose=verbose
) # None is ok, will return empty config
config = ConfigTree.merge_configs(default_config, env_config, True)
else:
config = default_config
else:
config = ConfigTree()
return config
def _read_recursive(self, conf_root, verbose=True):
conf = ConfigTree()
if not conf_root:
return conf
conf_root = Path(conf_root)
if not conf_root.exists():
if verbose:
print("No config in %s" % str(conf_root))
return conf
if verbose:
print("Loading config from %s" % str(conf_root))
for root, dirs, files in os.walk(str(conf_root)):
rel_dir = str(Path(root).relative_to(conf_root))
if rel_dir == ".":
rel_dir = ""
prefix = rel_dir.replace("/", ".")
for filename in files:
if not is_config_file(filename):
continue
if prefix != "":
key = prefix + "." + Path(filename).stem
else:
key = Path(filename).stem
file_path = str(Path(root) / filename)
conf.put(key, self._read_single_file(file_path, verbose=verbose))
return conf
@staticmethod
def _read_single_file(file_path, verbose=True):
if not file_path or not Path(file_path).is_file():
return ConfigTree()
if verbose:
print("Loading config from file %s" % file_path)
try:
return ConfigFactory.parse_file(file_path)
except ParseSyntaxException as ex:
msg = "Failed parsing {0} ({1.__class__.__name__}): (at char {1.loc}, line:{1.lineno}, col:{1.column})".format(
file_path, ex
)
six.reraise(
ConfigurationError,
ConfigurationError(msg, file_path=file_path),
sys.exc_info()[2],
)
except (ParseException, ParseFatalException, RecursiveGrammarException) as ex:
msg = "Failed parsing {0} ({1.__class__.__name__}): {1}".format(
file_path, ex
)
six.reraise(ConfigurationError, ConfigurationError(msg), sys.exc_info()[2])
except Exception as ex:
print("Failed loading %s: %s" % (file_path, ex))
raise
def get_config_for_bucket(self, base_url, extra_configurations=None):
"""
Get the credentials for an AWS S3 bucket from the config
:param base_url: URL of bucket
:param extra_configurations:
:return: bucket config
:rtype: bucket config
"""
warnings.warn(
"Use backend_config.bucket_config.BucketList.get_config_for_uri",
DeprecationWarning,
)
configs = S3BucketConfig.from_list(self.get("sdk.aws.s3.credentials", []))
if extra_configurations:
configs.extend(extra_configurations)
def find_match(host=None, bucket=None):
if not host and not bucket:
raise ValueError("host or bucket required")
try:
if host:
res = {
config
for config in configs
if (config.host and fnmatch(host, config.host))
and (
not bucket
or not config.bucket
or fnmatch(bucket.lower(), config.bucket.lower())
)
}
else:
res = {
config
for config in configs
if config.bucket
and fnmatch(bucket.lower(), config.bucket.lower())
}
return next(iter(res))
except StopIteration:
pass
parsed = urlparse(base_url)
parts = Path(parsed.path.strip("/")).parts
if parsed.netloc:
# We have a netloc (either an actual hostname or an AWS bucket name).
# First, we'll try with the netloc as host, but if we don't find anything, we'll try without a host and
# with the netloc as the bucket name
match = None
if parts:
# try host/bucket only if path parts contain any element
match = find_match(host=parsed.netloc, bucket=parts[0])
if not match:
# no path parts or no config found for host/bucket, try netloc as bucket
match = find_match(bucket=parsed.netloc)
else:
# No netloc, so we'll simply search by bucket
match = find_match(bucket=parts[0])
if match:
return match
non_aws_s3_host_suffix = ":9000"
if parsed.netloc.endswith(non_aws_s3_host_suffix):
host = parsed.netloc
bucket = parts[0] if parts else None
else:
host = None
bucket = parsed.netloc
return S3BucketConfig(
key=self.get("sdk.aws.s3.key", None),
secret=self.get("sdk.aws.s3.secret", None),
region=self.get("sdk.aws.s3.region", None),
multipart=True,
bucket=bucket,
host=host,
)
| ConfigEntry |
searchresult_string.go |
package algorithms
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[SearchResultFound-0]
_ = x[SearchResultGoLower-1]
_ = x[SearchResultGoHigher-2]
_ = x[SearchResultInvalid-3]
}
const _SearchResult_name = "SearchResultFoundSearchResultGoLowerSearchResultGoHigherSearchResultInvalid"
var _SearchResult_index = [...]uint8{0, 17, 36, 56, 75}
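// Illustrative note (not generated): _SearchResult_name concatenates every
// enum name, and _SearchResult_index stores their byte offsets, so e.g.
// SearchResultGoLower.String() returns _SearchResult_name[17:36], which is
// "SearchResultGoLower".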
func (i SearchResult) String() string {
if i < 0 || i >= SearchResult(len(_SearchResult_index)-1) {
return "SearchResult(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _SearchResult_name[_SearchResult_index[i]:_SearchResult_index[i+1]]
} | // Code generated by "stringer -type=SearchResult"; DO NOT EDIT. |
|
package.py | #!/usr/bin/env python
import os
import shutil
import sys
import subprocess
import cairo  # not used directly here; presumably kept so packaging fails fast if cairo (needed by draw_theory_logo.py) is missing
def main():
version = '0.1.11'
script_location = sys.argv[0]
script_path = os.path.abspath(script_location)
app_path = os.sep.join(script_path.split(os.sep)[:-3])
|
# remove destination dir in case it exists
try:
shutil.rmtree(dest)
except OSError:
pass
shutil.copytree(src,dest)
# draw logo
imgpath = os.path.join(dest,'theory','public','img','theory-logo.png')
logo_exec = os.path.join(app_path,'theory','scripts','draw_theory_logo.py')
args = [logo_exec,version,imgpath]
subprocess.call(args)
os.chdir(app_path)
args = ["tar","jcvf",tar_file,"--exclude-from=%s" % exclude_file,"--exclude-vcs","theory-%s" % version]
subprocess.call(args)
def exclude_check(f):
print "check_exclude: %s" % f
if __name__ == "__main__":
main() | src = os.path.join(app_path,'theory')
dest = os.path.join(app_path,"theory-%s" % version)
tar_file = os.path.join(app_path,"theory-%s.tar.bz2" % version)
exclude_file = os.path.join(src,"tar_exclude") |
test_assets.py | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
import sys
from unittest import TestCase
from datetime import datetime, timedelta
import pickle
import uuid
import warnings
import pandas as pd
from pandas.tseries.tools import normalize_date
from pandas.util.testing import assert_frame_equal
from nose_parameterized import parameterized
from numpy import full
from zipline.assets import Asset, Equity, Future, AssetFinder
from zipline.assets.futures import FutureChain
from zipline.errors import (
SymbolNotFound,
MultipleSymbolsFound,
SidAssignmentError,
RootSymbolNotFound,
)
from zipline.finance.trading import with_environment
from zipline.utils.test_utils import (
all_subindices,
make_rotating_asset_info,
)
def build_lookup_generic_cases():
"""
Generate test cases for AssetFinder test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
frame = pd.DataFrame.from_records(
[
{
'sid': 0,
'file_name': 'duplicated',
'company_name': 'duplicated_0',
'start_date_nano': dupe_0_start.value,
'end_date_nano': dupe_0_end.value,
'exchange': '',
},
{
'sid': 1,
'file_name': 'duplicated',
'company_name': 'duplicated_1',
'start_date_nano': dupe_1_start.value,
'end_date_nano': dupe_1_end.value,
'exchange': '',
},
{
'sid': 2,
'file_name': 'unique',
'company_name': 'unique',
'start_date_nano': unique_start.value,
'end_date_nano': unique_end.value,
'exchange': '',
},
],
)
finder = AssetFinder(metadata=frame)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
cases = [
##
# Scalars
# Asset object
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
# int
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
# Duplicated symbol with resolution date
(finder, 'duplicated', dupe_0_start, dupe_0),
(finder, 'duplicated', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(finder, 'unique', unique_start, unique),
(finder, 'unique', None, unique),
##
# Iterables
# Iterables of Asset objects.
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
# Iterables of ints
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(finder, ('duplicated', 'unique'), dupe_0_start, [dupe_0, unique]),
(finder, ('duplicated', 'unique'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(finder,
('duplicated', 2, 'unique', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
]
return cases
class AssetTestCase(TestCase):
def test_asset_object(self):
self.assertEqual({5061: 'foo'}[Asset(5061)], 'foo')
self.assertEqual(Asset(5061), 5061)
self.assertEqual(5061, Asset(5061))
self.assertEqual(Asset(5061), Asset(5061))
self.assertEqual(int(Asset(5061)), 5061)
self.assertEqual(str(Asset(5061)), 'Asset(5061)')
def test_asset_is_pickleable(self):
# Very wow
s = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
exchange='THE MOON',
)
s_unpickled = pickle.loads(pickle.dumps(s))
attrs_to_check = ['end_date',
'exchange',
'first_traded',
'end_date',
'asset_name',
'start_date',
'sid',
'start_date',
'symbol']
for attr in attrs_to_check:
self.assertEqual(getattr(s, attr), getattr(s_unpickled, attr))
def test_asset_comparisons(self):
s_23 = Asset(23)
s_24 = Asset(24)
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(Asset(3) < Asset(4))
self.assertFalse(Asset(4) < Asset(4))
self.assertFalse(Asset(5) < Asset(4))
def test_le(self):
self.assertTrue(Asset(3) <= Asset(4))
self.assertTrue(Asset(4) <= Asset(4))
self.assertFalse(Asset(5) <= Asset(4))
def test_eq(self):
self.assertFalse(Asset(3) == Asset(4))
self.assertTrue(Asset(4) == Asset(4))
self.assertFalse(Asset(5) == Asset(4))
def test_ge(self):
self.assertFalse(Asset(3) >= Asset(4))
self.assertTrue(Asset(4) >= Asset(4))
self.assertTrue(Asset(5) >= Asset(4))
def test_gt(self):
self.assertFalse(Asset(3) > Asset(4))
self.assertFalse(Asset(4) > Asset(4))
self.assertTrue(Asset(5) > Asset(4))
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(Asset(3) < 'a')
self.assertIsNotNone('a' < Asset(3))
else:
with self.assertRaises(TypeError):
Asset(3) < 'a'
with self.assertRaises(TypeError):
'a' < Asset(3)
class TestFuture(TestCase):
future = Future(
2468,
symbol='OMH15',
root_symbol='OM',
notice_date=pd.Timestamp('2014-01-20', tz='UTC'),
expiration_date=pd.Timestamp('2014-02-20', tz='UTC'),
contract_multiplier=500
)
def test_str(self):
strd = self.future.__str__()
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = self.future.__repr__()
self.assertTrue("Future" in reprd)
self.assertTrue("2468" in reprd)
self.assertTrue("OMH15" in reprd)
self.assertTrue("root_symbol='OM'" in reprd)
self.assertTrue(("notice_date=Timestamp('2014-01-20 00:00:00+0000', "
"tz='UTC')") in reprd)
self.assertTrue("expiration_date=Timestamp('2014-02-20 00:00:00+0000'"
in reprd)
self.assertTrue("contract_multiplier=500" in reprd)
def test_reduce(self):
reduced = self.future.__reduce__()
self.assertEqual(Future, reduced[0])
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
self.assertTrue('root_symbol' in dictd)
self.assertTrue('notice_date' in dictd)
self.assertTrue('expiration_date' in dictd)
self.assertTrue('contract_multiplier' in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
class AssetFinderTestCase(TestCase):
def test_lookup_symbol_fuzzy(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'TEST@%d' % i,
'company_name': "company%d" % i,
'start_date_nano': as_of.value,
'end_date_nano': as_of.value,
'exchange': uuid.uuid4().hex,
}
for i in range(3)
]
)
finder = AssetFinder(frame, fuzzy_char='@')
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
for i in range(2): # we do it twice to test for caching bugs
self.assertIsNone(finder.lookup_symbol('test', as_of))
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of)
)
# Adding an unnecessary fuzzy shouldn't matter.
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of, fuzzy=True)
)
# Shouldn't find this without fuzzy=True.
self.assertIsNone(finder.lookup_symbol('test1', as_of))
# Should find exact match.
self.assertEqual(
asset_1,
finder.lookup_symbol('test1', as_of, fuzzy=True),
)
def test_lookup_symbol_resolve_multiple(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
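# e.g. dates = 2013-01-01, 2013-01-03, ..., so asset i is alive on
# [dates[i], dates[i] + 1 day] and no two assets overlap.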
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'existing',
'company_name': 'existing',
'start_date_nano': date.value,
'end_date_nano': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
finder = AssetFinder(df)
for _ in range(2): # Run checks twice to test for caching bugs.
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol_resolve_multiple('non_existing', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol_resolve_multiple('existing', None)
for i, date in enumerate(dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = finder.lookup_symbol_resolve_multiple(
'existing',
date,
)
self.assertEqual(result.symbol, 'existing')
self.assertEqual(result.sid, i)
@parameterized.expand(
build_lookup_generic_cases()
)
def test_lookup_generic(self, finder, symbols, reference_date, expected):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
results, missing = finder.lookup_generic(symbols, reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records(
[
# Sids that will be found when we do lookups.
{
'sid': 0,
'file_name': 'real',
'company_name': 'real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
{
'sid': 1,
'file_name': 'also_real',
'company_name': 'also_real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose end date is before our query date. We should
# still correctly find it.
{
'sid': 2,
'file_name': 'real_but_old',
'company_name': 'real_but_old',
'start_date_nano': pd.Timestamp('2002-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2003-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose start date is after our query date. We should
# not find it.
{
'sid': 3,
'file_name': 'real_but_in_the_future',
'company_name': 'real_but_in_the_future',
'start_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2020-1-1', tz='UTC'),
'exchange': 'THE FUTURE',
},
]
)
finder = AssetFinder(data)
results, missing = finder.lookup_generic(
['real', 1, 'fake', 'real_but_old', 'real_but_in_the_future'],
pd.Timestamp('2013-02-01', tz='UTC'),
)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'real')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'also_real')
self.assertEqual(results[1].sid, 1)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'fake')
self.assertEqual(missing[1], 'real_but_in_the_future')
def test_insert_metadata(self):
finder = AssetFinder()
finder.insert_metadata(0,
asset_type='equity',
start_date='2014-01-01',
end_date='2015-01-01',
symbol="PLAY",
foo_data="FOO",)
# Test proper insertion
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
self.assertEqual(pd.Timestamp('2015-01-01', tz='UTC'),
equity.end_date)
# Test invalid field
self.assertFalse('foo_data' in finder.metadata_cache[0])
def test_consume_metadata(self):
# Test dict consumption
finder = AssetFinder()
dict_to_consume = {0: {'symbol': 'PLAY'},
1: {'symbol': 'MSFT'}}
finder.consume_metadata(dict_to_consume)
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
finder = AssetFinder()
# Test dataframe consumption
df = pd.DataFrame(columns=['asset_name', 'exchange'], index=[0, 1])
df['asset_name'][0] = "Dave'N'Busters"
df['exchange'][0] = "NASDAQ"
df['asset_name'][1] = "Microsoft"
df['exchange'][1] = "NYSE"
finder.consume_metadata(df)
self.assertEqual('NASDAQ', finder.metadata_cache[0]['exchange'])
self.assertEqual('Microsoft', finder.metadata_cache[1]['asset_name'])
def test_consume_asset_as_identifier(self):
# Build some end dates
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
fut_end = pd.Timestamp('2008-01-01', tz='UTC')
# Build some simple Assets
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
future_asset = Future(200, symbol="TESTFUT", end_date=fut_end)
# Consume the Assets
finder = AssetFinder()
finder.consume_identifiers([equity_asset, future_asset])
# Test equality with newly built Assets
self.assertEqual(equity_asset, finder.retrieve_asset(1))
self.assertEqual(future_asset, finder.retrieve_asset(200))
self.assertEqual(eq_end, finder.retrieve_asset(1).end_date)
self.assertEqual(fut_end, finder.retrieve_asset(200).end_date)
def test_sid_assignment(self):
# This metadata does not contain SIDs
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
today = normalize_date(pd.Timestamp('2015-07-09', tz='UTC'))
# Build a finder that is allowed to assign sids
finder = AssetFinder(metadata=metadata,
allow_sid_assignment=True)
# Verify that Assets were built and different sids were assigned | self.assertEqual('PLAY', play.symbol)
self.assertIsNotNone(play.sid)
self.assertNotEqual(play.sid, msft.sid)
def test_sid_assignment_failure(self):
# This metadata does not contain SIDs
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
# Build a finder that is not allowed to assign sids, asserting failure
with self.assertRaises(SidAssignmentError):
AssetFinder(metadata=metadata, allow_sid_assignment=False)
def test_security_dates_warning(self):
# Build an asset with an end_date
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
# Catch all warnings
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered
warnings.simplefilter("always")
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
# Verify the warning
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category,
DeprecationWarning))
def test_lookup_future_chain(self):
metadata = {
# Notice day is today, so not valid
2: {
'symbol': 'ADN15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-05-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
1: {
'symbol': 'ADV15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-08-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
# Starts trading today, so should be valid.
0: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-05-14', tz='UTC')
},
# Copy of the above future, but starts trading in August,
# so it isn't valid.
3: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-08-01', tz='UTC')
},
}
finder = AssetFinder(metadata=metadata)
dt = pd.Timestamp('2015-05-14', tz='UTC')
last_year = pd.Timestamp('2014-01-01', tz='UTC')
first_day = pd.Timestamp('2015-01-01', tz='UTC')
# Check that we get the expected number of contracts, in the
# right order
ad_contracts = finder.lookup_future_chain('AD', dt, dt)
self.assertEqual(len(ad_contracts), 2)
self.assertEqual(ad_contracts[0].sid, 1)
self.assertEqual(ad_contracts[1].sid, 0)
# Check that we get nothing if our knowledge date is last year
ad_contracts = finder.lookup_future_chain('AD', dt, last_year)
self.assertEqual(len(ad_contracts), 0)
# Check that we get things that start on the knowledge date
ad_contracts = finder.lookup_future_chain('AD', dt, first_day)
self.assertEqual(len(ad_contracts), 1)
def test_map_identifier_index_to_sids(self):
# Build an empty finder and some Assets
dt = pd.Timestamp('2014-01-01', tz='UTC')
finder = AssetFinder()
asset1 = Equity(1, symbol="AAPL")
asset2 = Equity(2, symbol="GOOG")
asset200 = Future(200, symbol="CLK15")
asset201 = Future(201, symbol="CLM15")
# Check for correct mapping and types
pre_map = [asset1, asset2, asset200, asset201]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([1, 2, 200, 201], post_map)
for sid in post_map:
self.assertIsInstance(sid, int)
# Change order and check mapping again
pre_map = [asset201, asset2, asset200, asset1]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([201, 2, 200, 1], post_map)
@with_environment()
def test_compute_lifetimes(self, env=None):
num_assets = 4
trading_day = env.trading_day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
frame = make_rotating_asset_info(
num_assets=num_assets,
first_start=first_start,
frequency=env.trading_day,
periods_between_starts=3,
asset_lifetime=5
)
finder = AssetFinder(frame)
all_dates = pd.date_range(
start=first_start,
end=frame.end_date.max(),
freq=trading_day,
)
for dates in all_subindices(all_dates):
expected_mask = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
for i, date in enumerate(dates):
it = frame[['start_date', 'end_date']].itertuples()
for j, start, end in it:
if start <= date <= end:
expected_mask[i, j] = True
# Build the expected result frame from the mask computed above.
expected_result = pd.DataFrame(
data=expected_mask,
index=dates,
columns=frame.sid.values,
)
actual_result = finder.lifetimes(dates)
assert_frame_equal(actual_result, expected_result)
class TestFutureChain(TestCase):
metadata = {
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC')},
1: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC')},
2: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC')},
3: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC')}
}
asset_finder = AssetFinder(metadata=metadata)
def test_len(self):
""" Test the __len__ method of FutureChain.
"""
# None of the contracts have started yet.
cl = FutureChain(self.asset_finder, lambda: '2005-11-30', 'CL')
self.assertEqual(len(cl), 0)
# Sids 0, 1, & 2 have started, 3 has not yet started.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is still valid the day before its notice date.
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is now invalid, leaving only Sids 1 & 2 valid.
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(len(cl), 2)
# Sid 3 has started, so 1, 2, & 3 are now valid.
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(len(cl), 3)
# All contracts are no longer valid.
cl = FutureChain(self.asset_finder, lambda: '2006-09-20', 'CL')
self.assertEqual(len(cl), 0)
def test_getitem(self):
""" Test the __getitem__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl[0], 0)
self.assertEqual(cl[1], 1)
self.assertEqual(cl[2], 2)
with self.assertRaises(IndexError):
cl[3]
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(cl[0], 0)
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(cl[0], 1)
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(cl[-1], 3)
def test_root_symbols(self):
""" Test that different variations on root symbols are handled
as expected.
"""
# Make sure this successfully gets the chain for CL.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl.root_symbol, 'CL')
# These root symbols don't exist, so RootSymbolNotFound should
# be raised immediately.
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', 'CLZ')
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', '')
def test_repr(self):
""" Test the __repr__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
cl_feb = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL',
as_of_date='2006-02-01')
# The default chain should not include the as of date.
self.assertEqual(repr(cl), "FutureChain(root_symbol='CL')")
# An explicit as of date should show up in the repr.
self.assertEqual(
repr(cl_feb),
("FutureChain(root_symbol='CL', "
"as_of_date='2006-02-01 00:00:00+00:00')")
)
def test_as_of(self):
""" Test the as_of method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that the as_of_date is set correctly to the future
feb = '2006-02-01'
cl_feb = cl.as_of(feb)
self.assertEqual(
cl_feb.as_of_date,
pd.Timestamp(feb, tz='UTC')
)
# Test that the as_of_date is set correctly to the past, with
# args of str, datetime.datetime, and pd.Timestamp.
feb_prev = '2005-02-01'
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = datetime(year=2005, month=2, day=1)
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = pd.Timestamp('2005-02-01')
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
# The chain as of the current dt should always be the same as
# the default chain. Tests date as str, pd.Timestamp, and
# datetime.datetime.
self.assertEqual(cl[0], cl.as_of('2005-12-01')[0])
self.assertEqual(cl[0], cl.as_of(pd.Timestamp('2005-12-01'))[0])
self.assertEqual(
cl[0],
cl.as_of(datetime(year=2005, month=12, day=1))[0]
)
def test_offset(self):
""" Test the offset method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that an offset forward sets as_of_date as expected
self.assertEqual(
cl.offset('3 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=3)
)
# Test that an offset backward sets as_of_date as expected, with
# time delta given as str, datetime.timedelta, and pd.Timedelta.
self.assertEqual(
cl.offset('-1000 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(timedelta(days=-1000)).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(pd.Timedelta('-1000 days')).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
# An offset of zero should give the original chain.
self.assertEqual(cl[0], cl.offset(0)[0])
self.assertEqual(cl[0], cl.offset("0 days")[0])
# A string that doesn't represent a time delta should raise a
# ValueError.
with self.assertRaises(ValueError):
cl.offset("blah") | play = finder.lookup_symbol('PLAY', today)
msft = finder.lookup_symbol('MSFT', today) |
main.py | import asyncio
import ujson
from band import logger, expose
"""
Listen events and write to output
"""
@expose.listener()
async def | (**params):
logger.info('Broadcast', params=params)
| broadcast |
issue_3136_a.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. | trait x {
fn use_x<T>(&self);
}
struct y(());
impl x for y {
fn use_x<T>(&self) {
struct foo { //~ ERROR quux
i: ()
}
fn new_foo<T>(i: ()) -> foo {
foo { i: i }
}
}
} | |
__init__.py | class DigitalSignatureScheme(object):
def get_public_key(self):
|
def sign(self, message):
raise NotImplementedError
def verify(self, message, signature):
raise NotImplementedError
| return self.public_key |
SlotItemsMixin.js | import ContentItemsMixin from "./ContentItemsMixin.js";
import ReactiveElement from "../core/ReactiveElement.js"; // eslint-disable-line no-unused-vars
import SlotContentMixin from "./SlotContentMixin.js";
/**
* Treats the elements assigned to the default slot as list items
*
* This is simply a combination of
* [ContentItemsMixin](ContentItemsMixin) and
* [SlotContentMixin](SlotContentMixin).
*
* @module SlotItemsMixin
* @mixes ContentItemsMixin
* @mixes SlotContentMixin
* @param {Constructor<ReactiveElement>} Base
*/
export default function | (Base) {
return ContentItemsMixin(SlotContentMixin(Base));
}
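// Usage sketch (hypothetical element class):
//   class MyList extends SlotItemsMixin(ReactiveElement) { ... }
// exposes the nodes assigned to MyList's default slot as its content items.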
| SlotItemsMixin |
ActionCard.tsx | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
import React, { ReactNode } from 'react';
import { WidgetContainerProps, WidgetComponent } from '@bfc/extension-client';
import { ActionHeader } from '../ActionHeader';
import { CardTemplate } from './CardTemplate';
import { ActionCardBody } from './ActionCardBody';
export interface ActionCardProps extends WidgetContainerProps {
header?: ReactNode;
body?: ReactNode;
footer?: ReactNode;
hideFooter?: boolean;
}
const safeRender = (input: object | React.ReactNode) => {
if (React.isValidElement(input)) return input;
// null is not a valid React element, but it's safe to return as-is
if (input === null) return null;
if (typeof input === 'object') {
try {
return JSON.stringify(input);
} catch (err) {
// 'input' may contain circular references or values JSON.stringify can't handle.
return '';
}
}
return input;
};
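// e.g. safeRender({ foo: 1 }) returns '{"foo":1}', while safeRender('text')
// and safeRender(<span />) are returned unchanged.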
const renderBody = (rawBody: React.ReactNode, ctx: any) => {
const body = safeRender(rawBody);
if (React.isValidElement(body) && body.type === ActionCardBody) {
return body;
}
return <ActionCardBody {...ctx} body={body} />;
};
export const ActionCard: WidgetComponent<ActionCardProps> = ({
header,
body,
footer,
hideFooter = false,
...widgetContext
}) => { | const footerNode = hideFooter ? null : safeRender(footer);
return (
<CardTemplate {...widgetContext} body={bodyNode} disabled={disabled} footer={footerNode} header={headerNode} />
);
}; | const disabled = widgetContext.data.disabled === true;
const headerNode = safeRender(header) || <ActionHeader {...widgetContext} />;
const bodyNode = renderBody(body, widgetContext); |
reverse_related.py | """
"Rel objects" for related fields.
"Rel objects" (for lack of a better name) carry information about the relation
modeled by a related field and provide some utility functions. They're stored
in the ``remote_field`` attribute of the field.
They also act as reverse fields for the purposes of the Meta API because
they're the closest concept currently available.
"""
from django.core import exceptions
from django.utils.functional import cached_property
from . import BLANK_CHOICE_DASH
from .mixins import FieldCacheMixin
class ForeignObjectRel(FieldCacheMixin):
"""
Used by ForeignObject to store information about the relation.
``_meta.get_fields()`` returns this class to provide access to the field
flags for the reverse relation.
"""
# Field flags
auto_created = True
concrete = False
editable = False
is_relation = True
# Reverse relations are always nullable (Django can't enforce that a
# foreign key on the related model points to this model).
null = True
empty_strings_allowed = False
def __init__(
self,
field,
to,
related_name=None,
related_query_name=None,
limit_choices_to=None,
parent_link=False,
on_delete=None,
):
self.field = field
self.model = to
self.related_name = related_name
self.related_query_name = related_query_name
self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
self.parent_link = parent_link
self.on_delete = on_delete
self.symmetrical = False
self.multiple = True
# Some of the following cached_properties can't be initialized in
# __init__ as the field doesn't have its model yet. Calling these methods
# before field.contribute_to_class() has been called will result in
# AttributeError
@cached_property
def hidden(self):
return self.is_hidden()
@cached_property
def name(self):
return self.field.related_query_name()
@property
def remote_field(self):
return self.field
@property
def target_field(self):
"""
When filtering against this relation, return the field on the remote
model against which the filtering should happen.
"""
target_fields = self.get_path_info()[-1].target_fields
if len(target_fields) > 1:
raise exceptions.FieldError(
"Can't use target_field for multicolumn relations."
)
return target_fields[0]
@cached_property
def related_model(self):
if not self.field.model:
raise AttributeError(
"This property can't be accessed before self.field.contribute_to_class has been called."
)
return self.field.model
@cached_property
def many_to_many(self):
return self.field.many_to_many
@cached_property
def many_to_one(self):
return self.field.one_to_many
@cached_property
def one_to_many(self):
return self.field.many_to_one
@cached_property
def | (self):
return self.field.one_to_one
def get_lookup(self, lookup_name):
return self.field.get_lookup(lookup_name)
def get_internal_type(self):
return self.field.get_internal_type()
@property
def db_type(self):
return self.field.db_type
def __repr__(self):
return "<%s: %s.%s>" % (
type(self).__name__,
self.related_model._meta.app_label,
self.related_model._meta.model_name,
)
def get_choices(
self,
include_blank=True,
blank_choice=BLANK_CHOICE_DASH,
limit_choices_to=None,
ordering=(),
):
"""
Return choices with a default blank choices included, for use
as <select> choices for this field.
Analog of django.db.models.fields.Field.get_choices(), provided
initially for utilization by RelatedFieldListFilter.
"""
limit_choices_to = limit_choices_to or self.limit_choices_to
qs = self.related_model._default_manager.complex_filter(limit_choices_to)
if ordering:
qs = qs.order_by(*ordering)
return (blank_choice if include_blank else []) + [(x.pk, str(x)) for x in qs]
def is_hidden(self):
"""Should the related object be hidden?"""
return bool(self.related_name) and self.related_name[-1] == "+"
def get_joining_columns(self):
return self.field.get_reverse_joining_columns()
def get_extra_restriction(self, where_class, alias, related_alias):
return self.field.get_extra_restriction(where_class, related_alias, alias)
def set_field_name(self):
"""
Set the related field's name, this is not available until later stages
of app loading, so set_field_name is called from
set_attributes_from_rel()
"""
# By default foreign object doesn't relate to any remote field (for
# example custom multicolumn joins currently have no remote field).
self.field_name = None
def get_accessor_name(self, model=None):
# This method encapsulates the logic that decides what name to give an
# accessor descriptor that retrieves related many-to-one or
# many-to-many objects. It uses the lowercased object_name + "_set",
# but this can be overridden with the "related_name" option. Due to
# backwards compatibility ModelForms need to be able to provide an
# alternate model. See BaseInlineFormSet.get_default_prefix().
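# e.g. (hypothetical) a ForeignKey from Book to Author with no related_name
# yields the accessor "book_set" on Author instances.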
opts = model._meta if model else self.related_model._meta
model = model or self.related_model
if self.multiple:
# If this is a symmetrical m2m relation on self, there is no reverse accessor.
if self.symmetrical and model == self.model:
return None
if self.related_name:
return self.related_name
return opts.model_name + ("_set" if self.multiple else "")
def get_path_info(self, filtered_relation=None):
return self.field.get_reverse_path_info(filtered_relation)
def get_cache_name(self):
"""
Return the name of the cache key to use for storing an instance of the
forward model on the reverse model.
"""
return self.get_accessor_name()
class ManyToOneRel(ForeignObjectRel):
"""
Used by the ForeignKey field to store information about the relation.
``_meta.get_fields()`` returns this class to provide access to the field
flags for the reverse relation.
Note: Because we somewhat abuse the Rel objects by using them as reverse
fields we get the funny situation where
``ManyToOneRel.many_to_one == False`` and
``ManyToOneRel.one_to_many == True``. This is unfortunate but the actual
ManyToOneRel class is a private API and there is work underway to turn
reverse relations into actual fields.
"""
def __init__(
self,
field,
to,
field_name,
related_name=None,
related_query_name=None,
limit_choices_to=None,
parent_link=False,
on_delete=None,
):
super().__init__(
field,
to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
self.field_name = field_name
def __getstate__(self):
state = self.__dict__.copy()
state.pop("related_model", None)
return state
def get_related_field(self):
"""
Return the Field in the 'to' object to which this relationship is tied.
"""
field = self.model._meta.get_field(self.field_name)
if not field.concrete:
raise exceptions.FieldDoesNotExist(
"No related field named '%s'" % self.field_name
)
return field
def set_field_name(self):
self.field_name = self.field_name or self.model._meta.pk.name
class OneToOneRel(ManyToOneRel):
"""
Used by OneToOneField to store information about the relation.
``_meta.get_fields()`` returns this class to provide access to the field
flags for the reverse relation.
"""
def __init__(
self,
field,
to,
field_name,
related_name=None,
related_query_name=None,
limit_choices_to=None,
parent_link=False,
on_delete=None,
):
super().__init__(
field,
to,
field_name,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
self.multiple = False
class ManyToManyRel(ForeignObjectRel):
"""
Used by ManyToManyField to store information about the relation.
``_meta.get_fields()`` returns this class to provide access to the field
flags for the reverse relation.
"""
def __init__(
self,
field,
to,
related_name=None,
related_query_name=None,
limit_choices_to=None,
symmetrical=True,
through=None,
through_fields=None,
db_constraint=True,
):
super().__init__(
field,
to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
if through and not db_constraint:
raise ValueError("Can't supply a through model and db_constraint=False")
self.through = through
if through_fields and not through:
raise ValueError("Cannot specify through_fields without a through model")
self.through_fields = through_fields
self.symmetrical = symmetrical
self.db_constraint = db_constraint
def get_related_field(self):
"""
Return the field in the 'to' object to which this relationship is tied.
Provided for symmetry with ManyToOneRel.
"""
opts = self.through._meta
if self.through_fields:
field = opts.get_field(self.through_fields[0])
else:
for field in opts.fields:
rel = getattr(field, "remote_field", None)
if rel and rel.model == self.model:
break
return field.foreign_related_fields[0]
| one_to_one |