blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 3-616) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-115) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable ⌀) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable ⌀) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3 to 10.2M) | authors (listlengths 1-1) | author_id (stringlengths 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2ca65305c59f566dc9043beda4fac38b816d1645 | cc3dd8ee82c854666680b086e55ba7bead421bef | /2.python_module/3.example/finance/methodForFin.py | 175a26041cf5c1924b98336735c8ab00a8588ffb | []
| no_license | eggeggss/PythonTutorial | b46a4377fdc0967f3c13e4d70f8ebfdebc93ef53 | a35fbf5d9c78ec6896b757529d97b262f6f0637b | refs/heads/master | 2020-04-01T01:27:42.793973 | 2018-12-01T15:14:49 | 2018-12-01T15:14:49 | 152,739,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py |
def printMethodName():
    print('Finance Method') | [
"[email protected]"
]
| |
b9e6bf9944097897f35f1edbb712bbb2fa99f251 | c51eef37bb983a9c35635c7ccc96a0cf689a7438 | /com/chapter03_python_begin/05_for/02_range.py | 6e97b3c6cbd2c9345bac79003fba8ca69d3a9ff6 | []
| no_license | Kyeongrok/python_crawler | 0a717b43be36584af1b0f7c1ad0c79108a5d11e0 | 5a5da8af7bb080f752a9a066741ac8adab136a3a | refs/heads/master | 2022-09-13T03:15:08.053639 | 2022-08-02T15:45:03 | 2022-08-02T15:45:03 | 124,719,435 | 40 | 34 | null | 2019-02-27T08:29:52 | 2018-03-11T03:20:32 | HTML | UTF-8 | Python | false | false | 25 | py | print(list(range(1, 10))) | [
"[email protected]"
]
| |
65a186d1f261b126882e2435ee2ae83f22c7970b | 48c47c714502fdc8cb8bb59601f30c344945a6d0 | /sdt/util/logging.py | 2269f55874bf16b4b6e19049b6a40526ff113113 | []
| no_license | ronvree/SoftDecisionTree | 327ef0e89eb600f0ee16d3f9cb0ad619b8bb9ba7 | b3ad17be8870c08be66d78974e0f78ae6f0439c7 | refs/heads/master | 2022-12-12T13:35:51.805748 | 2020-08-22T15:05:13 | 2020-08-22T15:05:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | import os
class TrainLog:
"""
Object for managing the train log directory
"""
def __init__(self, log_dir: str): # Store log in log_dir
self._log_dir = log_dir
self._logs = dict()
# Ensure the directories exist
if not os.path.isdir(self.log_dir):
os.mkdir(self.log_dir)
if not os.path.isdir(self.metadata_dir):
os.mkdir(self.metadata_dir)
if not os.path.isdir(self.checkpoint_dir):
os.mkdir(self.checkpoint_dir)
@property
def log_dir(self):
return self._log_dir
@property
def checkpoint_dir(self):
return self._log_dir + '/checkpoints'
@property
def metadata_dir(self):
return self._log_dir + '/metadata'
def log_message(self, msg: str):
"""
Write a message to the log file
:param msg: the message string to be written to the log file
"""
with open(self.log_dir + '/log.txt', 'w') as f:
f.write(msg)
def create_log(self, log_name: str, key_name: str, *value_names):
"""
Create a csv for logging information
:param log_name: The name of the log. The log filename will be <log_name>.csv.
:param key_name: The name of the attribute that is used as key (e.g. epoch number)
:param value_names: The names of the attributes that are logged
"""
if log_name in self._logs.keys():
raise Exception('Log already exists!')
# Add to existing logs
self._logs[log_name] = (key_name, value_names)
# Create log file. Create columns
with open(self.log_dir + f'/{log_name}.csv', 'w') as f:
f.write(','.join((key_name,) + value_names) + '\n')
def log_values(self, log_name, key, *values):
"""
Log values in an existent log file
:param log_name: The name of the log file
:param key: The key attribute for logging these values
:param values: value attributes that will be stored in the log
"""
if log_name not in self._logs.keys():
raise Exception('Log not existent!')
if len(values) != len(self._logs[log_name][1]):
raise Exception('Not all required values are logged!')
# Write a new line with the given values
with open(self.log_dir + f'/{log_name}.csv', 'a') as f:
f.write(','.join(str(v) for v in (key,) + values) + '\n')
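if __name__ == '__main__':
    # Minimal usage sketch (added example; './run_logs' is a made-up directory name):
    log = TrainLog('./run_logs')
    log.create_log('train', 'epoch', 'loss', 'accuracy')
    log.log_values('train', 1, 0.734, 0.81)  # appends "1,0.734,0.81" to train.csv
    log.log_message('training started')  # note: log_message opens log.txt in 'w' mode, overwriting it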
| [
"[email protected]"
]
| |
bcb2bac7460fd22247f6850cfb190c7713966a7e | 490ffe1023a601760ae7288e86723f0c6e366bba | /kolla-docker/nova/nova/compute/resource_tracker.py | 4f32a9eff29481b02245cdfa874c8e6d7ccb2a78 | [
"Apache-2.0"
]
| permissive | bopopescu/Cloud-User-Management | 89696a5ea5d2f95191327fbeab6c3e400bbfb2b8 | 390988bf4915a276c7bf8d96b62c3051c17d9e6e | refs/heads/master | 2022-11-19T10:09:36.662906 | 2018-11-07T20:28:31 | 2018-11-07T20:28:31 | 281,786,345 | 0 | 0 | null | 2020-07-22T21:26:07 | 2020-07-22T21:26:06 | null | UTF-8 | Python | false | false | 67,693 | py | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
import collections
import copy
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova.compute import claims
from nova.compute import monitors
from nova.compute import stats
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import utils
from nova.virt import hardware
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
def _instance_in_resize_state(instance):
"""Returns True if the instance is in one of the resizing states.
:param instance: `nova.objects.Instance` object
"""
vm = instance.vm_state
task = instance.task_state
if vm == vm_states.RESIZED:
return True
if (vm in [vm_states.ACTIVE, vm_states.STOPPED]
and task in [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH, task_states.REBUILDING]):
return True
return False
def _is_trackable_migration(migration):
# Only look at resize/migrate migration and evacuation records
# NOTE(danms): RT should probably examine live migration
# records as well and do something smart. However, ignore
# those for now to avoid them being included in below calculations.
return migration.migration_type in ('resize', 'migration',
'evacuation')
def _normalize_inventory_from_cn_obj(inv_data, cn):
"""Helper function that injects various information from a compute node
object into the inventory dict returned from the virt driver's
get_inventory() method. This function allows us to marry information like
*_allocation_ratio and reserved memory amounts that are in the
compute_nodes DB table and that the virt driver doesn't know about with the
information the virt driver *does* know about.
Note that if the supplied inv_data contains allocation_ratio, reserved or
other fields, we DO NOT override the value with that of the compute node.
This is to ensure that the virt driver is the single source of truth
regarding inventory information. For instance, the Ironic virt driver will
always return a very specific inventory with allocation_ratios pinned to
1.0.
:param inv_data: Dict, keyed by resource class, of inventory information
returned from virt driver's get_inventory() method
:param compute_node: `objects.ComputeNode` describing the compute node
"""
if fields.ResourceClass.VCPU in inv_data:
cpu_inv = inv_data[fields.ResourceClass.VCPU]
if 'allocation_ratio' not in cpu_inv:
cpu_inv['allocation_ratio'] = cn.cpu_allocation_ratio
if 'reserved' not in cpu_inv:
cpu_inv['reserved'] = CONF.reserved_host_cpus
if fields.ResourceClass.MEMORY_MB in inv_data:
mem_inv = inv_data[fields.ResourceClass.MEMORY_MB]
if 'allocation_ratio' not in mem_inv:
mem_inv['allocation_ratio'] = cn.ram_allocation_ratio
if 'reserved' not in mem_inv:
mem_inv['reserved'] = CONF.reserved_host_memory_mb
if fields.ResourceClass.DISK_GB in inv_data:
disk_inv = inv_data[fields.ResourceClass.DISK_GB]
if 'allocation_ratio' not in disk_inv:
disk_inv['allocation_ratio'] = cn.disk_allocation_ratio
if 'reserved' not in disk_inv:
# TODO(johngarbutt) We should either move to reserved_host_disk_gb
# or start tracking DISK_MB.
reserved_mb = CONF.reserved_host_disk_mb
reserved_gb = compute_utils.convert_mb_to_ceil_gb(reserved_mb)
disk_inv['reserved'] = reserved_gb
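# Illustrative only: a sketch of the inv_data dict this helper mutates, with
# made-up numbers (keys come from fields.ResourceClass):
#
#   inv_data = {
#       'VCPU': {'total': 8},                             # ratio/reserved injected from cn/CONF
#       'MEMORY_MB': {'total': 16384, 'reserved': 512},   # existing 'reserved' is left alone
#       'DISK_GB': {'total': 100},                        # reserved derived from reserved_host_disk_mb
#   }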
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver):
self.host = host
self.driver = driver
self.pci_tracker = None
# Dict of objects.ComputeNode objects, keyed by nodename
self.compute_nodes = {}
self.stats = stats.Stats()
self.tracked_instances = {}
self.tracked_migrations = {}
monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.old_resources = collections.defaultdict(objects.ComputeNode)
self.scheduler_client = scheduler_client.SchedulerClient()
self.reportclient = self.scheduler_client.reportclient
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
def get_node_uuid(self, nodename):
try:
return self.compute_nodes[nodename].uuid
except KeyError:
raise exception.ComputeHostNotFound(host=nodename)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def instance_claim(self, context, instance, nodename, limits=None):
"""Indicate that some resources are needed for an upcoming compute
instance build operation.
This should be called before the compute node is about to perform
an instance build operation that will consume additional resources.
:param context: security context
:param instance: instance to reserve resources for.
:type instance: nova.objects.instance.Instance object
:param nodename: The Ironic nodename selected by the scheduler
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs.
:returns: A Claim ticket representing the reserved resources. It can
be used to revert the resource usage if an error occurs
during the instance build.
"""
if self.disabled(nodename):
# instance_claim() was called before update_available_resource()
# (which ensures that a compute node exists for nodename). We
# shouldn't get here but in case we do, just set the instance's
# host and nodename attribute (probably incorrect) and return a
# NoopClaim.
# TODO(jaypipes): Remove all the disabled junk from the resource
# tracker. Servicegroup API-level active-checking belongs in the
# nova-compute manager.
self._set_instance_host_and_node(instance, nodename)
return claims.NopClaim()
# sanity checks:
if instance.host:
LOG.warning("Host field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
if instance.node:
LOG.warning("Node field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
# get the overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(instance)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': instance.flavor.memory_mb,
'overhead': overhead['memory_mb']})
LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
"GB", {'flavor': instance.flavor.root_gb,
'overhead': overhead.get('disk_gb', 0)})
LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
"vCPU(s)", {'flavor': instance.flavor.vcpus,
'overhead': overhead.get('vcpus', 0)})
cn = self.compute_nodes[nodename]
pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
context, instance.uuid)
claim = claims.Claim(context, instance, nodename, self, cn,
pci_requests, overhead=overhead, limits=limits)
# self._set_instance_host_and_node() will save instance to the DB
# so set instance.numa_topology first. We need to make sure
# that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
# so that the resource audit knows about any cpus we've pinned.
instance_numa_topology = claim.claimed_numa_topology
instance.numa_topology = instance_numa_topology
self._set_instance_host_and_node(instance, nodename)
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
self.pci_tracker.claim_instance(context, pci_requests,
instance_numa_topology)
# Mark resources in-use and update stats
self._update_usage_from_instance(context, instance, nodename)
elevated = context.elevated()
# persist changes to the compute node:
self._update(elevated, cn)
return claim
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def rebuild_claim(self, context, instance, nodename, limits=None,
image_meta=None, migration=None):
"""Create a claim for a rebuild operation."""
instance_type = instance.flavor
return self._move_claim(context, instance, instance_type, nodename,
migration, move_type='evacuation',
limits=limits, image_meta=image_meta)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def resize_claim(self, context, instance, instance_type, nodename,
migration, image_meta=None, limits=None):
"""Create a claim for a resize or cold-migration move."""
return self._move_claim(context, instance, instance_type, nodename,
migration, image_meta=image_meta,
limits=limits)
def _move_claim(self, context, instance, new_instance_type, nodename,
migration, move_type=None, image_meta=None, limits=None):
"""Indicate that resources are needed for a move to this host.
Move can be either a migrate/resize, live-migrate or an
evacuate/rebuild operation.
:param context: security context
:param instance: instance object to reserve resources for
:param new_instance_type: new instance_type being resized to
:param nodename: The Ironic nodename selected by the scheduler
:param image_meta: instance image metadata
:param move_type: move type - can be one of 'migration', 'resize',
'live-migration', 'evacuate'
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs
:param migration: A migration object if one was already created
elsewhere for this operation (otherwise None)
:returns: A Claim ticket representing the reserved resources. This
should be turned into finalize a resource claim or free
resources after the compute operation is finished.
"""
image_meta = image_meta or {}
if migration:
self._claim_existing_migration(migration, nodename)
else:
migration = self._create_migration(context, instance,
new_instance_type,
nodename, move_type)
if self.disabled(nodename):
# compute_driver doesn't support resource tracking, just
# generate the migration record and continue the resize:
return claims.NopClaim(migration=migration)
# get memory overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(new_instance_type)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': new_instance_type.memory_mb,
'overhead': overhead['memory_mb']})
LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
"GB", {'flavor': instance.flavor.root_gb,
'overhead': overhead.get('disk_gb', 0)})
LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
"vCPU(s)", {'flavor': instance.flavor.vcpus,
'overhead': overhead.get('vcpus', 0)})
cn = self.compute_nodes[nodename]
# TODO(moshele): we are recreating the pci requests even if
# there was no change on resize. This will cause allocating
# the old/new pci device in the resize phase. In the future
# we would like to optimise this.
new_pci_requests = pci_request.get_pci_requests_from_flavor(
new_instance_type)
new_pci_requests.instance_uuid = instance.uuid
# PCI requests come from two sources: instance flavor and
# SR-IOV ports. SR-IOV ports pci_request don't have an alias_name.
# On resize merge the SR-IOV ports pci_requests with the new
# instance flavor pci_requests.
if instance.pci_requests:
for request in instance.pci_requests.requests:
if request.alias_name is None:
new_pci_requests.requests.append(request)
claim = claims.MoveClaim(context, instance, nodename,
new_instance_type, image_meta, self, cn,
new_pci_requests, overhead=overhead,
limits=limits)
claim.migration = migration
claimed_pci_devices_objs = []
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
claimed_pci_devices_objs = self.pci_tracker.claim_instance(
context, new_pci_requests, claim.claimed_numa_topology)
claimed_pci_devices = objects.PciDeviceList(
objects=claimed_pci_devices_objs)
# TODO(jaypipes): Move claimed_numa_topology out of the Claim's
# constructor flow so the Claim constructor only tests whether
# resources can be claimed, not consume the resources directly.
mig_context = objects.MigrationContext(
context=context, instance_uuid=instance.uuid,
migration_id=migration.id,
old_numa_topology=instance.numa_topology,
new_numa_topology=claim.claimed_numa_topology,
old_pci_devices=instance.pci_devices,
new_pci_devices=claimed_pci_devices,
old_pci_requests=instance.pci_requests,
new_pci_requests=new_pci_requests)
instance.migration_context = mig_context
instance.save()
# Mark the resources in-use for the resize landing on this
# compute host:
self._update_usage_from_migration(context, instance, migration,
nodename)
elevated = context.elevated()
self._update(elevated, cn)
return claim
def _create_migration(self, context, instance, new_instance_type,
nodename, move_type=None):
"""Create a migration record for the upcoming resize. This should
be done while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
migration = objects.Migration(context=context.elevated())
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = new_instance_type.id
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.source_node = instance.node
if move_type:
migration.migration_type = move_type
else:
migration.migration_type = migration_obj.determine_migration_type(
migration)
migration.create()
return migration
def _claim_existing_migration(self, migration, nodename):
"""Make an existing migration record count for resource tracking.
If a migration record was created already before the request made
it to this compute host, only set up the migration so it's included in
resource tracking. This should be done while the
COMPUTE_RESOURCE_SEMAPHORE is held.
"""
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.status = 'pre-migrating'
migration.save()
def _set_instance_host_and_node(self, instance, nodename):
"""Tag the instance as belonging to this host. This should be done
while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource claim
will not be lost if the audit process starts.
"""
instance.host = self.host
instance.launched_on = self.host
instance.node = nodename
instance.save()
def _unset_instance_host_and_node(self, instance):
"""Untag the instance so it no longer belongs to the host.
This should be done while the COMPUTE_RESOURCE_SEMAPHORE is held so
the resource claim will not be lost if the audit process starts.
"""
instance.host = None
instance.node = None
instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_instance_claim(self, context, instance, nodename):
"""Remove usage from the given instance."""
self._update_usage_from_instance(context, instance, nodename,
is_removed=True)
instance.clear_numa_topology()
self._unset_instance_host_and_node(instance)
self._update(context.elevated(), self.compute_nodes[nodename])
def _drop_pci_devices(self, instance, nodename, prefix):
if self.pci_tracker:
# free old/new allocated pci devices
pci_devices = self._get_migration_context_resource(
'pci_devices', instance, prefix=prefix)
if pci_devices:
for pci_device in pci_devices:
self.pci_tracker.free_device(pci_device, instance)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_nodes[nodename].pci_device_pools = dev_pools_obj
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def drop_move_claim(self, context, instance, nodename,
instance_type=None, prefix='new_'):
# Remove usage for an incoming/outgoing migration on the destination
# node.
if instance['uuid'] in self.tracked_migrations:
migration = self.tracked_migrations.pop(instance['uuid'])
if not instance_type:
ctxt = context.elevated()
instance_type = self._get_instance_type(ctxt, instance, prefix,
migration)
if instance_type is not None:
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix=prefix)
usage = self._get_usage_dict(
instance_type, numa_topology=numa_topology)
self._drop_pci_devices(instance, nodename, prefix)
self._update_usage(usage, nodename, sign=-1)
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
# Remove usage for an instance that is not tracked in migrations (such
# as on the source node after a migration).
# NOTE(lbeliveau): On resize on the same node, the instance is
# included in both tracked_migrations and tracked_instances.
elif (instance['uuid'] in self.tracked_instances):
self.tracked_instances.pop(instance['uuid'])
self._drop_pci_devices(instance, nodename, prefix)
# TODO(lbeliveau): Validate if numa needs the same treatment.
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
instance
"""
if self.disabled(nodename):
return
uuid = instance['uuid']
# don't update usage for this instance unless it submitted a resource
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(context, instance, nodename)
self._update(context.elevated(), self.compute_nodes[nodename])
def disabled(self, nodename):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
def _check_for_nodes_rebalance(self, context, resources, nodename):
"""Check if nodes rebalance has happened.
The ironic driver maintains a hash ring mapping bare metal nodes
to compute nodes. If a compute dies, the hash ring is rebuilt, and
some of its bare metal nodes (more precisely, those not in ACTIVE
state) are assigned to other computes.
This method checks for this condition and adjusts the database
accordingly.
:param context: security context
:param resources: initial values
:param nodename: node name
:returns: True if a suitable compute node record was found, else False
"""
if not self.driver.rebalances_nodes:
return False
# It's possible ironic just did a node re-balance, so let's
# check if there is a compute node that already has the correct
# hypervisor_hostname. We can re-use that rather than create a
# new one and have to move existing placement allocations
cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
context, nodename)
if len(cn_candidates) == 1:
cn = cn_candidates[0]
LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
{"name": nodename, "old": cn.host, "new": self.host})
cn.host = self.host
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return True
elif len(cn_candidates) > 1:
LOG.error(
"Found more than one ComputeNode for nodename %s. "
"Please clean up the orphaned ComputeNode records in your DB.",
nodename)
return False
def _init_compute_node(self, context, resources):
"""Initialize the compute node if it does not already exist.
The resource tracker will be inoperable if compute_node
is not defined. The compute_node will remain undefined if
we fail to create it or if there is no associated service
registered.
If this method has to create a compute node it needs initial
values - these come from resources.
:param context: security context
:param resources: initial values
"""
nodename = resources['hypervisor_hostname']
# if there is already a compute node just use resources
# to initialize
if nodename in self.compute_nodes:
cn = self.compute_nodes[nodename]
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
cn = self._get_compute_node(context, nodename)
if cn:
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return
if self._check_for_nodes_rebalance(context, resources, nodename):
return
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources)
self.compute_nodes[nodename] = cn
cn.create()
LOG.info('Compute node record created for '
'%(host)s:%(node)s with uuid: %(uuid)s',
{'host': self.host, 'node': nodename, 'uuid': cn.uuid})
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
def _setup_pci_tracker(self, context, compute_node, resources):
if not self.pci_tracker:
n_id = compute_node.id
self.pci_tracker = pci_manager.PciDevTracker(context, node_id=n_id)
if 'pci_passthrough_devices' in resources:
dev_json = resources.pop('pci_passthrough_devices')
self.pci_tracker.update_devices_from_hypervisor_resources(
dev_json)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
compute_node.pci_device_pools = dev_pools_obj
def _copy_resources(self, compute_node, resources):
"""Copy resource values to supplied compute_node."""
# purge old stats and init with anything passed in by the driver
self.stats.clear()
self.stats.digest_stats(resources.get('stats'))
compute_node.stats = copy.deepcopy(self.stats)
# update the allocation ratios for the related ComputeNode object
compute_node.ram_allocation_ratio = self.ram_allocation_ratio
compute_node.cpu_allocation_ratio = self.cpu_allocation_ratio
compute_node.disk_allocation_ratio = self.disk_allocation_ratio
# now copy rest to compute_node
compute_node.update_from_virt_driver(resources)
def _get_host_metrics(self, context, nodename):
"""Get the metrics from monitors and
notify information to message bus.
"""
metrics = objects.MonitorMetricList()
metrics_info = {}
for monitor in self.monitors:
try:
monitor.populate_metrics(metrics)
except NotImplementedError:
LOG.debug("The compute driver doesn't support host "
"metrics for %(mon)s", {'mon': monitor})
except Exception as exc:
LOG.warning("Cannot get the metrics from %(mon)s; "
"error: %(exc)s",
{'mon': monitor, 'exc': exc})
# TODO(jaypipes): Remove this when compute_node.metrics doesn't need
# to be populated as a JSONified string.
metrics = metrics.to_list()
if len(metrics):
metrics_info['nodename'] = nodename
metrics_info['metrics'] = metrics
metrics_info['host'] = self.host
metrics_info['host_ip'] = CONF.my_ip
notifier = rpc.get_notifier(service='compute', host=nodename)
notifier.info(context, 'compute.metrics.update', metrics_info)
return metrics
def update_available_resource(self, context, nodename):
"""Override in-memory calculations of compute node resource usage based
on data audited from the hypervisor layer.
Add in resource claims in progress to account for operations that have
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
:param nodename: Temporary parameter representing the Ironic resource
node. This parameter will be removed once Ironic
baremetal resource nodes are handled like any other
resource in the system.
"""
LOG.debug("Auditing locally available compute resources for "
"%(host)s (node: %(node)s)",
{'node': nodename,
'host': self.host})
resources = self.driver.get_available_resource(nodename)
# NOTE(jaypipes): The resources['hypervisor_hostname'] field now
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
# just force it to empty string
if "cpu_info" not in resources or resources["cpu_info"] is None:
resources["cpu_info"] = ''
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
self._update_available_resource(context, resources)
def _pair_instances_to_migrations(self, migrations, instances):
instance_by_uuid = {inst.uuid: inst for inst in instances}
for migration in migrations:
try:
migration.instance = instance_by_uuid[migration.instance_uuid]
except KeyError:
# NOTE(danms): If this happens, we don't set it here, and
# let the code either fail or lazy-load the instance later
# which is what happened before we added this optimization.
# NOTE(tdurakov) this situation is possible for resize/cold
# migration when migration is finished but haven't yet
# confirmed/reverted in that case instance already changed host
# to destination and no matching happens
LOG.debug('Migration for instance %(uuid)s refers to '
'another host\'s instance!',
{'uuid': migration.instance_uuid})
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def _update_available_resource(self, context, resources):
# initialize the compute node object, creating it
# if it does not already exist.
self._init_compute_node(context, resources)
nodename = resources['hypervisor_hostname']
# if we could not init the compute node the tracker will be
# disabled and we should quit now
if self.disabled(nodename):
return
# Grab all instances assigned to this node:
instances = objects.InstanceList.get_by_host_and_node(
context, self.host, nodename,
expected_attrs=['system_metadata',
'numa_topology',
'flavor', 'migration_context'])
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(context, instances, nodename)
# Grab all in-progress migrations:
migrations = objects.MigrationList.get_in_progress_by_host_and_node(
context, self.host, nodename)
self._pair_instances_to_migrations(migrations, instances)
self._update_usage_from_migrations(context, migrations, nodename)
self._remove_deleted_instances_allocations(
context, self.compute_nodes[nodename], migrations)
# Detect and account for orphaned instances that may exist on the
# hypervisor, but are not in the DB:
orphans = self._find_orphaned_instances()
self._update_usage_from_orphans(orphans, nodename)
cn = self.compute_nodes[nodename]
# NOTE(yjiang5): Because pci device tracker status is not cleared in
# this periodic task, and also because the resource tracker is not
# notified when instances are deleted, we need to remove all usages
# from deleted instances.
self.pci_tracker.clean_usage(instances, migrations, orphans)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = dev_pools_obj
self._report_final_resource_view(nodename)
metrics = self._get_host_metrics(context, nodename)
# TODO(pmurray): metrics should not be a json string in ComputeNode,
# but it is. This should be changed in ComputeNode
cn.metrics = jsonutils.dumps(metrics)
# update the compute_node
self._update(context, cn)
LOG.debug('Compute_service record updated for %(host)s:%(node)s',
{'host': self.host, 'node': nodename})
def _get_compute_node(self, context, nodename):
"""Returns compute node for the host and nodename."""
try:
return objects.ComputeNode.get_by_host_and_nodename(
context, self.host, nodename)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
{'host': self.host, 'node': nodename})
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
This is just a snapshot of resource usage recorded by the
virt driver.
The following resources are logged:
- free memory
- free disk
- free CPUs
- assignable PCI devices
"""
nodename = resources['hypervisor_hostname']
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
else:
free_vcpus = 'unknown'
pci_devices = resources.get('pci_passthrough_devices')
LOG.debug("Hypervisor/Node resource view: "
"name=%(node)s "
"free_ram=%(free_ram)sMB "
"free_disk=%(free_disk)sGB "
"free_vcpus=%(free_vcpus)s "
"pci_devices=%(pci_devices)s",
{'node': nodename,
'free_ram': free_ram_mb,
'free_disk': free_disk_gb,
'free_vcpus': free_vcpus,
'pci_devices': pci_devices})
def _report_final_resource_view(self, nodename):
"""Report final calculate of physical memory, used virtual memory,
disk, usable vCPUs, used virtual CPUs and PCI devices,
including instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
cn = self.compute_nodes[nodename]
vcpus = cn.vcpus
if vcpus:
tcpu = vcpus
ucpu = cn.vcpus_used
LOG.debug("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s",
{'tcpu': vcpus,
'ucpu': ucpu})
else:
tcpu = 0
ucpu = 0
pci_stats = (list(cn.pci_device_pools) if
cn.pci_device_pools else [])
LOG.info("Final resource view: "
"name=%(node)s "
"phys_ram=%(phys_ram)sMB "
"used_ram=%(used_ram)sMB "
"phys_disk=%(phys_disk)sGB "
"used_disk=%(used_disk)sGB "
"total_vcpus=%(total_vcpus)s "
"used_vcpus=%(used_vcpus)s "
"pci_stats=%(pci_stats)s",
{'node': nodename,
'phys_ram': cn.memory_mb,
'used_ram': cn.memory_mb_used,
'phys_disk': cn.local_gb,
'used_disk': cn.local_gb_used,
'total_vcpus': tcpu,
'used_vcpus': ucpu,
'pci_stats': pci_stats})
def _resource_change(self, compute_node):
"""Check to see if any resources have changed."""
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if not obj_base.obj_equal_prims(
compute_node, old_compute, ['updated_at']):
self.old_resources[nodename] = copy.deepcopy(compute_node)
return True
return False
def _update(self, context, compute_node):
"""Update partial stats locally and populate them to Scheduler."""
if self._resource_change(compute_node):
# If the compute_node's resource changed, update to DB.
# NOTE(jianghuaw): Once we completely move to use get_inventory()
# for all resource provider's inv data. We can remove this check.
# At the moment we still need this check and save compute_node.
compute_node.save()
# NOTE(jianghuaw): Some resources(e.g. VGPU) are not saved in the
# object of compute_node; instead the inventory data for these
# resource is reported by driver's get_inventory(). So even there
# is no resource change for compute_node as above, we need proceed
# to get inventory and use scheduler_client interfaces to update
# inventory to placement. It's scheduler_client's responsibility to
# ensure the update request to placement only happens when inventory
# is changed.
nodename = compute_node.hypervisor_hostname
# Persist the stats to the Scheduler
try:
inv_data = self.driver.get_inventory(nodename)
_normalize_inventory_from_cn_obj(inv_data, compute_node)
self.scheduler_client.set_inventory_for_provider(
context,
compute_node.uuid,
compute_node.hypervisor_hostname,
inv_data,
)
except NotImplementedError:
# Eventually all virt drivers will return an inventory dict in the
# format that the placement API expects and we'll be able to remove
# this code branch
self.scheduler_client.update_compute_node(context, compute_node)
try:
traits = self.driver.get_traits(nodename)
except NotImplementedError:
pass
else:
# NOTE(mgoddard): set_traits_for_provider does not refresh the
# provider tree in the report client, so we rely on the above call
# to set_inventory_for_provider or update_compute_node to ensure
# that the resource provider exists in the tree and has had its
# cached traits refreshed.
self.reportclient.set_traits_for_provider(
context, compute_node.uuid, traits)
if self.pci_tracker:
self.pci_tracker.save(context)
def _update_usage(self, usage, nodename, sign=1):
mem_usage = usage['memory_mb']
disk_usage = usage.get('root_gb', 0)
vcpus_usage = usage.get('vcpus', 0)
overhead = self.driver.estimate_instance_overhead(usage)
mem_usage += overhead['memory_mb']
disk_usage += overhead.get('disk_gb', 0)
vcpus_usage += overhead.get('vcpus', 0)
cn = self.compute_nodes[nodename]
cn.memory_mb_used += sign * mem_usage
cn.local_gb_used += sign * disk_usage
cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
cn.vcpus_used += sign * vcpus_usage
# free ram and disk may be negative, depending on policy:
cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
cn.free_disk_gb = cn.local_gb - cn.local_gb_used
cn.running_vms = self.stats.num_instances
# Calculate the numa usage
free = sign == -1
updated_numa_topology = hardware.get_host_numa_usage_from_instance(
cn, usage, free)
cn.numa_topology = updated_numa_topology
def _get_migration_context_resource(self, resource, instance,
prefix='new_'):
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
return None
def _update_usage_from_migration(self, context, instance, migration,
nodename):
"""Update usage for a single migration. The record may
represent an incoming or outbound migration.
"""
if not _is_trackable_migration(migration):
return
uuid = migration.instance_uuid
LOG.info("Updating from migration %s", uuid)
incoming = (migration.dest_compute == self.host and
migration.dest_node == nodename)
outbound = (migration.source_compute == self.host and
migration.source_node == nodename)
same_node = (incoming and outbound)
record = self.tracked_instances.get(uuid, None)
itype = None
numa_topology = None
sign = 0
if same_node:
# Same node resize. Record usage for the 'new_' resources. This
# is executed on resize_claim().
if (instance['instance_type_id'] ==
migration.old_instance_type_id):
itype = self._get_instance_type(context, instance, 'new_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
else:
# The instance is already set to the new flavor (this is done
# by the compute manager on finish_resize()), hold space for a
# possible revert to the 'old_' resources.
# NOTE(lbeliveau): When the periodic audit timer gets
# triggered, the compute usage gets reset. The usage for an
# instance that is migrated to the new flavor but not yet
# confirmed/reverted will first get accounted for by
# _update_usage_from_instances(). This method will then be
# called, and we need to account for the '_old' resources
# (just in case).
itype = self._get_instance_type(context, instance, 'old_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
elif incoming and not record:
# instance has not yet migrated here:
itype = self._get_instance_type(context, instance, 'new_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
elif outbound and not record:
# instance migrated, but record usage for a possible revert:
itype = self._get_instance_type(context, instance, 'old_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
if itype:
cn = self.compute_nodes[nodename]
usage = self._get_usage_dict(
itype, numa_topology=numa_topology)
if self.pci_tracker and sign:
self.pci_tracker.update_pci_for_instance(
context, instance, sign=sign)
self._update_usage(usage, nodename)
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
obj = objects.PciDevicePoolList()
cn.pci_device_pools = obj
self.tracked_migrations[uuid] = migration
def _update_usage_from_migrations(self, context, migrations, nodename):
filtered = {}
instances = {}
self.tracked_migrations.clear()
# do some defensive filtering against bad migrations records in the
# database:
for migration in migrations:
uuid = migration.instance_uuid
try:
if uuid not in instances:
instances[uuid] = migration.instance
except exception.InstanceNotFound as e:
# migration referencing deleted instance
LOG.debug('Migration instance not found: %s', e)
continue
# skip migration if instance isn't in a resize state:
if not _instance_in_resize_state(instances[uuid]):
LOG.warning("Instance not resizing, skipping migration.",
instance_uuid=uuid)
continue
# filter to most recently updated migration for each instance:
other_migration = filtered.get(uuid, None)
# NOTE(claudiub): In Python 3, you cannot compare NoneTypes.
if other_migration:
om = other_migration
other_time = om.updated_at or om.created_at
migration_time = migration.updated_at or migration.created_at
if migration_time > other_time:
filtered[uuid] = migration
else:
filtered[uuid] = migration
for migration in filtered.values():
instance = instances[migration.instance_uuid]
# Skip migration (and mark it as error) if it doesn't match the
# instance migration id.
# This can happen if we have a stale migration record.
# We want to proceed if instance.migration_context is None
if (instance.migration_context is not None and
instance.migration_context.migration_id != migration.id):
LOG.info("Current instance migration %(im)s doesn't match "
"migration %(m)s, marking migration as error. "
"This can occur if a previous migration for this "
"instance did not complete.",
{'im': instance.migration_context.migration_id,
'm': migration.id})
migration.status = "error"
migration.save()
continue
try:
self._update_usage_from_migration(context, instance, migration,
nodename)
except exception.FlavorNotFound:
LOG.warning("Flavor could not be found, skipping migration.",
instance_uuid=instance.uuid)
continue
def _update_usage_from_instance(self, context, instance, nodename,
is_removed=False, require_allocation_refresh=False):
"""Update usage for a single instance."""
uuid = instance['uuid']
is_new_instance = uuid not in self.tracked_instances
# NOTE(sfinucan): Both brand new instances as well as instances that
# are being unshelved will have is_new_instance == True
is_removed_instance = not is_new_instance and (is_removed or
instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)
if is_new_instance:
self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
sign = 1
if is_removed_instance:
self.tracked_instances.pop(uuid)
sign = -1
cn = self.compute_nodes[nodename]
self.stats.update_stats_for_instance(instance, is_removed_instance)
cn.stats = copy.deepcopy(self.stats)
# if it's a new or deleted instance:
if is_new_instance or is_removed_instance:
if self.pci_tracker:
self.pci_tracker.update_pci_for_instance(context,
instance,
sign=sign)
if require_allocation_refresh:
LOG.debug("Auto-correcting allocations.")
self.reportclient.update_instance_allocation(context, cn,
instance, sign)
# new instance, update compute node resource usage:
self._update_usage(self._get_usage_dict(instance), nodename,
sign=sign)
cn.current_workload = self.stats.calculate_workload()
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
cn.pci_device_pools = objects.PciDevicePoolList()
def _update_usage_from_instances(self, context, instances, nodename):
"""Calculate resource usage based on instance utilization. This is
different than the hypervisor's view as it will account for all
instances assigned to the local compute host, even if they are not
currently powered on.
"""
self.tracked_instances.clear()
cn = self.compute_nodes[nodename]
# set some initial values, reserve room for host/hypervisor:
cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
cn.memory_mb_used = CONF.reserved_host_memory_mb
cn.vcpus_used = CONF.reserved_host_cpus
cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
cn.current_workload = 0
cn.running_vms = 0
# NOTE(jaypipes): In Pike, we need to be tolerant of Ocata compute
# nodes that overwrite placement allocations to look like what the
# resource tracker *thinks* is correct. When an instance is
# migrated from an Ocata compute node to a Pike compute node, the
# Pike scheduler will have created a "doubled-up" allocation that
# contains allocated resources against both the source and
# destination hosts. The Ocata source compute host, during its
# update_available_resource() periodic call will find the instance
# in its list of known instances and will call
# update_instance_allocation() in the report client. That call will
# pull the allocations for the instance UUID which will contain
# both the source and destination host providers in the allocation
# set. Seeing that this is different from what the Ocata source
# host thinks it should be and will overwrite the allocation to
# only be an allocation against itself.
#
# And therefore, here we need to have Pike compute hosts
# "correct" the improper healing that the Ocata source host did
# during its periodic interval. When the instance is fully migrated
# to the Pike compute host, the Ocata compute host will find an
# allocation that refers to itself for an instance it no longer
# controls and will *delete* all allocations that refer to that
# instance UUID, assuming that the instance has been deleted. We
# need the destination Pike compute host to recreate that
# allocation to refer to its own resource provider UUID.
#
# For Pike compute nodes that migrate to either a Pike compute host
# or a Queens compute host, we do NOT want the Pike compute host to
# be "healing" allocation information. Instead, we rely on the Pike
# scheduler to properly create allocations during scheduling.
#
# Pike compute hosts may still rework an
# allocation for an instance in a move operation during
# confirm_resize() on the source host which will remove the
# source resource provider from any allocation for an
# instance.
#
# In Queens and beyond, the scheduler will understand when
# a move operation has been requested and instead of
# creating a doubled-up allocation that contains both the
# source and destination host, the scheduler will take the
# original allocation (against the source host) and change
# the consumer ID of that allocation to be the migration
# UUID and not the instance UUID. The scheduler will
# allocate the resources for the destination host to the
# instance UUID.
compute_version = objects.Service.get_minimum_version(
context, 'nova-compute')
has_ocata_computes = compute_version < 22
# Some drivers (ironic) still need the allocations to be
# fixed up, as they transition the way their inventory is reported.
require_allocation_refresh = (
has_ocata_computes or
self.driver.requires_allocation_refresh)
msg_allocation_refresh = (
"Compute driver doesn't require allocation refresh and we're on a "
"compute host in a deployment that only has compute hosts with "
"Nova versions >=16 (Pike). Skipping auto-correction of "
"allocations. ")
if require_allocation_refresh:
if self.driver.requires_allocation_refresh:
msg_allocation_refresh = (
"Compute driver requires allocation refresh. ")
elif has_ocata_computes:
msg_allocation_refresh = (
"We're on a compute host from Nova version >=16 (Pike or "
"later) in a deployment with at least one compute host "
"version <16 (Ocata or earlier). ")
msg_allocation_refresh += (
"Will auto-correct allocations to handle "
"Ocata-style assumptions.")
for instance in instances:
if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
if msg_allocation_refresh:
LOG.debug(msg_allocation_refresh)
msg_allocation_refresh = False
self._update_usage_from_instance(context, instance, nodename,
require_allocation_refresh=require_allocation_refresh)
def _remove_deleted_instances_allocations(self, context, cn,
migrations):
migration_uuids = [migration.uuid for migration in migrations
if 'uuid' in migration]
# NOTE(jaypipes): All of this code sucks. It's basically dealing with
# all the corner cases in move, local delete, unshelve and rebuild
# operations for when allocations should be deleted when things didn't
# happen according to the normal flow of events where the scheduler
# always creates allocations for an instance
known_instances = set(self.tracked_instances.keys())
allocations = self.reportclient.get_allocations_for_resource_provider(
context, cn.uuid) or {}
read_deleted_context = context.elevated(read_deleted='yes')
for consumer_uuid, alloc in allocations.items():
if consumer_uuid in known_instances:
LOG.debug("Instance %s actively managed on this compute host "
"and has allocations in placement: %s.",
consumer_uuid, alloc)
continue
if consumer_uuid in migration_uuids:
LOG.debug("Migration %s is active on this compute host "
"and has allocations in placement: %s.",
consumer_uuid, alloc)
continue
# We know these are instances now, so proceed
instance_uuid = consumer_uuid
try:
instance = objects.Instance.get_by_uuid(read_deleted_context,
instance_uuid,
expected_attrs=[])
except exception.InstanceNotFound:
# The instance isn't even in the database. Either the scheduler
# _just_ created an allocation for it and we're racing with the
# creation in the cell database, or the instance was deleted
# and fully archived before we got a chance to run this. The
# former is far more likely than the latter. Avoid deleting
# allocations for a building instance here.
LOG.info("Instance %(uuid)s has allocations against this "
"compute host but is not found in the database.",
{'uuid': instance_uuid},
exc_info=False)
continue
if instance.deleted:
# The instance is gone, so we definitely want to remove
# allocations associated with it.
# NOTE(jaypipes): This will not be true if/when we support
# cross-cell migrations...
LOG.debug("Instance %s has been deleted (perhaps locally). "
"Deleting allocations that remained for this "
"instance against this compute host: %s.",
instance_uuid, alloc)
self.reportclient.delete_allocation_for_instance(context,
instance_uuid)
continue
if not instance.host:
# Allocations related to instances being scheduled should not
# be deleted if we already wrote the allocation previously.
LOG.debug("Instance %s has been scheduled to this compute "
"host, the scheduler has made an allocation "
"against this compute node but the instance has "
"yet to start. Skipping heal of allocation: %s.",
instance_uuid, alloc)
continue
if (instance.host == cn.host and
instance.node == cn.hypervisor_hostname):
# The instance is supposed to be on this compute host but is
# not in the list of actively managed instances.
LOG.warning("Instance %s is not being actively managed by "
"this compute host but has allocations "
"referencing this compute host: %s. Skipping "
"heal of allocation because we do not know "
"what to do.", instance_uuid, alloc)
continue
if instance.host != cn.host:
# The instance has been moved to another host either via a
# migration, evacuation or unshelve in between the time when we
# ran InstanceList.get_by_host_and_node(), added those
# instances to RT.tracked_instances and the above
# Instance.get_by_uuid() call. We SHOULD attempt to remove any
# allocations that reference this compute host if the VM is in
# a stable terminal state (i.e. it isn't in a state of waiting
# for resize to confirm/revert), however if the destination
# host is an Ocata compute host, it will delete the allocation
# that contains this source compute host information anyway and
# recreate an allocation that only refers to itself. So we
# don't need to do anything in that case. Just log the
# situation here for information but don't attempt to delete or
# change the allocation.
LOG.warning("Instance %s has been moved to another host "
"%s(%s). There are allocations remaining against "
"the source host that might need to be removed: "
"%s.",
instance_uuid, instance.host, instance.node, alloc)
def delete_allocation_for_evacuated_instance(self, context, instance, node,
node_type='source'):
self._delete_allocation_for_moved_instance(
context, instance, node, 'evacuated', node_type)
def delete_allocation_for_migrated_instance(self, context, instance, node):
self._delete_allocation_for_moved_instance(context, instance, node,
'migrated')
def _delete_allocation_for_moved_instance(
self, context, instance, node, move_type, node_type='source'):
# Clean up the instance allocation from this node in placement
cn_uuid = self.compute_nodes[node].uuid
if not scheduler_utils.remove_allocation_from_compute(
context, instance, cn_uuid, self.reportclient):
LOG.error("Failed to clean allocation of %s "
"instance on the %s node %s",
move_type, node_type, cn_uuid, instance=instance)
def delete_allocation_for_failed_resize(self, context, instance, node,
flavor):
"""Delete instance allocations for the node during a failed resize
:param context: The request context.
:param instance: The instance being resized/migrated.
:param node: The node provider on which the instance should have
allocations to remove. If this is a resize to the same host, then
the new_flavor resources are subtracted from the single allocation.
:param flavor: This is the new_flavor during a resize.
"""
cn = self.compute_nodes[node]
if not scheduler_utils.remove_allocation_from_compute(
context, instance, cn.uuid, self.reportclient, flavor):
if instance.instance_type_id == flavor.id:
operation = 'migration'
else:
operation = 'resize'
LOG.error('Failed to clean allocation after a failed '
'%(operation)s on node %(node)s',
{'operation': operation, 'node': cn.uuid},
instance=instance)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine
if there are any "orphaned" instances left hanging around.
Orphans could be consuming memory and should be accounted for in
usage calculations to guard against potential out of memory
errors.
"""
uuids1 = frozenset(self.tracked_instances.keys())
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
def _update_usage_from_orphans(self, orphans, nodename):
"""Include orphaned instances in usage."""
for orphan in orphans:
memory_mb = orphan['memory_mb']
LOG.warning("Detected running orphan instance: %(uuid)s "
"(consuming %(memory_mb)s MB memory)",
{'uuid': orphan['uuid'], 'memory_mb': memory_mb})
# just record memory usage for the orphan
usage = {'memory_mb': memory_mb}
self._update_usage(usage, nodename)
def delete_allocation_for_shelve_offloaded_instance(self, context,
instance):
self.reportclient.delete_allocation_for_instance(context,
instance.uuid)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _get_instance_type(self, context, instance, prefix, migration):
"""Get the instance type from instance."""
stashed_flavors = migration.migration_type in ('resize',)
if stashed_flavors:
return getattr(instance, '%sflavor' % prefix)
else:
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
def _get_usage_dict(self, object_or_dict, **updates):
"""Make a usage dict _update methods expect.
Accepts a dict or an Instance or Flavor object, and a set of updates.
Converts the object to a dict and applies the updates.
:param object_or_dict: instance or flavor as an object or just a dict
:param updates: key-value pairs to update the passed object.
Currently only considers 'numa_topology', all other
keys are ignored.
:returns: a dict with all the information from object_or_dict updated
with updates
"""
usage = {}
if isinstance(object_or_dict, objects.Instance):
usage = {'memory_mb': object_or_dict.flavor.memory_mb,
'vcpus': object_or_dict.flavor.vcpus,
'root_gb': object_or_dict.flavor.root_gb,
'ephemeral_gb': object_or_dict.flavor.ephemeral_gb,
'numa_topology': object_or_dict.numa_topology}
elif isinstance(object_or_dict, objects.Flavor):
usage = obj_base.obj_to_primitive(object_or_dict)
else:
usage.update(object_or_dict)
for key in ('numa_topology',):
if key in updates:
usage[key] = updates[key]
return usage
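# Illustrative note (not part of the upstream module): _get_usage_dict
# normalizes three input shapes into one usage dict, e.g.
#   self._get_usage_dict(instance)                  # flavor fields + numa_topology
#   self._get_usage_dict(flavor, numa_topology=nt)  # flavor primitive, topology overridden
#   self._get_usage_dict({'memory_mb': 512})        # plain dicts pass through unchanged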
| [
"[email protected]"
]
| |
9e2613d43c8d4d08d227418cedbf0dd8bf1c3c42 | c09cfdb1302f4d409bfc042dd0fff6a6d59f4a87 | /Apocalypse/textRNN_half2.py | 6b50d99efbdabc8f4d5a19027bbc5fc8bd9a864f | []
| no_license | Utschie/ML_Monitoring_Trade | 30773b91024c7ffca4f95a8e18ed56c4e2147529 | 3791e8405ae314dee4f6008c3feb214427f14a55 | refs/heads/master | 2021-11-08T03:17:06.834889 | 2021-07-05T18:10:37 | 2021-07-05T18:10:37 | 166,300,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,151 | py | #本程序是采用平衡数据集的用tsvd的模型,看看会不会比非平衡数据集好————20210126
#把优化器的eps跳到1e-16,并把初始学习率调低两个数量级,看看loss会不会继续持续下降
'''
经过初始学习率分别为0.01,0.001,0.0001,0.00001的比对后,发现0.001甚至0.01是最好的学习率,loss最低降到1.1左右
而0.0001和0.00001下降非常慢,甚至在初始学习率为0.00001时loss到了5之后下降就非常慢了
而当学习率为0.01时loss很快就下降到了1.1,而且非常稳定,甚至比0.001还稳
'''
#这次试一下如果不用正交初始化而是用默认初始化的话,loss会是个什么结果
#使用默认初始化的效果比正交初始化的结果要略好,甚至大冷的平均收益率超过1(约1.002这样)。
#虽然只跑了可能3个epoch,但是总准确率提高到了42%,所以还是默认初始化比较好————20210202
#如果按照这个测试结果,那么爆大冷,小冷和正常的预测准确率分别为28.14%,29.01%和64.62%————20210203
#按照各种预测结果的最后一帧的最大值购买,那么平均收益分别为12.89%,3.87%和10.52%————20210203
#如果分别按每次命中收益算,即相当于加权,那么算出的平均收益分别为11%,3.6%和7%————20210203
#但实际上就算只卖大冷或者只买小冷,正常,只要都取最后一帧的最大值平均收益也有14%,1%和6.6%————20210203
#如果把模型预测的概率当做投资比例来做投资,那么72532场比赛的平均收益是6.6%,但标准差高达22%————20210203
#加入分布来自均值为6.6%,标准差为22%,那么样本均值的标准差=总标准差/sqrt(n),也就意味着至少要抽12个,才能是1倍标准差内(即以68%的概率)为正收益
#2倍标准差才能以95%的概率>1,此时需要抽45个,才能有95%的概率>1————20210203
#如果以99.7%的概率(3倍标准差以内)保证则需抽100个,然后每个都按照这样的比例投资
#如果不用模型预测的概率,而是用最后一帧的平均概率来投资,然后也用最大那一组赔率,它的均值和标准差会是多少呢?————20210203
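# A minimal sanity check of the sample-size arithmetic above (an illustrative,
# hypothetical helper, not part of the original pipeline): the k-sigma lower
# bound of the sample mean, mean - k*std/sqrt(n), reaches zero at n = (k*std/mean)**2.
import math

def _required_sample_size(mean, std, k):
    # smallest n at which mean - k * std / sqrt(n) reaches zero (rounded up)
    return math.ceil((k * std / mean) ** 2)

# _required_sample_size(0.066, 0.22, 1) -> 12, k=2 -> 45, k=3 -> 100,
# matching the 12 / 45 / 100 counts quoted above.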
import os
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import sys
import pandas as pd
import numpy as np
import csv
import random
import re
from sklearn.decomposition import TruncatedSVD
from torch.nn.utils.rnn import pad_sequence  # used to pad sequences
import time
from prefetch_generator import BackgroundGenerator
from torch.utils.tensorboard import SummaryWriter
from pywick.optimizers.nadam import Nadam  # the Nadam optimizer from the pywick package
from torch.optim import lr_scheduler
from torch.optim import Adam
with open('/home/jsy/data/cidlist_complete.csv') as f:
reader = csv.reader(f)
cidlist = [row[1] for row in reader]  # build the cid lookup table
cidlist = list(map(float,cidlist))  # convert each element from str to float
class DataLoaderX(DataLoader):
def __iter__(self):
return BackgroundGenerator(super().__iter__())
class BisaiDataset(Dataset):  # data preprocessor
def __init__(self,filepath):  # filepath points to a text file listing the per-match csv paths
with open(filepath,'r') as f:  # read the filepath file into the filelist list
self.filelist = []
for line in f:
self.filelist.append(line.strip('\n'))
self.lablelist = pd.read_csv('/home/jsy/data/lablelist.csv',index_col = 0)  # table of match ids and their results
self.lables = {'win':0,'draw':1,'lose':2}  # classification labels must start from 0 and follow the order in the table
def __getitem__(self, index):
#todo
# 1. Read one data from file (e.g. using numpy.fromfile, PIL.Image.open).
# note: step 1, "read one data", returns a single item
data_path = self.filelist[index]
bisai_id = int(re.findall(r'/(\d*?).csv',data_path)[0])
# 2. Preprocess the data (e.g. torchvision.Transform).
data = self.csv2frame(data_path)
# 3. Return a data pair (e.g. image and label).
lable = self.lablelist.loc[bisai_id].result
lable = self.lables[lable]
return data,lable
def __len__(self):
# You should change 0 to the total size of your dataset.
return len(self.filelist)
def csv2frame(self,filepath):  # given one match's csv path, build the frame list, the matching frame-time list, and the match result
data = pd.read_csv(filepath)  # read the file
data = data.drop(columns=['league','zhudui','kedui','companyname'])  # drop the non-numeric columns
frametimelist=data.frametime.value_counts().sort_index(ascending=False).index  # read the frametime values into a list
framelist =list()  # framelist starts empty, same length as frametimelist; the exact shape and float dtype must be fixed, otherwise the dataloader cannot read it
'''
The two loops here are too slow, and pandas is even slower; after dropping pandas entirely,
processing went from 109 s down to 10 s, at which point CPU utilization sits at 20% and
cannot be pushed any higher -- a C or C++ rewrite may be needed.
'''
new_data = np.array(data)
lables = new_data[:,0]
if len(frametimelist)>250:
frametimelist = [frametimelist[0]]+random.sample(list(frametimelist)[1:-1],248)+[frametimelist[-1]]  # if longer than 250, keep the head and tail and randomly sample 248 in between, 250 in total
frametimelist.sort(reverse=True)  # and sort in descending order
for i in frametimelist:
state = new_data[lables==i]  # get this frame's transition, starting from the first odds change
#state = np.array(state)  # no need to convert to an ndarray, it already is one
state = np.delete(state,(0,1), axis=-1)  # drop frametime and cid
# before padding into matrices we need to know how many cids exist in all the data
framelist.append(state)
frametimelist = np.array(frametimelist)
vectensor = self.mrx2vec(framelist)
len_frame = vectensor.shape[0]
if len_frame<250:
vectensor = np.concatenate((np.zeros((250-len_frame,10),dtype=np.float64),vectensor),axis=0)  # if shorter than 250, left-pad with zeros
vectensor = torch.from_numpy(vectensor)
return vectensor  # returns the frame tensor; frametimelist could be returned as well, positional features are ignored here for now
def tsvd(self,frame):
tsvd = TruncatedSVD(1)
if frame.shape[0] != 1:
newframe = tsvd.fit_transform(np.transpose(frame))  # reduce to a single (10, 1) component
else:
return frame.reshape((10,1))  # a one-row frame only needs a reshape
return newframe
def mrx2vec(self,flist):  # turn each matrix into a vector via truncated SVD (matrix2vec/img2vec); in: len(frametimelist)*(306*10), out: len(frametimelist)*10
vectensor = np.array(list(map(self.tsvd,flist))).squeeze(2)
#veclist = veclist.transpose()
#vectensor = torch.from_numpy(veclist)  # convert to a tensor
return vectensor  # returns an array of shape (sequence length, 10); pad_sequence is still needed before feeding the model (dim 0 is the batch dim)
class Lstm(nn.Module):  # use the default initialization right where the model is built
def __init__(self):
super().__init__()
self.encoder = nn.LSTM(input_size=10,
hidden_size=250,  # matches the 250-frame truncated sampling that keeps the head and tail
num_layers=1,  # only one layer for now
bidirectional=True)
#nn.init.orthogonal_(self.encoder.weight_ih_l0)
#nn.init.orthogonal_(self.encoder.weight_hh_l0)
#nn.init.constant_(self.encoder.bias_ih_l0,0.0)
#nn.init.constant_(self.encoder.bias_hh_l0,0.0)
self.decoder = nn.Sequential(
nn.Linear(1000, 250),  # project the concatenated LSTM outputs (1000 dims)
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(250, 3)
)
#nn.init.normal_(self.decoder[0].weight,mean=0.0)
#nn.init.constant_(self.decoder[0].bias, 0.0)
#nn.init.normal_(self.decoder[3].weight,mean=0.0)
#nn.init.constant_(self.decoder[3].bias, 0.0)
def forward(self,inputs):
output, _= self.encoder(inputs.permute(1,0,2))  # permute inputs before the LSTM: PyTorch wants dim 0 = sequence length, dim 1 = batch size
encoding = torch.cat((output[0], output[-1]), -1)  # bidirectional LSTM: concatenate the outputs at both ends
return self.decoder(encoding)  # feed the end-of-sequence outputs into the MLP
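# Hypothetical shape check for the model above (not called anywhere; an
# illustrative sketch only). It assumes the 250-frame, 10-feature input
# produced by BisaiDataset.csv2frame.
def _lstm_smoke_test(batch_size=2):
    model = Lstm().double()
    dummy = torch.zeros(batch_size, 250, 10, dtype=torch.float64)
    out = model(dummy)
    # encoder yields (250, batch, 500); cat of both ends gives (batch, 1000);
    # the decoder maps that to one logit per result: win/draw/lose
    assert out.shape == (batch_size, 3)
    return out.shape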
def get_parameter_number(model):  # parameter count
total_num = sum(p.numel() for p in model.parameters())
trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
return {'Total': total_num, 'Trainable': trainable_num}
if __name__ == "__main__":
train_writer = SummaryWriter('/home/jsy/log3/train')  # created automatically
test_writer = SummaryWriter('/home/jsy/log3/test')  # created automatically
checkpoint_path = '/home/jsy/log3/checkpoint.pth'  # the checkpoint folder must be created beforehand
train_path = '/home/jsy/balanced_train_path.txt'
test_path = '/home/jsy/balanced_test_path.txt'
dataset = BisaiDataset(train_path)  # training set
test_set = BisaiDataset(test_path)  # validation set
print('datasets loaded')
loader = DataLoaderX(dataset, 128 ,shuffle=True,num_workers=4,pin_memory=True)  # cannot run in interactive mode when num_workers > 0
test_loader = DataLoaderX(test_set, 128, shuffle=True,num_workers=4,pin_memory=True)  # validation dataloader
print('dataloaders ready')
net = Lstm().double().cuda()  # double precision
print('network built')
stat1 = get_parameter_number(net)
print(str(stat1))
lr, num_epochs = 0.001, 2000
optimizer= Adam(net.parameters(), lr=lr,eps=1e-16)
start_epoch = 1  # start from epoch 1 when there is no checkpoint
gesamt_counter = 0
if os.path.exists(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch']+1
gesamt_counter = checkpoint['gesamt_counter']
loss = nn.CrossEntropyLoss()
scheduler = lr_scheduler.StepLR(optimizer,step_size=5,gamma=0.25)
for epoch in range(start_epoch, num_epochs + 1):
l_list = list()
epoch_start = time.time()  # time the whole epoch, excluding validation
net.train()
counter = 0
start = time.time()
train_output = torch.zeros((1,3))
train_y = torch.zeros((1)).long()  # must be long dtype
#if epoch == 6:
#    loader = DataLoaderX(dataset,64,shuffle=True,num_workers=4,pin_memory=True)  # after 10 epochs, switch the batch size to 64
#    optimizer= Nadam(net.parameters(), lr=lr/pow(4,2))
for x, y in iter(loader):
# the padded zeros should still be kept out of the computation, so a mask matrix may be needed
# or a temporal global max-pooling layer to cancel out the padding
x = x.double().cuda()
y = y.long().cuda()
output = net(x)  # x must be a fixed-shape tensor with batch_size as dim 0, hence the batch padding
l = loss(output, y)
optimizer.zero_grad() # clear the gradients, equivalent to net.zero_grad()
l.backward()
optimizer.step()
end = time.time()
train_period = end-start
counter+=1
gesamt_counter+=1
print('epoch '+str(epoch)+' has trained '+str(counter)+' batches, took '+str(train_period)+' s')
l_list.append(l.item())
train_writer.add_scalar('step_loss',l.item(),gesamt_counter)  # per-step training loss curve
#print('loss: %f' % (l.item()))
start = time.time()
train_output = torch.cat((train_output,output.cpu()),0)  # concatenate this batch's outputs
train_y = torch.cat((train_y,y.cpu()),0)  # concatenate this batch's labels
epoch_end = time.time()
print('epoch %d, loss: %f' % (epoch,np.mean(l_list)))
print('epoch '+str(epoch)+' training took '+str(int(epoch_end-epoch_start))+' s')
prediction = torch.argmax(train_output, 1)  # index of the highest-scoring class for each match
correct = (prediction == train_y).sum().float()  # total number of correct predictions
accuracy = correct/len(train_y)  # Top-1 accuracy; with only three classes Top-2 is not tracked
train_writer.add_scalar('Top-1 Accuracy',accuracy,epoch)  # write to the log
# validation at the end of each epoch
#if epoch>=20:
print('starting validation......')
test_start = time.time()
net.eval()
torch.cuda.empty_cache()  # free some GPU memory
with torch.no_grad():  # keeps GPU memory from blowing up during validation
test_output = torch.zeros((1,3))
test_y = torch.zeros((1)).long()  # must be long dtype
test_counter = 0
for x,y in iter(test_loader):
x = x.double().cuda()
output = net(x).cpu()  # move the outputs to host memory
test_output = torch.cat((test_output,output),0)  # concatenate this batch's outputs
test_y = torch.cat((test_y,y),0)  # concatenate this batch's labels
#torch.cuda.empty_cache()
test_counter+=1
print('validation has finished '+str(test_counter)+' batches')
# the first element of test_output and test_y is the zero placeholder; its share is tiny, so it is kept
print('computing results......')
l_test = loss(test_output,test_y)  # one overall mean loss for the whole validation set (nn.CrossEntropyLoss averages by default)
test_writer.add_scalar('epoch_loss',l_test.item(),epoch)  # validation loss once per epoch
train_writer.add_scalar('epoch_loss',np.mean(l_list),epoch)  # also log the training loss per epoch
prediction = torch.argmax(test_output, 1)  # index of the highest-scoring class for each match
correct = (prediction == test_y).sum().float()  # total number of correct predictions
accuracy = correct/len(test_y)  # Top-1 accuracy; with only three classes Top-2 is not tracked
test_writer.add_scalar('Top-1 Accuracy',accuracy,epoch)  # write to the log
test_end = time.time()
print('validation finished, took '+str(int(test_end-test_start))+' s')
print('validation done, start saving......')
# model checkpointing below
checkpoint = {
'epoch': epoch,
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'gesamt_counter':gesamt_counter
}
torch.save(checkpoint, checkpoint_path)  # save the checkpoint to disk
torch.cuda.empty_cache()  # free some GPU memory
scheduler.step()
print('saving finished')
| [
"[email protected]"
]
| |
8bd85a71ed32a09c3f871431ee97970c9134121b | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/FrontCache/FPythonCode/FC_TCOLL_01_ATS_40.py | 11d35ced4c4937f267029d8778ff4dfce2f825ed | []
| no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,150 | py |
'''----------------------------------------------------------------------------------------------------------
MODULE : FC_TCOLL_01_ATS_40
PROJECT : FX onto Front Arena
PURPOSE : This module is the entry point for the Trade Collection ATSs. These ATSs will
subscribe to Trade Collection Requests. They will pull the relevant Front Cache
data from Front Cache Tradign Manager Template for the specific trades in the
incoming request. Once a Request and/or Batch is complete, a Response message
will be posted onto the AMB so that the Response can be send to subscribing
consumers to notify them that the data for the Request or Batch is avaiable
for consumption.
DEPARTMENT AND DESK : All Departments and all Desks.
REQUASTER : FX onto Front Arena Project
DEVELOPER : Heinrich Cronje
CR NUMBER : XXXXXX
-------------------------------------------------------------------------------------------------------------
'''
'''----------------------------------------------------------------------------------------------------------
Importing all relevant Python and custom modules needed for the ATS to start up. Initializing the FC_UTILS
module to load all Parameters, Logging, Error Handler.
----------------------------------------------------------------------------------------------------------'''
import FC_ERROR_HANDLER_DEFAULT as ERROR_HANDLER_DEFAULT
import traceback
try:
from FC_UTILS import FC_UTILS as UTILS
except ImportError, e:
ERROR_HANDLER_DEFAULT.handelError('Import Error in module %s.' %__name__, e, traceback)
raise ImportError('Import Error in module %s. ERROR: %s.' %(__name__, str(e)))
try:
UTILS.Initialize(__name__)
except Exception, e:
ERROR_HANDLER_DEFAULT.handelError('Initialization Error in module %s. FC_UTILS could not be initialized. '
'No Parameters, Logging or Error Handling could be loaded. '
'The ATS will not start until the root issue is resolved.' %__name__, e, traceback)
raise Exception('Initialization Error in module %s. FC_UTILS could not be initialized. '
'No Parameters, Logging or Error Handling could be loaded. '
'The ATS will not start until the root issue is resolved. ERROR: %s. ' %(__name__, str(e)))
try:
from FC_EXCEPTION import FC_EXCEPTION as EXCEPTION
except ImportError, e:
ERROR_HANDLER_DEFAULT.handelError('Import Error in module %s. FC_EXCEPTION could not be imported. '
'No Error Handling could be loaded. '
'The ATS will not start until the root issue is resolved.' %__name__, e, traceback)
raise Exception('Import Error in module %s. FC_EXCEPTION could not be imported. '
'No Error Handling could be loaded. '
'The ATS will not start until the root issue is resolved. ERROR: %s. ' %(__name__, str(e)))
try:
from datetime import datetime
except ImportError, e:
UTILS.ErrorHandler.processError(None, EXCEPTION('Import Error in module %s. Module datetime could not be imported. '
'The ATS will not start until the root issue is resolved.' %__name__, traceback, 'CRITICAL', e), __name__)
raise Exception('Import Error in module %s. Module datetime could not be imported. '
'The ATS will not start until the root issue is resolved. ERROR: %s' %(__name__, str(e)))
try:
from FC_TCOLL_ATS_WORKER import FC_TCOLL_ATS_WORKER as TCOLL_ATS_WORKER
except ImportError, e:
UTILS.ErrorHandler.processError(None, EXCEPTION('Could not import the worker module in module %s' %__name__, traceback, 'CRITICAL', None), __name__)
raise Exception('Could not import the worker module in module %s. ERROR: %s' %(__name__, str(e)))
'''----------------------------------------------------------------------------------------------------------
Global variables
-------------------------------------------------------------------------------------------------------------
'''
global worker
worker = None
'''----------------------------------------------------------------------------------------------------------
work function which the ATS will call once started.
-------------------------------------------------------------------------------------------------------------
'''
def work():
global worker
if not worker:
UTILS.ErrorHandler.processError(None, EXCEPTION(UTILS.Constants.fcExceptionConstants.WORKER_VARIABLE_S_IS_NOT_INSTANTIATED %__name__, traceback, UTILS.Constants.fcGenericConstants.CRITICAL, None), __name__)
else:
worker.work()
'''----------------------------------------------------------------------------------------------------------
start function which the ATS will call when the ATS is starting.
-------------------------------------------------------------------------------------------------------------
'''
def start():
UTILS.Logger.flogger.info(UTILS.Constants.fcFloggerConstants.STARTING_ATS_S_AT_S %(__name__, datetime.now()))
global worker
if not worker:
worker = TCOLL_ATS_WORKER()
worker.start()
'''----------------------------------------------------------------------------------------------------------
stop function which the ATS will call when the ATS is stopping.
-------------------------------------------------------------------------------------------------------------
'''
def stop():
global worker
if not worker:
UTILS.ErrorHandler.processError(None, EXCEPTION(UTILS.Constants.fcExceptionConstants.WORKER_VARIABLE_IN_S_IS_NOT_INSTANTIATED_STOP %__name__, traceback, UTILS.Constants.fcGenericConstants.MEDIUM, None), __name__)
else:
worker.stop()
#start()
#work()
#stop()
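# Lifecycle note (descriptive): the hosting ATS invokes start() once on boot,
# work() repeatedly while running, and stop() at shutdown; the commented-out
# calls above mirror that sequence for local testing.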
| [
"[email protected]"
]
| |
97f54bfaf9957347fb4254fc70ebbe9c10c2fb2f | 03e4331a8d5c107f7cc1d814215ed1547ba6a0f0 | /xTool/models/models.py | 315e176f675bac1087ca7cd370482d941f7dd775 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | KqSMea8/xTool | ba61a4f56e2e5ddda7caaf429cfb452be06a65c6 | eade656ca77347d2c05e66a3d680e236c8779683 | refs/heads/master | 2020-04-11T19:01:29.673695 | 2018-12-16T16:15:50 | 2018-12-16T16:15:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | #coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.standard_library import install_aliases
from builtins import str
from builtins import object, bytes
import dill
from urllib.parse import urlparse, quote, parse_qsl
from sqlalchemy import (
Column, Integer, String, DateTime, Text, Boolean, ForeignKey, PickleType,
Index, Float, LargeBinary)
from sqlalchemy import func, or_, and_, true as sqltrue
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import reconstructor, relationship, synonym
install_aliases()
Base = declarative_base()
# length of the primary-key ID columns
ID_LEN = 250
# default key for the XCom intermediate table
XCOM_RETURN_KEY = 'return_value'
| [
"[email protected]"
]
| |
8123f8b823863a2cdfac01616013fec780ac3e16 | ef4a1748a5bfb5d02f29390d6a66f4a01643401c | /algorithm/2_algo/strting_search.py | cd08c5997becfb887981008a564f5f0a36907fff | []
| no_license | websvey1/TIL | aa86c1b31d3efc177df45503d705b3e58b800f8e | 189e797ba44e2fd22a033d1024633f9e0128d5cf | refs/heads/master | 2023-01-12T10:23:45.677578 | 2019-12-09T07:26:59 | 2019-12-09T07:26:59 | 162,102,142 | 0 | 1 | null | 2022-12-11T16:31:08 | 2018-12-17T08:57:58 | Python | UTF-8 | Python | false | false | 667 | py | import sys, time
start = time.time()
sys.stdin = open("./tc/strting_search.txt","r")
T = int(input())
for tc in range(1,T+1):
data = input()
all = input()
############# easy version ############# 0.0 s
result = 5
if data in all:
result = 1
else:
result = 0
############### hard version ############### 0.001 s
# result = 5
#
# for i in range(len(all)-len(data)+1):
# if all[i:i+len(data)] == data:
# result = 1
# break
# else:
# result = 0
# print(data)
# print(all[i:i+len(data)])
print("#%d %d" %(tc,result), time.time()- start)
| [
"[email protected]"
]
| |
bd090aca89d155016d194168fac8a7c7b8509f17 | ea393959886a5cd13da4539d634f2ca0bbcd06a2 | /82.py | 7ccff9c2594f1e475a361dff197c8395f4f63aba | []
| no_license | zhangchizju2012/LeetCode | f605f35b82f16282559af71e4e61ec2629a90ebc | 0c4c38849309124121b03cc0b4bf39071b5d1c8c | refs/heads/master | 2020-04-05T12:12:14.810639 | 2018-08-09T10:24:52 | 2018-08-09T10:24:52 | 81,021,830 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 15 01:07:11 2017
@author: zhangchi
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None:
return None
result = ListNode(0)
temp = result
data = head.val
label = True
while head.next is not None:
head = head.next
if head.val != data and label == True:
temp.next = ListNode(data)
temp = temp.next
data = head.val
elif head.val != data and label != True:
label = True
data = head.val
else:
label = False
if label == True:
temp.next = ListNode(head.val)
return result.next | [
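# Example trace (descriptive): for 1->2->3->3->4->4->5, the nodes 1 and 2 are
# emitted once confirmed distinct, the 3/3 and 4/4 runs are skipped via
# label=False, and the trailing 5 is appended after the loop, giving 1->2->5.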
"[email protected]"
]
| |
67545b2050a0a9a4e4595f07aeedbc7bf6d89031 | 5945903ff7b3c0be799d8b228aa96309e8d6b68a | /PTA_AL_1011.py | ccbd0a00df6ca36344b78bfa9460a3742a7ea3c2 | []
| no_license | freesan44/LeetCode | 44fd01fa37e2d7e729ae947da2350b1649c163ae | 2ed9f1955c527d43fe1a02e5bebf5a6f981ef388 | refs/heads/master | 2021-12-07T20:07:02.308097 | 2021-11-01T23:58:11 | 2021-11-01T23:58:11 | 245,178,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | inputList = []
for _ in range(3):
inputData = list(map(float,input().split()))
# inputData = list(map(float, "1.1 2.5 1.7".split()))
inputList.append(inputData)
retList = []
res = 1
for i in inputList:
maxVal = max(i)
indexVal = i.index(maxVal)
if indexVal == 0:
retList.append("W")
elif indexVal == 1:
retList.append("T")
else:
retList.append("L")
res *= maxVal
res = 2 * (res*0.65 - 1)
## 难点是格式化方式
res = "%.2f" % res
retList.append(res)
print(" ".join(retList))
| [
"[email protected]"
]
| |
35e7366e76f6e50c77b6fa3fcf1065b6905128ef | 05780fe9a74b116832611a35fce38fa24b4d4ffc | /madgraph/madgraph_binaries/models/taudecay_UFO/__init__.py | 3c9f65445319ce87b7af17ac5a5968bbe0ceae11 | []
| no_license | cesarotti/Dark-Photons | d810658190297528470abe757c4a678075ef48f6 | c6dce1df70c660555bf039a78765e4efbffb4877 | refs/heads/master | 2021-01-22T19:26:13.892225 | 2015-01-28T05:43:20 | 2015-01-28T05:49:54 | 20,692,647 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 893 | py |
import particles
import couplings
import lorentz
import parameters
import vertices
import coupling_orders
import write_param_card
import propagators
all_particles = particles.all_particles
all_vertices = vertices.all_vertices
all_couplings = couplings.all_couplings
all_lorentz = lorentz.all_lorentz
all_parameters = parameters.all_parameters
all_orders = coupling_orders.all_orders
all_functions = function_library.all_functions
all_propagators = propagators.all_propagators
try:
import decays
except ImportError:
pass
else:
all_decays = decays.all_decays
try:
import form_factors
except ImportError:
pass
else:
all_form_factors = form_factors.all_form_factors
try:
import CT_vertices
except ImportError:
pass
else:
all_CTvertices = CT_vertices.all_CTvertices
gauge = [0]
__author__ = "K. Mawatari, J. Nakamura"
__date__ = "2014.05.08"
__version__= "2.0"
| [
"[email protected]"
]
| |
071cf340c23a15c39e29549b47c35d45036859f0 | 551ef0567aca428a535775d3949f5d9670c0d29c | /abc/173/d/main.py | 17b7c7ad3e7efd9f8db31585e1d42505e573aa4d | []
| no_license | komo-fr/AtCoder | 7451a9402466ce8d487d0c521128732061c647df | c916889294cb12f21e74254de43b3e17e1b354bc | refs/heads/master | 2023-07-22T07:05:52.955188 | 2023-03-01T14:22:16 | 2023-03-01T14:22:16 | 213,109,943 | 0 | 0 | null | 2023-07-06T22:01:28 | 2019-10-06T04:44:49 | Python | UTF-8 | Python | false | false | 370 | py | #!/usr/bin/env python3
from collections import deque
N = int(input().split()[0])
a_list = list(map(int, input().split()))
a_list = sorted(a_list, reverse=True)
c_list = a_list[: (N + 1) // 2]
if len(c_list) == 1:
ans = c_list[0]
elif N % 2 == 0:
ans = c_list[0] + sum(c_list[1:]) * 2
else:
ans = c_list[0] + sum(c_list[1:-1]) * 2 + c_list[-1]
print(ans)
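# Rationale (ABC 173 D, "Chat in a Circle"): seating guests in descending
# friendliness, the i-th arrival (i >= 1) can sit between two earlier guests
# and enjoys a[i // 2]; so a[0] counts once, each following kept value counts
# twice, and when N is odd the smallest kept value counts only once --
# exactly the formula above.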
| [
"[email protected]"
]
| |
fe9f5ac55217dfc033c9cc3c4fd89943726640c8 | 614e01d08c8bb5adbe4d263d9dba04688502a12f | /toggl_driver/commands/start_timer.py | fe1b1ed883f441770071e74e3ae2ab9cf118f09e | [
"MIT"
]
| permissive | cloew/TogglDriver | 28b2b2ebd396d08000fc92e0013f15722975ae06 | 7b0528710e7686690a88a22cf5cca1f3ac55ebbf | refs/heads/master | 2021-01-10T13:05:58.759515 | 2015-10-01T03:43:52 | 2015-10-01T03:43:52 | 43,025,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | from ..args import OptionalProjectArg
from ..config import GlobalConfig
from kao_command.args import Arg, BareWords
class StartTimer:
""" Represents a command to start the Toggl Timer """
description = "Start the Toggl Timer"
args = [Arg('description', nargs='+', provider=BareWords),
OptionalProjectArg(help="start the timer within")]
def run(self, *, description, project=None, workspace=None):
""" Start the timer """
entry = None
if project:
entry = GlobalConfig.connection.TimeEntry(description=description, pid=project.id)
else:
entry = GlobalConfig.connection.TimeEntry(description=description, wid=workspace.id)
entry.start() | [
"[email protected]"
]
| |
bcb1548eaff70ab7970362c482e0a054b23840d0 | 775fdec8dd3d959560450fec3cf17c82a79e3f61 | /apps/user_login/views.py | 75b7d126f265a28fa96c48312207d196e04a6e1f | []
| no_license | HarmsA/Dojo_Ninja | f2ff9833ea1b7707bed567ab869d1a645f8694a4 | 23ce11de538e600fccf64ac3c28348ca7bf38422 | refs/heads/master | 2020-04-09T03:13:10.591710 | 2018-12-02T18:27:29 | 2018-12-02T18:27:29 | 159,974,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | from django.shortcuts import render, HttpResponse
# Create your views here.
def index(request):
return HttpResponse('hi') | [
"[email protected]"
]
| |
9a77046a8b02899002884bdbcb8f4b15478e20c2 | eff7effdc4ada534be1c76ca83ac026ace4f4c05 | /leetcode/242.py | 715f3b2f70d33cf3920ab152d51059243bef0a29 | []
| no_license | ceuity/algorithm | 470951d9fe77de3b0b28ae06f8224cf8a619d5b5 | dd28a842709ae00c3793741e411f2cb8e5086fda | refs/heads/master | 2023-06-20T11:32:56.994859 | 2021-07-19T20:31:07 | 2021-07-19T20:31:07 | 279,136,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from collections import Counter
# 48ms
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
if sorted(s) == sorted(t):
return True
else:
return False
# 32ms
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
if Counter(s) == Counter(t):
return True
else:
return False
"""
처음엔 간단하게 sorted 함수를 이용하여 풀었으나, Counter 함수를 이용했을 때 더 빨랐다.
""" | [
"[email protected]"
]
| |
3adadf9d0a7cf055b3e864a876187d3d5e847789 | 58f38f1d69d4bfc650ad18e0045c36ae29c9d84a | /Django基础部分代码/chapter04/orm_lookup_demo/manage.py | ffb9c5d178fd47e763791ff1172e7f8d84831441 | []
| no_license | zjf201811/DjangoWebProject | 0670c61b89387901089bf67cf2423d9341f69913 | fab15784fb326ba4517951e180418ea54de03afe | refs/heads/master | 2020-04-18T12:03:08.798484 | 2019-05-06T03:59:46 | 2019-05-06T03:59:46 | 167,522,193 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "orm_lookup_demo.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
]
| |
7c5d2d23bc243408c2cc2678b42f0f0f589019e4 | 2d74104aaa132896a65ea0032951eee5d4c97840 | /chemman/floor_map/apps.py | a55b9d94eeb570eacc258399a39914b0a100a76f | []
| no_license | Whitie/ChemManager | 6e228e8713f9dfeca21adbd3e9a65c8871a822bc | d40792361527219514b1b4cc03718ea7c2a92777 | refs/heads/master | 2023-06-09T09:29:41.626087 | 2022-12-14T13:29:44 | 2022-12-14T13:29:44 | 189,994,861 | 0 | 0 | null | 2023-04-21T21:40:13 | 2019-06-03T11:47:23 | Python | UTF-8 | Python | false | false | 92 | py | from django.apps import AppConfig
class FloorMapConfig(AppConfig):
name = 'floor_map'
| [
"[email protected]"
]
| |
2d740379b638a22df79119c84d3b7dddf824aa09 | 4ef80242cf22a1ccd0d7a2042476b5b6ac1eb03e | /build/lib/scadparser/ScadModel.py | c00db947830a69e33733ad984fc06ea2a68a7bc0 | []
| no_license | rblack42/ScadParser | 71081adb99ec03e78bc78b4101562b7fa1bab134 | a9cc10b23c6515a53065dfb58b23881d0145f88d | refs/heads/master | 2023-07-11T03:51:53.434534 | 2021-08-27T02:03:37 | 2021-08-27T02:03:37 | 397,718,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | from __future__ import annotations
import os
import tatsu
from tatsu.ast import AST
symbol_table = {}
def store(id, val):
symbol_table[id] = val
def lookup(id):
if not id in symbol_table:
return id
else:
return symbol_table[id]
class ScadSemantics(object):
def include(self, ast):
print("CWD" + str(os.getcwd()))
grammar = open('grammar/scad.tatsu').read()
incfile = os.path.join('scad', ast.file)
prog = open(incfile).read()
parser = tatsu.compile(grammar)
ast = parser.parse(prog,
trace=False, colorize=True, semantics=ScadSemantics())
return ast
def int(self, ast):
return int(ast)
def fract(self, ast):
return float(ast)
def ident(self, ast):
return lookup(ast)
def declaration(self, ast):
store(ast.id, ast.value)
return ast
def addition(self, ast):
return ast.left + ast.right
def subtraction(self, ast):
return ast.left - ast.right
def multiplication(self, ast):
return ast.left * ast.right
def division(self, ast):
return ast.left / ast.right
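# A minimal usage sketch (illustrative only; the input string and the behavior
# of declaration() depend on the actual grammar/scad.tatsu rules, which are
# assumptions here):
#
#   parser = tatsu.compile(open('grammar/scad.tatsu').read())
#   parser.parse('x = 1 + 2;', semantics=ScadSemantics())
#   lookup('x')   # -> 3 if the grammar routes assignments through declaration()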
| [
"[email protected]"
]
| |
bbc92ccd5d682422acb5a8364021fb0f1838bea1 | 3ec32f5aba8624125918adad5cfbc174d698526d | /test/functional/zmq_test.py | 3a7b77bc01b67f696e4585ac2662d14f1115b421 | [
"MIT"
]
| permissive | aixinwang/atc | b51b85bd91956657d70b72ca128d30132754269e | 9f0b53af19735ce0d6a5a6feed6733a51f109019 | refs/heads/master | 2021-04-03T06:48:24.497048 | 2018-03-14T04:53:58 | 2018-03-14T04:59:04 | 125,152,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,297 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Ai the coins developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ API."""
import configparser
import os
import struct
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import (assert_equal,
bytes_to_hex_str,
)
class ZMQTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
def setup_nodes(self):
# Try to import python3-zmq. Skip this test if the import fails.
try:
import zmq
except ImportError:
raise SkipTest("python3-zmq module not available.")
# Check that bitcoin has been built with ZMQ enabled
config = configparser.ConfigParser()
if not self.options.configfile:
self.options.configfile = os.path.dirname(__file__) + "/../config.ini"
config.read_file(open(self.options.configfile))
if not config["components"].getboolean("ENABLE_ZMQ"):
raise SkipTest("bitcoind has not been built with zmq enabled.")
self.zmqContext = zmq.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.set(zmq.RCVTIMEO, 60000)
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
ip_address = "tcp://127.0.0.1:28332"
self.zmqSubSocket.connect(ip_address)
extra_args = [['-zmqpubhashtx=%s' % ip_address, '-zmqpubhashblock=%s' % ip_address], []]
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
def run_test(self):
try:
self._zmq_test()
finally:
# Destroy the zmq context
self.log.debug("Destroying zmq context")
self.zmqContext.destroy(linger=None)
def _zmq_test(self):
genhashes = self.nodes[0].generate(1)
self.sync_all()
self.log.info("Wait for tx")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"hashtx")
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) # must be sequence 0 on hashtx
self.log.info("Wait for block")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) # must be sequence 0 on hashblock
blkhash = bytes_to_hex_str(body)
assert_equal(genhashes[0], blkhash) # blockhash from generate must be equal to the hash received over zmq
self.log.info("Generate 10 blocks (and 10 coinbase txes)")
n = 10
genhashes = self.nodes[1].generate(n)
self.sync_all()
zmqHashes = []
blockcount = 0
for x in range(n * 2):
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
if topic == b"hashblock":
zmqHashes.append(bytes_to_hex_str(body))
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount + 1)
blockcount += 1
for x in range(n):
assert_equal(genhashes[x], zmqHashes[x]) # blockhash from generate must be equal to the hash received over zmq
self.log.info("Wait for tx from second node")
# test tx from a second node
hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# now we should receive a zmq msg because the tx was broadcast
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
assert_equal(topic, b"hashtx")
hashZMQ = bytes_to_hex_str(body)
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount + 1)
assert_equal(hashRPC, hashZMQ) # txid from sendtoaddress must be equal to the hash received over zmq
if __name__ == '__main__':
ZMQTest().main()
| [
"[email protected]"
]
| |
6e07ce4368cf52e75c822a991a574494f9378a4d | f2575444e57696b83ce6dcec40ad515b56a1b3a9 | /Algorithms/Implementation/JumpingOnTheCloudsRevisited.py | 6ac69a0db82d7aeb16eaa9fcb0a6ad2d256bdec5 | []
| no_license | abhi10010/Hackerrank-Solutions | 046487d79fc5bf84b4df5ef2117578d29cb19243 | da2a57b8ebfcc330d94d104c1755b8c62a9e3e65 | refs/heads/master | 2021-07-24T09:41:49.995295 | 2020-07-12T09:31:58 | 2020-07-12T09:31:58 | 195,647,097 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | import math
import os
import random
import re
import sys
def jumpingOnClouds(c, k):
    # start with 100 energy and jump k clouds at a time around the circle
    e = 100
    i = 0
    while True:
        i = (i + k) % len(c)  # positions wrap around the circular array
        e -= 2 * c[i] + 1     # a thunderhead (c[i] == 1) costs 3, a cumulus costs 1
        if i == 0:            # back at the starting cloud: the game ends
            break
    return e
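# Example (HackerRank sample): c = [0, 0, 1, 0, 0, 1, 1, 0], k = 2 visits
# clouds 2, 4, 6, 0 at costs 3 + 1 + 3 + 1 = 8, so jumpingOnClouds(c, 2) == 92.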
| [
"[email protected]"
]
| |
49f1a41f9ba2b58896b0eb4e7a76d13dbb45c2a1 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/neutron_list_security_group_rules_request.py | c2fd8e52b4af1f422cfe563c56bf84d24458f81d | [
"Apache-2.0"
]
| permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 13,621 | py | # coding: utf-8
import pprint
import re
import six
class NeutronListSecurityGroupRulesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'limit': 'int',
'marker': 'str',
'id': 'str',
'direction': 'str',
'protocol': 'str',
'ethertype': 'str',
'description': 'str',
'remote_ip_prefix': 'str',
'remote_group_id': 'str',
'security_group_id': 'str',
'port_range_max': 'str',
'port_range_min': 'str',
'tenant_id': 'str'
}
attribute_map = {
'limit': 'limit',
'marker': 'marker',
'id': 'id',
'direction': 'direction',
'protocol': 'protocol',
'ethertype': 'ethertype',
'description': 'description',
'remote_ip_prefix': 'remote_ip_prefix',
'remote_group_id': 'remote_group_id',
'security_group_id': 'security_group_id',
'port_range_max': 'port_range_max',
'port_range_min': 'port_range_min',
'tenant_id': 'tenant_id'
}
def __init__(self, limit=None, marker=None, id=None, direction=None, protocol=None, ethertype=None, description=None, remote_ip_prefix=None, remote_group_id=None, security_group_id=None, port_range_max=None, port_range_min=None, tenant_id=None):
"""NeutronListSecurityGroupRulesRequest - a model defined in huaweicloud sdk"""
self._limit = None
self._marker = None
self._id = None
self._direction = None
self._protocol = None
self._ethertype = None
self._description = None
self._remote_ip_prefix = None
self._remote_group_id = None
self._security_group_id = None
self._port_range_max = None
self._port_range_min = None
self._tenant_id = None
self.discriminator = None
if limit is not None:
self.limit = limit
if marker is not None:
self.marker = marker
if id is not None:
self.id = id
if direction is not None:
self.direction = direction
if protocol is not None:
self.protocol = protocol
if ethertype is not None:
self.ethertype = ethertype
if description is not None:
self.description = description
if remote_ip_prefix is not None:
self.remote_ip_prefix = remote_ip_prefix
if remote_group_id is not None:
self.remote_group_id = remote_group_id
if security_group_id is not None:
self.security_group_id = security_group_id
if port_range_max is not None:
self.port_range_max = port_range_max
if port_range_min is not None:
self.port_range_min = port_range_min
if tenant_id is not None:
self.tenant_id = tenant_id
@property
def limit(self):
"""Gets the limit of this NeutronListSecurityGroupRulesRequest.
Number of records returned per page
:return: The limit of this NeutronListSecurityGroupRulesRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this NeutronListSecurityGroupRulesRequest.
Number of records returned per page
:param limit: The limit of this NeutronListSecurityGroupRulesRequest.
:type: int
"""
self._limit = limit
@property
def marker(self):
"""Gets the marker of this NeutronListSecurityGroupRulesRequest.
ID of the starting resource for pagination; the first page is returned when it is left empty
:return: The marker of this NeutronListSecurityGroupRulesRequest.
:rtype: str
"""
return self._marker
@marker.setter
def marker(self, marker):
"""Sets the marker of this NeutronListSecurityGroupRulesRequest.
ID of the starting resource for pagination; the first page is returned when it is left empty
:param marker: The marker of this NeutronListSecurityGroupRulesRequest.
:type: str
"""
self._marker = marker
@property
def id(self):
"""Gets the id of this NeutronListSecurityGroupRulesRequest.
Filter the query results by security group rule ID
:return: The id of this NeutronListSecurityGroupRulesRequest.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this NeutronListSecurityGroupRulesRequest.
Filter the query results by security group rule ID
:param id: The id of this NeutronListSecurityGroupRulesRequest.
:type: str
"""
self._id = id
@property
def direction(self):
"""Gets the direction of this NeutronListSecurityGroupRulesRequest.
Filter the query results by rule direction; ingress and egress are supported
:return: The direction of this NeutronListSecurityGroupRulesRequest.
:rtype: str
"""
return self._direction
@direction.setter
def direction(self, direction):
"""Sets the direction of this NeutronListSecurityGroupRulesRequest.
Filter the query results by rule direction; ingress and egress are supported
:param direction: The direction of this NeutronListSecurityGroupRulesRequest.
:type: str
"""
self._direction = direction
@property
def protocol(self):
"""Gets the protocol of this NeutronListSecurityGroupRulesRequest.
Filter the query results by the IP protocol of the security group rule
:return: The protocol of this NeutronListSecurityGroupRulesRequest.
:rtype: str
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""Sets the protocol of this NeutronListSecurityGroupRulesRequest.
Filter the query results by the IP protocol of the security group rule
:param protocol: The protocol of this NeutronListSecurityGroupRulesRequest.
:type: str
"""
self._protocol = protocol
@property
def ethertype(self):
"""Gets the ethertype of this NeutronListSecurityGroupRulesRequest.
Filter the query results by ethertype; IPv4 and IPv6 are supported
:return: The ethertype of this NeutronListSecurityGroupRulesRequest.
:rtype: str
"""
return self._ethertype
@ethertype.setter
def ethertype(self, ethertype):
"""Sets the ethertype of this NeutronListSecurityGroupRulesRequest.
Filter the query results by ethertype; IPv4 and IPv6 are supported
:param ethertype: The ethertype of this NeutronListSecurityGroupRulesRequest.
:type: str
"""
self._ethertype = ethertype
@property
def description(self):
"""Gets the description of this NeutronListSecurityGroupRulesRequest.
Filter the query results by the description of the security group rule
:return: The description of this NeutronListSecurityGroupRulesRequest.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this NeutronListSecurityGroupRulesRequest.
Filter the query results by the description of the security group rule
:param description: The description of this NeutronListSecurityGroupRulesRequest.
:type: str
"""
self._description = description
@property
def remote_ip_prefix(self):
"""Gets the remote_ip_prefix of this NeutronListSecurityGroupRulesRequest.
Filter the query results by the remote IP CIDR matched by this security group rule
:return: The remote_ip_prefix of this NeutronListSecurityGroupRulesRequest.
:rtype: str
"""
return self._remote_ip_prefix
@remote_ip_prefix.setter
def remote_ip_prefix(self, remote_ip_prefix):
"""Sets the remote_ip_prefix of this NeutronListSecurityGroupRulesRequest.
Filter the query results by the remote IP CIDR matched by this security group rule
:param remote_ip_prefix: The remote_ip_prefix of this NeutronListSecurityGroupRulesRequest.
:type: str
"""
self._remote_ip_prefix = remote_ip_prefix
@property
def remote_group_id(self):
"""Gets the remote_group_id of this NeutronListSecurityGroupRulesRequest.
Filter the query results by the remote security group ID associated with this rule
:return: The remote_group_id of this NeutronListSecurityGroupRulesRequest.
:rtype: str
"""
return self._remote_group_id
@remote_group_id.setter
def remote_group_id(self, remote_group_id):
"""Sets the remote_group_id of this NeutronListSecurityGroupRulesRequest.
Filter the query results by the remote security group ID associated with this rule
:param remote_group_id: The remote_group_id of this NeutronListSecurityGroupRulesRequest.
:type: str
"""
self._remote_group_id = remote_group_id
@property
def security_group_id(self):
"""Gets the security_group_id of this NeutronListSecurityGroupRulesRequest.
Filter the query results by the ID of the security group this rule belongs to
:return: The security_group_id of this NeutronListSecurityGroupRulesRequest.
:rtype: str
"""
return self._security_group_id
@security_group_id.setter
def security_group_id(self, security_group_id):
"""Sets the security_group_id of this NeutronListSecurityGroupRulesRequest.
Filter the query results by the ID of the security group this rule belongs to
:param security_group_id: The security_group_id of this NeutronListSecurityGroupRulesRequest.
:type: str
"""
self._security_group_id = security_group_id
@property
def port_range_max(self):
"""Gets the port_range_max of this NeutronListSecurityGroupRulesRequest.
Filter the query results by maximum port
:return: The port_range_max of this NeutronListSecurityGroupRulesRequest.
:rtype: str
"""
return self._port_range_max
@port_range_max.setter
def port_range_max(self, port_range_max):
"""Sets the port_range_max of this NeutronListSecurityGroupRulesRequest.
Filter the query results by maximum port
:param port_range_max: The port_range_max of this NeutronListSecurityGroupRulesRequest.
:type: str
"""
self._port_range_max = port_range_max
@property
def port_range_min(self):
"""Gets the port_range_min of this NeutronListSecurityGroupRulesRequest.
Filter the query results by minimum port
:return: The port_range_min of this NeutronListSecurityGroupRulesRequest.
:rtype: str
"""
return self._port_range_min
@port_range_min.setter
def port_range_min(self, port_range_min):
"""Sets the port_range_min of this NeutronListSecurityGroupRulesRequest.
Filter the query results by minimum port
:param port_range_min: The port_range_min of this NeutronListSecurityGroupRulesRequest.
:type: str
"""
self._port_range_min = port_range_min
@property
def tenant_id(self):
"""Gets the tenant_id of this NeutronListSecurityGroupRulesRequest.
Filter the query results by the ID of the project this security group rule belongs to
:return: The tenant_id of this NeutronListSecurityGroupRulesRequest.
:rtype: str
"""
return self._tenant_id
@tenant_id.setter
def tenant_id(self, tenant_id):
"""Sets the tenant_id of this NeutronListSecurityGroupRulesRequest.
Filter the query results by the ID of the project this security group rule belongs to
:param tenant_id: The tenant_id of this NeutronListSecurityGroupRulesRequest.
:type: str
"""
self._tenant_id = tenant_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NeutronListSecurityGroupRulesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
164e025c757cbef908707f5219e2c665aaa5261b | be84495751737bbf0a8b7d8db2fb737cbd9c297c | /sdl/tests/sdl/rnd_test.py | bd5ccfdfe9baac2abbb86c6b90739265c39087ae | []
| no_license | mario007/renmas | 5e38ff66cffb27b3edc59e95b7cf88906ccc03c9 | bfb4e1defc88eb514e58bdff7082d722fc885e64 | refs/heads/master | 2021-01-10T21:29:35.019792 | 2014-08-17T19:11:51 | 2014-08-17T19:11:51 | 1,688,798 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py |
import unittest
from tdasm import Runtime
from sdl.vector import Vector2, Vector3, Vector4
from sdl.shader import Shader
from sdl.args import IntArg, FloatArg, Vec2Arg, Vec3Arg, Vec4Arg
code = """
p1 = rand_int()
p2 = random()
p3 = random2()
p4 = random3()
p5 = random4()
"""
p1 = IntArg('p1', 333)
p2 = FloatArg('p2', 333.0)
p3 = Vec2Arg('p3', Vector2(0.0, 0.0))
p4 = Vec3Arg('p4', Vector3(0.0, 0.0, 0.0))
p5 = Vec4Arg('p5', Vector4(0.0, 0.0, 0.0, 0.0))
shader = Shader(code=code, args=[p1, p2, p3, p4, p5])
shader.compile()
shader.prepare([Runtime()])
shader.execute()
print(shader.get_value('p1'))
print(shader.get_value('p2'))
print(shader.get_value('p3'))
print(shader.get_value('p4'))
print(shader.get_value('p5'))
| [
"[email protected]"
]
| |
c6872feee88fe1b197782ffe58764561cf3b2807 | 9f78c2bfadd1e87d779a786e7cd0952b6fbc96f1 | /common/models/log/AppErrorLog.py | 918ff63b12f1ffc3cbcf7a180a16e09a55e0cc6a | []
| no_license | Erick-LONG/order | 08393ed9b315cf2c6af5e2b9bfd6917605fe8d94 | 4b853403c9c949b3ecbe2766ec77750557cf11fc | refs/heads/master | 2022-11-11T09:32:53.570524 | 2020-06-30T09:20:18 | 2020-06-30T09:20:18 | 262,786,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | # coding: utf-8
from application import db
class AppErrorLog(db.Model):
__tablename__ = 'app_error_log'
id = db.Column(db.Integer, primary_key=True)
referer_url = db.Column(db.String(255), nullable=False, server_default=db.FetchedValue())
target_url = db.Column(db.String(255), nullable=False, server_default=db.FetchedValue())
query_params = db.Column(db.Text, nullable=False)
content = db.Column(db.String, nullable=False)
created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
| [
"[email protected]"
]
| |
fa108bd7530e8565ffc888194f3b2bc13e59a4a5 | b56eaf7a603cbb850be11dbbed2c33b954dedbcb | /distar/envs/other/alphastar_statistics.py | f2c414f241ad9e9546fba1cdbf7a88c0f1c3aa70 | [
"Apache-2.0"
]
| permissive | LFhase/DI-star | 2887d9c5dd8bfaa629e0171504b05ac70fdc356f | 09d507c412235a2f0cf9c0b3485ec9ed15fb6421 | refs/heads/main | 2023-06-20T20:05:01.378611 | 2021-07-09T16:26:18 | 2021-07-09T16:26:18 | 384,499,311 | 1 | 0 | Apache-2.0 | 2021-07-09T16:50:29 | 2021-07-09T16:50:28 | null | UTF-8 | Python | false | false | 20,962 | py | import copy
import os
import logging
import numpy as np
import torch
from ctools.pysc2.lib.action_dict import GENERAL_ACTION_INFO_MASK
from ctools.pysc2.lib.static_data import NUM_BEGIN_ACTIONS, NUM_UNIT_BUILD_ACTIONS, NUM_EFFECT_ACTIONS, NUM_RESEARCH_ACTIONS, \
UNIT_BUILD_ACTIONS_REORDER_ARRAY, EFFECT_ACTIONS_REORDER_ARRAY, RESEARCH_ACTIONS_REORDER_ARRAY, \
BEGIN_ACTIONS_REORDER_ARRAY, BEGIN_ACTIONS, \
OLD_BEGIN_ACTIONS_REORDER_INV
from ctools.envs.common import reorder_one_hot_array, batch_binary_encode, div_one_hot
from ..obs.alphastar_obs import LOCATION_BIT_NUM
from ctools.torch_utils import to_dtype, one_hot
def binary_search(data, item):
if len(data) <= 0:
raise RuntimeError("empty data with len: {}".format(len(data)))
low = 0
high = len(data) - 1
while low <= high:
mid = (high + low) // 2
if data[mid] == item:
return mid
elif data[mid] < item:
low = mid + 1
else:
high = mid - 1
if low == len(data):
low -= 1 # limit low within [0, len(data)-1]
return low
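# Behavior note (descriptive): on a sorted list, binary_search returns the
# index of `item` when present; otherwise it returns the position where the
# search stopped, clamped to [0, len(data) - 1], e.g.
#   binary_search([10, 20, 30], 20) -> 1
#   binary_search([10, 20, 30], 35) -> 2   (clamped to the last index)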
class RealTimeStatistics:
"""
Overview: real time agent statistics
"""
def __init__(self, begin_num=20):
self.action_statistics = {}
self.cumulative_statistics = {}
self.cumulative_statistics_game_loop = []
self.begin_statistics = []
self.begin_num = begin_num
def update_action_stat(self, act, obs):
# this will not clear the cache
def get_unit_types(units, entity_type_dict):
unit_types = set()
for u in units:
try:
unit_type = entity_type_dict[u]
unit_types.add(unit_type)
except KeyError:
logging.warning("Not found unit(id: {})".format(u))
return unit_types
action_type = act.action_type
if action_type not in self.action_statistics.keys():
self.action_statistics[action_type] = {
'count': 0,
'selected_type': set(),
'target_type': set(),
}
self.action_statistics[action_type]['count'] += 1
entity_type_dict = {id: type for id, type in zip(obs['entity_raw']['id'], obs['entity_raw']['type'])}
if act.selected_units is not None:
units = act.selected_units
unit_types = get_unit_types(units, entity_type_dict)
self.action_statistics[action_type]['selected_type'] = \
self.action_statistics[action_type]['selected_type'].union(unit_types)
if act.target_units is not None:
units = act.target_units
unit_types = get_unit_types(units, entity_type_dict)
self.action_statistics[action_type]['target_type'] = self.action_statistics[action_type][
'target_type'].union(unit_types)
def update_cum_stat(self, act, game_loop):
# this will not clear the cache
action_type = act.action_type
goal = GENERAL_ACTION_INFO_MASK[action_type]['goal']
if goal != 'other':
if action_type not in self.cumulative_statistics.keys():
self.cumulative_statistics[action_type] = {'count': 1, 'goal': goal}
else:
self.cumulative_statistics[action_type]['count'] += 1
loop_stat = copy.deepcopy(self.cumulative_statistics)
loop_stat['game_loop'] = game_loop
self.cumulative_statistics_game_loop.append(loop_stat)
def update_build_order_stat(self, act, game_loop, original_location):
# this will not clear the cache
worker_and_supply_units = (35, 64, 520, 222, 515, 503)
action_type = act.action_type
if action_type in worker_and_supply_units: # exclude worker and supply
return
goal = GENERAL_ACTION_INFO_MASK[action_type]['goal']
if action_type in BEGIN_ACTIONS:
if goal == 'build':
if original_location is not None:
location = original_location
else:
location = act.target_location
if isinstance(location, torch.Tensor): # for build ves, no target_location
location = location.tolist()
else:
location = None
self.begin_statistics.append({'action_type': action_type, 'location': location, 'game_loop': game_loop})
def update_stat(self, act, obs, game_loop, original_location=None):
"""
Update action_stat cum_stat and build_order_stat
Args:
act: Processed general action
obs: observation
game_loop: current game loop
"""
if obs is not None:
self.update_action_stat(act, obs)
self.update_cum_stat(act, game_loop)
self.update_build_order_stat(act, game_loop, original_location)
def get_reward_z(self, use_max_bo_clip):
"""
use_max_bo_clip (boolean): Whether to keep only the building orders of the first self.begin_num units.
"""
beginning_build_order = self.begin_statistics
if use_max_bo_clip and len(beginning_build_order) > self.begin_num:
beginning_build_order = beginning_build_order[:self.begin_num+1]
cumulative_stat = self.cumulative_statistics
cum_stat_tensor = transform_cum_stat(cumulative_stat)
ret = {
'built_unit': cum_stat_tensor['unit_build'],
'effect': cum_stat_tensor['effect'],
'upgrade': cum_stat_tensor['research'],
'build_order': transform_build_order_to_z_format(beginning_build_order),
}
ret = to_dtype(ret, torch.long)
return ret
def get_input_z(self, bo_length=20):
ret = {
'beginning_build_order': transform_build_order_to_input_format(self.begin_statistics, bo_length),
'cumulative_stat': transform_cum_stat(self.cumulative_statistics)
}
return ret
def get_stat(self):
ret = {'begin_statistics': self.begin_statistics, 'cumulative_statistics': self.cumulative_statistics}
return ret
def get_norm_units_num(self):
worker_and_supply_units = (35, 64, 520, 222, 515, 503)
zerg_units = (498, 501, 507, 508, 514, 516, 519, 522, 524, 526, 528, 383, 396, 391, 400)
units_num = {GENERAL_ACTION_INFO_MASK[k]['name'].split('_')[1]: 0 for k in zerg_units}
max_num = 1
for k in self.cumulative_statistics.keys():
if k not in worker_and_supply_units and k in zerg_units \
and self.cumulative_statistics[k]['goal'] == 'unit':
unit_name = GENERAL_ACTION_INFO_MASK[k]['name'].split('_')[1]
units_num[unit_name] = self.cumulative_statistics[k]['count']
max_num = max(self.cumulative_statistics[k]['count'], max_num)
for k in units_num.keys():
units_num[k] /= (1.0 * max_num)
return units_num
class GameLoopStatistics:
"""
Overview: Human replay data statistics specified by game loop
"""
def __init__(self, stat, begin_num=20):
self.ori_stat = stat
self.ori_stat = self.add_game_loop(self.ori_stat)
self.begin_num = begin_num
self.mmr = 6200
self._clip_global_bo()
self.cache_reward_z = None
self.cache_input_z = None
self.max_game_loop = self.ori_stat['cumulative_stat'][-1]['game_loop']
self._init_global_z()
def add_game_loop(self, stat):
beginning_build_order = stat['beginning_build_order']
cumulative_stat = stat['cumulative_stat']
if 'game_loop' in beginning_build_order[0].keys():
return stat
def is_action_frame(action_type, cum_idx):
# for start case
if cum_idx == 0:
return action_type in cumulative_stat[cum_idx].keys()
last_frame = cumulative_stat[cum_idx - 1]
cur_frame = cumulative_stat[cum_idx]
miss_key = cur_frame.keys() - last_frame.keys()
diff_count_key = set()
for k in last_frame.keys():
if k != 'game_loop' and cur_frame[k]['count'] != last_frame[k]['count']:
diff_count_key.add(k)
diff_key = miss_key.union(diff_count_key)
return action_type in diff_key
cum_idx = 0
new_beginning_build_order = []
for i in range(len(beginning_build_order)):
item = beginning_build_order[i]
action_type = item['action_type']
while cum_idx < len(cumulative_stat) and not is_action_frame(action_type, cum_idx):
cum_idx += 1
if cum_idx < len(cumulative_stat):
item.update({'game_loop': cumulative_stat[cum_idx]['game_loop']})
new_beginning_build_order.append(item)
cum_idx += 1
new_stat = stat
new_stat['beginning_build_order'] = new_beginning_build_order
new_stat['begin_game_loop'] = [t['game_loop'] for t in new_beginning_build_order]
new_stat['cum_game_loop'] = [t['game_loop'] for t in new_stat['cumulative_stat']]
return new_stat
def _clip_global_bo(self):
beginning_build_order = copy.deepcopy(self.ori_stat['beginning_build_order'])
if len(beginning_build_order) < self.begin_num:
# the input_global_bo will be padded up to begin_num when transformed into input format
self.input_global_bo = beginning_build_order
self.reward_global_bo = beginning_build_order
else:
beginning_build_order = beginning_build_order[:self.begin_num]
self.input_global_bo = beginning_build_order
self.reward_global_bo = beginning_build_order
def _init_global_z(self):
# init input_global_z
beginning_build_order, cumulative_stat = self.input_global_bo, self.ori_stat['cumulative_stat'][-1]
self.input_global_z = transformed_stat_mmr(
{
'begin_statistics': beginning_build_order,
'cumulative_statistics': cumulative_stat
}, self.mmr, self.begin_num
)
# init reward_global_z
beginning_build_order, cumulative_stat = self.reward_global_bo, self.ori_stat['cumulative_stat'][-1]
cum_stat_tensor = transform_cum_stat(cumulative_stat)
self.reward_global_z = {
'built_unit': cum_stat_tensor['unit_build'],
'effect': cum_stat_tensor['effect'],
'upgrade': cum_stat_tensor['research'],
'build_order': transform_build_order_to_z_format(beginning_build_order),
}
self.reward_global_z = to_dtype(self.reward_global_z, torch.long)
def get_input_z_by_game_loop(self, game_loop, cumulative_stat=None):
"""
Note: if game_loop is None, load global stat
"""
if cumulative_stat is None:
if game_loop is None:
return self.input_global_z
else:
_, cumulative_stat = self._get_stat_by_game_loop(game_loop)
beginning_build_order = self.input_global_bo
ret = transformed_stat_mmr(
{
'begin_statistics': beginning_build_order,
'cumulative_statistics': cumulative_stat
}, self.mmr, self.begin_num
)
return ret
def get_reward_z_by_game_loop(self, game_loop, build_order_length=None):
"""
Note: if game_loop is None, load global stat
"""
if game_loop is None:
global_z = copy.deepcopy(self.reward_global_z)
global_z['build_order']['type'] = global_z['build_order']['type'][:build_order_length]
global_z['build_order']['loc'] = global_z['build_order']['loc'][:build_order_length]
return global_z
else:
beginning_build_order, cumulative_stat = self._get_stat_by_game_loop(game_loop)
cum_stat_tensor = transform_cum_stat(cumulative_stat)
ret = {
'built_unit': cum_stat_tensor['unit_build'],
'effect': cum_stat_tensor['effect'],
'upgrade': cum_stat_tensor['research'],
'build_order': transform_build_order_to_z_format(beginning_build_order),
}
ret = to_dtype(ret, torch.long)
return ret
def _get_stat_by_game_loop(self, game_loop):
begin_idx = binary_search(self.ori_stat['begin_game_loop'], game_loop)
cum_idx = binary_search(self.ori_stat['cum_game_loop'], game_loop)
return self.ori_stat['beginning_build_order'][:begin_idx + 1], self.ori_stat['cumulative_stat'][cum_idx]
def excess_max_game_loop(self, agent_game_loop):
return agent_game_loop > self.max_game_loop
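# Usage sketch (added illustration; the stat-dict layout is inferred from this file):
#   stats = GameLoopStatistics(stat_dict, begin_num=20)   # stat_dict carries
#       'beginning_build_order' and 'cumulative_stat' entries
#   input_z = stats.get_input_z_by_game_loop(game_loop=5000)
#   reward_z = stats.get_reward_z_by_game_loop(game_loop=5000)
# Passing game_loop=None returns the whole-game (global) statistics instead.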
def transform_build_order_to_z_format(stat):
"""
Overview: transform beginning_build_order to the format to calculate reward
stat: list->element: dict('action_type': int, 'location': list(len=2)->element: int)
"""
    ret = {'type': np.zeros(len(stat), dtype=np.int64), 'loc': np.empty((len(stat), 2), dtype=np.int64)}
    zeroxy = np.array([0, 0], dtype=np.int64)
for n in range(len(stat)):
action_type, location = stat[n]['action_type'], stat[n]['location']
ret['type'][n] = action_type
ret['loc'][n] = location if isinstance(location, list) else zeroxy
ret['type'] = torch.Tensor(ret['type'])
ret['loc'] = torch.Tensor(ret['loc'])
return ret
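# Example with hypothetical values: for
#   stat = [{'action_type': 42, 'location': [23, 51]}, {'action_type': 7, 'location': None}]
# the result is ret['type'] -> tensor([42., 7.]) and ret['loc'] -> tensor([[23., 51.], [0., 0.]]);
# any non-list location falls back to (0, 0).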
def transform_build_order_to_input_format(stat, begin_num, location_num=LOCATION_BIT_NUM):
"""
Overview: transform beginning_build_order to the format for input
stat: list->element: dict('action_type': int, 'location': list(len=2)->element: int)
"""
beginning_build_order_tensor = []
for item in stat:
action_type, location = item['action_type'], item['location']
if action_type == 0:
action_type = torch.zeros(NUM_BEGIN_ACTIONS)
else:
action_type = torch.LongTensor([action_type])
action_type = reorder_one_hot_array(action_type, BEGIN_ACTIONS_REORDER_ARRAY, num=NUM_BEGIN_ACTIONS)
if isinstance(location, list):
x = batch_binary_encode(torch.LongTensor([location[0]]), bit_num=location_num)[0]
y = batch_binary_encode(torch.LongTensor([location[1]]), bit_num=location_num)[0]
location = torch.cat([x, y], dim=0)
else:
location = torch.zeros(location_num * 2)
beginning_build_order_tensor.append(torch.cat([action_type.squeeze(0), location], dim=0))
if len(stat):
beginning_build_order_tensor = torch.stack(beginning_build_order_tensor, dim=0)
else:
return torch.zeros(begin_num, 194)
# pad
if beginning_build_order_tensor.shape[0] < begin_num:
miss_num = begin_num - beginning_build_order_tensor.shape[0]
pad_part = torch.zeros(miss_num, beginning_build_order_tensor.shape[1])
beginning_build_order_tensor = torch.cat([beginning_build_order_tensor, pad_part], dim=0)
return beginning_build_order_tensor[:begin_num]
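# Note: each row concatenates a one-hot begin action (NUM_BEGIN_ACTIONS dims) with two
# binary-encoded coordinates (2 * LOCATION_BIT_NUM dims); the zeros(begin_num, 194)
# fallback above assumes NUM_BEGIN_ACTIONS + 2 * LOCATION_BIT_NUM == 194.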
def transform_cum_stat(cumulative_stat):
"""
Overview: transform cumulative_stat to the format for both input and reward
cumulative_stat: dict('action_type': {'goal': str, count: int})
"""
cumulative_stat_tensor = {
'unit_build': torch.zeros(NUM_UNIT_BUILD_ACTIONS),
'effect': torch.zeros(NUM_EFFECT_ACTIONS),
'research': torch.zeros(NUM_RESEARCH_ACTIONS)
}
for k, v in cumulative_stat.items():
if k == 'game_loop':
continue
if v['goal'] in ['unit', 'build']:
cumulative_stat_tensor['unit_build'][UNIT_BUILD_ACTIONS_REORDER_ARRAY[k]] = 1
elif v['goal'] in ['effect']:
cumulative_stat_tensor['effect'][EFFECT_ACTIONS_REORDER_ARRAY[k]] = 1
elif v['goal'] in ['research']:
cumulative_stat_tensor['research'][RESEARCH_ACTIONS_REORDER_ARRAY[k]] = 1
return cumulative_stat_tensor
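# Example with a hypothetical action id: cumulative_stat = {322: {'goal': 'unit', 'count': 3}}
# sets position UNIT_BUILD_ACTIONS_REORDER_ARRAY[322] of the 'unit_build' vector to 1;
# counts are deliberately discarded, so only the presence of an action is encoded.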
def transform_stat(stat, meta, begin_num):
mmr = meta['home_mmr']
return transformed_stat_mmr(stat, mmr, begin_num)
def transformed_stat_mmr(stat, mmr, begin_num):
"""
Overview: transform replay metadata and statdata to input stat(mmr + z)
"""
beginning_build_order = stat['begin_statistics']
beginning_build_order_tensor = transform_build_order_to_input_format(beginning_build_order, begin_num)
cumulative_stat_tensor = transform_cum_stat(stat['cumulative_statistics'])
mmr = torch.LongTensor([mmr])
mmr = div_one_hot(mmr, 6000, 1000).squeeze(0)
return {
'mmr': mmr,
'beginning_build_order': beginning_build_order_tensor,
'cumulative_stat': cumulative_stat_tensor
}
def transform_stat_processed(old_stat_processed):
"""
Overview: transform new begin action(for stat_processed)
"""
new_stat_processed = copy.deepcopy(old_stat_processed)
beginning_build_order = new_stat_processed['beginning_build_order']
new_beginning_build_order = []
location_dim = 2 * LOCATION_BIT_NUM
for item in beginning_build_order:
action_type, location = item[:-location_dim], item[-location_dim:]
action_type = torch.nonzero(action_type).item()
action_type = OLD_BEGIN_ACTIONS_REORDER_INV[action_type]
if action_type not in BEGIN_ACTIONS:
continue
action_type = BEGIN_ACTIONS_REORDER_ARRAY[action_type]
action_type = torch.LongTensor([action_type])
action_type = one_hot(action_type, NUM_BEGIN_ACTIONS)[0]
new_item = torch.cat([action_type, location], dim=0)
new_beginning_build_order.append(new_item)
new_stat_processed['beginning_build_order'] = torch.stack(new_beginning_build_order, dim=0)
return new_stat_processed
def transform_stat_professional_player(old_stat):
new_stat = copy.deepcopy(old_stat)
beginning_build_order = new_stat['beginning_build_order']
new_beginning_build_order = []
for item in beginning_build_order:
if item['action_type'] in BEGIN_ACTIONS:
new_beginning_build_order.append(item)
new_stat['beginning_build_order'] = new_beginning_build_order
return new_stat
class StatKey:
def __init__(self, home_race=None, away_race=None, map_name=None, player_id=None):
self.home_race = home_race
self.away_race = away_race
self.map_name = map_name
self.player_id = player_id
@classmethod
def check_path(cls, item):
"""
Overview: check stat path name format
Note:
format template: homerace_awayrace_mapname_playerid_id
"""
race_list = ['zerg', 'terran', 'protoss']
map_list = ['KingsCove', 'KairosJunction', 'NewRepugnancy', 'CyberForest']
try:
item_contents = item.split('_')
assert len(item_contents) == 5
assert item_contents[0] in race_list
assert item_contents[1] in race_list
assert item_contents[2] in map_list
assert item_contents[3] in ['1', '2']
except Exception as e:
print(item_contents)
return False
return True
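    # Example of a path accepted by check_path (the trailing id is arbitrary):
    #   'zerg_terran_KingsCove_1_0001'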
@classmethod
def path2key(cls, path):
items = path.split('_')[:4]
return StatKey(*items)
def match(self, other):
assert isinstance(other, StatKey)
for k, v in self.__dict__.items():
if v is not None:
if other.__dict__[k] != v:
return False
return True
def __repr__(self):
return 'h_race: {}\ta_race: {}\tmap: {}\tid: {}'.format(
self.home_race, self.away_race, self.map_name, self.player_id
)
class StatManager:
def __init__(self, dirname, stat_path_list):
with open(stat_path_list, 'r') as f:
data = f.readlines()
data = [t.strip() for t in data]
self.stat_paths = [item for item in data if StatKey.check_path(item)]
self.stat_keys = [StatKey.path2key(t) for t in self.stat_paths]
self.dirname = dirname
def get_ava_stats(self, **kwargs):
assert kwargs['player_id'] == 'ava'
# select matched results
stats = []
for player_id in ['1', '2']:
kwargs['player_id'] = player_id
query = StatKey(**kwargs)
matched_results_idx = [idx for idx, t in enumerate(self.stat_keys) if query.match(t)]
if len(matched_results_idx) == 0:
raise RuntimeError("no matched stat, input kwargs are: {}".format(kwargs))
# random sample
selected_idx = np.random.choice(matched_results_idx)
stat_path = self.stat_paths[selected_idx]
stats.append(stat_path)
stats = [os.path.join(self.dirname, s) for s in stats]
return stats
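# Usage sketch (hypothetical paths): with a directory of stat files and a text file
# listing their names, e.g.
#   manager = StatManager('/data/stats', '/data/stat_list.txt')
#   paths = manager.get_ava_stats(home_race='zerg', away_race='terran',
#                                 map_name='KingsCove', player_id='ava')
# player_id='ava' samples one random matching stat path for each player id ('1', '2').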
| [
"[email protected]"
]
| |
75a863a592f82faf0099f420daadea5edbe253db | 074655fbb70dc7dad1807597efa267abb0fb3500 | /desafios/desafio-106.py | a3efcfa39014af63e7cdc60e9f066c83e8da1fb4 | []
| no_license | rodrigojgrande/python-mundo | bfa57ff12c537084aeeb5469451e13e74c6fb9f1 | d482c84d5c6ae8cfec79317b85390e17ede17f58 | refs/heads/master | 2023-04-23T08:22:45.251817 | 2021-05-19T13:08:21 | 2021-05-19T13:08:21 | 351,783,397 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | #Python Exercise 106: Build a mini-system that uses Python's Interactive Help. The user will type a command and its manual will appear. When the user types the word 'FIM', the program will end. Important: use colors.
from time import sleep
cor = ('\033[m', # 0 - no colors
       '\033[0;30;41m', # 1 - red
       '\033[0;30;42m', # 2 - green
       '\033[0;30;43m', # 3 - yellow
       '\033[0;30;44m', # 4 - blue
       '\033[0;30;45m', # 5 - purple
       '\033[7;30m' # 6 - white
       )
def ajuda(comando):
    titulo(f'Accessing the manual of command \'{comando}\'', 4)
print(cor[6], end='')
help(comando)
print(cor[0], end='')
sleep(2)
def titulo(msg, c=0):
tamanho = len(msg) + 4
print(cor[c], end='')
print('~' * tamanho)
print(f' {msg}')
print('~' * tamanho)
print(cor[0], end='')
sleep(1)
#Main Program
comando = ''
while True:
    titulo('PyHELP HELP SYSTEM', 2)
    comando = str(input("Function or Library > "))
if comando.upper() == 'FIM':
break
else:
ajuda(comando)
titulo('GOODBYE', 1)
| [
"[email protected]"
]
| |
efc52d8b10f9081ff6a555ce6d84839a77e88f05 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02973/s843936905.py | 1546a742a043329f3c3d9e840dbeb8dda98ec3c7 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from bisect import bisect_right
def main():
inf=float("inf")
n=int(input())
alst=[int(input()) for _ in range(n)]
work=[inf]*n
for i in range(n-1,-1,-1):
j=bisect_right(work,alst[i])
work[j]=alst[i]
print(n-work.count(inf))
main() | [
"[email protected]"
]
| |
332db90717d18029d34aa1bbca1ce2d43fdd2a1d | e495c3f9227d790f3c08a56c357f2a95c167ec9c | /zerojudge.tw/a780.py | 5d40bbaa97c9b43d36b1a9634123d570ef876590 | []
| no_license | InfiniteWing/Solves | 84d894d66588693c73ec1dcaebe3b8b148e1d224 | a0f8f09fac5e462d7d12a23ccd8414bd5ff8ffad | refs/heads/master | 2021-10-11T00:17:37.367776 | 2019-01-19T15:07:54 | 2019-01-19T15:07:54 | 103,742,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | def main():
while True:
try:
s = input()
except EOFError:
break
        o, e, a = map(float, s.split())
        if o == 0 and e == 0 and a == 0:
break
m = o / e
f = a / m
print('{0:.2f}'.format(round(m,2)),'{0:.2f}'.format(round(f,2)))
main() | [
"[email protected]"
]
| |
abd5ee70aab65729e8f7fa743256471068ff8cf4 | 4d6975caece0acdc793a41e8bc6d700d8c2fec9a | /leetcode/856.consecutive-numbers-sum/856.consecutive-numbers-sum.py | e7d78eaedc4dc18c9eedb0691612ac16db60e37b | []
| no_license | guiconti/workout | 36a3923f2381d6e7023e127100409b3a2e7e4ccb | 5162d14cd64b720351eb30161283e8727cfcf376 | refs/heads/master | 2021-08-03T10:32:02.108714 | 2021-07-26T04:38:14 | 2021-07-26T04:38:14 | 221,025,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | class Solution:
    def consecutiveNumbersSum(self, N: int) -> int:
        # A run of k consecutive integers starting at a >= 1 sums to
        # k*a + k*(k-1)/2, so count the run lengths k that leave
        # N - k*(k-1)/2 positive and divisible by k.
        return sum(1 for k in range(1, int((2 * N) ** 0.5) + 2)
                   if k * (k - 1) // 2 < N and (N - k * (k - 1) // 2) % k == 0)
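# Worked example: N = 15 has 4 representations -- 15, 7+8, 4+5+6 and 1+2+3+4+5 --
# i.e. the run lengths k in {1, 2, 3, 5} are exactly those with (N - k*(k-1)/2) % k == 0.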
"[email protected]"
]
| |
d8f453cbc8d6faf8544ab9e6c7c8f3f69eca3db6 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part007491.py | e36cb723cb2eaeafc9ed54508427de7131e5b47c | []
| no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher25426(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.1.1.2.2.2.1.0', 1, 1, S(1)), Mul),
(VariableWithCount('i2.2.1.0', 1, 1, None), Mul)
]),
1: (1, Multiset({}), [
(VariableWithCount('i2.1.1.2.2.2.1.0', 1, 1, S(1)), Mul),
(VariableWithCount('i2.2.1.1', 1, 1, None), Mul)
]),
2: (2, Multiset({}), [
(VariableWithCount('i2.1.1.2.2.2.1.0', 1, 1, S(1)), Mul),
(VariableWithCount('i2.3.1.0', 1, 1, None), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher25426._instance is None:
CommutativeMatcher25426._instance = CommutativeMatcher25426()
return CommutativeMatcher25426._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 25425
return
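        # the unreachable 'yield' below keeps this function a generator (it yields nothing)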
yield
from collections import deque | [
"[email protected]"
]
| |
e646e840415066474e090129909b4fa89a674716 | 334d0a4652c44d0c313e11b6dcf8fb89829c6dbe | /checkov/terraform/checks/provider/bridgecrew/credentials.py | fac41da6ae3817889168443670a4b19c7c89d1ea | [
"Apache-2.0"
]
| permissive | schosterbarak/checkov | 4131e03b88ae91d82b2fa211f17e370a6f881157 | ea6d697de4de2083c8f6a7aa9ceceffd6b621b58 | refs/heads/master | 2022-05-22T18:12:40.994315 | 2022-04-28T07:44:05 | 2022-04-28T07:59:17 | 233,451,426 | 0 | 0 | Apache-2.0 | 2020-03-23T12:12:23 | 2020-01-12T20:07:15 | Python | UTF-8 | Python | false | false | 1,157 | py | import re
from typing import Dict, List, Any
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.provider.base_check import BaseProviderCheck
from checkov.common.models.consts import bridgecrew_token_pattern
class BridgecrewCredentials(BaseProviderCheck):
def __init__(self) -> None:
name = "Ensure no hard coded API token exist in the provider"
id = "CKV_BCW_1"
supported_provider = ["bridgecrew"]
categories = [CheckCategories.SECRETS]
super().__init__(name=name, id=id, categories=categories, supported_provider=supported_provider)
def scan_provider_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
if self.secret_found(conf, "token", bridgecrew_token_pattern):
return CheckResult.FAILED
return CheckResult.PASSED
@staticmethod
def secret_found(conf: Dict[str, List[Any]], field: str, pattern: str) -> bool:
if field in conf.keys():
value = conf[field][0]
if re.match(pattern, value) is not None:
return True
return False
check = BridgecrewCredentials()
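# Example sketch (the HCL shape is assumed, the token format is not shown): a block like
#   provider "bridgecrew" { token = "<hard-coded token>" }
# reaches scan_provider_conf as conf = {'token': ['<hard-coded token>']}; if the value
# matches bridgecrew_token_pattern, the check returns FAILED.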
| [
"[email protected]"
]
| |
71ad3f0dc161322df687c69ddedcd303e2fee89f | 3cd75f3d62911ba3d2114f95203e81d91be32877 | /1day/Str09.py | 432786c3f756f162e0a411ef5774f40ee0cbf828 | []
| no_license | kukukuni/Python_ex | 3667e2fe1db3a161d9e6acd5d8145a3e692f8e89 | e56d10bbcf3dc33b4422462a5b3c2dedb082b8c3 | refs/heads/master | 2022-11-05T13:58:55.770984 | 2019-04-14T00:57:18 | 2019-04-14T00:57:18 | 181,244,073 | 0 | 1 | null | 2022-10-23T06:38:06 | 2019-04-14T00:50:00 | Jupyter Notebook | UTF-8 | Python | false | false | 359 | py | # Str09.py
'''
print('Enter number 1'); n1 = int(input())
print('Enter number 2'); n2 = int(input())
print(n1+n2)
print('Enter 2 numbers')
n1, n2 = input().split(',')
print(int(n1)+int(n2))
print('Enter 2 numbers')
n1, n2 = map(int, input().split(','))
print(n1+n2)
'''
n1, n2 = map(int, input('Enter 2 numbers\n').split(','))
print(n1+n2)
| [
"[email protected]"
]
| |
97a6a1c5513c76cbf12f904f40a7662ec5781c10 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/video/TSM/mmaction/models/localizers/bsn.py | 1fc25e34f2d18f58ab944770a3fbec26930151c1 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 16,887 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...localization import temporal_iop
from ..builder import LOCALIZERS, build_loss
from .base import BaseTAPGenerator
from .utils import post_processing
@LOCALIZERS.register_module()
class TEM(BaseTAPGenerator):
"""Temporal Evaluation Model for Boundary Sensitive Network.
    Please refer to `BSN: Boundary Sensitive Network for Temporal Action
Proposal Generation <http://arxiv.org/abs/1806.02964>`_.
Code reference
https://github.com/wzmsltw/BSN-boundary-sensitive-network
Args:
tem_feat_dim (int): Feature dimension.
tem_hidden_dim (int): Hidden layer dimension.
tem_match_threshold (float): Temporal evaluation match threshold.
loss_cls (dict): Config for building loss.
Default: ``dict(type='BinaryLogisticRegressionLoss')``.
loss_weight (float): Weight term for action_loss. Default: 2.
output_dim (int): Output dimension. Default: 3.
conv1_ratio (float): Ratio of conv1 layer output. Default: 1.0.
conv2_ratio (float): Ratio of conv2 layer output. Default: 1.0.
conv3_ratio (float): Ratio of conv3 layer output. Default: 0.01.
"""
def __init__(self,
temporal_dim,
boundary_ratio,
tem_feat_dim,
tem_hidden_dim,
tem_match_threshold,
loss_cls=dict(type='BinaryLogisticRegressionLoss'),
loss_weight=2,
output_dim=3,
conv1_ratio=1,
conv2_ratio=1,
conv3_ratio=0.01):
super().__init__()
self.temporal_dim = temporal_dim
self.boundary_ratio = boundary_ratio
self.feat_dim = tem_feat_dim
self.c_hidden = tem_hidden_dim
self.match_threshold = tem_match_threshold
self.output_dim = output_dim
self.loss_cls = build_loss(loss_cls)
self.loss_weight = loss_weight
self.conv1_ratio = conv1_ratio
self.conv2_ratio = conv2_ratio
self.conv3_ratio = conv3_ratio
self.conv1 = nn.Conv1d(
in_channels=self.feat_dim,
out_channels=self.c_hidden,
kernel_size=3,
stride=1,
padding=1,
groups=1)
self.conv2 = nn.Conv1d(
in_channels=self.c_hidden,
out_channels=self.c_hidden,
kernel_size=3,
stride=1,
padding=1,
groups=1)
self.conv3 = nn.Conv1d(
in_channels=self.c_hidden,
out_channels=self.output_dim,
kernel_size=1,
stride=1,
padding=0)
self.anchors_tmins, self.anchors_tmaxs = self._temporal_anchors()
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
"""Generate temporal anchors.
Args:
tmin_offset (int): Offset for the minimum value of temporal anchor.
Default: 0.
            tmax_offset (int): Offset for the maximum value of temporal anchor.
Default: 1.
Returns:
tuple[Sequence[float]]: The minimum and maximum values of temporal
anchors.
"""
temporal_gap = 1. / self.temporal_dim
anchors_tmins = []
anchors_tmaxs = []
for i in range(self.temporal_dim):
anchors_tmins.append(temporal_gap * (i + tmin_offset))
anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
return anchors_tmins, anchors_tmaxs
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = F.relu(self.conv1_ratio * self.conv1(x))
x = F.relu(self.conv2_ratio * self.conv2(x))
x = torch.sigmoid(self.conv3_ratio * self.conv3(x))
return x
def forward_train(self, raw_feature, label_action, label_start, label_end):
"""Define the computation performed at every call when training."""
tem_output = self._forward(raw_feature)
score_action = tem_output[:, 0, :]
score_start = tem_output[:, 1, :]
score_end = tem_output[:, 2, :]
loss_action = self.loss_cls(score_action, label_action,
self.match_threshold)
loss_start_small = self.loss_cls(score_start, label_start,
self.match_threshold)
loss_end_small = self.loss_cls(score_end, label_end,
self.match_threshold)
loss_dict = {
'loss_action': loss_action * self.loss_weight,
'loss_start': loss_start_small,
'loss_end': loss_end_small
}
return loss_dict
def forward_test(self, raw_feature, video_meta):
"""Define the computation performed at every call when testing."""
tem_output = self._forward(raw_feature).cpu().numpy()
batch_action = tem_output[:, 0, :]
batch_start = tem_output[:, 1, :]
batch_end = tem_output[:, 2, :]
video_meta_list = [dict(x) for x in video_meta]
video_results = []
for batch_idx, _ in enumerate(batch_action):
video_name = video_meta_list[batch_idx]['video_name']
video_action = batch_action[batch_idx]
video_start = batch_start[batch_idx]
video_end = batch_end[batch_idx]
video_result = np.stack((video_action, video_start, video_end,
self.anchors_tmins, self.anchors_tmaxs),
axis=1)
video_results.append((video_name, video_result))
return video_results
def generate_labels(self, gt_bbox):
"""Generate training labels."""
match_score_action_list = []
match_score_start_list = []
match_score_end_list = []
for every_gt_bbox in gt_bbox:
gt_tmins = every_gt_bbox[:, 0].cpu().numpy()
gt_tmaxs = every_gt_bbox[:, 1].cpu().numpy()
gt_lens = gt_tmaxs - gt_tmins
gt_len_pad = np.maximum(1. / self.temporal_dim,
self.boundary_ratio * gt_lens)
gt_start_bboxs = np.stack(
(gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1)
gt_end_bboxs = np.stack(
(gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1)
match_score_action = []
match_score_start = []
match_score_end = []
for anchor_tmin, anchor_tmax in zip(self.anchors_tmins,
self.anchors_tmaxs):
match_score_action.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax, gt_tmins,
gt_tmaxs)))
match_score_start.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_start_bboxs[:, 0], gt_start_bboxs[:,
1])))
match_score_end.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
match_score_action_list.append(match_score_action)
match_score_start_list.append(match_score_start)
match_score_end_list.append(match_score_end)
match_score_action_list = torch.Tensor(match_score_action_list)
match_score_start_list = torch.Tensor(match_score_start_list)
match_score_end_list = torch.Tensor(match_score_end_list)
return (match_score_action_list, match_score_start_list,
match_score_end_list)
def forward(self,
raw_feature,
gt_bbox=None,
video_meta=None,
return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
label_action, label_start, label_end = (
self.generate_labels(gt_bbox))
device = raw_feature.device
label_action = label_action.to(device)
label_start = label_start.to(device)
label_end = label_end.to(device)
return self.forward_train(raw_feature, label_action, label_start,
label_end)
return self.forward_test(raw_feature, video_meta)
@LOCALIZERS.register_module()
class PEM(BaseTAPGenerator):
"""Proposals Evaluation Model for Boundary Sensitive Network.
    Please refer to `BSN: Boundary Sensitive Network for Temporal Action
Proposal Generation <http://arxiv.org/abs/1806.02964>`_.
Code reference
https://github.com/wzmsltw/BSN-boundary-sensitive-network
Args:
pem_feat_dim (int): Feature dimension.
pem_hidden_dim (int): Hidden layer dimension.
        pem_u_ratio_m (float): Ratio for medium score proposals to balance
            data.
        pem_u_ratio_l (float): Ratio for low score proposals to balance data.
pem_high_temporal_iou_threshold (float): High IoU threshold.
pem_low_temporal_iou_threshold (float): Low IoU threshold.
soft_nms_alpha (float): Soft NMS alpha.
soft_nms_low_threshold (float): Soft NMS low threshold.
soft_nms_high_threshold (float): Soft NMS high threshold.
post_process_top_k (int): Top k proposals in post process.
feature_extraction_interval (int):
Interval used in feature extraction. Default: 16.
fc1_ratio (float): Ratio for fc1 layer output. Default: 0.1.
fc2_ratio (float): Ratio for fc2 layer output. Default: 0.1.
output_dim (int): Output dimension. Default: 1.
"""
def __init__(self,
pem_feat_dim,
pem_hidden_dim,
pem_u_ratio_m,
pem_u_ratio_l,
pem_high_temporal_iou_threshold,
pem_low_temporal_iou_threshold,
soft_nms_alpha,
soft_nms_low_threshold,
soft_nms_high_threshold,
post_process_top_k,
feature_extraction_interval=16,
fc1_ratio=0.1,
fc2_ratio=0.1,
output_dim=1):
super().__init__()
self.feat_dim = pem_feat_dim
self.hidden_dim = pem_hidden_dim
self.u_ratio_m = pem_u_ratio_m
self.u_ratio_l = pem_u_ratio_l
self.pem_high_temporal_iou_threshold = pem_high_temporal_iou_threshold
self.pem_low_temporal_iou_threshold = pem_low_temporal_iou_threshold
self.soft_nms_alpha = soft_nms_alpha
self.soft_nms_low_threshold = soft_nms_low_threshold
self.soft_nms_high_threshold = soft_nms_high_threshold
self.post_process_top_k = post_process_top_k
self.feature_extraction_interval = feature_extraction_interval
self.fc1_ratio = fc1_ratio
self.fc2_ratio = fc2_ratio
self.output_dim = output_dim
self.fc1 = nn.Linear(
in_features=self.feat_dim, out_features=self.hidden_dim, bias=True)
self.fc2 = nn.Linear(
in_features=self.hidden_dim,
out_features=self.output_dim,
bias=True)
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = torch.cat(list(x))
x = F.relu(self.fc1_ratio * self.fc1(x))
x = torch.sigmoid(self.fc2_ratio * self.fc2(x))
return x
def forward_train(self, bsp_feature, reference_temporal_iou):
"""Define the computation performed at every call when training."""
pem_output = self._forward(bsp_feature)
reference_temporal_iou = torch.cat(list(reference_temporal_iou))
device = pem_output.device
reference_temporal_iou = reference_temporal_iou.to(device)
anchors_temporal_iou = pem_output.view(-1)
u_hmask = (reference_temporal_iou >
self.pem_high_temporal_iou_threshold).float()
u_mmask = (
(reference_temporal_iou <= self.pem_high_temporal_iou_threshold)
& (reference_temporal_iou > self.pem_low_temporal_iou_threshold)
).float()
u_lmask = (reference_temporal_iou <=
self.pem_low_temporal_iou_threshold).float()
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
r_m = self.u_ratio_m * num_h / (num_m)
r_m = torch.min(r_m, torch.Tensor([1.0]).to(device))[0]
u_smmask = torch.rand(u_hmask.size()[0], device=device)
u_smmask = u_smmask * u_mmask
u_smmask = (u_smmask > (1. - r_m)).float()
r_l = self.u_ratio_l * num_h / (num_l)
r_l = torch.min(r_l, torch.Tensor([1.0]).to(device))[0]
u_slmask = torch.rand(u_hmask.size()[0], device=device)
u_slmask = u_slmask * u_lmask
u_slmask = (u_slmask > (1. - r_l)).float()
temporal_iou_weights = u_hmask + u_smmask + u_slmask
temporal_iou_loss = F.smooth_l1_loss(anchors_temporal_iou,
reference_temporal_iou)
temporal_iou_loss = torch.sum(
temporal_iou_loss *
temporal_iou_weights) / torch.sum(temporal_iou_weights)
loss_dict = dict(temporal_iou_loss=temporal_iou_loss)
return loss_dict
def forward_test(self, bsp_feature, tmin, tmax, tmin_score, tmax_score,
video_meta):
"""Define the computation performed at every call when testing."""
pem_output = self._forward(bsp_feature).view(-1).cpu().numpy().reshape(
-1, 1)
tmin = tmin.view(-1).cpu().numpy().reshape(-1, 1)
tmax = tmax.view(-1).cpu().numpy().reshape(-1, 1)
tmin_score = tmin_score.view(-1).cpu().numpy().reshape(-1, 1)
tmax_score = tmax_score.view(-1).cpu().numpy().reshape(-1, 1)
score = np.array(pem_output * tmin_score * tmax_score).reshape(-1, 1)
result = np.concatenate(
(tmin, tmax, tmin_score, tmax_score, pem_output, score), axis=1)
result = result.reshape(-1, 6)
video_info = dict(video_meta[0])
proposal_list = post_processing(result, video_info,
self.soft_nms_alpha,
self.soft_nms_low_threshold,
self.soft_nms_high_threshold,
self.post_process_top_k,
self.feature_extraction_interval)
output = [
dict(
video_name=video_info['video_name'],
proposal_list=proposal_list)
]
return output
def forward(self,
bsp_feature,
reference_temporal_iou=None,
tmin=None,
tmax=None,
tmin_score=None,
tmax_score=None,
video_meta=None,
return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
return self.forward_train(bsp_feature, reference_temporal_iou)
return self.forward_test(bsp_feature, tmin, tmax, tmin_score,
tmax_score, video_meta)
| [
"[email protected]"
]
| |
d6db57ca78ffbfbe55bff62613e68d9b7b0a32b5 | d78e59d285cdd1e16f1d7d836d39715e8a581c8b | /machine_learning/ml_basic/lab10_NN_ReLu_Xavier_Dropout_and_Adam/06_dropout_for_mnist.py | 7ddee527ad9b4d1754910dc4f1f7f773f5825539 | []
| no_license | egaoneko/study | 79c11e0df9d713b05babde3461f2e74f3906ad80 | b965654c7cc8e8361f5ec0596af57c55d35137cc | refs/heads/master | 2020-04-12T09:04:55.131290 | 2017-09-03T10:13:04 | 2017-09-03T10:13:04 | 54,833,896 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,649 | py | '''
A multilayer neural network (Xavier initialization plus dropout) example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
# Import MINST data
from random import randint
import math
from matplotlib import pyplot as plt
# from lab07_App_and_Tip import input_data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
import tensorflow as tf
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
# tf Graph Input
X = tf.placeholder("float", [None, 784]) # mnist data image of shape 28*28=784
Y = tf.placeholder("float", [None, 10]) # 0-9 digits recognition => 10 classes
def xavier_init(n_inputs, n_outputs, uniform=True):
"""Set the parameter initialization using the method described.
This method is designed to keep the scale of the gradients roughly the same
in all layers.
Xavier Glorot and Yoshua Bengio (2010):
Understanding the difficulty of training deep feedforward neural
networks. International conference on artificial intelligence and
statistics.
Args:
n_inputs: The number of input nodes into each output.
n_outputs: The number of output nodes for each input.
uniform: If true use a uniform distribution, otherwise use a normal.
Returns:
An initializer.
"""
if uniform:
# 6 was used in the paper.
init_range = math.sqrt(6.0 / (n_inputs + n_outputs))
return tf.random_uniform_initializer(-init_range, init_range)
else:
# 3 gives us approximately the same limits as above since this repicks
# values greater than 2 standard deviations from the mean.
stddev = math.sqrt(3.0 / (n_inputs + n_outputs))
return tf.truncated_normal_initializer(stddev=stddev)
# Set model weights
W1 = tf.get_variable("W1", shape=[784, 256], initializer=xavier_init(784, 256))
W2 = tf.get_variable("W2", shape=[256, 256], initializer=xavier_init(256, 256))
W3 = tf.get_variable("W3", shape=[256, 256], initializer=xavier_init(256, 256))
W4 = tf.get_variable("W4", shape=[256, 256], initializer=xavier_init(256, 256))
W5 = tf.get_variable("W5", shape=[256, 10], initializer=xavier_init(256, 10))
B1 = tf.Variable(tf.random_normal([256]))
B2 = tf.Variable(tf.random_normal([256]))
B3 = tf.Variable(tf.random_normal([256]))
B4 = tf.Variable(tf.random_normal([256]))
B5 = tf.Variable(tf.random_normal([10]))
# Construct model
dropout_rate = tf.placeholder("float")
_L1 = tf.nn.relu(tf.add(tf.matmul(X, W1), B1)) # Hidden layer with RELU activation
L1 = tf.nn.dropout(_L1, dropout_rate)
_L2 = tf.nn.relu(tf.add(tf.matmul(L1, W2), B2)) # Hidden layer with RELU activation
L2 = tf.nn.dropout(_L2, dropout_rate)
_L3 = tf.nn.relu(tf.add(tf.matmul(L2, W3), B3)) # Hidden layer with RELU activation
L3 = tf.nn.dropout(_L3, dropout_rate)
_L4 = tf.nn.relu(tf.add(tf.matmul(L3, W4), B4)) # Hidden layer with RELU activation
L4 = tf.nn.dropout(_L4, dropout_rate)
hypothesis = tf.add(tf.matmul(L4, W5), B5) # No need to use softmax here
# Minimize error using cross entropy
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y)) # Softmax loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Gradient Descent
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Fit training using batch data
sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys, dropout_rate: 0.7})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys, dropout_rate: 0.7}) / total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels, dropout_rate: 1}))
| [
"[email protected]"
]
| |
442f038c532429a81876bc8698542bb72ca76432 | f8376e83352d2dfab28c41f24345071a77a45fd9 | /Regular Expression/re/phn.py | 80c12d5a86a9d9db9511ad8c56d94500a7275064 | []
| no_license | harihavwas/pythonProgram | 2111ee98eccda68165159db0305c413ee53ee38a | 126df8b3a418dbaf618575b450fd4cfde44c80a7 | refs/heads/master | 2023-07-27T23:39:10.867329 | 2021-09-16T15:35:00 | 2021-09-16T15:35:00 | 402,320,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | import re
with open('phn', 'r') as f:
    x = r'\+91\d{10}'
    for i in f:
        s = i.rstrip('\n')
        m = re.fullmatch(x, s)
        if m is not None:
            print(s)
"[email protected]"
]
| |
19172244f167fb5ed0a40749ee2b2ec36237c41a | e65ae5bd9ae1c93e7117e630f7340bc73aa71212 | /lib/gevent/greentest/2.6/test_ftplib.py | 1c2ceeb87282ee5345ebdfd72f7fea38d9e08d23 | [
"MIT"
]
| permissive | nadirhamid/oneline | e98ff1ed81da0536f9602ecdde2fb2a4fe80d256 | 833ebef0e26ae8e0cc452756381227746d830b23 | refs/heads/master | 2021-01-21T04:27:41.715047 | 2016-05-30T03:50:34 | 2016-05-30T03:50:34 | 23,320,578 | 1 | 2 | NOASSERTION | 2020-03-12T17:22:24 | 2014-08-25T16:29:36 | Python | UTF-8 | Python | false | false | 15,567 | py | """Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class and IPv6 environment
import ftplib
import threading
import asyncore
import asynchat
import socket
import StringIO
from unittest import TestCase
from test import test_support
from test.test_support import HOST
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024)
def handle_close(self):
self.baseclass.push('226 transfer complete')
self.close()
class DummyFTPHandler(asynchat.async_chat):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator("\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = ''.join(self.in_buffer)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data + '\r\n')
def cmd_port(self, arg):
addr = map(int, arg.split(','))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=2)
self.dtp = DummyDTPHandler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
sock = socket.socket()
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ',')
p1, p2 = divmod(port, 256)
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = DummyDTPHandler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=2)
self.dtp = DummyDTPHandler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
sock = socket.socket(socket.AF_INET6)
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = DummyDTPHandler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
self.dtp.push(RETR_DATA)
self.dtp.close_when_done()
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accept(self):
conn, addr = self.accept()
self.handler = self.handler(conn)
self.close()
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=2)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_retrbinary(self):
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = StringIO.StringIO(RETR_DATA)
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storlines(self):
f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
self.client.makeport()
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
self.client.makeport()
self.assertEqual(self.server.handler.last_received_cmd, 'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
self.assertEqual(self.server.handler.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = test_support.bind_port(self.sock)
threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
ftplib.FTP.port = self.port
def tearDown(self):
self.evt.wait()
def server(self, evt, serv):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
serv.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
conn.send("1 Hola mundo\n")
# (2) Signal the caller that it is safe to close the socket.
evt.set()
conn.close()
finally:
serv.close()
# (3) Signal the caller that we are done.
evt.set()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assert_(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost")
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assert_(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost", timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(ftp.sock.gettimeout() is None)
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts]
if socket.has_ipv6:
try:
DummyFTPServer((HOST, 0), af=socket.AF_INET6)
except socket.error:
pass
else:
tests.append(TestIPv6Environment)
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
| [
"[email protected]"
]
| |
5210afc84531e89637ca9db677df678b13d46e8d | f89cd872172489785df20354c7a78bc332c4d894 | /equationTemplate.py | e46c6c29a3f9ac3c018bd91bbd236fa72b2eb375 | []
| no_license | amararora07/CodeFights | d565ed21b1f5c2fbe4d902159db61bee8244e1c8 | 51e3cb75eb32d22dac60f380b1f5b87822678c20 | refs/heads/master | 2021-09-06T15:45:08.716269 | 2018-02-08T06:06:52 | 2018-02-08T06:06:52 | 109,230,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | from itertools import permutations as p
def equationTemplate(v):
for i in p(v,4):
        if i[0]*i[1]*i[2]==i[3]:
return True
elif i[0]*i[1]==i[2]*i[3]:
return True
return False
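# Example: equationTemplate([2, 3, 4, 24]) is True because the ordering (2, 3, 4, 24)
# satisfies 2*3*4 == 24, while equationTemplate([1, 2, 3, 7]) is False since no
# ordering of it satisfies either template.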
| [
"[email protected]"
]
| |
380cb5a1e2cf85fb3c7431a312ad036cab0a410f | 000e9c92b8b86402ab506a191cc60302f2c269a3 | /orders/migrations/0004_translations.py | c2c7f568841f74a299851a573b8e9ccd93dfb5b5 | [
"MIT"
]
| permissive | FrankCasanova/onlineshop | 71c29fe3cc6a1dbb715474ffb09bde98443591af | 1a9011ce3d49976e2584cdadc33893d04947a73b | refs/heads/master | 2023-08-25T20:24:15.754513 | 2021-10-22T16:59:34 | 2021-10-22T16:59:34 | 406,788,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | # Generated by Django 3.2.8 on 2021-10-17 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0003_auto_20211011_1122'),
]
operations = [
migrations.AlterField(
model_name='order',
name='address',
field=models.CharField(max_length=250, verbose_name='address'),
),
migrations.AlterField(
model_name='order',
name='city',
field=models.CharField(max_length=100, verbose_name='city'),
),
migrations.AlterField(
model_name='order',
name='email',
field=models.EmailField(max_length=254, verbose_name='e-mail'),
),
migrations.AlterField(
model_name='order',
name='first_name',
field=models.CharField(max_length=50, verbose_name='first_name'),
),
migrations.AlterField(
model_name='order',
name='last_name',
field=models.CharField(max_length=50, verbose_name='last_name'),
),
migrations.AlterField(
model_name='order',
name='postal_code',
field=models.CharField(max_length=20, verbose_name='postal code'),
),
]
| [
"[email protected]"
]
| |
8a06a2e7dfcfe5bf589af9767e48dd05d03919eb | cc6e7f63eaf4b3570771c46fb8b24b88e6e1f59e | /typical/TDPC/A.py | 0980d25005faca221dd08fe47b5fde2dab33484c | []
| no_license | kamojiro/atcoderall | 82a39e9be083a01c14445417597bf357e6c854a8 | 973af643c06125f52d302a5bc1d65f07a9414419 | refs/heads/master | 2022-07-12T00:14:38.803239 | 2022-06-23T10:24:54 | 2022-06-23T10:24:54 | 161,755,381 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from copy import deepcopy
N = int( input())
P = list( map(int, input().split()))
Q = set([0])
for i in range(N):
R = deepcopy(Q)
p = P[i]
for r in R:
Q.add(r+p)
print(len(Q))
| [
"[email protected]"
]
| |
2bedb12cb2b704a1990287c3c9623e526b68825e | 833a83e8fd32041b20c0a13f6bf0759c4067bee6 | /homeworkpal_project/maximo/migrations/0003_auto_20151104_2238.py | acc9ef30e6f720f7fc7d129fd7a46d5c7829d0d7 | [
"MIT"
]
| permissive | luiscberrocal/homeworkpal | ac92de1dcbd43d9290fde8174f4d4544ed2cad23 | 342acf876264fade818b107f4af13cac067f1ace | refs/heads/master | 2020-12-12T13:13:47.022473 | 2015-12-29T19:38:43 | 2015-12-29T19:38:43 | 44,059,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('maximo', '0002_datadocument'),
]
operations = [
migrations.AlterField(
model_name='datadocument',
name='processed',
field=models.DateTimeField(null=True, blank=True),
),
]
| [
"[email protected]"
]
| |
21fbee6df470e1100c9990e738892141bd260770 | a364f53dda3a96c59b2b54799907f7d5cde57214 | /easy/278-First Bad Version.py | b8730330fdf693d30feb90c5163921ac734e1e16 | [
"Apache-2.0"
]
| permissive | Davidxswang/leetcode | 641cc5c10d2a97d5eb0396be0cfc818f371aff52 | d554b7f5228f14c646f726ddb91014a612673e06 | refs/heads/master | 2022-12-24T11:31:48.930229 | 2020-10-08T06:02:57 | 2020-10-08T06:02:57 | 260,053,912 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | """
You are a product manager and currently leading a team to develop a new product. Unfortunately, the latest version of your product fails the quality check. Since each version is developed based on the previous version, all the versions after a bad version are also bad.
Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one, which causes all the following ones to be bad.
You are given an API bool isBadVersion(version) which will return whether version is bad. Implement a function to find the first bad version. You should minimize the number of calls to the API.
Example:
Given n = 5, and version = 4 is the first bad version.
call isBadVersion(3) -> false
call isBadVersion(5) -> true
call isBadVersion(4) -> true
Then 4 is the first bad version.
"""
# The tricky part here is the case start == end.
# time complexity: O(logn), space complexity: O(logn) due to the function call stack
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return an integer
# def isBadVersion(version):
class Solution:
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
return self.check(1, n)
def check(self, start: int, end: int) -> int:
if start == end:
return start
middle = start + (end - start) // 2
middleResult = isBadVersion(middle)
if middleResult:
return self.check(start, middle)
else:
middle1Result = isBadVersion(middle + 1)
if middle1Result:
return middle + 1
else:
return self.check(middle + 1, end) | [
"[email protected]"
]
| |
5b74f7b4264a2bdcf246eb141174ffb4f69616fe | 9fbe90eab4cb25022e7c93776da3a5733656a09a | /examples/boardgame/player.py | 039c9731b60bbac9246f112b24ac637561d86a8e | [
"MIT"
]
| permissive | Nathanator/networkzero | 453e218d6e0b8080158cb968f4acc5e0cb0fb65c | e6bf437f424660c32cf1ef81f83d9eee925f44e7 | refs/heads/master | 2021-01-15T13:14:53.101742 | 2016-04-07T20:32:28 | 2016-04-07T20:32:28 | 55,724,894 | 0 | 0 | null | 2016-04-07T20:12:18 | 2016-04-07T20:12:17 | null | UTF-8 | Python | false | false | 197 | py | import networkzero as nw0
address = nw0.discover("board")
player = input("Which player? ")
while True:
move = input("Move: ")
nw0.send_command(address, "MOVE '%s' '%s'" % (player, move))
| [
"[email protected]"
]
| |
13fa8feb12381497d43c29fb6b3033f1e053a654 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/090d103842942eab3616de2464e26c4db3b402611e2f44f446e8b4086b8df170/xml/parsers/expat/model.py | a7f24af4b3f303679b553ba09459d025a5309dbf | []
| no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | # encoding: utf-8
# module xml.parsers.expat.model calls itself pyexpat.model
# from C:\Users\Doly\Anaconda3\lib\site-packages\numba\npyufunc\workqueue.cp37-win_amd64.pyd
# by generator 1.147
""" Constants used to interpret content model information. """
# no imports
# Variables with simple values
XML_CQUANT_NONE = 0
XML_CQUANT_OPT = 1
XML_CQUANT_PLUS = 3
XML_CQUANT_REP = 2
XML_CTYPE_ANY = 2
XML_CTYPE_CHOICE = 5
XML_CTYPE_EMPTY = 1
XML_CTYPE_MIXED = 3
XML_CTYPE_NAME = 4
XML_CTYPE_SEQ = 6
__loader__ = None
__spec__ = None
# no functions
# no classes
| [
"[email protected]"
]
| |
aaef2d15129a5165f1996b41811e74b2bb8706b9 | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/20f21bf6-5cc5-11e4-af55-00155d01fe08.py | 291d4124651b9efe02966dcbd2fccda4c97ca607 | [
"MIT"
]
| permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | #!/usr/bin/python
################################################################################
# 20f21bf6-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# [email protected]
# [email protected]
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20f21bf6-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
        # Get registry string (REG_SZ) value
sz = cli.get_reg_sz(r'HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon', 'Allocatefloppies')
# Output Lines
self.output = [r'HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon', ('Allocatefloppies=' + sz)]
if sz == "0":
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\Software\Microsoft\Windows NT'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Microsoft\Windows NT\CurrentVersion'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon' -name 'Allocatefloppies' -value 0")
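
# --- Local sketch (hypothetical): the real `cli` object is supplied by the
# PCAT2PY framework; this stand-in fakes only the two methods used above.
class _FakeCli:
    def get_reg_sz(self, key, name):
        return "0"  # pretend the value is already compliant

    def powershell(self, cmd):
        print("would run: " + cmd)

if __name__ == "__main__":
    finding = Finding()
    print(finding.check(_FakeCli()))  # True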
| [
"[email protected]"
]
| |
d979d6055cd3b9523c5c7306b9146672c4d1ba5a | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/journey.py | fbec726990b63d058dbf48495740315328d8d4e3 | []
| no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 1,097 | py | from __future__ import annotations
from dataclasses import dataclass, field
from xsdata.models.datatype import XmlDuration
from travelport.models.air_segment_ref import AirSegmentRef
__NAMESPACE__ = "http://www.travelport.com/schema/air_v52_0"
@dataclass
class Journey:
"""
Information about all connecting segment list and total traveling time.
Parameters
----------
air_segment_ref
travel_time
Total traveling time that is difference between the departure time
of the first segment and the arrival time of the last segments for
that particular entire set of connection.
"""
class Meta:
namespace = "http://www.travelport.com/schema/air_v52_0"
air_segment_ref: list[AirSegmentRef] = field(
default_factory=list,
metadata={
"name": "AirSegmentRef",
"type": "Element",
"max_occurs": 999,
}
)
travel_time: None | XmlDuration = field(
default=None,
metadata={
"name": "TravelTime",
"type": "Attribute",
}
)
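
# --- Usage sketch (assumptions: AirSegmentRef is constructible with defaults,
# as is typical for xsdata-generated models; XmlDuration parses ISO 8601
# duration strings):
#
#     journey = Journey(
#         air_segment_ref=[AirSegmentRef(), AirSegmentRef()],
#         travel_time=XmlDuration("PT5H30M"),
#     )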
| [
"[email protected]"
]
| |
6f7393b8be9b1f7cdda141ca678315df0f7d0786 | 288a00d2ab34cba6c389b8c2444455aee55a8a95 | /tests/data23/recipe-435885.py | e88c6b46b1f95afffedafd3382b1d82cbf0470bf | [
"BSD-2-Clause"
]
| permissive | JohannesBuchner/pystrict3 | ffd77b7bbc378bd4d8f21b5c6bd69a0d64a52ddb | 18b0dd369082422f9bf0f89c72e7acb53a49849c | refs/heads/master | 2023-08-14T06:37:37.954880 | 2023-07-13T11:16:38 | 2023-07-13T11:16:38 | 268,571,175 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,111 | py | # dispatch.py
# definitions:
import threading
class Dispatcher(object):
def __init__(self, targets=None, nonBlocking=True):
        if not targets:
self._targets = []
else:
self._targets = targets
self._nonBlocking = nonBlocking
def __iadd__(self, target):
self._targets.append(target)
return self
def __isub__(self, target):
self._targets.remove(target)
return self
def isNonBlocking(self):
return self._nonBlocking
nonBlocking = property(isNonBlocking)
def __call__(self, *listArgs, **kwArgs):
def invokeTargets():
for target in self._targets:
target(*listArgs, **kwArgs)
if self.nonBlocking:
threading.Timer(0, invokeTargets).start()
else:
invokeTargets()
# demos:
def Test1():
"""
A simple example demonstrating most functionality.
"""
def m1():
print('m1 invoked')
def m2():
print('m2 invoked')
e = Dispatcher()
e += m1
e += m2
e += m2
print('Dispatching:')
e()
e -= m1
print('Dispatching:')
e()
e -= m2
print('Dispatching:')
e()
def Test2():
"""
A more realistic example for the OO programmer.
"""
class Sprite(object):
def __init__(self, location):
self._location = location
locationChanged = Dispatcher()
def getLocation(self):
return self._location
def setLocation(self, newLocation):
oldLocation = self._location
self._location = newLocation
# Dispatch a "property change event"
self.locationChanged(oldLocation, newLocation)
location = property(getLocation, setLocation)
s = Sprite((2,4))
def SpriteLocationChanged(oldLocation, newLocation):
print('oldLocation =', oldLocation)
print('newLocation =', newLocation)
s.locationChanged += SpriteLocationChanged
s.location = (3,4)
s.location = (4,4)
if __name__ == '__main__':
Test1()
Test2()
| [
"[email protected]"
]
| |
cb0e7bf0d07ab3a63cbf6e86a3f500d771f3843e | aced407b41f6669f69e9eb8bd599260d50c0bd3f | /server/libs/top/api/rest/TradeShippingaddressUpdateRequest.py | 0f5dd43dd498846c1ab1208cb7481da7a49cf645 | []
| no_license | alswl/music_sofa | 42f7d15431f11b97bf67b604cfde0a0e9e3860cc | c4e5425ef6c80c3e57c91ba568f7cbfe63faa378 | refs/heads/master | 2016-09-12T18:37:34.357510 | 2016-05-20T11:49:52 | 2016-05-20T11:49:52 | 58,946,171 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | '''
Created by auto_sdk on 2013-11-07 12:53:22
'''
from top.api.base import RestApi
class TradeShippingaddressUpdateRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.receiver_address = None
self.receiver_city = None
self.receiver_district = None
self.receiver_mobile = None
self.receiver_name = None
self.receiver_phone = None
self.receiver_state = None
self.receiver_zip = None
self.tid = None
def getapiname(self):
return 'taobao.trade.shippingaddress.update'
| [
"[email protected]"
]
| |
0203ee949e80c337db199c170eae4e2cfd879524 | 0930b6c994225d44818887716ce4e8771af86b81 | /exercisesDosCapitulos/10-arquivosEExcecoes/10.1-aprendendoPython/aprendendoPython.py | 6e32d8cb6c3d8180a69b10b03a53d70b8a10c8cd | []
| no_license | jonasht/cursoIntesivoDePython | 44d81b08f1652c4fa7a6d14a0e3f62ee8e06944c | fb5f5c9884fb1a6062a7c4e7676e5cc3b13c0ebb | refs/heads/master | 2023-05-23T20:44:34.372825 | 2021-06-19T12:13:46 | 2021-06-19T12:13:46 | 293,325,804 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 728 | py |
# para poder achar o arquivo no ubuntu deve-se escrever no terminal
# code "diretorio" do arquivo para poder funcionar
arquivoDeTexto = 't.txt'
def l(): print('=-'*40+'=')
print()
l()
print('mostrando um arquivo todo de uma so vez com arquivo.read()')
with open(arquivoDeTexto) as arq:
palavras = arq.read()
print(palavras)
l()
print('percorrendo o objeto arquivo com um laço "for" ')
with open(arquivoDeTexto) as arquivo:
for frase in arquivo:
print(frase.rstrip())
l()
print('armazendo linhas em uma lista e trabalhando com ela fora do "with" usando um "for"')
with open(arquivoDeTexto) as arquivo:
linhas = arquivo.readlines()
for linha in linhas:
print(linha.rstrip())
l()
| [
"[email protected]"
]
| |
f9a3905061d392da39f24c565147913325dbd3f4 | 3b625b6a8867c71399b421615f2391269e6dee53 | /appfordomaintests_de_2065/wsgi.py | 1c0dab674258642ab854785653e2665b332b5146 | []
| no_license | crowdbotics-apps/appfordomaintests-de-2065 | cd691f1b94ed3f792724f7d0316518400c07619c | 78e2519a37f767953064c31e898d08b7b395b6bb | refs/heads/master | 2022-04-15T19:07:57.805517 | 2020-03-19T16:48:08 | 2020-03-19T16:48:08 | 248,555,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | """
WSGI config for appfordomaintests_de_2065 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfordomaintests_de_2065.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
0670598f0b9e25686ea6b06c303213ef4d557478 | 5e2dddce9c67d5b54d203776acd38d425dbd3398 | /spacy/lang/bn/tokenizer_exceptions.py | 82f3cfaf78640cf4e4be76697544dcba61533595 | [
"MIT"
]
| permissive | yuxuan2015/spacy_zh_model | 8164a608b825844e9c58d946dcc8698853075e37 | e89e00497ab3dad0dd034933e25bc2c3f7888737 | refs/heads/master | 2020-05-15T11:07:52.906139 | 2019-08-27T08:28:11 | 2019-08-27T08:28:11 | 182,213,671 | 1 | 0 | null | 2019-04-19T06:27:18 | 2019-04-19T06:27:17 | null | UTF-8 | Python | false | false | 953 | py | # coding=utf-8
from __future__ import unicode_literals
from ...symbols import ORTH, LEMMA
_exc = {}
for exc_data in [
{ORTH: "ডঃ", LEMMA: "ডক্টর"},
{ORTH: "ডাঃ", LEMMA: "ডাক্তার"},
{ORTH: "ড.", LEMMA: "ডক্টর"},
{ORTH: "ডা.", LEMMA: "ডাক্তার"},
{ORTH: "মোঃ", LEMMA: "মোহাম্মদ"},
{ORTH: "মো.", LEMMA: "মোহাম্মদ"},
{ORTH: "সে.", LEMMA: "সেলসিয়াস"},
{ORTH: "কি.মি.", LEMMA: "কিলোমিটার"},
{ORTH: "কি.মি", LEMMA: "কিলোমিটার"},
{ORTH: "সে.মি.", LEMMA: "সেন্টিমিটার"},
{ORTH: "সে.মি", LEMMA: "সেন্টিমিটার"},
{ORTH: "মি.লি.", LEMMA: "মিলিলিটার"}]:
_exc[exc_data[ORTH]] = [exc_data]
TOKENIZER_EXCEPTIONS = _exc
| [
"[email protected]"
]
| |
34be8784c8de3e7f0b2d38864291b7b19e58d65a | d9a4121ac2872bbe3f76564caebe6818dc5888a7 | /tests/test_analysis_submission_response.py | 84d476d5916af4ccb26e3b41aa77c5f6c8d8d179 | [
"MIT"
]
| permissive | s0b0lev/mythx-models | ecb07abada43eb9c26929bfd6cd76dca9105207f | 0fc14fef9e41a68a7d97e0bb170fd0eca5693d9a | refs/heads/master | 2020-08-20T19:22:14.320454 | 2019-10-11T08:32:04 | 2019-10-11T08:32:04 | 216,057,981 | 0 | 0 | MIT | 2019-10-18T15:47:10 | 2019-10-18T15:47:09 | null | UTF-8 | Python | false | false | 2,212 | py | import json
import pytest
from mythx_models.exceptions import ValidationError
from mythx_models.response import Analysis, AnalysisSubmissionResponse
from mythx_models.util import serialize_api_timestamp
from . import common as testdata
def assert_analysis_data(expected, analysis: Analysis):
assert expected["apiVersion"] == analysis.api_version
assert expected["maruVersion"] == analysis.maru_version
assert expected["mythrilVersion"] == analysis.mythril_version
assert expected["harveyVersion"] == analysis.harvey_version
assert expected["queueTime"] == analysis.queue_time
assert expected["runTime"] == analysis.run_time
assert expected["status"] == analysis.status
assert expected["submittedAt"] == serialize_api_timestamp(analysis.submitted_at)
assert expected["submittedBy"] == analysis.submitted_by
assert expected["uuid"] == analysis.uuid
def test_analysis_submission_from_valid_json():
resp = AnalysisSubmissionResponse.from_json(
json.dumps(testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT)
)
assert_analysis_data(testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT, resp.analysis)
def test_analysis_submission_from_empty_json():
with pytest.raises(ValidationError):
AnalysisSubmissionResponse.from_json("{}")
def test_analysis_submission_from_valid_dict():
resp = AnalysisSubmissionResponse.from_dict(
testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT
)
assert_analysis_data(testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT, resp.analysis)
def test_analysis_submission_from_empty_dict():
with pytest.raises(ValidationError):
AnalysisSubmissionResponse.from_dict({})
def test_analysis_submission_to_dict():
d = testdata.ANALYSIS_SUBMISSION_RESPONSE_OBJECT.to_dict()
assert d == testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT
def test_analysis_submission_to_json():
json_str = testdata.ANALYSIS_SUBMISSION_RESPONSE_OBJECT.to_json()
assert json.loads(json_str) == testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT
def test_analysis_submission_property_delegation():
assert_analysis_data(
testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT,
testdata.ANALYSIS_SUBMISSION_RESPONSE_OBJECT,
)
| [
"[email protected]"
]
| |
40e701e304cdc95780f0c60fa96c57f9e665568e | ab269258a76b4a7f9af01de0b73144db23d6f005 | /System Scripting/Problem06/6P/pythonwmi-simplified.py | c16e30867db2ed09bc26032ce471117879c17a56 | []
| no_license | jubic/RP-Misc | 24715770b457c3f40db145f4f34d0fb775b71653 | 3c8e12646779e060180870475c0ef10773140e0f | refs/heads/master | 2016-09-06T07:00:36.032240 | 2013-03-30T09:10:02 | 2013-03-30T09:10:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | from win32com.client import Dispatch
import os
server = Dispatch("WbemScripting.SWbemLocator")
conn = server.ConnectServer("localhost", "root\\cimv2")
def query(what):
results = conn.ExecQuery("Select * from " + what)
collections = []
for item in results:
data = {}
for each in item.Properties_:
data[each.Name] = each.Value
collections.append(data)
return collections
def write_to_file(fd, results):
    for result in results:
        for key, value in result.items():
            fd.write("%40s = %s\n" % (key, value))
        fd.write("%50s" % "---------------------")
        fd.write("\n")
results = query("Win32_OperatingSystem")
filename = results[0]["CSName"]
f = open(filename, "wb")
f.write("%50s" % "====== OperatingSystem ======\n")
write_to_file(f, results)
f.write("%50s" % "====== Win32_Processor ======\n")
results = query("Win32_Processor")
write_to_file(f, results)
f.write("%50s" % "====== Win32_PhysicalMemory ======\n")
results = query("Win32_PhysicalMemory")
write_to_file(f, results)
f.write("%50s" % "====== Win32_LogicalDisk ======\n")
results = query("Win32_LogicalDisk")
write_to_file(f, results)
f.write("%50s" % "====== Win32_NetworkAdapterConfiguration ======\n")
results = query("Win32_NetworkAdapterConfiguration")
write_to_file(f, results)
f.write("%50s" % "====== Win32_Product ======\n")
results = query("Win32_Product")
write_to_file(f, results)
f.close()
| [
"[email protected]"
]
| |
d988c1a01af2913efd6faa4787ac8de7865acd11 | 4875d4e4ad63310e44086be4d8e450eba151ecaf | /code/pyworkshop/05_Dictionaries/02_Dict.py | d414fa02e0804626a53aafdee1dc7412c5c5c1ef | [
"MIT"
]
| permissive | archeranimesh/pythonFundamentals | 7a066ee1ee23a5a78623e5ed50da5167e2c59c16 | 35662181d95406505002fe6a39f577822bfd560b | refs/heads/master | 2020-06-01T12:15:51.828280 | 2019-07-13T17:48:21 | 2019-07-13T17:48:21 | 190,776,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | nums = {"one": 1, "two": 2, "three": 3}
# Add a new key/value pair.
nums["four"] = 4
print(nums)
# There are no duplicate keys in dictionaries.
# If a new value is assigned to an existing key,
# it overwrites the old value.
nums["two"] = "2222"
print(nums) # {'one': 1, 'two': '2222', 'three': 3, 'four': 4}
# Existence of a key in dict.
print("one" in nums)
nums["two"] = 2
print(nums)
# Combine two dicts.
rainbow = {"Green": "G", "Red": "R", "Blue": "B"}
rainbow.update(nums)
print(rainbow)
# Append value to a list in dict.
color = {"Green": ["Spinach"]}
print(color)
vegetable = color
print(type(vegetable["Green"]))
vegetable["Green"].append("Lettuce")
print(color)
# 3 important methods on dictionaries
# .keys(): returns a view object called dict_keys
print(nums.keys())
# .values(): returns a view object called dict_values
print(nums.values())
# .items(): returns a view of (key, value) tuples called dict_items
print(nums.items())
| [
"[email protected]"
]
| |
b75a006234cd636a9f0b674101009b376cf4ede1 | e5a0a77a66563511c72feda18229712f109ab16d | /code/Chapter 14 - configparser/crud_config.py | f82dbd809b9feb0fd1c7fc3301b61832c269eb04 | []
| no_license | driscollis/Python-101-Russian | 0136b3fe3edee025e4408a89c0461bb79ab4be07 | 28ce6727ef56dee8b6966526c5f80d8323ec9d73 | refs/heads/master | 2021-10-20T23:31:05.413934 | 2018-10-23T06:54:30 | 2018-10-23T06:54:30 | 149,648,717 | 0 | 2 | null | 2018-10-23T06:54:31 | 2018-09-20T17:53:06 | Python | UTF-8 | Python | false | false | 760 | py | import configparser
import os
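# The original Python 101 example defines createConfig in an earlier listing;
# this minimal stand-in (with assumed default values) makes the file standalone.
def createConfig(path):
    """
    Create a config file with default settings.
    """
    config = configparser.ConfigParser()
    config.add_section("Settings")
    config.set("Settings", "font", "Courier")
    config.set("Settings", "font_size", "10")
    config.set("Settings", "font_style", "Normal")
    with open(path, "w") as config_file:
        config.write(config_file)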
def crudConfig(path):
"""
Create, read, update, delete config
"""
if not os.path.exists(path):
createConfig(path)
config = configparser.ConfigParser()
config.read(path)
# read some values from the config
font = config.get("Settings", "font")
font_size = config.get("Settings", "font_size")
# change a value in the config
config.set("Settings", "font_size", "12")
# delete a value from the config
config.remove_option("Settings", "font_style")
# write changes back to the config file
with open(path, "w") as config_file:
config.write(config_file)
if __name__ == "__main__":
path = "settings.ini"
crudConfig(path) | [
"[email protected]"
]
| |
7ceae1ad282b1059676d6451c86751575d7e1e6b | a40950330ea44c2721f35aeeab8f3a0a11846b68 | /OpenCV-python读取监控/发送照片.py | 7485633f350fd958947abde317da53ecd06ae10f | []
| no_license | huang443765159/kai | 7726bcad4e204629edb453aeabcc97242af7132b | 0d66ae4da5a6973e24e1e512fd0df32335e710c5 | refs/heads/master | 2023-03-06T23:13:59.600011 | 2023-03-04T06:14:12 | 2023-03-04T06:14:12 | 233,500,005 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | import cv2
import socket
import pickle
import base64
import numpy as np
network = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
frame = cv2.imread('B.jpeg')
new_frame = cv2.resize(frame, (int(frame.shape[1] * 0.2), int(frame.shape[0] * 0.2)))
tx_data = pickle.dumps((b'\x01', new_frame))
print(len(tx_data))
head, rx_data = pickle.loads(tx_data)
new_rx = cv2.resize(rx_data, (int(rx_data.shape[1] * 2), int(rx_data.shape[0] * 2)))
# print(head, rx_data)
cv2.imshow('1', new_rx)
cv2.waitKey(0)
| [
"[email protected]"
]
| |
0a50a5878a88fadb82d274ab672c616160eb913b | 79e8e93a6807a4e2bd4923e0d9b78e3a7b38bb0b | /python/round2/permutations.py | 9ef6c0e553d85cf6940d2bfd03b7f8a1e35da930 | []
| no_license | djole103/algo | 2885c30e927898c749e99ee05ff6c8f43033c9eb | 5c60dc77fcc091d1b2c52de99ee3071d82e1e17f | refs/heads/master | 2020-04-12T02:28:58.300269 | 2017-04-19T23:18:04 | 2017-04-19T23:18:04 | 43,453,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | import collections
#O(n) storage
def isPermutation(str1, str2):
if str1 == None or str2 == None: return False
d = collections.defaultdict(int)
for l in str1:
d[l] +=1
for l in str2:
if l not in d:
return False
d[l] -= 1
if d[l] < 0:
return False
return True
def isPermutationLol(str1, str2):
return sorted(str1) == sorted(str2)
def allPermutations(str):
if len(str) <= 1: return str
perms = []
for i in range(len(str)):
perms += [ str[i]+x for x in allPermutations(str[:i] + str[i+1:])]
return perms
print(allPermutations("abc"))
def swapPermute(xs, low=0):
if low+1 >= len(xs):
yield xs
else:
for p in swapPermute(xs, low+1):
yield p
for i in range(low+1,len(xs)):
xs[low], xs[i] = xs[i], xs[low]
for p in swapPermute(xs, low+1):
yield p
xs[low], xs[i] = xs[i], xs[low]
for i in swapPermute(['a','b','c']):
print(i)
| [
"[email protected]"
]
| |
c1d2f5c25c0936309c89953f0cef51921de927b6 | e34cbf5fce48f661d08221c095750240dbd88caf | /python/day26/learncrm/crmAdmin/templatetags/__init__.py | a32c81fa794bf67b4db658cf0d748f8b0d206638 | []
| no_license | willianflasky/growup | 2f994b815b636e2582594375e90dbcb2aa37288e | 1db031a901e25bbe13f2d0db767cd28c76ac47f5 | refs/heads/master | 2023-01-04T13:13:14.191504 | 2020-01-12T08:11:41 | 2020-01-12T08:11:41 | 48,899,304 | 2 | 0 | null | 2022-12-26T19:46:22 | 2016-01-02T05:04:39 | C | UTF-8 | Python | false | false | 55 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# by Wendy | [
"[email protected]"
]
| |
5fd162325d1b76f2416508c204ac01e3912b2b7c | eb067a086adce4571a8d69db5423db41d8817d0d | /test.py | e28ceca3d03757572d445f642b7afc80d5a00003 | []
| no_license | thomasballinger/simplerest | 09f47d1e6a3a4f5b6dc5de0f511dfd9d14783e8a | d0309b5a9439de8c16d107d33e4784e0a9b068a9 | refs/heads/master | 2016-09-15T19:00:37.995063 | 2014-05-22T13:12:18 | 2014-05-22T13:12:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | import socket
server = socket.socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('', 7000))
server.listen(5)
while True:
s, (ip, port) = server.accept()
    data = s.recv(10000)
print data
s.send("your page")
s.close()
| [
"[email protected]"
]
| |
7924494e333eaaa3fc1fb45014a438dff96f2abb | c5542154b44f1b228cdadeaf44c6a5998ed37ed4 | /base/day3/if/2numif.py | cf3ae469fede9e992a02e8e751cd5ee19d44e9a9 | []
| no_license | ThrallOtaku/python3Test | a31a2de1576b3a3c1062a7d6908119d7cbf21b36 | c62e6024bbbeafd396b68e40332991758914ba0b | refs/heads/master | 2020-03-07T22:45:47.403999 | 2018-06-08T10:19:42 | 2018-06-08T10:19:42 | 127,763,269 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | '''
x=10
num= 10 if x>18 else 20
print(num)
'''
#ages=10
#price= 20 if ages>=16 else 10
import os
os.system("calc") if 3>12 else os.system("notepad") | [
"[email protected]"
]
| |
fe76135fa9eccd572a16fd5da8714993539e245e | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_route_filters_operations.py | 0a30eb3cd481c39e0c762bf92fbb47cf91856be2 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
]
| permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 25,449 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RouteFiltersOperations(object):
"""RouteFiltersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
route_filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Deletes the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
route_filter_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.RouteFilter"
"""Gets the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param expand: Expands referenced express route bgp peering resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.RouteFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteFilter"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
route_filter_parameters, # type: "models.RouteFilter"
**kwargs # type: Any
):
# type: (...) -> "models.RouteFilter"
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteFilter"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_parameters, 'RouteFilter')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
route_filter_name, # type: str
route_filter_parameters, # type: "models.RouteFilter"
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Creates or updates a route filter in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param route_filter_parameters: Parameters supplied to the create or update route filter
operation.
:type route_filter_parameters: ~azure.mgmt.network.v2020_03_01.models.RouteFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either RouteFilter or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.RouteFilter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteFilter"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
route_filter_parameters=route_filter_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
route_filter_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.RouteFilter"
"""Updates tags of a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param parameters: Parameters supplied to update route filter tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.RouteFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteFilter"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.RouteFilterListResult"]
"""Gets all route filters in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteFilterListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.RouteFilterListResult"]
"""Gets all route filters in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteFilterListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeFilters'} # type: ignore
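
    # --- Usage sketch (assumes an authenticated NetworkManagementClient whose
    # `route_filters` attribute is this operations class):
    #
    #     poller = client.route_filters.begin_delete("my-rg", "my-filter")
    #     poller.result()  # block until the long-running operation completes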
| [
"[email protected]"
]
| |
3b34c3aa261597bb0b7a20265a7d26473b548dd0 | a50a4e874d3d203344a47bc7ad9c317b213eab90 | /base/config.py | 28c8fb077efd365c3408ab3d90723e234358ad31 | []
| no_license | fjl121029xx/yarn-api-python | d5b61ca0695d5fdc4f8923d5814f6576c3c87509 | 4468609dea2d7630fd9fc3dabbe7c02ded7aa4a1 | refs/heads/master | 2020-12-04T02:02:40.913088 | 2020-02-27T08:08:18 | 2020-02-27T08:08:18 | 231,563,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,782 | py | AppConfig = {
'DOHKO': {
# 'livyServerUri': 'http://172.20.44.6:8999/sessions/',
# 'yarnServerUri': 'http://172.20.44.6:8088/ws/v1/cluster/apps/',
# 'livyServerPath': '/usr/hdp/current/livy2-server/bin/livy-server',
'livyServerUri': 'http://172.26.25.148:8999/sessions/',
'yarnServerUri': 'http://172.26.25.148:8088/ws/v1/cluster/apps/',
'livyServerPath': '/home/hadoop/livy/bin/livy-server',
'readApp': {
"jars": ["hdfs://cluster/yqs/tools/engine-0.0.1-SNAPSHOT.jar"],
"pyFiles": [],
"files": [],
"archives": [],
"kind": 'spark',
"driverMemory": '2g',
"driverCores": 1,
"executorMemory": '2g',
"executorCores": 2,
"numExecutors": 4,
"queue": 'default',
"heartbeatTimeoutInSecond": 86400,
"proxyUser": 'yqs',
'conf': {
"spark.default.parallelism": 12,
"spark.rdd.compress": True,
"spark.io.compression.codec": "snappy"
}
},
'writeApp': {
"jars": ["hdfs://cluster/yqs/tools/engine-0.0.1-SNAPSHOT.jar"],
"pyFiles": [],
"files": [],
"archives": [],
"kind": 'spark',
"driverMemory": '512m',
"driverCores": 1,
"executorMemory": '2g',
"executorCores": 2,
"numExecutors": 2,
"queue": 'default',
"heartbeatTimeoutInSecond": 86400,
"proxyUser": 'yqs',
'conf': {
"spark.default.parallelism": 12,
"spark.rdd.compress": True,
"spark.io.compression.codec": "snappy"
}
}
},
'PRODUCT': {
# 'livyServerUri': 'http://rm.yqs.hualala.com:8999/sessions/',
# 'yarnServerUri': 'http://rm.yqs.hualala.com:8088/ws/v1/cluster/apps/',
# 'livyServerPath': '/home/olap/tools/apps/livy/bin/livy-server',
'livyServerUri': 'http://172.26.25.148:8999/sessions/',
'yarnServerUri': 'http://172.26.25.148:8088/ws/v1/cluster/apps/',
'livyServerPath': '/home/hadoop/livy/bin/livy-server',
'readApp': {
"jars": ["hdfs://cluster/yqs/tools/engine-0.0.1-SNAPSHOT.jar"],
"pyFiles": [],
"files": [],
"archives": [],
"kind": 'spark',
"driverMemory": '16g',
"driverCores": 8,
"executorMemory": '10g',
"executorCores": 6,
"numExecutors": 35,
"queue": 'default',
"heartbeatTimeoutInSecond": 86400,
"proxyUser": None,
'conf': {
"spark.default.parallelism": 400,
"spark.scheduler.mode": "FAIR",
"spark.serializer": "org.apache.spark.serializer.KryoSerializer",
"spark.rdd.compress": True,
"spark.io.compression.codec": "snappy",
"spark.sql.inMemoryColumnarStorage.batchSize": 300000,
"spark.sql.files.maxPartitionBytes": 134217728,
"spark.sql.broadcastTimeout": 60,
"spark.sql.orc.enabled": True,
"spark.sql.orc.impl": "native",
"spark.sql.orc.enableVectorizedReader": True,
"spark.sql.hive.convertMetastoreOrc": True
}
},
'writeApp': {
"jars": ["hdfs://cluster/yqs/tools/engine-0.0.1-SNAPSHOT.jar"],
"pyFiles": [],
"files": [],
"archives": [],
"kind": 'spark',
"driverMemory": '10g',
"driverCores": 4,
"executorMemory": '10g',
"executorCores": 6,
"numExecutors": 10,
"queue": 'default',
"heartbeatTimeoutInSecond": 86400,
"proxyUser": None,
'conf': {
"spark.default.parallelism": 400,
"spark.scheduler.mode": "FAIR",
"spark.serializer": "org.apache.spark.serializer.KryoSerializer",
"spark.rdd.compress": True,
"spark.io.compression.codec": "snappy",
"spark.sql.inMemoryColumnarStorage.batchSize": 300000,
"spark.sql.files.maxPartitionBytes": 134217728,
"spark.sql.broadcastTimeout": 60,
"spark.sql.orc.enabled": True,
"spark.sql.orc.impl": "native",
"spark.sql.orc.enableVectorizedReader": True,
"spark.sql.hive.convertMetastoreOrc": True,
"spark.sql.orc.filterPushdown": True,
"spark.sql.orc.char.enabled": True
}
}
}
}
| [
"[email protected]"
]
| |
688a69eeefdd18fc59f72c8a0c55e7ada6bac042 | 00ccdc877771cb0cf493526d1e201e0f625bf5e7 | /dohq_teamcity/models/vcs_root_entry.py | 71eac569239c8a77d6638dac74da2b020c9df602 | [
"MIT"
]
| permissive | expobrain/teamcity | a52928045166bb5d34f4a0396cb840bfee8f43d5 | 9f04c0692a2c5b277a608c2f11cc1fb48e0c87e2 | refs/heads/master | 2020-04-13T13:11:07.270515 | 2018-10-18T01:40:06 | 2018-10-18T01:40:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,526 | py | # coding: utf-8
from dohq_teamcity.custom.base_model import TeamCityObject
# from dohq_teamcity.models.vcs_root import VcsRoot # noqa: F401,E501
class VcsRootEntry(TeamCityObject):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'checkout_rules': 'str',
'id': 'str',
'inherited': 'bool',
'vcs_root': 'VcsRoot'
}
attribute_map = {
'checkout_rules': 'checkout-rules',
'id': 'id',
'inherited': 'inherited',
'vcs_root': 'vcs-root'
}
def __init__(self, checkout_rules=None, id=None, inherited=False, vcs_root=None, teamcity=None): # noqa: E501
"""VcsRootEntry - a model defined in Swagger""" # noqa: E501
self._checkout_rules = None
self._id = None
self._inherited = None
self._vcs_root = None
self.discriminator = None
if checkout_rules is not None:
self.checkout_rules = checkout_rules
if id is not None:
self.id = id
if inherited is not None:
self.inherited = inherited
if vcs_root is not None:
self.vcs_root = vcs_root
super(VcsRootEntry, self).__init__(teamcity=teamcity)
@property
def checkout_rules(self):
"""Gets the checkout_rules of this VcsRootEntry. # noqa: E501
:return: The checkout_rules of this VcsRootEntry. # noqa: E501
:rtype: str
"""
return self._checkout_rules
@checkout_rules.setter
def checkout_rules(self, checkout_rules):
"""Sets the checkout_rules of this VcsRootEntry.
:param checkout_rules: The checkout_rules of this VcsRootEntry. # noqa: E501
:type: str
"""
self._checkout_rules = checkout_rules
@property
def id(self):
"""Gets the id of this VcsRootEntry. # noqa: E501
:return: The id of this VcsRootEntry. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this VcsRootEntry.
:param id: The id of this VcsRootEntry. # noqa: E501
:type: str
"""
self._id = id
@property
def inherited(self):
"""Gets the inherited of this VcsRootEntry. # noqa: E501
:return: The inherited of this VcsRootEntry. # noqa: E501
:rtype: bool
"""
return self._inherited
@inherited.setter
def inherited(self, inherited):
"""Sets the inherited of this VcsRootEntry.
:param inherited: The inherited of this VcsRootEntry. # noqa: E501
:type: bool
"""
self._inherited = inherited
@property
def vcs_root(self):
"""Gets the vcs_root of this VcsRootEntry. # noqa: E501
:return: The vcs_root of this VcsRootEntry. # noqa: E501
:rtype: VcsRoot
"""
return self._vcs_root
@vcs_root.setter
def vcs_root(self, vcs_root):
"""Sets the vcs_root of this VcsRootEntry.
:param vcs_root: The vcs_root of this VcsRootEntry. # noqa: E501
:type: VcsRoot
"""
self._vcs_root = vcs_root
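
# --- Usage sketch (hypothetical; assumes the TeamCityObject base accepts
# teamcity=None for detached model instances):
#
#     entry = VcsRootEntry(checkout_rules="+:.", id="PROJECT_EXT_1")
#     print(entry.checkout_rules, entry.inherited)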
| [
"[email protected]"
]
| |
55107bfc0b70cbf0ce0416d7d702e61475dc14dd | f26521284741a1f730e2d52de7426807247e08b6 | /Python/Topics/Class/Who is who/main.py | 2bcfcf86bdc076784c5ae9755f976ba6ac78e8bc | [
"MIT"
]
| permissive | drtierney/hyperskill-problems | 0e6fe8ca418d1af700a5a1b1b2eed1f1f07b8e9e | b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0 | refs/heads/main | 2021-10-25T07:02:50.838216 | 2021-10-16T19:08:49 | 2021-10-16T19:08:49 | 253,045,232 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | class Angel:
color = "white"
feature = "wings"
home = "Heaven"
class Demon:
color = "red"
feature = "horns"
home = "Hell"
the_angel = Angel()
print(the_angel.color)
print(the_angel.feature)
print(the_angel.home)
the_demon = Demon()
for attribute in ['color', 'feature', 'home']:
print(getattr(the_demon, attribute))
| [
"[email protected]"
]
| |
ece8c2bdbe9075b3980b6da4c1de9129bd71031b | 495f28047fcc69ee4b579f90b421a9d87aa38f93 | /archconvnets/convnet2/python_util/data.py | a5a01dd5207b4816ae7c9cca0c8405e49f682eca | [
"Apache-2.0"
]
| permissive | shyamalschandra/archconvnets | 10dfffa5f29f8b35e2a7d095c934d2112bcc1f45 | 147d8eab7cd21c53b6689a8364cdb613bd602aa6 | refs/heads/master | 2021-01-22T16:38:46.513143 | 2015-01-21T21:14:50 | 2015-01-21T21:14:50 | 29,734,254 | 1 | 0 | null | 2015-01-23T13:47:35 | 2015-01-23T13:47:35 | null | UTF-8 | Python | false | false | 39,138 | py | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as n
from numpy.random import randn, rand, random_integers
import os
from threading import Thread
from collections import OrderedDict
from util import *
import time as systime
import math
import importlib
import hashlib
from skdata import larray
BATCH_META_FILE = "batches.meta"
class DataLoaderThread(Thread):
def __init__(self, path, tgt, mode='pickle'):
Thread.__init__(self)
self.path = path
if mode == 'numpy':
self.path = self.path + '.npy'
self.tgt = tgt
self.mode = mode
def run(self):
        if self.mode == 'pickle':
            self.tgt += [unpickle(self.path)]
        elif self.mode == 'numpy':
self.tgt += [n.load(self.path).reshape((1, ))[0]]
class DataProvider:
BATCH_REGEX = re.compile('^data_batch_(\d+)(\.\d+)?$')
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
if batch_range == None:
batch_range = DataProvider.get_batch_nums(data_dir)
if init_batchnum is None or init_batchnum not in batch_range:
init_batchnum = batch_range[0]
self.data_dir = data_dir
self.batch_range = batch_range
self.curr_epoch = init_epoch
self.curr_batchnum = init_batchnum
self.dp_params = dp_params
self.batch_meta = self.get_batch_meta(data_dir)
self.data_dic = None
self.test = test
self.batch_idx = batch_range.index(init_batchnum)
def get_next_batch(self):
if self.data_dic is None or len(self.batch_range) > 1:
self.data_dic = self.get_batch(self.curr_batchnum)
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
return epoch, batchnum, self.data_dic
def get_batch(self, batch_num, mode='pickle'):
fname = self.get_data_file_name(batch_num)
if mode == 'numpy':
fname += '.npy'
if os.path.isdir(fname): # batch in sub-batches
sub_batches = sorted(os.listdir(fname), key=alphanum_key)
#print sub_batches
num_sub_batches = len(sub_batches)
tgts = [[] for i in xrange(num_sub_batches)]
threads = [DataLoaderThread(os.path.join(fname, s), tgt, mode=mode) for (s, tgt) in zip(sub_batches, tgts)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return [t[0] for t in tgts]
if mode == 'pickle':
return unpickle(fname)
elif mode == 'numpy':
return n.load(fname).reshape((1, ))[0]
def get_data_dims(self,idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
def advance_batch(self):
self.batch_idx = self.get_next_batch_idx()
self.curr_batchnum = self.batch_range[self.batch_idx]
if self.batch_idx == 0: # we wrapped
self.curr_epoch += 1
def get_next_batch_idx(self):
return (self.batch_idx + 1) % len(self.batch_range)
def get_next_batch_num(self):
return self.batch_range[self.get_next_batch_idx()]
# get filename of current batch
def get_data_file_name(self, batchnum=None):
if batchnum is None:
batchnum = self.curr_batchnum
return os.path.join(self.data_dir, 'data_batch_%d' % batchnum)
@classmethod
def get_instance(cls, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, type="default", dp_params={}, test=False):
# why the fuck can't i reference DataProvider in the original definition?
#cls.dp_classes['default'] = DataProvider
type = type or DataProvider.get_batch_meta(data_dir)['dp_type'] # allow data to decide data provider
if type.startswith("dummy-"):
name = "-".join(type.split('-')[:-1]) + "-n"
if name not in dp_types:
raise DataProviderException("No such data provider: %s" % type)
_class = dp_classes[name]
dims = int(type.split('-')[-1])
return _class(dims)
elif type in dp_types:
_class = dp_classes[type]
return _class(data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
raise DataProviderException("No such data provider: %s" % type)
@classmethod
def register_data_provider(cls, name, desc, _class):
if name in dp_types:
raise DataProviderException("Data provider %s already registered" % name)
dp_types[name] = desc
dp_classes[name] = _class
@staticmethod
def get_batch_meta(data_dir):
return unpickle(os.path.join(data_dir, BATCH_META_FILE))
@staticmethod
def get_batch_filenames(srcdir):
return sorted([f for f in os.listdir(srcdir) if DataProvider.BATCH_REGEX.match(f)], key=alphanum_key)
@staticmethod
def get_batch_nums(srcdir):
names = DataProvider.get_batch_filenames(srcdir)
return sorted(list(set(int(DataProvider.BATCH_REGEX.match(n).group(1)) for n in names)))
@staticmethod
def get_num_batches(srcdir):
return len(DataProvider.get_batch_nums(srcdir))
class DummyDataProvider(DataProvider):
def __init__(self, data_dim):
#self.data_dim = data_dim
self.batch_range = [1]
self.batch_meta = {'num_vis': data_dim, 'data_in_rows':True}
self.curr_epoch = 1
self.curr_batchnum = 1
self.batch_idx = 0
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
data = rand(512, self.get_data_dims()).astype(n.single)
return self.curr_epoch, self.curr_batchnum, {'data':data}
class LabeledDataProvider(DataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
DataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
def get_num_classes(self):
return len(self.batch_meta['label_names'])
class LabeledDataProviderTrans(LabeledDataProvider):
def __init__(self, data_dir,
img_size, num_colors,
batch_range=None,
init_epoch=1, init_batchnum=None, dp_params=None, test=False):
data_dir = data_dir.split('|')
if len(data_dir) == 1:
data_dir = data_dir[0]
if isinstance(data_dir, list):
self._dps = [LabeledDataProviderTrans(d, img_size, num_colors, batch_range=batch_range,
init_epoch=init_epoch, init_batchnum=init_batchnum,
dp_params=dp_params, test=test) for d in data_dir]
else:
self._dps = None
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.num_colors = num_colors
self.img_size = img_size
@staticmethod
def get_batch_meta(data_dir):
if isinstance(data_dir, list):
bm = [DataProvider.get_batch_meta(d) for d in data_dir]
keys = bm[0].keys()
mdict = {}
for k in keys:
if k not in ['data_mean', 'num_vis']:
mdict[k] = bm[0][k]
mdict['num_vis'] = sum([b['num_vis'] for b in bm])
if 'data_mean' in bm[0]:
mdict['data_mean'] = n.concatenate([b['data_mean'] for b in bm])
return mdict
else:
return DataProvider.get_batch_meta(data_dir)
def get_out_img_size( self ):
return self.img_size
def get_out_img_depth( self ):
if isinstance(self.data_dir, list):
return self.num_colors * len(self._dps)
else:
return self.num_colors
def get_next_batch(self):
if isinstance(self.data_dir, list):
bs = [d.get_next_batch() for d in self._dps]
epoch = bs[0][0]
batch_num = bs[0][1]
labels = bs[0][2][1]
data = n.row_stack([b[2][0] for b in bs])
self.advance_batch()
return epoch, batch_num, [data, labels]
else:
epoch, batchnum, d = LabeledDataProvider.get_next_batch(self)
d['data'] = n.require(d['data'], dtype=n.single, requirements='C')
d['data'] = d['data'].T
d['data'] = n.require(d['data'], requirements='C')
d['labels'] = n.c_[n.require(d['labels'], dtype=n.single, requirements='C')]
return epoch, batchnum, [d['data'], d['labels']]
@staticmethod
def get_batch_nums(srcdir):
if isinstance(srcdir, list):
return DataProvider.get_batch_nums(srcdir[0])
else:
return DataProvider.get_batch_nums(srcdir)
class LabeledDummyDataProvider(DummyDataProvider):
def __init__(self, data_dim, num_classes=10, num_cases=7):
#self.data_dim = data_dim
self.batch_range = [1]
self.batch_meta = {'num_vis': data_dim,
'label_names': [str(x) for x in range(num_classes)],
'data_in_rows':True}
self.num_cases = num_cases
self.num_classes = num_classes
self.curr_epoch = 1
self.curr_batchnum = 1
self.batch_idx=0
self.data = None
def get_num_classes(self):
return self.num_classes
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
if self.data is None:
data = rand(self.num_cases, self.get_data_dims()).astype(n.single) # <--changed to rand
labels = n.require(n.c_[random_integers(0,self.num_classes-1,self.num_cases)], requirements='C', dtype=n.single)
self.data, self.labels = data, labels
else:
data, labels = self.data, self.labels
# print data.shape, labels.shape
        return epoch, batchnum, [data.T, labels.T]
def dldata_to_convnet_reformatting(stims, lbls):
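    """Reshape a stimulus batch from (batch, sz, sz[, nc]) row order into the
    (features, batch) column layout the convnet code expects, and reshape 1-d
    label vectors (or dicts of them) into (1, batch) row vectors."""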
if stims.ndim > 2:
img_sz = stims.shape[1]
batch_size = stims.shape[0]
if stims.ndim == 3:
new_s = (batch_size, img_sz**2)
stims = stims.reshape(new_s).T
else:
assert stims.ndim == 4
nc = stims.shape[3]
new_s = (nc * (img_sz**2), batch_size)
print(stims.shape)
stims = stims.transpose([3, 1, 2, 0]).reshape(new_s)
else:
stims = stims.T
if lbls is not None:
if hasattr(lbls, 'keys'):
labels = OrderedDict([])
for k in lbls:
lblk = lbls[k]
assert lblk.ndim == 1
lblk = lblk.reshape((1, lblk.shape[0]))
labels[k] = lblk
else:
assert lbls.ndim == 1
labels = lbls.reshape((1, lbls.shape[0]))
return {'data': stims, 'labels': labels}
else:
return {'data': stims}
class DLDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range, init_epoch=1,
init_batchnum=None, dp_params=None, test=False):
#load dataset and meta
self.replace_label = dp_params.get('replace_label', False)
modulename, attrname = dp_params['dataset_name']
module = importlib.import_module(modulename)
dataset_obj = getattr(module, attrname)
dataset_data = dp_params.get('dataset_data', None)
if dataset_data is not None:
dset = dataset_obj(data=dataset_data)
else:
dset = dataset_obj()
meta = self.meta = dset.meta
mlen = len(meta)
self.dp_params = dp_params
#default data location
if data_dir == '':
pstring = hashlib.sha1(repr(dp_params['preproc'])).hexdigest() + '_%d' % dp_params['batch_size']
data_dir = dset.home('convnet_batches', pstring)
#compute number of batches
mlen = len(meta)
batch_size = dp_params['batch_size']
num_batches = self.num_batches = int(math.ceil(mlen / float(batch_size)))
        batch_regex = re.compile(r'data_batch_([\d]+)')
imgs_mean = None
existing_batches = []
isf = 0
if os.path.exists(data_dir):
_L = os.listdir(data_dir)
existing_batches = [int(batch_regex.match(_l).groups()[0]) for _l in _L if batch_regex.match(_l)]
existing_batches.sort()
metafile = os.path.join(data_dir, 'batches.meta')
if existing_batches:
assert os.path.exists(metafile), 'Batches found but no metafile %s' % metafile
if os.path.exists(metafile):
            bmeta = cPickle.load(open(metafile, 'rb'))
ebatches = bmeta['existing_batches']
imgs_mean = bmeta['data_mean']
isf = bmeta['images_so_far']
#assertions checking that the things that need to be the same
#for these batches to make sense are in fact the same
assert dp_params['batch_size'] == bmeta['num_cases_per_batch'], (dp_params['batch_size'], bmeta['num_cases_per_batch'])
if 'dataset_name' in bmeta:
assert dp_params['dataset_name'] == bmeta['dataset_name'], (dp_params['dataset_name'], bmeta['dataset_name'])
if 'preproc' in bmeta:
#assert dp_params['preproc'] == bmeta['preproc'], (dp_params['preproc'], bmeta['preproc'])
pass
if 'dataset_data' in bmeta:
assert dataset_data == bmeta['dataset_data'], (dataset_data, bmeta['dataset_data'])
else:
ebatches = []
#assert existing_batches == ebatches, ('Expected batches', ebatches, 'found batches', existing_batches)
needed_batches = [_b for _b in batch_range if _b not in existing_batches]
if existing_batches:
print('Found batches: ', existing_batches)
print('Batches needed: ', needed_batches)
else:
print('data_dir %s does not exist, creating' % data_dir)
needed_batches = batch_range[:]
os.makedirs(data_dir)
if needed_batches or self.replace_label:
indset = self.indset = self.get_indset()
metacol = self.metacol = self.get_metacol()
if needed_batches:
#get stimarray (may be lazyarray)
#something about appearing to require uint8??
#dp_params['preproc']['dtype'] = 'uint8' #or assertion?
stimarray = dset.get_images(preproc=dp_params['preproc'])
#actually write out batches, while tallying img mean
for bnum, inds in enumerate(indset):
if bnum not in needed_batches:
continue
print('Creating batch %d' % bnum)
#get stimuli and put in the required format
stims = n.asarray(stimarray[inds])
if 'float' in repr(stims.dtype):
stims = n.uint8(n.round(255 * stims))
lbls = metacol[inds]
d = dldata_to_convnet_reformatting(stims, lbls)
d['ids'] = meta[inds]['id']
#add to the mean
if imgs_mean is None:
imgs_mean = n.zeros((d['data'].shape[0],))
                dlen = d['data'].shape[1]  # number of images in this batch
fr = isf / (isf + float(dlen))
imgs_mean *= fr
imgs_mean += (1 - fr) * d['data'].mean(axis=1)
isf += dlen
#write out batch
outdict = {'batch_label': 'batch_%d' % bnum,
'labels': d['labels'],
'data': d['data'],
'ids': d['ids']
}
outpath = os.path.join(data_dir, 'data_batch_%d' % bnum)
n.save(outpath, outdict)
#write out batches.meta
existing_batches += needed_batches
existing_batches.sort()
outdict = {'num_cases_per_batch': batch_size,
'label_names': self.labels_unique,
'num_vis': d['data'].shape[0],
'data_mean': imgs_mean,
'existing_batches': existing_batches,
'images_so_far': isf,
'dataset_name': dp_params['dataset_name'],
'dataset_data': dataset_data,
'preproc': dp_params['preproc']}
            with open(os.path.join(data_dir, 'batches.meta'), 'wb') as _f:
cPickle.dump(outdict, _f)
LabeledDataProvider.__init__(self, data_dir, batch_range,
init_epoch, init_batchnum, dp_params, test)
if self.replace_label:
self.batch_meta['label_names'] = self.labels_unique
else:
self.labels_unique = self.batch_meta['label_names']
def get_num_classes(self, name=None):
if name is None or not hasattr(self.labels_unique, 'keys'):
return len(self.labels_unique)
else:
return len(self.labels_unique[name])
def get_next_batch(self):
t0 = systime.time()
epoch, batchnum, d = LabeledDataProvider.get_next_batch(self)
t1 = systime.time()
#d['data'] = n.require(d['data'].copy(order='A'), requirements='C')
d['data'] = n.require(d['data'], requirements='C')
t2 = systime.time()
if hasattr(d['labels'], 'keys'):
for k in d['labels']:
d['labels'][k] = n.c_[n.require(d['labels'][k], dtype=n.single)]
else:
d['labels'] = n.c_[n.require(d['labels'], dtype=n.single)]
t3 = systime.time()
print('timing: nextbatch %.4f order %.4f labels %.4f' % (t1 - t0, t2 - t1, t3 - t2))
return epoch, batchnum, d
def get_batch(self, batch_num):
dic = LabeledDataProvider.get_batch(self, batch_num, mode='numpy')
if self.replace_label:
metacol = self.metacol
indset = self.indset
lbls = metacol[indset[batch_num]]
assert lbls.ndim == 1
labels = lbls.reshape((1, lbls.shape[0]))
dic['labels'] = labels
return dic
def get_metacol(self):
meta_attr = self.dp_params['meta_attribute']
if isinstance(meta_attr, list):
meta_attr = map(str, meta_attr)
metacol = OrderedDict([])
self.labels_unique = OrderedDict([])
for ma in meta_attr:
mcol, lu = self.get_metacol_base(ma)
metacol[ma] = mcol
self.labels_unique[ma] = lu
else:
meta_attr = str(meta_attr)
metacol, labels_unique = self.get_metacol_base(meta_attr)
self.labels_unique = labels_unique
return metacol
def get_metacol_base(self, ma):
assert isinstance(ma, str), ma
metacol = self.meta[ma][:]
mlen = len(metacol)
try:
metacol + 1
labels_unique = None
except TypeError:
labels_unique = n.unique(metacol)
labels = n.zeros((mlen, ), dtype='int')
for label in range(len(labels_unique)):
labels[metacol == labels_unique[label]] = label
metacol = labels
return metacol, labels_unique
def get_indset(self):
dp_params = self.dp_params
perm_type = dp_params.get('perm_type')
num_batches = self.num_batches
batch_size = dp_params['batch_size']
meta = self.meta
if perm_type is not None:
mlen = len(self.meta)
if perm_type == 'random':
perm_seed = dp_params.get('perm_seed', 0)
rng = n.random.RandomState(seed=perm_seed)
perm = rng.permutation(mlen)
indset = [perm[batch_size * bidx: batch_size * (bidx + 1)] for bidx in range(num_batches)]
elif perm_type == 'ordered_random':
perm_seed = dp_params.get('perm_seed', 0)
rng = n.random.RandomState(seed=perm_seed)
perm = rng.permutation(mlen)
submeta = meta[dp_params['perm_order']].copy()
submeta = submeta[perm]
s = submeta.argsort(order=dp_params['perm_order'])
new_perm = perm[s]
indset = [new_perm[batch_size * bidx: batch_size * (bidx + 1)] for bidx in range(num_batches)]
elif perm_type == 'query_random':
perm_seed = dp_params.get('perm_seed', 0)
rng = n.random.RandomState(seed=perm_seed)
query = dp_params['perm_query']
qf = get_lambda_from_query_config(query)
                inds = n.array(list(map(qf, meta)))
indsf = n.invert(inds).nonzero()[0]
indst = inds.nonzero()[0]
inds1 = indst[rng.permutation(len(indst))]
inds2 = indsf[rng.permutation(len(indsf))]
inds = n.concatenate([inds1, inds2])
indset = [inds[batch_size * bidx: batch_size * (bidx + 1)] for bidx in range(num_batches)]
else:
                raise ValueError('Unknown permutation type.')
else:
indset = [slice(batch_size * bidx, batch_size * (bidx + 1))
for bidx in range(num_batches)]
return indset
def get_perm(self):
dp_params = self.dp_params
perm_type = dp_params.get('perm_type')
meta = self.meta
mlen = len(self.meta)
if perm_type == 'random':
perm_seed = dp_params.get('perm_seed', 0)
rng = n.random.RandomState(seed=perm_seed)
return rng.permutation(mlen), perm_type + '_' + str(perm_seed)
else:
            raise ValueError('Unknown permutation type.')
class DLDataProvider2(DLDataProvider):
def __init__(self, data_dir, batch_range, init_epoch=1,
init_batchnum=None, dp_params=None, test=False,
read_mode='r', cache_type='memmap'):
#load dataset and meta
modulename, attrname = dp_params['dataset_name']
module = importlib.import_module(modulename)
dataset_obj = getattr(module, attrname)
dataset_data = dp_params.get('dataset_data', None)
if dataset_data is not None:
dset = dataset_obj(data=dataset_data)
else:
dset = dataset_obj()
meta = self.meta = dset.meta
mlen = len(meta)
self.dp_params = dp_params
#compute number of batches
mlen = len(meta)
batch_size = self.batch_size = dp_params['batch_size']
num_batches = self.num_batches = int(math.ceil(mlen / float(batch_size)))
num_batches_for_meta = self.num_batches_for_meta = dp_params['num_batches_for_mean']
perm_type = dp_params.get('perm_type')
images = dset.get_images(preproc=dp_params['preproc'])
if hasattr(images, 'dirname'):
base_dir, orig_name = os.path.split(images.dirname)
else:
base_dir = dset.home('cache')
orig_name = 'images_cache_' + get_id(dp_params['preproc'])
perm, perm_id = self.get_perm()
reorder = Reorder(images)
lmap = larray.lmap(reorder, perm, f_map = reorder)
if cache_type == 'hdf5':
new_name = orig_name + '_' + perm_id + '_hdf5'
print('Getting stimuli from cache hdf5 at %s/%s ' % (base_dir, new_name))
self.stimarray = larray.cache_hdf5(lmap,
name=new_name,
basedir=base_dir,
mode=read_mode)
elif cache_type == 'memmap':
new_name = orig_name + '_' + perm_id + '_memmap'
print('Getting stimuli from cache memmap at %s/%s ' % (base_dir, new_name))
self.stimarray = larray.cache_memmap(lmap,
name=new_name,
basedir=base_dir)
metacol = self.get_metacol()
if hasattr(metacol, 'keys'):
for k in metacol:
metacol[k] = metacol[k][perm]
self.metacol = metacol
else:
self.metacol = metacol[perm]
#default data location
if data_dir == '':
pstring = hashlib.sha1(repr(dp_params['preproc'])).hexdigest() + '_%d' % dp_params['batch_size']
data_dir = dset.home('convnet_batches', pstring)
if not os.path.exists(data_dir):
print('data_dir %s does not exist, creating' % data_dir)
os.makedirs(data_dir)
metafile = os.path.join(data_dir, 'batches.meta')
if os.path.exists(metafile):
print('Meta file at %s exists, loading' % metafile)
            bmeta = cPickle.load(open(metafile, 'rb'))
#assertions checking that the things that need to be the same
#for these batches to make sense are in fact the same
assert dp_params['batch_size'] == bmeta['num_cases_per_batch'], (dp_params['batch_size'], bmeta['num_cases_per_batch'])
if 'dataset_name' in bmeta:
assert dp_params['dataset_name'] == bmeta['dataset_name'], (dp_params['dataset_name'], bmeta['dataset_name'])
if 'preproc' in bmeta:
assert dp_params['preproc'] == bmeta['preproc'], (dp_params['preproc'], bmeta['preproc'])
#pass
if 'dataset_data' in bmeta:
assert dataset_data == bmeta['dataset_data'], (dataset_data, bmeta['dataset_data'])
else:
print('Making batches.meta at %s ...' % metafile)
imgs_mean = None
isf = 0
for bn in range(num_batches_for_meta):
print('Meta batch %d' % bn)
#get stimuli and put in the required format
print(self.stimarray.shape, batch_size)
stims = self.stimarray[bn * batch_size: (bn + 1) * batch_size]
print("Shape", stims.shape)
stims = n.asarray(stims)
print('Got stims', stims.shape, stims.nbytes)
if 'float' in repr(stims.dtype):
stims = n.uint8(n.round(255 * stims))
print('Converted to uint8', stims.nbytes)
d = dldata_to_convnet_reformatting(stims, None)
#add to the mean
if imgs_mean is None:
imgs_mean = n.zeros((d['data'].shape[0],))
                dlen = d['data'].shape[1]  # number of images in this batch
fr = isf / (isf + float(dlen))
imgs_mean *= fr
imgs_mean += (1 - fr) * d['data'].mean(axis=1)
isf += dlen
#write out batches.meta
outdict = {'num_cases_per_batch': batch_size,
'label_names': self.labels_unique,
'num_vis': d['data'].shape[0],
'data_mean': imgs_mean,
'dataset_name': dp_params['dataset_name'],
'dataset_data': dataset_data,
'preproc': dp_params['preproc']}
with open(metafile, 'wb') as _f:
cPickle.dump(outdict, _f)
self.batch_meta = cPickle.load(open(metafile, 'rb'))
LabeledDataProvider.__init__(self, data_dir, batch_range,
init_epoch, init_batchnum, dp_params, test)
def get_batch(self, batch_num):
print('bn', batch_num)
batch_size = self.batch_size
inds = slice(batch_num * batch_size, (batch_num + 1) * batch_size)
print('got slice')
stims = n.asarray(self.stimarray[inds])
print('got stims')
if 'float' in repr(stims.dtype):
stims = n.uint8(n.round(255 * stims))
print('to uint8')
if hasattr(self.metacol, 'keys'):
lbls = OrderedDict([(k, self.metacol[k][inds]) for k in self.metacol])
else:
lbls = self.metacol[inds]
print('got meta')
d = dldata_to_convnet_reformatting(stims, lbls)
print('done')
return d
class Reorder(object):
def __init__(self, X):
self.X = X
def __call__(self, inds):
mat = self.X[inds]
if 'float' in repr(mat.dtype):
mat = n.uint8(n.round(255 * mat))
if mat.ndim < self.X.ndim:
assert mat.ndim == self.X.ndim - 1, (mat.ndim, self.X.ndim)
assert mat.shape == self.X.shape[1:], (mat.shape, self.X.shape)
mat = mat.reshape((1, ) + mat.shape)
return dldata_to_convnet_reformatting(mat, None)['data'].T
def rval_getattr(self, attr, objs=None):
if attr == 'shape':
xs = self.X.shape
return (n.prod(xs[1:]), )
elif attr == 'dtype':
return 'uint8'
else:
return getattr(self.X, attr)
#########MapProvider
class DLDataMapProvider(DLDataProvider):
"""
    Same interface as DLDataProvider2, but allows an arbitrary number of
    image-shaped maps. This is specified by:
    * dp_params["map_methods"], a list of names of methods for getting maps
    from the dataset object. This assumes that each of the map-getting
    methods takes an argument "preproc", just like the standard get_images.
    * dp_params["map_preprocs"], a list of preprocs to apply in getting the maps.
"""
def __init__(self, data_dir, batch_range, init_epoch=1,
init_batchnum=None, dp_params=None, test=False,
read_mode='r', cache_type='memmap'):
        if batch_range is None:
batch_range = DataProvider.get_batch_nums(data_dir)
if init_batchnum is None or init_batchnum not in batch_range:
init_batchnum = batch_range[0]
self.data_dir = data_dir
self.batch_range = batch_range
self.curr_epoch = init_epoch
self.curr_batchnum = init_batchnum
self.dp_params = dp_params
self.data_dic = None
self.test = test
self.batch_idx = batch_range.index(init_batchnum)
#load dataset and meta
modulename, attrname = dp_params['dataset_name']
module = importlib.import_module(modulename)
dataset_obj = getattr(module, attrname)
dataset_data = dp_params.get('dataset_data', None)
if dataset_data is not None:
dset = self.dset = dataset_obj(data=dataset_data)
else:
dset = self.dset = dataset_obj()
meta = self.meta = dset.meta
mlen = len(meta)
self.dp_params = dp_params
#compute number of batches
mlen = len(meta)
batch_size = self.batch_size = dp_params['batch_size']
self.num_batches = int(math.ceil(mlen / float(batch_size)))
self.num_batches_for_meta = dp_params['num_batches_for_mean']
perm, perm_id = self.get_perm()
self.metacol = self.get_metacol()[perm]
map_methods = self.map_methods = dp_params['map_methods']
map_preprocs = self.map_preprocs = dp_params['map_preprocs']
assert hasattr(map_methods, '__iter__')
assert hasattr(map_preprocs, '__iter__')
assert len(map_methods) == len(map_preprocs), (len(map_methods) , len(map_preprocs))
map_list = [getattr(dset, mname)(preproc=pp)
for mname, pp in zip(map_methods, map_preprocs)]
self.map_shapes = [m.shape for m in map_list]
mnames = self.mnames = [mn + '_' + get_id(pp) for mn, pp in zip(map_methods, map_preprocs)]
assert data_dir != ''
self.data_dir = data_dir
if not os.path.exists(data_dir):
print('data_dir %s does not exist, creating' % data_dir)
os.makedirs(data_dir)
self.stimarraylist = []
basedir = self.dset.home('cache')
self.batch_meta_dict = {}
        for marr, mname, pp in zip(map_list, mnames, map_preprocs):  # avoid shadowing builtin `map`
            self.stimarraylist.append(get_stimarray(marr, mname, perm, perm_id, cache_type, basedir, read_mode=read_mode))
            self.make_batch_meta(mname, self.stimarraylist[-1], pp)
def get_num_classes(self, dataIdx=None):
if dataIdx is None or not hasattr(self.labels_unique, 'keys'):
return len(self.labels_unique)
else:
name = self.labels_unique.keys()[dataIdx]
return len(self.labels_unique[name])
def get_next_batch(self):
epoch, batchnum, d = LabeledDataProvider.get_next_batch(self)
for mn in self.mnames:
d[mn] = n.require(d[mn], requirements='C')
d['labels'] = n.c_[n.require(d['labels'], dtype=n.single)]
return epoch, batchnum, d
def get_batch(self, batch_num):
batch_size = self.batch_size
inds = slice(batch_num * batch_size, (batch_num + 1) * batch_size)
lbls = self.label_reformatting(self.metacol[inds])
return_dict = {'labels': lbls}
for mname, marray in zip(self.mnames, self.stimarraylist):
return_dict[mname] = n.asarray(marray[inds]).T
return return_dict
def make_batch_meta(self, mname, marray, pp):
batch_size = self.batch_size
metafile = os.path.join(self.data_dir, mname + '.meta')
dp_params = self.dp_params
dataset_data = dp_params.get('dataset_data', None)
if os.path.exists(metafile):
print('Meta file at %s exists, loading' % metafile)
            bmeta = cPickle.load(open(metafile, 'rb'))
#assertions checking that the things that need to be the same
#for these batches to make sense are in fact the same
assert dp_params['batch_size'] == bmeta['num_cases_per_batch'], (dp_params['batch_size'], bmeta['num_cases_per_batch'])
if 'dataset_name' in bmeta:
assert dp_params['dataset_name'] == bmeta['dataset_name'], (dp_params['dataset_name'], bmeta['dataset_name'])
if 'preproc' in bmeta:
assert pp == bmeta['preproc'], (pp, bmeta['preproc'])
#pass
if 'dataset_data' in bmeta:
assert dataset_data == bmeta['dataset_data'], (dataset_data, bmeta['dataset_data'])
assert bmeta['mname'] == mname, (bmeta['mname'], mname)
else:
print('Making %s meta at %s ...' % (mname, metafile))
imgs_mean = None
isf = 0
for bn in range(self.num_batches_for_meta):
print('Meta batch %d' % bn)
stims = marray[bn * batch_size: (bn + 1) * batch_size]
stims = n.asarray(stims).T
#add to the mean
if imgs_mean is None:
imgs_mean = n.zeros((stims.shape[0],))
                dlen = stims.shape[1]  # number of images in this batch
fr = isf / (isf + float(dlen))
imgs_mean *= fr
imgs_mean += (1 - fr) * stims.mean(axis=1)
isf += dlen
#write out batches.meta
outdict = {'num_cases_per_batch': batch_size,
'mname': mname,
'num_vis': stims.shape[0],
'data_mean': imgs_mean,
'dataset_name': dp_params['dataset_name'],
'dataset_data': dataset_data,
'preproc': pp}
with open(metafile, 'wb') as _f:
cPickle.dump(outdict, _f)
self.batch_meta_dict[mname] = cPickle.load(open(metafile, 'rb'))
def label_reformatting(self, lbls):
assert lbls.ndim == 1
labels = lbls.reshape((1, lbls.shape[0]))
return labels
def map_reformatting(stims):
img_sz = stims.shape[1]
batch_size = stims.shape[0]
if stims.ndim == 3:
new_s = (batch_size, img_sz**2)
stims = stims.reshape(new_s).T
else:
assert stims.ndim == 4
nc = stims.shape[3]
new_s = (nc * (img_sz**2), batch_size)
print(stims.shape)
stims = stims.transpose([3, 1, 2, 0]).reshape(new_s)
return stims
class Reorder2(object):
def __init__(self, X):
self.X = X
def __call__(self, inds):
mat = self.X[inds]
if mat.ndim < self.X.ndim:
assert mat.ndim == self.X.ndim - 1, (mat.ndim, self.X.ndim)
assert mat.shape == self.X.shape[1:], (mat.shape, self.X.shape)
mat = mat.reshape((1, ) + mat.shape)
if 'float' in repr(mat.dtype):
mat = n.uint8(n.round(255 * mat))
return map_reformatting(mat).T
def rval_getattr(self, attr, objs=None):
if attr == 'shape':
xs = self.X.shape
return (n.prod(xs[1:]), )
elif attr == 'dtype':
return 'uint8'
else:
return getattr(self.X, attr)
def get_stimarray(marray, mname, perm, perm_id, cache_type, base_dir, read_mode='r'):
reorder = Reorder2(marray)
lmap = larray.lmap(reorder, perm, f_map = reorder)
if cache_type == 'hdf5':
new_name = mname + '_' + perm_id + '_hdf5'
print('Getting stimuli from cache hdf5 at %s/%s ' % (base_dir, new_name))
return larray.cache_hdf5(lmap,
name=new_name,
basedir=base_dir,
mode=read_mode)
elif cache_type == 'memmap':
new_name = mname + '_' + perm_id + '_memmap'
print('Getting stimuli from cache memmap at %s/%s ' % (base_dir, new_name))
return larray.cache_memmap(lmap,
name=new_name,
basedir=base_dir)
####GENERAL Stuff
dp_types = {"dummy-n": "Dummy data provider for n-dimensional data",
"dummy-labeled-n": "Labeled dummy data provider for n-dimensional data"}
dp_classes = {"dummy-n": DummyDataProvider,
"dummy-labeled-n": LabeledDummyDataProvider}
def get_lambda_from_query_config(q):
"""turns a dictionary specificying a mongo query (basically)
into a lambda for subsetting a data table
"""
if hasattr(q, '__call__'):
return q
    elif q is None:
return lambda x: True
else:
return lambda x: all([x[k] in v for k, v in q.items()])
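# Example: get_lambda_from_query_config({'category': ['dog', 'cat']}) yields a
# predicate keeping rows x where x['category'] is 'dog' or 'cat'; passing None
# keeps every row, and a callable argument is returned unchanged.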
class DataProviderException(Exception):
pass
def get_id(l):
return hashlib.sha1(repr(l)).hexdigest()
| [
"[email protected]"
]
| |
2dc9330666f5fbcb6526ba3adaba2c90af3de318 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03032/s021397547.py | d664139ae2e131d0ee24b03e0e38925747d285b4 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from bisect import bisect_left
N,K = map(int,input().split())
V = list(map(int,input().split()))
vmax = -10**9
for n in range(min(N,K)+1):
A = V[:n]
for m in range(min(N,K)-n+1):
B = V[N-m:]
B = B+A
B = sorted(B)
ind = bisect_left(B,0)
k = min(ind,K-n-m)
v = sum(B[k:])
vmax = max(vmax,v)
print(vmax) | [
"[email protected]"
]
| |
c71cb016fd30053e434a2b42e23a96a22cca55b8 | 8bada7f2a894b1cc8d7e12b56c7df7d9cbb236e2 | /asynccsv.py | be7d2112ac76df01415362aea15f6c89c08f4049 | [
"MIT"
]
| permissive | mattmiller87/RLScrape | 34ac573fe1c44eb0066b41040f1f930f872f1d2c | 6848d68d5997eb816b15663dbfe36066d36621ac | refs/heads/master | 2021-07-11T02:31:33.331638 | 2020-12-17T23:47:20 | 2020-12-17T23:47:20 | 225,527,126 | 3 | 4 | MIT | 2020-12-17T20:38:49 | 2019-12-03T03:59:49 | Python | UTF-8 | Python | false | false | 9,343 | py | #!/usr/bin/python3
import requests
import csv
import datetime
import argparse
import os
import re
from tqdm import tqdm as pbar
import asyncio
from aioify import aioify
from setup_logging import logger
from rlscrape import Webscrape
readibletime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") # used for csvWrite
sem = asyncio.Semaphore(50) # control how many urls are being retrieved at a time
class csvIO:
'''I/O for CSV'''
def __init__(self):
checkFolders()
self.csvinput = results.input
self.csvoutput = results.output
# self.seasons = results.seasons
self.playlists = results.playlists
self.latestseason = '16' #need a better way to update this, perhaps dynamically?
self.header = []
tierchoices = ['1T','2T','TournamentT','3T','All']
tiermatch = [item for item in tierchoices if item in self.playlists]
if len(tiermatch) > 0:
self.tiertf = True
else:
self.tiertf = False
def areadCSVLinks(self):
'''read input CSV file. File MUST be structured either: preferred = *kwargs,Name,Link || optional = *kwargs,Link'''
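        # Example row (all values hypothetical):
        #   TeamName,PlayerName,https://rocketleague.tracker.network/profile/steam/76561198000000000
        # parses to kwargs ['TeamName'], name 'PlayerName',
        # platform 'steam', gamertag '76561198000000000'.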
with open(self.csvinput, 'r', newline='', encoding='latin-1') as csvread:
reader = csv.reader(csvread)
playerdict = {} # define a basic dict to pass csv information into
i = 0
for row in reader:
playerdict[i] = {}
if i < 1: # define headers
self.header = [str(i+1) for i in range(len(row))] # handle kwargs as header - assign number
self.header[-2] = "Name"
self.header[-1] = "Link"
name,link = row[-2:] # select last two items
if "overview" in link:
link = link.replace("/overview","")
try:
gamertag = link.split('/')[-1] # last item in link is gamertag
platform = link.split('/')[-2] # item before gamertag is platform
except IndexError:
logger.error("Gamertag:%(name)s Link:%(link)s is not formatted properly" % locals())
else:
playerdict[i][gamertag] = {} # define dict for each gamertag and values for that gamertag
a = 0
for item in row: # handle kwargs
if len(row) - a > 2:
playerdict[i][gamertag][a] = item
a += 1
if "ps4" == platform or "ps" == platform:
platform = "psn"
if "xbox" == platform:
platform = "xbl"
playerdict[i][gamertag]['platform'] = platform
playerdict[i][gamertag]['name'] = name
playerdict[i][gamertag]['link'] = link
i += 1
return playerdict
async def aRetrieveData(self,gamertag,gamerdict):
platform = gamerdict['platform']
name = gamerdict['name']
link = gamerdict['link']
scrape = Webscrape()
newrow = []
aioretrieve = aioify(obj=scrape.retrieveDataRLTracker, name='aioretrieve')
data = await aioretrieve(gamertag=gamertag,platform=platform)
newrow = self._dictToList(data)
a = 0
for k,v in gamerdict.items(): # handle kwargs
if a == k:
newrow.insert(a,v)
a += 1
newrow.insert(a,name)
newrow.insert(a+1,link)
return newrow
def awriteCSV(self,newrows):
'''write list of data to outputCSV file'''
season = self.latestseason
header_dict = {
'1': "S%s_1s_MMR" % (season), '1GP': "S%s_1s_GamesPlayed" % (season), '1T': "S%s_1s_Tier" % (season),
'2': "S%s_2s_MMR" % (season), '2GP': "S%s_2s_GamesPlayed" % (season), '2T': "S%s_2s_Tier" % (season),
'Tournament': "S%s_Tournament_MMR" % (season), 'TournamentGP': "S%s_Tournament_GamesPlayed" % (season), '3ST': "S%s_Solo3s_Tier" % (season),
'3': "S%s_3s_MMR" % (season), '3GP': "S%s_3s_GamesPlayed" % (season), '3T': "S%s_3s_Tier" % (season),
}
if "All" in self.playlists:
self.header.extend(header_dict[k] for k in header_dict)
else:
self.header.extend(header_dict[k] for k in header_dict if k in self.playlists)
with open(self.csvoutput, 'w',newline='', encoding='latin-1') as csvwrite:
w = csv.writer(csvwrite, delimiter=',')
w.writerow(self.header)
for newrow in newrows:
w.writerow(newrow)
def _dictToList(self,dictdata):
'''Take json formatted dictionary of playerdata and create a list which is better formatted for csv
this is specifically designed for RSC'''
tiertf = self.tiertf
newdict = {}
for gamertag,gdata in dictdata.items():
for season,sdata in gdata.items():
newdict[season] = {
'1': None, '1GP': None, '1T' : None,
'2': None, '2GP': None, '2T' : None,
'Tournament': None, 'TournamentGP': None, 'TournamentT' : None,
'3': None, '3GP': None, '3T' : None
}
for playlist,pdata in sdata.items():
if playlist in 'Ranked Duel 1v1' and pdata is not None and pdata.items():
newdict[season]['1'] = pdata['MMR']
newdict[season]['1GP'] = pdata['Games Played']
if tiertf:
newdict[season]['1T'] = pdata['Tier Number']
if playlist in 'Ranked Doubles 2v2' and pdata is not None and pdata.items():
newdict[season]['2'] = pdata['MMR']
newdict[season]['2GP'] = pdata['Games Played']
if tiertf:
newdict[season]['2T'] = pdata['Tier Number']
if playlist in 'Tournament' and pdata is not None and pdata.items():
newdict[season]['Tournament'] = pdata['MMR']
newdict[season]['TournamentGP'] = pdata['Games Played']
if tiertf:
newdict[season]['TournamentT'] = pdata['Tier Number']
if playlist in 'Ranked Standard 3v3' and pdata is not None and pdata.items():
newdict[season]['3'] = pdata['MMR']
newdict[season]['3GP'] = pdata['Games Played']
if tiertf:
newdict[season]['3T'] = pdata['Tier Number']
newlist = []
for dictseason,v in newdict.items():
if "All" in self.playlists:
newlist.extend([v[k] for k in v])
else:
newlist.extend([v[k] for k in v if k in self.playlists])
return newlist
    async def _safe_download(self, gamertag, gamerdict):
        async with sem: # only allow so many retrieve requests at a time - helps with progress bar too
            return await self.aRetrieveData(gamertag, gamerdict)
def checkFolders():
if not os.path.exists("Scrapes"):
logger.info("Creating Scrapes folder...")
os.makedirs("Scrapes")
async def singleRun():
logger.info("Start for csv input:%s" % (results.input))
inputoutput = csvIO() # initialize class
datadict = inputoutput.areadCSVLinks() # read the csv file
tasks = []
for i,idict in datadict.items():
for k,v in idict.items():
task = loop.create_task(inputoutput._safe_download(k,v)) # start the retrieve process
tasks.append(task)
responses = []
for task in pbar(asyncio.as_completed(tasks),desc='retrieve',total=len(tasks)):
responses.append(await task)
inputoutput.awriteCSV(responses)
logger.info("Finish for csv output:%s" % (results.output))
if __name__ == "__main__":
'''Run locally to this script'''
#Use comandline arguments for input
#edit the default parameter to change options manually without commandline options
parser = argparse.ArgumentParser(description='Scrape Commandline Options', add_help=True)
parser.add_argument('-i', action='store', dest='input', help='Input CSV to use', default='example.csv')
parser.add_argument('-o', action='store', dest='output', help='Output CSV to use', default='Scrapes/%s_RLTN.csv' % (readibletime)) #RLTN = RocketLeague Tracker Network
###
# no longer can search for multiple seasons - this may be revisited at some point
#parser.add_argument('-s', action='store', dest='seasons', help='retrieve for season(s) defined. Example: 8 9 11', nargs='+', default=['14']) #need a better way to update this, perhaps dynamically?
##
    parser.add_argument('-p', action='store', dest='playlists', help='playlist options. Example: 1 2 3 3T', choices=("1","2","Tournament","3","1GP","2GP","TournamentGP","3GP","1T","2T","TournamentT","3T","All"), nargs='+', default=['1','1GP','2','2GP','Tournament','TournamentGP','3','3GP'])
results = parser.parse_args()
loop = asyncio.get_event_loop()
loop.run_until_complete(singleRun())
loop.close() | [
"[email protected]"
]
| |
9aff4776e7a2ddf4284e16a6e8a0267f27c5ea27 | 1cb7aeb570630c9743a5b0dc7a254197d26016de | /py/testdir_ec2_only/test_parse_syn_s3n_thru_hdfs.py | 877490a8fb08c2a833f6b77182c24885c8f4cc5c | [
"Apache-2.0"
]
| permissive | devinshields/h2o | 576dbebc663265190cfca3fe8341b10d2243213c | 9d8b782e5cb7f38f3cb0086fef15ecec7d9282d5 | refs/heads/master | 2021-01-18T04:57:34.703132 | 2013-08-27T00:57:51 | 2013-08-27T00:57:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,071 | py | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts
import h2o_browse as h2b
import h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
# assume we're at 0xdata with it's hdfs namenode
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
# all hdfs info is done thru the hdfs_config michal's ec2 config sets up?
h2o_hosts.build_cloud_with_hosts(1,
# this is for our amazon ec hdfs
# see https://github.com/0xdata/h2o/wiki/H2O-and-s3n
hdfs_name_node='10.78.14.235:9000',
hdfs_version='0.20.2')
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_parse_syn_s3n_thru_hdfs(self):
# I put these file copies on s3 with unique suffixes
# under this s3n "path"
csvFilename = "*_10000x200*"
trialMax = 1
timeoutSecs = 500
URI = "s3n://home-0xdiag-datasets/syn_datasets"
s3nKey = URI + "/" + csvFilename
for trial in range(trialMax):
# since we delete the key, we have to re-import every iteration
# s3n URI thru HDFS is not typical.
importHDFSResult = h2o.nodes[0].import_hdfs(URI)
s3nFullList = importHDFSResult['succeeded']
### print "s3nFullList:", h2o.dump_json(s3nFullList)
self.assertGreater(len(s3nFullList),1,"Didn't see more than 1 files in s3n?")
key2 = "syn_datasets_" + str(trial) + ".hex"
print "Loading s3n key: ", s3nKey, 'thru HDFS'
start = time.time()
parseKey = h2o.nodes[0].parse(s3nKey, key2,
timeoutSecs=500, retryDelaySecs=10, pollTimeoutSecs=60)
elapsed = time.time() - start
print s3nKey, 'parse time:', parseKey['response']['time']
print "parse result:", parseKey['destination_key']
print "Trial #", trial, "completed in", elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
inspect = h2o_cmd.runInspect(None, parseKey['destination_key'])
print "\n" + key2 + \
" num_rows:", "{:,}".format(inspect['num_rows']), \
" num_cols:", "{:,}".format(inspect['num_cols'])
print "Deleting key in H2O so we get it from s3n (if ec2) or nfs again.", \
"Otherwise it would just parse the cached key."
storeView = h2o.nodes[0].store_view()
### print "storeView:", h2o.dump_json(storeView)
print "BROKE: we can't delete keys with a pattern match yet..this fails"
print "So we only do 1 trial and don't delete"
# print "Removing", s3nKey
# removeKeyResult = h2o.nodes[0].remove_key(key=s3nKey)
if __name__ == '__main__':
h2o.unit_main()
| [
"[email protected]"
]
| |
d6664e32d60cee4b037eec8832a8a5700d57d63a | e81576012330e6a6024d14f3e241f88ca34b73cd | /python_code/vnev/Lib/site-packages/jdcloud_sdk/services/resourcetag/models/ResourceReqVo.py | d520231ef4b8a9e9d0ce52cfcf8615761daffcb6 | [
"MIT"
]
| permissive | Ureimu/weather-robot | eba6a84147755aa83c941a306bac1a7c4e95e23e | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | refs/heads/master | 2021-01-15T07:23:42.274413 | 2020-03-23T02:30:19 | 2020-03-23T02:30:19 | 242,912,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class ResourceReqVo(object):
def __init__(self, serviceCodes=None, resourceIds=None, tagFilters=None, orderCondition=None, descOrAsc=None, pageSize=None, currentPage=None):
"""
        :param serviceCodes: (Optional) List of product line names.
The product line names supported by the tag system are as follows:
- vm disk sqlserver es mongodb ip
- memcached redis drds rds database db_ro
- percona percona_ro mariadb mariadb_ro pg cdn
- nativecontainer pod zfs jqs kubernetesNodegroup jcq
        :param resourceIds: (Optional) List of resource IDs
        :param tagFilters: (Optional) List of tag filters
        :param orderCondition: (Optional) Field to order the results by
        :param descOrAsc: (Optional) Sort direction; valid values are ASC, DESC
        :param pageSize: (Optional) Number of records per page
        :param currentPage: (Optional) Current page number
"""
self.serviceCodes = serviceCodes
self.resourceIds = resourceIds
self.tagFilters = tagFilters
self.orderCondition = orderCondition
self.descOrAsc = descOrAsc
self.pageSize = pageSize
self.currentPage = currentPage
| [
"[email protected]"
]
| |
b6b87d81e355a4f8bff6abb5d8f6e610fc0bb9d5 | 68271a37c3c4dd3d31b24c0cddbf574472f9f6a5 | /backend.py | 7e19cee00e6f11342bb1cc90a70ae13c0cee22f0 | [
"MIT"
]
| permissive | LSaldyt/automata | 8bcbb269bdfdf01803d66b77eb31be0a7eddb83b | ff0ba058f087fbcd7958866019b4b7cb43e924bd | refs/heads/master | 2020-04-28T19:01:49.703783 | 2019-04-22T21:06:01 | 2019-04-22T21:06:01 | 175,497,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
def cuboid(o, size=(1,1,1)):
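    """Return the 6 quad faces of an axis-aligned cuboid anchored at corner o
    and scaled by size, as a float array of shape (6, 4, 3)."""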
X = [[[0, 1, 0], [0, 0, 0], [1, 0, 0], [1, 1, 0]],
[[0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0]],
[[1, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1]],
[[0, 0, 1], [0, 0, 0], [0, 1, 0], [0, 1, 1]],
[[0, 1, 0], [0, 1, 1], [1, 1, 1], [1, 1, 0]],
[[0, 1, 1], [0, 0, 1], [1, 0, 1], [1, 1, 1]]]
X = np.array(X).astype(float)
for i in range(3):
X[:,:,i] *= size[i]
X += np.array(o)
return X
def renderCubes(positions, sizes=None, colors=None, **kwargs):
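    """Build a single Poly3DCollection of cubes at the given positions; sizes
    and colors default to unit cubes in color "C0", and extra kwargs are
    forwarded to Poly3DCollection."""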
if not isinstance(sizes,(list,np.ndarray)): sizes=[(1,1,1)]*len(positions)
if not isinstance(colors,(list,np.ndarray)): colors=["C0"]*len(positions)
g = []
for p,s,c in zip(positions,sizes,colors):
g.append( cuboid(p, size=s) )
return Poly3DCollection(np.concatenate(g),
facecolors=np.repeat(colors,6, axis=0), **kwargs)
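# Minimal usage sketch; the positions, edge color and axis limits below are
# arbitrary example values, not part of the original script.
if __name__ == '__main__':
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    pc = renderCubes([(0, 0, 0), (2, 0, 0), (0, 2, 1)], edgecolor='k')
    ax.add_collection3d(pc)
    ax.set_xlim(0, 4); ax.set_ylim(0, 4); ax.set_zlim(0, 4)
    plt.show()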
| [
"[email protected]"
]
| |
afbc0ea56e7cb155afec46f10c5e11b4625c3058 | abad82a1f487c5ff2fb6a84059a665aa178275cb | /Codewars/7kyu/so-easy-charge-time-calculation/Python/test.py | 0c7363e56959f0d28c875a4cc5375e8de39a6d2e | [
"MIT"
]
| permissive | RevansChen/online-judge | 8ae55f136739a54f9c9640a967ec931425379507 | ad1b07fee7bd3c49418becccda904e17505f3018 | refs/heads/master | 2021-01-19T23:02:58.273081 | 2019-07-05T09:42:40 | 2019-07-05T09:42:40 | 88,911,035 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | # Python - 3.6.0
Test.describe('Basic Tests')
Test.assert_equals(calculate_time(1000, 500), 2.6)
Test.assert_equals(calculate_time(1500, 500), 3.9)
Test.assert_equals(calculate_time(2000, 1000), 2.6)
Test.assert_equals(calculate_time(5000, 1000), 6.5)
Test.assert_equals(calculate_time(1000, 5000), 0.26)
| [
"[email protected]"
]
| |
22f53a51f9d56c1020e164b962c2a83a03669e8f | a1c9c55e1520356113a320be18e8fcb31654a944 | /archive/0.10/generated/seaborn-jointplot-5.py | 97b6478b704f4f926f64361289dfc029bff368a4 | []
| no_license | seaborn/seaborn.github.io | bac12a9255b41c7971e9e94ea393d372ef66ef62 | f70445bc3456f0216169806c2daf03452ca1eba4 | refs/heads/master | 2023-01-06T10:50:10.789810 | 2022-12-30T19:59:55 | 2022-12-30T19:59:55 | 70,731,605 | 16 | 5 | null | 2022-06-28T00:32:07 | 2016-10-12T18:56:12 | HTML | UTF-8 | Python | false | false | 147 | py | g = (sns.jointplot("sepal_length", "sepal_width",
data=iris, color="k")
.plot_joint(sns.kdeplot, zorder=0, n_levels=6))
| [
"[email protected]"
]
| |
56f93826cccd3b8c8efa2400ea3934ed95d6102e | db0e991d02a50eda222aaebeb7a5772b9cba467f | /account/admin.py | 6db31fc81344fb402617759b0d2e5180d5105ae8 | []
| no_license | iamrraj/Djanog_Learn | 1ba90ac797b284c5e2a7dd733fd61353ee8af241 | c522b41411415585468cadfe6999262a6c9b487d | refs/heads/master | 2020-04-17T09:48:09.414269 | 2019-01-18T21:24:53 | 2019-01-18T21:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | from django.contrib import admin
from .models import Profile,Categoty,Product,Slide
# Register your models here.
class ProfileAdmin(admin.ModelAdmin):
    list_display = ('user','birth_date','email_confirmed', 'location')
list_filter = [ 'location', 'user' ]
search_fields = ['location', 'user' ]
class ProductAdmin(admin.ModelAdmin):
    list_display = ('name','disprice','seller', 'acprice','categoty')
list_display_links = ('name','disprice','seller', 'acprice','categoty')
list_filter = ['pub_date','categoty','seller']
search_fields = ['categoty','seller','name']
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name','pub_date')
list_display_links = ('name','pub_date')
list_filter = ['pub_date','name']
search_fields = ['id','name','pub_date']
class SlideAdmin(admin.ModelAdmin):
list_display = ('image','pub_date')
list_display_links = ('image','pub_date')
list_filter = ['pub_date','image']
admin.site.register(Profile,ProfileAdmin)
admin.site.register(Product,ProductAdmin)
admin.site.register(Categoty,CategoryAdmin)
admin.site.register(Slide,SlideAdmin) | [
"[email protected]"
]
| |
e7dc87d8f60339b7be557f18f118cc68c3545903 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2098/49405/273048.py | cff498a9698319a280edf449fde35bbd033aa422 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | a = int(input())
# Excel-style column title (bijective base-26, A=1..Z=26); the apparent intent
# of the original conversion, whose loop divided by zero partway through.
res = ""
while a > 0:
    a, r = divmod(a - 1, 26)
    res = chr(r + ord("A")) + res
print(res) | [
"[email protected]"
]
| |
dcd624ef125ecb43865c6cf90b0020339955f483 | 87f31b789750f6b545d6a79bd0b7028ebf4126c7 | /vislab/_results.py | 1716115a35aa0d860c71db36fd53bb1bdebffacc | [
"BSD-2-Clause"
]
| permissive | hertzmann/vislab | db4d1b9e63e9bb8a33e491cff433e02c0315ca81 | fcded208637fb51edfeaef1bde0bf766f9af1941 | refs/heads/master | 2021-01-24T04:20:32.382941 | 2016-07-12T20:21:28 | 2016-07-12T20:21:28 | 13,571,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,575 | py | import os
import pandas as pd
import cPickle
import numpy as np
import vislab
def load_pred_results(collection_name, cache_dirname, multiclass=False, force=False):
"""
Return DataFrame of prediction experiment results and Panel of per-image
predictions.
"""
if not os.path.exists(cache_dirname):
vislab.util.makedirs(cache_dirname)
results_df_filename = os.path.join(
cache_dirname, '{}_results_df.pickle'.format(collection_name))
preds_panel_filename = os.path.join(
cache_dirname, '{}_preds_panel.pickle'.format(collection_name))
# If cache exists, load and return.
if (os.path.exists(results_df_filename) and
os.path.exists(preds_panel_filename) and
not force):
results_df = pd.read_pickle(results_df_filename)
preds_panel = pd.read_pickle(preds_panel_filename)
print("Loaded from cache: {} records".format(results_df.shape[0]))
return results_df, preds_panel
# Otherwise, construct from database.
client = vislab.util.get_mongodb_client()
collection = client['predict'][collection_name]
print("Results in collection {}: {}".format(collection_name, collection.count()))
df = pd.DataFrame(list(collection.find()))
df.index = df.index.astype(str)
# Make the features list hashable for filtering/joins.
df['features_str'] = df['features'].apply(lambda x: ','.join(sorted(x)))
# We need a unique representation of the predictor settings.
df['setting'] = df.apply(lambda x: '{} {} {}'.format(x['features_str'], x['quadratic'], 'vw'), axis=1)
# And of the task performed.
df['full_task'] = df.apply(lambda x: '{} {}'.format(x['task'], x['data']), axis=1)
df = df.drop_duplicates(cols=['features_str', 'full_task'], take_last=True)
# Just for printing, if needed.
df = df.sort(['full_task', 'setting'])
# Get all predictions in a separate panel and drop the pickled ones.
if multiclass:
data = {}
for setting in df['setting'].unique():
el = df[df['setting'] == setting].iloc[0]
try:
pred_df = cPickle.loads(el['pred_df'])
except:
assert('results_name' in el)
pred_df_filename = '{}/{}.h5'.format(
vislab.config['paths']['results'], el['results_name'])
#print(pred_df_filename)
pred_df = pd.read_hdf(pred_df_filename, 'df')
# Not sure why there should ever be duplicate indices, but
# there are for one of the wikipaintings results...
pred_df['__index'] = pred_df.index
pred_df.drop_duplicates(cols='__index', take_last=True, inplace=True)
del pred_df['__index']
data[setting] = pred_df
preds_panel = pd.Panel(data).swapaxes('items', 'minor')
else:
preds_panel = get_all_preds_panel(df)
try:
del df['pred_df']
except KeyError:
pass
df.to_pickle(results_df_filename)
preds_panel.to_pickle(preds_panel_filename)
return df, preds_panel
def get_all_preds_panel(df):
all_full_tasks = df['full_task'].unique()
data = dict((
(full_task, get_all_preds_df(df, full_task))
for full_task in all_full_tasks
))
all_preds_panel = pd.Panel(data)
return all_preds_panel
def get_all_preds_df(df, full_task):
"""
Get the DataFrame of predictions from the results dataframe.
Tip: get all predictions of an image with
all_preds_panel.major_xs('f_1604904579').T
"""
tdf = df[df['full_task'] == full_task]
# Make sure that there are no duplicate settings.
if len(tdf.setting.unique()) != tdf.shape[0]:
try:
del df['pred_df']
except KeyError:
pass
print(tdf.to_string())
raise Exception("Non-unique feature-setting pairs")
pred_dfs = []
for i, row in tdf.iterrows():
try:
pred_df = cPickle.loads(row['pred_df'])
except:
assert('results_name' in row)
pred_df_filename = '{}/{}.h5'.format(
vislab.config['paths']['results'], row['results_name'])
pred_df = pd.read_hdf(pred_df_filename, 'df')
pred_df.index = pred_df.index.astype(str)
pred_dfs.append(pred_df)
# Make sure that all the settings had the same label and split information
arbitrary_pred_df = pred_dfs[0]
    # np.all over a generator expression is vacuously true, so compare each
    # frame explicitly.
    assert all((df_['label'] == arbitrary_pred_df['label']).all() for df_ in pred_dfs)
    assert all((df_['split'] == arbitrary_pred_df['split']).all() for df_ in pred_dfs)
data = []
for df_ in pred_dfs:
df_["index"] = df_.index
# TODO: why the fuck are the duplicate indices???
df_ = df_.drop_duplicates('index')
if 'score' in df_.columns:
data.append(df_['score'])
else:
# TODO: temporary, remove when all experiments are re-run
data.append(df_['pred'])
all_preds_df = pd.DataFrame(data, index=tdf['setting']).T
all_preds_df['label'] = arbitrary_pred_df['label']
all_preds_df['split'] = arbitrary_pred_df['split']
# # Re-order columns
# # columns = all_preds_df.columns.values
# # reordered_columns = ['split', 'label'] + (columns - ['split', 'label']).tolist()
# # all_preds_df = all_preds_df[:, reordered_columns]
all_preds_df.index = all_preds_df.index.astype(str)
return all_preds_df
if __name__ == '__main__':
load_pred_results('wikipaintings_oct25', 'whatever', multiclass=True)
| [
"[email protected]"
]
| |
0f002addf74bef460a8840967aaf1a0aba19ff6d | 47136f769b2e870242f438927cee8dabcbca94c0 | /week8/informatics/4/F.py | ac3e3e385b9712dcdb1ec40e27313b118220704f | []
| no_license | Almanova/WebDevelopment-Spring2020 | de97b5aba1f13a766e2ef183151e39db3c8bba53 | 0abdee8f25dee1a4d32da2b633903d33936b6e77 | refs/heads/master | 2023-01-11T08:20:27.232203 | 2020-04-17T01:31:01 | 2020-04-17T01:31:01 | 236,373,539 | 0 | 0 | null | 2023-01-07T16:25:00 | 2020-01-26T20:42:31 | TypeScript | UTF-8 | Python | false | false | 177 | py | n = int(input())
values = input().split()  # renamed from `list` to avoid shadowing the builtin
cnt = 0
# Count strict local maxima: elements strictly greater than both neighbors.
for i in range(1, n - 1):
    if int(values[i - 1]) < int(values[i]) and int(values[i + 1]) < int(values[i]):
        cnt += 1
print(cnt) | [
"[email protected]"
]
| |
e54d5f06e5fc1b80bc598b447f5332574e03328c | 35f7970d0423dac96f1fefda6fb2246ada0bd483 | /catkin_ws/build/rotors_joy_interface/catkin_generated/pkg.installspace.context.pc.py | fe71778c0472e11f9e3595aebb9f1e531858b5eb | []
| no_license | HugoGrellier/ros_project_bebop | 7c169635fa5ffe664bdb4155bac212a0a5f7b941 | d6c8c3ada879747a7b070dc88646d4c3b86d28c5 | refs/heads/master | 2023-02-09T10:37:22.209574 | 2020-10-20T18:52:04 | 2020-10-20T18:52:04 | 306,311,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;geometry_msgs;mav_msgs;sensor_msgs;trajectory_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rotors_joy_interface"
PROJECT_SPACE_DIR = "/home/student/Documents/ros_project_bebop/catkin_ws/install"
PROJECT_VERSION = "2.1.1"
| [
"[email protected]"
]
| |
6291822cb31b4bf8385ea3f7c22d79a5f2a4e13f | fb5b1b8dce103dea28be52f7bbd9ea84da2cec81 | /kolibri/core/discovery/api.py | 1a8389a92afb0d82f12c4c2108850fecba086a18 | [
"MIT"
]
| permissive | lyw07/kolibri | d7f6f92656faa0483cd2cbdf57a3b6c54d52c2f2 | 11e0d01e2bc43850a6dfd4238e6408004449c3dc | refs/heads/develop | 2021-01-02T09:40:04.457976 | 2019-05-20T21:29:27 | 2019-05-20T21:29:27 | 99,255,547 | 1 | 0 | MIT | 2018-03-08T18:43:36 | 2017-08-03T16:53:09 | Python | UTF-8 | Python | false | false | 380 | py | from rest_framework import viewsets
from .models import NetworkLocation
from .serializers import NetworkLocationSerializer
from kolibri.core.content.permissions import CanManageContent
class NetworkLocationViewSet(viewsets.ModelViewSet):
permission_classes = (CanManageContent,)
serializer_class = NetworkLocationSerializer
queryset = NetworkLocation.objects.all()
| [
"[email protected]"
]
| |
d06a16fc3cb7202fdd9058558cf45839be272a0b | e0980f704a573894350e285f66f4cf390837238e | /.history/menus/wagtail_hooks_20201030120530.py | 0719f7961b7e5d8a34a3fef28930d93e6014a64d | []
| no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | """ The category will be added in the admin sidebar. """
from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register
from .models import Menu  # assumed: Menu is defined in this app's models.py
@modeladmin_register
class MenuAdmin(ModelAdmin):
    model = Menu | [
"[email protected]"
]
| |
5f7296a2c63f51459b3ce77f09584dbef613d994 | 76b064a76ffd23b0d0dff57d266abd6a111e9247 | /Ch04 Counting Elements/MissingIntegers.py | 819800f4b1f6de69459764150ccb264152e3f8ce | []
| no_license | startFromBottom/codility_problems | 78e0e0fcd914730e0dd8f725dde3dc96be83a255 | c8e128b5768e8140e658274e7cc8fee95c1bce9a | refs/heads/master | 2022-12-05T12:38:01.595250 | 2020-08-25T11:49:44 | 2020-08-25T11:49:44 | 289,836,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | """
problem link : https://app.codility.com/programmers/lessons/4-counting_elements/missing_integer/
result : https://app.codility.com/demo/results/trainingETTG9E-G32/
"""
def solution(A):
A = sorted(list(set(A)))
# ex) A = [98, 99, 100] -> 1
if A[0] > 1:
return 1
for i in range(1, len(A)):
# ex) A = [1,2,4,5] -> 3
if A[i - 1] >= 0 and A[i] > A[i - 1] + 1:
return A[i - 1] + 1
# ex) A = [-3,-1, 3] -> 1
elif A[i - 1] <= 0 and A[i] > 1:
return 1
# ex) A = [-3, -1] -> 1
if A[-1] <= 0:
return 1
# ex) A = [1, 2, 3] -> 4
return A[-1] + 1
| [
"[email protected]"
]
| |
88f092084337bcd4d9073c16381898f674a18ef3 | 81bad22641705683c68ff89f19362ba202891652 | /napari/plugins/exceptions.py | e9979de0d9e5c07e6d09e0f1592bcca062d4cf1c | [
"BSD-3-Clause"
]
| permissive | sofroniewn/napari | ee2a39a1a1132910db6f2a47994671e8138edb51 | beaa98efe5cf04ba659086e7a514b2ade05277af | refs/heads/main | 2023-07-12T02:46:41.185932 | 2022-09-14T21:57:15 | 2022-09-14T21:57:15 | 154,751,137 | 2 | 3 | BSD-3-Clause | 2023-07-01T10:26:45 | 2018-10-25T23:43:01 | Python | UTF-8 | Python | false | false | 1,970 | py | from napari_plugin_engine import PluginError, standard_metadata
from ..utils.translations import trans
def format_exceptions(
plugin_name: str, as_html: bool = False, color="Neutral"
):
"""Return formatted tracebacks for all exceptions raised by plugin.
Parameters
----------
plugin_name : str
The name of a plugin for which to retrieve tracebacks.
    as_html : bool
        Whether to return the exception string as formatted html,
        defaults to False.
    color : str
        Name of the color scheme passed to the traceback formatter,
        defaults to "Neutral".
Returns
-------
str
A formatted string with traceback information for every exception
raised by ``plugin_name`` during this session.
"""
_plugin_errors = PluginError.get(plugin_name=plugin_name)
if not _plugin_errors:
return ''
from napari import __version__
from ..utils._tracebacks import get_tb_formatter
format_exc_info = get_tb_formatter()
_linewidth = 80
_pad = (_linewidth - len(plugin_name) - 18) // 2
msg = [
trans._(
"{pad} Errors for plugin '{plugin_name}' {pad}",
deferred=True,
pad='=' * _pad,
plugin_name=plugin_name,
),
'',
f'{"napari version": >16}: {__version__}',
]
err0 = _plugin_errors[0]
if err0.plugin:
package_meta = standard_metadata(err0.plugin)
if package_meta:
msg.extend(
[
f'{"plugin package": >16}: {package_meta["package"]}',
f'{"version": >16}: {package_meta["version"]}',
f'{"module": >16}: {err0.plugin}',
]
)
msg.append('')
for n, err in enumerate(_plugin_errors):
_pad = _linewidth - len(str(err)) - 10
msg += ['', f'ERROR #{n + 1}: {str(err)} {"-" * _pad}', '']
msg.append(format_exc_info(err.info(), as_html, color))
msg.append('=' * _linewidth)
return ("<br>" if as_html else "\n").join(msg)
| [
"[email protected]"
]
| |
28fe05503890e1d58e8f3360c2c2d65753534bd2 | 8d6ae21b78b3b40382e21198c571a7957e055be5 | /July20/Statements/factors.py | 0711fc04d9ad0d4b19c9061f222235e998ee6070 | []
| no_license | vj-reddy/PythonBatch1 | 6c1a429e0ac57ea1db7b04af18187e84cd52f2d5 | b86a5a16b1004d1e4f855a57b019704c71425bbf | refs/heads/master | 2023-03-16T06:05:48.104363 | 2020-10-16T13:55:03 | 2020-10-16T13:55:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | number = int(input("Enter the number: "))
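# Print the proper divisors of number (every factor from 2 through number//2).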
index = 2
while index <= number//2:
if number%index == 0:
print(index)
index = index+1 | [
"[email protected]"
]
| |
aa6025ca3f596c50a066dfe23bd9e32f3de84ba2 | ebe422519443dbe9c4acd3c7fd527d05cf444c59 | /modular_equation.py | 117dc8a748507e9c28c073df70bd420e73642f56 | []
| no_license | SaiSudhaV/coding_platforms | 2eba22d72fdc490a65e71daca41bb3d71b5d0a7b | 44d0f80104d0ab04ef93716f058b4b567759a699 | refs/heads/master | 2023-06-19T18:05:37.876791 | 2021-07-15T18:02:19 | 2021-07-15T18:02:19 | 355,178,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | # cook your dish here
def modular_equation(n, m):
res, tem = [], [1] * (n + 1)
for i in range(2, n + 1):
res.append(tem[m % i])
j = m % i
while j < n + 1:
tem[j] += 1
j += i
return sum(res)
if __name__ == "__main__":
t = int(input())
for i in range(t):
n, m = map(int, input().split())
print(modular_equation(n, m)) | [
"[email protected]"
]
| |
e6af48993c7c26fd4ed95950dd100596814de47c | 05ceedee44c66ece52a9d7df9dc8ac2df536557b | /monero_glue/messages/StellarGetPublicKey.py | 0e6305747025ab95cfb7e0ed611cbf5e6901497d | [
"MIT"
]
| permissive | tsusanka/monero-agent | 1e48042f7cbb77b3d3f6262c97de71da4f6beb3d | 526ca5a57714cdca3370021feda3ed5ad3e3ea1a | refs/heads/master | 2020-03-25T11:43:16.967931 | 2018-08-06T15:07:05 | 2018-08-06T15:07:05 | 143,745,130 | 1 | 0 | null | 2018-08-06T15:06:04 | 2018-08-06T15:06:03 | null | UTF-8 | Python | false | false | 614 | py | # Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
if __debug__:
try:
from typing import List
except ImportError:
List = None # type: ignore
class StellarGetPublicKey(p.MessageType):
MESSAGE_WIRE_TYPE = 200
FIELDS = {
1: ('address_n', p.UVarintType, p.FLAG_REPEATED),
2: ('show_display', p.BoolType, 0),
}
def __init__(
self,
address_n: List[int] = None,
show_display: bool = None,
) -> None:
self.address_n = address_n if address_n is not None else []
self.show_display = show_display
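# Illustrative construction (the path values are placeholders for a hardened
# SLIP-0010 derivation path, not taken from this repo):
#   msg = StellarGetPublicKey(address_n=[0x8000002C, 0x80000094, 0x80000000],
#                             show_display=True)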
| [
"[email protected]"
]
| |
3fe2c84bde72e2715727d3d95441bd71841b53b0 | f5a4f340da539520c60c4bce08356c6f5c171c54 | /tests/integration/reqs/test_tx.py | e8551a73473e6e6ef046ce1ffa96278212f25855 | [
"ISC",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | yyolk/xrpl-py | e3935c0a0f488793153ca29e9d71c197cf88f857 | e5bbdf458ad83e6670a4ebf3df63e17fed8b099f | refs/heads/master | 2023-07-17T03:19:29.239838 | 2021-07-03T01:24:57 | 2021-07-03T01:24:57 | 355,299,041 | 1 | 0 | ISC | 2021-04-08T05:29:43 | 2021-04-06T18:57:06 | null | UTF-8 | Python | false | false | 528 | py | from tests.integration.integration_test_case import IntegrationTestCase
from tests.integration.it_utils import test_async_and_sync
from tests.integration.reusable_values import OFFER
from xrpl.models.requests import Tx
class TestTx(IntegrationTestCase):
@test_async_and_sync(globals())
async def test_basic_functionality(self, client):
response = await client.request(
Tx(
transaction=OFFER.result["hash"],
),
)
self.assertTrue(response.is_successful())
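# Tx here looks up the previously submitted offer by its transaction hash; the
# test_async_and_sync decorator (from this repo's test utils) appears to run
# the body against both the sync and async clients.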
| [
"[email protected]"
]
| |
50f1b2c6c3f6bec0a574850bc96f48c8683609c8 | cd0987589d3815de1dea8529a7705caac479e7e9 | /webkit/WebKitTools/Scripts/webkitpy/tool/steps/build.py | 10fe1a806ce51955b95b23099c0fc1bcef93962e | []
| no_license | azrul2202/WebKit-Smartphone | 0aab1ff641d74f15c0623f00c56806dbc9b59fc1 | 023d6fe819445369134dee793b69de36748e71d7 | refs/heads/master | 2021-01-15T09:24:31.288774 | 2011-07-11T11:12:44 | 2011-07-11T11:12:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,415 | py | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
from webkitpy.common.system.deprecated_logging import log
class Build(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.build,
Options.quiet,
Options.build_style,
]
def build(self, build_style):
        self._tool.executive.run_and_throw_if_fail(
            self.port().build_webkit_command(build_style=build_style),
            self._options.quiet)
def run(self, state):
if not self._options.build:
return
log("Building WebKit")
if self._options.build_style == "both":
self.build("debug")
self.build("release")
else:
self.build(self._options.build_style)
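# Sketch of how this step is typically driven (the tool/options objects are
# assumptions, following the webkitpy convention of Step(tool, options)):
#   step = Build(tool, options)  # options.build=True, options.build_style="both"
#   step.run(state)              # builds debug, then release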
| [
"[email protected]"
]
| |
2a0b3a93e513b7f33f12ced12c7f3a932ee7c77e | 7111511ef0cca1bcf84a76d49419fad504d78f6e | /test331scaping_DictWriter.py | a15348ff338735c3fd8aa09bcf4f71bffd95733e | []
| no_license | blockchain99/pythonlecture | 7800033cd62251b0eec8cf3b93f253175d9cb2e8 | 198e1b6d68db72e4a5009f988c503958ad7ab444 | refs/heads/master | 2020-12-12T14:21:53.626918 | 2020-01-15T19:02:07 | 2020-01-15T19:02:07 | 234,148,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,469 | py | import requests
from bs4 import BeautifulSoup
from csv import writer, DictWriter
response = requests.get("https://www.rithmschool.com/blog")
# print(response.text)
with open('test331out.text', 'w', encoding="utf-8") as file:  # utf-8 so arbitrary page text writes cleanly
file.write(response.text)
print("==============================================================")
# go to the URL above -> open the developer tools in Chrome.
soup = BeautifulSoup(response.text, "html.parser")
articles = soup.find_all("article")
# print(articles)
print("-------------------csv DictWriter---------------------")
with open("test331blogDict.csv", "w", newline="") as file:  # newline="" avoids blank rows in the csv on Windows
headers = ["title", "link", "date"]
csv_dictwriter = DictWriter(file, fieldnames=headers)
csv_dictwriter.writeheader()
for article in articles:
#get_text: access the inner text in an element("a")
# print(article.find("a").get_text()) #anchor tag -> convert to text
a_tag = article.find("a")
title = a_tag.get_text() #anchor tag -> convert to text
url = a_tag['href']
# print(article.find("time")) #<time datetime="2019-10-22" pubdate=""></time>
time = article.find("time")
date = time['datetime']
# print(date) #2019-09-03
# print(title, url, date)
# csv_writer.writerow(title, url, date) #TypeError: writerow() takes exactly one argument (3 given)
csv_dictwriter.writerow({
"title" : title,
"link" : url,
"date" : date
})
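        # Each writerow() call maps the dict keys onto the title/link/date
        # header written above, producing one CSV row per <article> element.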
| [
"[email protected]"
]
| |
bc43583f980dc0e9c3943616e02cb5acb73ba03c | 2695d586778c3a19cad843f14f505f3e534f470d | /practice/Dynamic_Programming/Sticker_9465.py | cac0d3e543a6e3506d7453dc877d19c9cfa72409 | []
| no_license | kjh03160/Algorithm_Basic | efdb2473186d0aff983a8c0f961d6b86ce66b0d1 | 24842569237db95629cec38ca9ea8e459857c77e | refs/heads/master | 2023-07-14T21:34:29.074373 | 2021-09-11T10:13:00 | 2021-09-11T10:13:00 | 276,395,958 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | # https://www.acmicpc.net/problem/9465
def answer(L):
DP = [[0, 0, 0] for _ in range(len(L[0]))]
DP[0] = [L[0][0], L[1][0], 0]
for i in range(1, len(L[0])):
        DP[i][0] = max(DP[i - 1][1], DP[i - 1][2]) + L[0][i]  # when the top sticker of column i is chosen
        DP[i][1] = max(DP[i - 1][0], DP[i - 1][2]) + L[1][i]  # when the bottom sticker of column i is chosen
        DP[i][2] = max(DP[i - 1])  # when nothing in column i is chosen
return max(DP[-1])
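# Hand-checked against the problem's first sample: answer([[50, 10, 100, 20, 40],
# [30, 50, 70, 10, 60]]) evaluates to 260.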
import sys
input = sys.stdin.readline
t = int(input())
T = []
for i in range(t):
x = []
n = int(input())
x.append(list(map(int, input().split())))
x.append(list(map(int, input().split())))
T.append(x)
# print(x)
for i in T:
print(answer(i)) | [
"[email protected]"
]
| |
deae57d8466c67c0588f984d9edd9a77a8bac904 | ed38a50a81aeb206e7c735971bb874eb481e2e82 | /2A_2/python/funkcje03.py | a39d5694b51d6b1c312abac8c1751b4b94a38b3a | []
| no_license | lo1cgsan/rok202021 | e70db45494d891f179c08ddf3ef1ac55de25e76f | 07af7ea54d61d03f851de675744bada9083ecdca | refs/heads/main | 2023-06-01T03:38:20.534102 | 2021-06-21T12:21:42 | 2021-06-21T12:21:42 | 347,921,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# funkcje03.py
#
def zwieksz(a):
    a += 2  # increase the value by two, i.e. a = a + 2
print(a)
def zwieksz2(b):
b[0] += 2
print(b)
def main(args):
    a = int(input("Enter a number: "))  # local variable
print(a)
zwieksz(a)
print(a)
    b = [1]  # one-element list
    b[0] = int(input("Enter a number: "))
zwieksz2(b)
print(b)
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
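# The demo's point: zwieksz rebinds its int parameter, so the caller's `a` is
# unchanged afterwards, while zwieksz2 mutates the shared list, so the caller's
# `b` shows the increased value.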
| [
"[email protected]"
]
| |
3deacc17b483fd79573c192526fc20b8ae69b30f | be2a81f03e8a2dac7d356dde7a3ffdcfe3f77e00 | /providers/com/biomedcentral/migrations/0002_favicon.py | f3d7c663a9a58c872689d4481f4d3d62cbe13f76 | [
"Apache-2.0"
]
| permissive | Stevenholloway/SHARE | 4193bbd3ca50765a24bf21c0cc14438175fbb678 | b9759106d12c2ff548bad22c4be8650e9f41e61e | refs/heads/develop | 2021-01-21T19:13:35.205983 | 2017-02-23T14:45:46 | 2017-02-23T14:45:46 | 63,431,390 | 0 | 0 | null | 2016-07-15T15:17:45 | 2016-07-15T15:17:44 | null | UTF-8 | Python | false | false | 463 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-02-10 14:32
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
dependencies = [
('com.biomedcentral', '0001_initial'),
('share', '0018_store_favicons'),
]
operations = [
migrations.RunPython(
code=share.robot.RobotFaviconMigration('com.biomedcentral'),
),
]
| [
"[email protected]"
]
| |
eb7d6a4abda13af08dead3330c0e64c29fd40e93 | b012caadf2bbfa34db5d0144accc5aeb02f26c68 | /keystone/common/sql/contract_repo/versions/029_placeholder.py | a96cd6f3625240fefa55dfdb8f0df785e3aa29f3 | [
"Apache-2.0"
]
| permissive | sapcc/keystone | c66345df04af12066ec27ad93959da7b0b742fdc | 03a0a8146a78682ede9eca12a5a7fdacde2035c8 | refs/heads/stable/train-m3 | 2023-08-20T07:22:57.504438 | 2023-03-06T15:56:44 | 2023-03-06T15:56:44 | 92,154,503 | 0 | 6 | Apache-2.0 | 2023-03-24T22:13:22 | 2017-05-23T09:27:56 | Python | UTF-8 | Python | false | false | 754 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Pike backports. Do not use this number for new
# Queens work. New Queens work starts after all the placeholders.
def upgrade(migrate_engine):
pass
| [
"[email protected]"
]
| |
81d64fbe8e61f3bfd56fd9fe45446ed82ad92b0e | 3ee5f3f013cbb6ab8620c973c191ccc5e5d47aec | /nps_examples_py/setup.py | e7d39e367baae191821864d00af970b90237b4ce | []
| no_license | nps-ros2/nps-ros2-examples | 96837de908c7c76089f1eda6c10fb28c23452fdf | 557244746b370f04288a7de74b4b12991cf331e0 | refs/heads/master | 2020-05-07T20:51:12.577750 | 2019-09-10T17:43:42 | 2019-09-10T17:43:42 | 180,880,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | from setuptools import find_packages
from setuptools import setup
package_name = 'nps_examples_py'
setup(
name=package_name,
version='0.6.2',
packages=find_packages(exclude=['test']),
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
author='your name',
author_email='[email protected]',
maintainer='your name',
maintainer_email='[email protected]',
keywords=['ROS'],
classifiers=[
'Programming Language :: Python'
],
description=(
'Adapted from ROS2 demos.'
),
license='your license',
entry_points={
'console_scripts': [
'listener = nps_examples_py.topics.listener:main',
'talker = nps_examples_py.topics.talker:main'
],
},
)
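# Assumed standard ROS 2 usage once the package is built and sourced:
#   ros2 run nps_examples_py talker
#   ros2 run nps_examples_py listener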
| [
"[email protected]"
]
| |
31fcfdec26c80808e1bec53fe1961534b35fc880 | 0d24036dcf8736c0392a1ee1c2f3b45633221d8a | /etc/src/genpy-bgp-oper/cisco_ios_xr_ipv4_bgp_oper/bgp/instances/instance/instance_standby/default_vrf/afs/af/advertised_path_xr/advertised_path/bgp_path_bag_pb2.py | 3e00a023d4edf24ceacb989bcfb6ea9a39192b3d | []
| no_license | mspiez/telemetry_collector | c4b97c6686748fc20748898a25e9fc756d2d0b63 | 52ed12c06debfe04181f0bfea9854a66ed8bb3df | refs/heads/master | 2020-12-19T23:28:08.358956 | 2020-05-02T19:54:38 | 2020-05-02T19:54:38 | 235,883,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 177,156 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cisco_ios_xr_ipv4_bgp_oper/bgp/instances/instance/instance_standby/default_vrf/afs/af/advertised_path_xr/advertised_path/bgp_path_bag.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='cisco_ios_xr_ipv4_bgp_oper/bgp/instances/instance/instance_standby/default_vrf/afs/af/advertised_path_xr/advertised_path/bgp_path_bag.proto',
package='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path',
syntax='proto3',
serialized_pb=_b('\n\x8b\x01\x63isco_ios_xr_ipv4_bgp_oper/bgp/instances/instance/instance_standby/default_vrf/afs/af/advertised_path_xr/advertised_path/bgp_path_bag.proto\x12xcisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path\"\x89\x01\n\x11\x62gp_path_bag_KEYS\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x66_name\x18\x02 \x01(\t\x12\n\n\x02rd\x18\x03 \x01(\t\x12\x0f\n\x07network\x18\x04 \x01(\t\x12\x15\n\rprefix_length\x18\x05 \x01(\r\x12\x18\n\x10neighbor_address\x18\x06 \x01(\t\"\x98\x06\n\x0c\x62gp_path_bag\x12\x0f\n\x07no_path\x18\x32 \x01(\x08\x12\x0f\n\x07\x61\x66_name\x18\x33 \x01(\t\x12\xa1\x01\n\x10neighbor_address\x18\x34 \x01(\x0b\x32\x86\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype\x12\x1b\n\x13process_instance_id\x18\x35 \x01(\r\x12\x11\n\tlabel_oor\x18\x36 \x01(\x08\x12\x1a\n\x12label_o_or_version\x18\x37 \x01(\r\x12\x12\n\nlabel_fail\x18\x38 \x01(\x08\x12\x9e\x01\n\x10path_information\x18\x39 \x01(\x0b\x32\x83\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_\x12\xa8\x01\n\x1a\x61ttributes_after_policy_in\x18: \x01(\x0b\x32\x83\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_\x12\x1b\n\x13route_distinguisher\x18; \x01(\t\x12\"\n\x1asource_route_distinguisher\x18< \x01(\t\x12\x16\n\x0eprefix_version\x18= \x01(\r\x12\x10\n\x08vrf_name\x18> \x01(\t\x12\x17\n\x0fsource_vrf_name\x18? \x01(\t\x12\x12\n\nsrcaf_name\x18@ \x01(\t\"&\n\x15IPV4TunnelAddressType\x12\r\n\x05value\x18\x01 \x01(\t\"#\n\x12IPV4MDTAddressType\x12\r\n\x05value\x18\x01 \x01(\t\"(\n\x17RTConstraintAddressType\x12\r\n\x05value\x18\x01 \x01(\t\" \n\x0fIPV6AddressType\x12\r\n\x05value\x18\x01 \x01(\t\"\x1f\n\x0eMACAddressType\x12\r\n\x05value\x18\x01 \x01(\t\"\xfc\x02\n\x13\x62gp_attr_rnh_addr_t\x12\x0b\n\x03len\x18\x01 \x01(\t\x12\x14\n\x0cipv4_address\x18\x02 \x01(\t\x12\xa0\x01\n\x0cipv6_address\x18\x03 \x01(\x0b\x32\x89\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6AddressType\x12\x9e\x01\n\x0bmac_address\x18\x04 \x01(\x0b\x32\x88\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.MACAddressType\"\xda\x01\n\x12\x62gp_evpn_gw_addr_t\x12\x0b\n\x03len\x18\x01 \x01(\t\x12\x14\n\x0cipv4_address\x18\x02 \x01(\t\x12\xa0\x01\n\x0cipv6_address\x18\x03 \x01(\x0b\x32\x89\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6AddressType\")\n\x10\x62gp_l2vpn_addr_t\x12\x15\n\rl2vpn_address\x18\x01 \x01(\x0c\"#\n\x14\x62gp_l2vpn_evpn_esi_t\x12\x0b\n\x03\x65si\x18\x01 \x01(\x0c\"%\n\x14L2VPNEVPNAddressType\x12\r\n\x05value\x18\x01 \x01(\t\".\n\x15\x62gp_l2vpn_mspw_addr_t\x12\x15\n\rl2vpn_address\x18\x01 \x01(\x0c\"$\n\x13IPV6MVPNAddressType\x12\r\n\x05value\x18\x01 \x01(\t\"$\n\x13IPV4MVPNAddressType\x12\r\n\x05value\x18\x01 \x01(\t\"!\n\x10LS_LSAddressType\x12\r\n\x05value\x18\x01 \x01(\t\"(\n\x17IPv4FlowspecAddressType\x12\r\n\x05value\x18\x01 \x01(\t\"(\n\x17IPv6FlowspecAddressType\x12\r\n\x05value\x18\x01 \x01(\t\"\xd7\x19\n\x0c\x62gp_addrtype\x12\x0b\n\x03\x61\x66i\x18\x01 \x01(\t\x12\x14\n\x0cipv4_address\x18\x02 
\x01(\t\x12\x1a\n\x12ipv4_mcast_address\x18\x03 \x01(\t\x12\x1a\n\x12ipv4_label_address\x18\x04 \x01(\t\x12\xad\x01\n\x13ipv4_tunnel_address\x18\x05 \x01(\x0b\x32\x8f\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV4TunnelAddressType\x12\xa7\x01\n\x10ipv4_mdt_address\x18\x06 \x01(\x0b\x32\x8c\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV4MDTAddressType\x12\x18\n\x10ipv4_vpn_address\x18\x07 \x01(\t\x12\x1d\n\x15ipv4_vpna_mcastddress\x18\x08 \x01(\t\x12\xa0\x01\n\x0cipv6_address\x18\t \x01(\x0b\x32\x89\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6AddressType\x12\xa6\x01\n\x12ipv6_mcast_address\x18\n \x01(\x0b\x32\x89\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6AddressType\x12\xa6\x01\n\x12ipv6_label_address\x18\x0b \x01(\x0b\x32\x89\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6AddressType\x12\xa4\x01\n\x10ipv6_vpn_address\x18\x0c \x01(\x0b\x32\x89\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6AddressType\x12\xaa\x01\n\x16ipv6_vpn_mcast_address\x18\r \x01(\x0b\x32\x89\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6AddressType\x12\xa7\x01\n\x12l2_vpnvpls_address\x18\x0e \x01(\x0b\x32\x8a\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_l2vpn_addr_t\x12\xb1\x01\n\x15rt_constraint_address\x18\x0f \x01(\x0b\x32\x91\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.RTConstraintAddressType\x12\xa9\x01\n\x11ipv6_mvpn_address\x18\x10 \x01(\x0b\x32\x8d\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6MVPNAddressType\x12\xa9\x01\n\x11ipv4_mvpn_address\x18\x11 \x01(\x0b\x32\x8d\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV4MVPNAddressType\x12\xac\x01\n\x13l2_vpn_evpn_address\x18\x12 \x01(\x0b\x32\x8e\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.L2VPNEVPNAddressType\x12\xa2\x01\n\rls_ls_address\x18\x13 \x01(\x0b\x32\x8a\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.LS_LSAddressType\x12\xad\x01\n\x13l2_vpn_mspw_address\x18\x14 \x01(\x0b\x32\x8f\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_l2vpn_mspw_addr_t\x12\xb1\x01\n\x15ipv4_flowspec_address\x18\x15 \x01(\x0b\x32\x91\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPv4FlowspecAddressType\x12\xb1\x01\n\x15ipv6_flowspec_address\x18\x16 \x01(\x0b\x32\x91\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPv6FlowspecAddressType\x12\xb5\x01\n\x19ipv4_vpn_flowspec_address\x18\x17 
\x01(\x0b\x32\x91\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPv4FlowspecAddressType\x12\xb5\x01\n\x19ipv6_vpn_flowspec_address\x18\x18 \x01(\x0b\x32\x91\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPv6FlowspecAddressType\"\xc1\x01\n\x0e\x62gp_prefixtype\x12\x97\x01\n\x06prefix\x18\x01 \x01(\x0b\x32\x86\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype\x12\x15\n\rprefix_length\x18\x02 \x01(\r\"\x91\x02\n\rbgp_te_tunnel\x12\x13\n\x0btunnel_name\x18\x01 \x01(\x0c\x12\x15\n\rhas_te_tunnel\x18\x02 \x01(\x08\x12\x14\n\x0cis_tunnel_up\x18\x03 \x01(\x08\x12\x1c\n\x14is_tunnel_info_stale\x18\x04 \x01(\x08\x12\x1c\n\x14is_tunnel_registered\x18\x05 \x01(\x08\x12\x1a\n\x12tunnel_v6_required\x18\x06 \x01(\x08\x12\x19\n\x11tunnel_v6_enabled\x18\x07 \x01(\x08\x12\x15\n\rbinding_label\x18\x08 \x01(\r\x12\x18\n\x10tunnel_if_handle\x18\t \x01(\r\x12\x1a\n\x12last_tunnel_update\x18\n \x01(\r\"9\n\x14\x62gp_pedistlbl_entry_\x12\x12\n\npe_address\x18\x01 \x01(\t\x12\r\n\x05label\x18\x02 \x01(\r\"\xec\t\n\x16\x62gp_common_attr_entry_\x12\x19\n\x11is_metric_present\x18\x01 \x01(\x08\x12\x1d\n\x15is_local_pref_present\x18\x02 \x01(\x08\x12#\n\x1bis_atomic_aggregate_present\x18\x03 \x01(\x08\x12\x1d\n\x15is_aggregator_present\x18\x04 \x01(\x08\x12\x19\n\x11is_origin_present\x18\x05 \x01(\x08\x12\x1a\n\x12is_as_path_present\x18\x06 \x01(\x08\x12\x1c\n\x14is_community_present\x18\x07 \x01(\x08\x12%\n\x1dis_extended_community_present\x18\x08 \x01(\x08\x12\x16\n\x0eis_ssa_present\x18\t \x01(\x08\x12\x1c\n\x14is_connector_present\x18\n \x01(\x08\x12\x17\n\x0fis_pmsi_present\x18\x0b \x01(\x08\x12\x18\n\x10is_pppmp_present\x18\x0c \x01(\x08\x12\x17\n\x0fis_aigp_present\x18\r \x01(\x08\x12)\n!is_pe_distinguisher_label_present\x18\x0e \x01(\x08\x12\x1f\n\x17is_ls_attribute_present\x18\x0f \x01(\x08\x12\x1e\n\x16is_label_index_present\x18\x10 \x01(\x08\x12\x13\n\x0bneighbor_as\x18\x11 \x01(\r\x12\x15\n\raggregator_as\x18\x12 \x01(\r\x12\x1a\n\x12\x61ggregator_address\x18\x13 \x01(\t\x12\x0e\n\x06metric\x18\x14 \x01(\r\x12\x18\n\x10local_preference\x18\x15 \x01(\r\x12\x0e\n\x06origin\x18\x16 \x01(\r\x12\x0f\n\x07\x61s_path\x18\x17 \x03(\r\x12\x11\n\tcommunity\x18\x18 \x03(\r\x12 \n\x18\x65xtended_community_flags\x18\x19 \x01(\r\x12\x1a\n\x12\x65xtended_community\x18\x1a \x03(\r\x12\x1a\n\x12unknown_attributes\x18\x1b \x03(\r\x12\x14\n\x0c\x63luster_list\x18\x1c \x03(\r\x12\x12\n\noriginator\x18\x1d \x01(\t\x12\x1b\n\x13l2_t_pv3_session_id\x18\x1e \x01(\r\x12\x17\n\x0fl2_t_pv3_cookie\x18\x1f \x03(\r\x12\x16\n\x0e\x63onnector_type\x18 \x01(\r\x12\x17\n\x0f\x63onnector_value\x18! 
\x03(\r\x12\x19\n\x11\x61igp_metric_value\x18\" \x01(\x04\x12\x11\n\tpmsi_type\x18# \x01(\r\x12\x12\n\npmsi_flags\x18$ \x01(\r\x12\x12\n\npmsi_label\x18% \x01(\r\x12\x12\n\npmsi_value\x18& \x03(\r\x12\x12\n\nppm_pvalue\x18\' \x01(\r\x12\xaf\x01\n\x16pe_distinguisher_label\x18( \x03(\x0b\x32\x8e\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_pedistlbl_entry_\x12\x0f\n\x07ls_attr\x18) \x03(\r\x12\x18\n\x10label_index_attr\x18* \x03(\r\"\xa3\n\n\tbgp_attr_\x12\x18\n\x10is_as_path2_byte\x18\x01 \x01(\x08\x12&\n\x1eis_application_gateway_present\x18\x02 \x01(\x08\x12\x1b\n\x13is_attr_set_present\x18\x03 \x01(\x08\x12\x1c\n\x14set_aigp_inbound_igp\x18\x04 \x01(\x08\x12\x1f\n\x17set_aigp_inbound_metric\x18\x05 \x01(\x08\x12\x16\n\x0eis_rnh_present\x18\x06 \x01(\x08\x12\x19\n\x11is_ribrnh_present\x18\x07 \x01(\x08\x12\x1c\n\x14\x61ttribute_key_number\x18\x08 \x01(\r\x12!\n\x19\x61ttribute_reuse_id_config\x18\t \x01(\x08\x12!\n\x19\x61ttribute_reuse_id_max_id\x18\n \x01(\r\x12\x1f\n\x17\x61ttribute_reuse_id_node\x18\x0b \x01(\r\x12\"\n\x1a\x61ttribute_reuse_id_current\x18\x0c \x01(\r\x12\x1f\n\x17\x61ttribute_reuse_id_keys\x18\r \x01(\r\x12&\n\x1e\x61ttribute_reuse_id_recover_sec\x18\x0e \x01(\r\x12\x19\n\x11vpn_distinguisher\x18\x0f \x01(\r\x12\xac\x01\n\x11\x63ommon_attributes\x18\x10 \x01(\x0b\x32\x90\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_\x12\xa3\x01\n\x08\x61ttr_set\x18\x11 \x01(\x0b\x32\x90\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_\x12\x10\n\x08rnh_type\x18\x12 \x01(\r\x12\x0f\n\x07rnh_len\x18\x13 \x01(\r\x12\x14\n\x0crnh_addr_len\x18\x14 \x01(\r\x12\xa0\x01\n\x08rnh_addr\x18\x15 \x01(\x0b\x32\x8d\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_rnh_addr_t\x12\x14\n\x0cribrnh_table\x18\x16 \x01(\r\x12\x12\n\nribrnh_mac\x18\x17 \x01(\t\x12\x9a\x01\n\tribrnh_ip\x18\x18 \x01(\x0b\x32\x86\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype\x12\x16\n\x0eribrnhip_table\x18\x19 \x01(\r\x12\x12\n\nribrnh_vni\x18\x1a \x01(\r\x12\x14\n\x0cribrnh_encap\x18\x1b \x01(\r\"\xe9\x01\n\x11\x62gp_advinfo_type_\x12\x13\n\x0bis_neighbor\x18\x01 \x01(\x08\x12\xa1\x01\n\x10neighbor_address\x18\x02 \x01(\x0b\x32\x86\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype\x12\x1b\n\x13update_group_number\x18\x03 \x01(\r\"\xd0\'\n\tbgp_path_\x12\x9d\x01\n\nbgp_prefix\x18\x01 \x01(\x0b\x32\x88\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_prefixtype\x12\xa1\x01\n\x10neighbor_address\x18\x02 \x01(\x0b\x32\x86\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype\x12\x12\n\nroute_type\x18\x03 \x01(\t\x12\x15\n\ris_path_valid\x18\x04 \x01(\x08\x12\x16\n\x0eis_path_damped\x18\x05 \x01(\x08\x12\x1c\n\x14is_path_history_held\x18\x06 \x01(\x08\x12\x18\n\x10is_internal_path\x18\x07 \x01(\x08\x12\x14\n\x0cis_best_path\x18\x08 \x01(\x08\x12\x12\n\nis_as_best\x18\t \x01(\x08\x12\x17\n\x0fis_spkr_as_best\x18\n 
\x01(\x08\x12\x17\n\x0fis_partial_best\x18\x0b \x01(\x08\x12!\n\x19is_aggregation_suppressed\x18\x0c \x01(\x08\x12\x1a\n\x12is_import_dampened\x18\r \x01(\x08\x12\x19\n\x11is_import_suspect\x18\x0e \x01(\x08\x12\x1e\n\x16is_path_not_advertised\x18\x0f \x01(\x08\x12(\n is_path_not_advertised_to_ebg_ps\x18\x10 \x01(\x08\x12(\n is_path_advertised_local_as_only\x18\x11 \x01(\x08\x12$\n\x1cis_path_from_route_reflector\x18\x12 \x01(\x08\x12\x1d\n\x15is_path_received_only\x18\x13 \x01(\x08\x12%\n\x1dis_received_path_not_modified\x18\x14 \x01(\x08\x12\x1f\n\x17is_path_locally_sourced\x18\x15 \x01(\x08\x12\x1f\n\x17is_path_local_aggregate\x18\x16 \x01(\x08\x12$\n\x1cis_path_from_network_command\x18\x17 \x01(\x08\x12)\n!is_path_from_redistribute_command\x18\x18 \x01(\x08\x12\x18\n\x10is_path_imported\x18\x19 \x01(\x08\x12\x1c\n\x14is_path_reoriginated\x18\x1a \x01(\x08\x12&\n\x1eis_path_reoriginated_stitching\x18\x1b \x01(\x08\x12\x18\n\x10is_path_vpn_only\x18\x1c \x01(\x08\x12\'\n\x1fis_path_from_confederation_peer\x18\x1d \x01(\x08\x12\x1f\n\x17is_path_synced_with_igp\x18\x1e \x01(\x08\x12\x19\n\x11is_path_multipath\x18\x1f \x01(\x08\x12\x1d\n\x15is_path_imp_candidate\x18 \x01(\x08\x12\x15\n\ris_path_stale\x18! \x01(\x08\x12 \n\x18is_path_long_lived_stale\x18\" \x01(\x08\x12\x16\n\x0eis_path_backup\x18# \x01(\x08\x12(\n is_path_backup_protect_multipath\x18$ \x01(\x08\x12\x1d\n\x15is_path_best_external\x18% \x01(\x08\x12\x1f\n\x17is_path_additional_path\x18& \x01(\x08\x12!\n\x19is_path_nexthop_discarded\x18\' \x01(\x08\x12\x99\x01\n\x08next_hop\x18( \x01(\x0b\x32\x86\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype\x12\x9d\x01\n\x0bnhte_tunnel\x18) \x01(\x0b\x32\x87\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel\x12\x16\n\x0ehas_rcvd_label\x18* \x01(\x08\x12\x12\n\nrcvd_label\x18+ \x01(\r\x12\x1b\n\x13has_local_net_label\x18, \x01(\x08\x12\x13\n\x0blocal_label\x18- \x01(\r\x12\x12\n\nigp_metric\x18. \x01(\r\x12\x13\n\x0bpath_weight\x18/ \x01(\r\x12\x1a\n\x12neighbor_router_id\x18\x30 \x01(\t\x12\x1a\n\x12has_mdt_group_addr\x18\x31 \x01(\x08\x12\x9f\x01\n\x0emdt_group_addr\x18\x32 \x01(\x0b\x32\x86\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype\x12\x13\n\x0bl2_vpn_size\x18\x33 \x01(\r\x12\x0f\n\x07has_esi\x18\x34 \x01(\x08\x12\xa8\x01\n\x0fl2_vpn_evpn_esi\x18\x35 \x01(\x0b\x32\x8e\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_l2vpn_evpn_esi_t\x12\x13\n\x0bhas_gw_addr\x18\x36 \x01(\x08\x12\x9e\x01\n\x07gw_addr\x18\x37 \x01(\x0b\x32\x8c\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_evpn_gw_addr_t\x12\x18\n\x10has_second_label\x18\x38 \x01(\x08\x12\x14\n\x0csecond_label\x18\x39 \x01(\r\x12\"\n\x1al2vpn_circuit_status_value\x18: \x03(\r\x12\x17\n\x0fpath_flap_count\x18; \x01(\r\x12 \n\x18seconds_since_first_flap\x18< \x01(\r\x12\x1a\n\x12time_to_unsuppress\x18= \x01(\r\x12\x16\n\x0e\x64\x61mpen_penalty\x18> \x01(\r\x12\x15\n\rhalflife_time\x18? 
\x01(\r\x12\x18\n\x10suppress_penalty\x18@ \x01(\r\x12\x13\n\x0breuse_value\x18\x41 \x01(\r\x12\x1d\n\x15maximum_suppress_time\x18\x42 \x01(\r\x12\x1c\n\x14\x62\x65st_path_comp_stage\x18\x43 \x01(\t\x12\xa6\x01\n\x15\x62\x65st_path_comp_winner\x18\x44 \x01(\x0b\x32\x86\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype\x12 \n\x18\x62\x65st_path_id_comp_winner\x18\x45 \x01(\r\x12\x12\n\npath_flags\x18\x46 \x01(\x04\x12\x19\n\x11path_import_flags\x18G \x01(\r\x12\x14\n\x0c\x62\x65st_path_id\x18H \x01(\r\x12\x15\n\rlocal_path_id\x18I \x01(\r\x12\x14\n\x0crcvd_path_id\x18J \x01(\r\x12\x1a\n\x12path_table_version\x18K \x01(\r\x12\xaf\x01\n\x19local_peers_advertised_to\x18L \x03(\x0b\x32\x8b\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_advinfo_type_\x12\xac\x01\n\x16pe_peers_advertised_to\x18M \x03(\x0b\x32\x8b\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_advinfo_type_\x12\xad\x01\n\x17\x62\x65st_path_orr_bitfields\x18N \x03(\x0b\x32\x8b\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_advinfo_type_\x12\xac\x01\n\x16\x61\x64\x64_path_orr_bitfields\x18O \x03(\x0b\x32\x8b\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_advinfo_type_\x12\x17\n\x0f\x61\x64vertisedto_pe\x18P \x01(\x08\x12\x12\n\nrib_failed\x18Q \x01(\x08\x12\"\n\x1asn_rpki_origin_as_validity\x18R \x01(\r\x12$\n\x1cshow_rpki_origin_as_validity\x18S \x01(\x08\x12\x1e\n\x16ibgp_signaled_validity\x18T \x01(\x08\x12*\n\"rpki_origin_as_validation_disabled\x18U \x01(\x08\x12\x17\n\x0f\x61\x63\x63\x65pt_own_path\x18V \x01(\x08\x12(\n accept_own_self_originated_p_ath\x18W \x01(\x08\x12\x13\n\x0b\x61igp_metric\x18X \x01(\x04\x12\x15\n\rmvpn_sfs_path\x18Y \x01(\x08\x12\x1a\n\x12\x66spec_invalid_path\x18Z \x01(\x08\x12\x19\n\x11has_mvpn_nbr_addr\x18[ \x01(\x08\x12\x9e\x01\n\rmvpn_nbr_addr\x18\\ \x01(\x0b\x32\x86\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype\x12\x1d\n\x15has_mvpn_nexthop_addr\x18] \x01(\x08\x12\xa2\x01\n\x11mvpn_nexthop_addr\x18^ \x01(\x0b\x32\x86\x01.cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype\x12\x15\n\rhas_mvpn_pmsi\x18_ \x01(\x08\x12\x16\n\x0emvpn_pmsi_type\x18` \x01(\r\x12\x17\n\x0fmvpn_pmsi_flags\x18\x61 \x01(\r\x12\x17\n\x0fmvpn_pmsi_label\x18\x62 \x01(\r\x12\x17\n\x0fmvpn_pmsi_value\x18\x63 \x03(\r\x12\x18\n\x10has_mvpn_extcomm\x18\x64 \x01(\x08\x12\x1a\n\x12\x65xtended_community\x18\x65 \x03(\r\x12\x17\n\x0fmvpn_path_flags\x18\x66 \x01(\r\x12\x10\n\x08local_nh\x18g \x01(\x08\x12\x1c\n\x14rt_set_limit_enabled\x18h \x01(\x08\x12\x16\n\x0epath_rt_set_id\x18i \x01(\r\x12\x1f\n\x17path_rt_set_route_count\x18j \x01(\r\x12#\n\x1bis_path_af_install_eligible\x18k \x01(\x08\x12\x19\n\x11is_permanent_path\x18l \x01(\x08\x12\x19\n\x11graceful_shutdown\x18m \x01(\x08\x12!\n\x19labeled_unicast_safi_path\x18n \x01(\x08\x62\x06proto3')
)
_BGP_PATH_BAG_KEYS = _descriptor.Descriptor(
name='bgp_path_bag_KEYS',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_KEYS',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instance_name', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_KEYS.instance_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='af_name', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_KEYS.af_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rd', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_KEYS.rd', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='network', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_KEYS.network', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='prefix_length', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_KEYS.prefix_length', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='neighbor_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_KEYS.neighbor_address', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=267,
serialized_end=404,
)
_BGP_PATH_BAG = _descriptor.Descriptor(
name='bgp_path_bag',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='no_path', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.no_path', index=0,
number=50, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='af_name', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.af_name', index=1,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='neighbor_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.neighbor_address', index=2,
number=52, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='process_instance_id', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.process_instance_id', index=3,
number=53, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label_oor', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.label_oor', index=4,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label_o_or_version', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.label_o_or_version', index=5,
number=55, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label_fail', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.label_fail', index=6,
number=56, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path_information', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.path_information', index=7,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attributes_after_policy_in', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.attributes_after_policy_in', index=8,
number=58, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='route_distinguisher', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.route_distinguisher', index=9,
number=59, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source_route_distinguisher', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.source_route_distinguisher', index=10,
number=60, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='prefix_version', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.prefix_version', index=11,
number=61, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='vrf_name', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.vrf_name', index=12,
number=62, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source_vrf_name', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.source_vrf_name', index=13,
number=63, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='srcaf_name', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag.srcaf_name', index=14,
number=64, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=407,
serialized_end=1199,
)
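# Illustrative decode sketch (the payload bytes are an assumption; the concrete
# bgp_path_bag message class is generated from this descriptor later in the
# module):
#   bag = bgp_path_bag()
#   bag.ParseFromString(payload)
#   print(bag.vrf_name, bag.prefix_version)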
_IPV4TUNNELADDRESSTYPE = _descriptor.Descriptor(
name='IPV4TunnelAddressType',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV4TunnelAddressType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV4TunnelAddressType.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1201,
serialized_end=1239,
)
_IPV4MDTADDRESSTYPE = _descriptor.Descriptor(
name='IPV4MDTAddressType',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV4MDTAddressType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV4MDTAddressType.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1241,
serialized_end=1276,
)
_RTCONSTRAINTADDRESSTYPE = _descriptor.Descriptor(
name='RTConstraintAddressType',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.RTConstraintAddressType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.RTConstraintAddressType.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1278,
serialized_end=1318,
)
_IPV6ADDRESSTYPE = _descriptor.Descriptor(
name='IPV6AddressType',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6AddressType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6AddressType.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1320,
serialized_end=1352,
)
_MACADDRESSTYPE = _descriptor.Descriptor(
name='MACAddressType',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.MACAddressType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.MACAddressType.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1354,
serialized_end=1385,
)
_BGP_ATTR_RNH_ADDR_T = _descriptor.Descriptor(
name='bgp_attr_rnh_addr_t',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_rnh_addr_t',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='len', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_rnh_addr_t.len', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv4_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_rnh_addr_t.ipv4_address', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv6_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_rnh_addr_t.ipv6_address', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mac_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_rnh_addr_t.mac_address', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1388,
serialized_end=1768,
)
_BGP_EVPN_GW_ADDR_T = _descriptor.Descriptor(
name='bgp_evpn_gw_addr_t',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_evpn_gw_addr_t',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='len', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_evpn_gw_addr_t.len', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv4_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_evpn_gw_addr_t.ipv4_address', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv6_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_evpn_gw_addr_t.ipv6_address', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1771,
serialized_end=1989,
)
_BGP_L2VPN_ADDR_T = _descriptor.Descriptor(
name='bgp_l2vpn_addr_t',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_l2vpn_addr_t',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='l2vpn_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_l2vpn_addr_t.l2vpn_address', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1991,
serialized_end=2032,
)
_BGP_L2VPN_EVPN_ESI_T = _descriptor.Descriptor(
name='bgp_l2vpn_evpn_esi_t',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_l2vpn_evpn_esi_t',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='esi', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_l2vpn_evpn_esi_t.esi', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2034,
serialized_end=2069,
)
_L2VPNEVPNADDRESSTYPE = _descriptor.Descriptor(
name='L2VPNEVPNAddressType',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.L2VPNEVPNAddressType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.L2VPNEVPNAddressType.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2071,
serialized_end=2108,
)
_BGP_L2VPN_MSPW_ADDR_T = _descriptor.Descriptor(
name='bgp_l2vpn_mspw_addr_t',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_l2vpn_mspw_addr_t',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='l2vpn_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_l2vpn_mspw_addr_t.l2vpn_address', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2110,
serialized_end=2156,
)
_IPV6MVPNADDRESSTYPE = _descriptor.Descriptor(
name='IPV6MVPNAddressType',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6MVPNAddressType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6MVPNAddressType.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2158,
serialized_end=2194,
)
_IPV4MVPNADDRESSTYPE = _descriptor.Descriptor(
name='IPV4MVPNAddressType',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV4MVPNAddressType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV4MVPNAddressType.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2196,
serialized_end=2232,
)
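# LS_LSAddressType: wrapper with one string field 'value' for the BGP
# link-state address.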
_LS_LSADDRESSTYPE = _descriptor.Descriptor(
name='LS_LSAddressType',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.LS_LSAddressType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.LS_LSAddressType.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2234,
serialized_end=2267,
)
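# IPv4FlowspecAddressType: wrapper with one string field 'value' for the IPv4
# flowspec address.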
_IPV4FLOWSPECADDRESSTYPE = _descriptor.Descriptor(
name='IPv4FlowspecAddressType',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPv4FlowspecAddressType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPv4FlowspecAddressType.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2269,
serialized_end=2309,
)
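# IPv6FlowspecAddressType: wrapper with one string field 'value' for the IPv6
# flowspec address.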
_IPV6FLOWSPECADDRESSTYPE = _descriptor.Descriptor(
name='IPv6FlowspecAddressType',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPv6FlowspecAddressType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPv6FlowspecAddressType.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2311,
serialized_end=2351,
)
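# bgp_addrtype: per-AFI address container. Field 1 ('afi') names the address
# family; fields 2-24 hold the corresponding address encoding for each
# supported family (IPv4 unicast/multicast/labeled/tunnel/MDT/VPN, the IPv6
# variants, L2VPN VPLS/EVPN/MSPW, RT-constraint, MVPN, link-state and
# flowspec), either as plain strings or as the wrapper messages defined above.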
_BGP_ADDRTYPE = _descriptor.Descriptor(
name='bgp_addrtype',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='afi', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.afi', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv4_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv4_address', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv4_mcast_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv4_mcast_address', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv4_label_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv4_label_address', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv4_tunnel_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv4_tunnel_address', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv4_mdt_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv4_mdt_address', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv4_vpn_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv4_vpn_address', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv4_vpna_mcastddress', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv4_vpna_mcastddress', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv6_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv6_address', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv6_mcast_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv6_mcast_address', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv6_label_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv6_label_address', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv6_vpn_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv6_vpn_address', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv6_vpn_mcast_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv6_vpn_mcast_address', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='l2_vpnvpls_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.l2_vpnvpls_address', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rt_constraint_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.rt_constraint_address', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv6_mvpn_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv6_mvpn_address', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv4_mvpn_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv4_mvpn_address', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='l2_vpn_evpn_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.l2_vpn_evpn_address', index=17,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ls_ls_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ls_ls_address', index=18,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='l2_vpn_mspw_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.l2_vpn_mspw_address', index=19,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv4_flowspec_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv4_flowspec_address', index=20,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv6_flowspec_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv6_flowspec_address', index=21,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv4_vpn_flowspec_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv4_vpn_flowspec_address', index=22,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ipv6_vpn_flowspec_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype.ipv6_vpn_flowspec_address', index=23,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2354,
serialized_end=5641,
)
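# bgp_prefixtype: a route prefix, expressed as a 'prefix' address message
# plus a 'prefix_length' uint32.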
_BGP_PREFIXTYPE = _descriptor.Descriptor(
name='bgp_prefixtype',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_prefixtype',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='prefix', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_prefixtype.prefix', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='prefix_length', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_prefixtype.prefix_length', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5644,
serialized_end=5837,
)
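# bgp_te_tunnel: traffic-engineering tunnel state for a path: tunnel name,
# status/registration/v6-capability booleans, binding label, tunnel interface
# handle and the 'last_tunnel_update' value.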
_BGP_TE_TUNNEL = _descriptor.Descriptor(
name='bgp_te_tunnel',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tunnel_name', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel.tunnel_name', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='has_te_tunnel', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel.has_te_tunnel', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_tunnel_up', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel.is_tunnel_up', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_tunnel_info_stale', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel.is_tunnel_info_stale', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_tunnel_registered', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel.is_tunnel_registered', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tunnel_v6_required', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel.tunnel_v6_required', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tunnel_v6_enabled', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel.tunnel_v6_enabled', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='binding_label', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel.binding_label', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tunnel_if_handle', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel.tunnel_if_handle', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_tunnel_update', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel.last_tunnel_update', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5840,
serialized_end=6113,
)
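# bgp_pedistlbl_entry_: one PE distinguisher label entry, pairing a PE
# address string with its uint32 label.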
_BGP_PEDISTLBL_ENTRY_ = _descriptor.Descriptor(
name='bgp_pedistlbl_entry_',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_pedistlbl_entry_',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pe_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_pedistlbl_entry_.pe_address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_pedistlbl_entry_.label', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6115,
serialized_end=6172,
)
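# bgp_common_attr_entry_: the common BGP path attributes. The 'is_*_present'
# booleans flag which attributes were received; the remaining fields carry
# the values themselves (metric, local preference, origin, AS path,
# communities, extended communities, cluster list, originator, L2TPv3
# session/cookie, connector, AIGP, PMSI, PE distinguisher labels, link-state
# attribute and label index).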
_BGP_COMMON_ATTR_ENTRY_ = _descriptor.Descriptor(
name='bgp_common_attr_entry_',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='is_metric_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_metric_present', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_local_pref_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_local_pref_present', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_atomic_aggregate_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_atomic_aggregate_present', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_aggregator_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_aggregator_present', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_origin_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_origin_present', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_as_path_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_as_path_present', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_community_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_community_present', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_extended_community_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_extended_community_present', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_ssa_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_ssa_present', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_connector_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_connector_present', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_pmsi_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_pmsi_present', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_pppmp_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_pppmp_present', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_aigp_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_aigp_present', index=12,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_pe_distinguisher_label_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_pe_distinguisher_label_present', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_ls_attribute_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_ls_attribute_present', index=14,
number=15, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_label_index_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.is_label_index_present', index=15,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='neighbor_as', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.neighbor_as', index=16,
number=17, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aggregator_as', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.aggregator_as', index=17,
number=18, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aggregator_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.aggregator_address', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='metric', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.metric', index=19,
number=20, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='local_preference', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.local_preference', index=20,
number=21, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='origin', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.origin', index=21,
number=22, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='as_path', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.as_path', index=22,
number=23, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='community', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.community', index=23,
number=24, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extended_community_flags', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.extended_community_flags', index=24,
number=25, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extended_community', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.extended_community', index=25,
number=26, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='unknown_attributes', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.unknown_attributes', index=26,
number=27, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cluster_list', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.cluster_list', index=27,
number=28, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='originator', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.originator', index=28,
number=29, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='l2_t_pv3_session_id', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.l2_t_pv3_session_id', index=29,
number=30, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='l2_t_pv3_cookie', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.l2_t_pv3_cookie', index=30,
number=31, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='connector_type', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.connector_type', index=31,
number=32, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='connector_value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.connector_value', index=32,
number=33, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aigp_metric_value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.aigp_metric_value', index=33,
number=34, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pmsi_type', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.pmsi_type', index=34,
number=35, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pmsi_flags', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.pmsi_flags', index=35,
number=36, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pmsi_label', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.pmsi_label', index=36,
number=37, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pmsi_value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.pmsi_value', index=37,
number=38, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ppm_pvalue', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.ppm_pvalue', index=38,
number=39, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pe_distinguisher_label', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.pe_distinguisher_label', index=39,
number=40, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ls_attr', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.ls_attr', index=40,
number=41, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label_index_attr', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_.label_index_attr', index=41,
number=42, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6175,
serialized_end=7435,
)
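# bgp_attr_: a full attribute-set record: format flags, attribute-reuse-ID
# bookkeeping, VPN distinguisher, the embedded 'common_attributes' and
# 'attr_set' messages, and RNH / RIB-RNH next-hop details.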
_BGP_ATTR_ = _descriptor.Descriptor(
name='bgp_attr_',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='is_as_path2_byte', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.is_as_path2_byte', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_application_gateway_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.is_application_gateway_present', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_attr_set_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.is_attr_set_present', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='set_aigp_inbound_igp', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.set_aigp_inbound_igp', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='set_aigp_inbound_metric', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.set_aigp_inbound_metric', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_rnh_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.is_rnh_present', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_ribrnh_present', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.is_ribrnh_present', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attribute_key_number', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.attribute_key_number', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attribute_reuse_id_config', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.attribute_reuse_id_config', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attribute_reuse_id_max_id', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.attribute_reuse_id_max_id', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attribute_reuse_id_node', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.attribute_reuse_id_node', index=10,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attribute_reuse_id_current', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.attribute_reuse_id_current', index=11,
number=12, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attribute_reuse_id_keys', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.attribute_reuse_id_keys', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attribute_reuse_id_recover_sec', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.attribute_reuse_id_recover_sec', index=13,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='vpn_distinguisher', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.vpn_distinguisher', index=14,
number=15, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='common_attributes', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.common_attributes', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attr_set', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.attr_set', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rnh_type', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.rnh_type', index=17,
number=18, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rnh_len', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.rnh_len', index=18,
number=19, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rnh_addr_len', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.rnh_addr_len', index=19,
number=20, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rnh_addr', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.rnh_addr', index=20,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ribrnh_table', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.ribrnh_table', index=21,
number=22, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ribrnh_mac', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.ribrnh_mac', index=22,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ribrnh_ip', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.ribrnh_ip', index=23,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ribrnhip_table', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.ribrnhip_table', index=24,
number=25, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ribrnh_vni', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.ribrnh_vni', index=25,
number=26, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ribrnh_encap', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_.ribrnh_encap', index=26,
number=27, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7438,
serialized_end=8753,
)
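# bgp_advinfo_type_: advertisement target info: a neighbor address and an
# update-group number ('is_neighbor' presumably discriminates between the
# two).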
_BGP_ADVINFO_TYPE_ = _descriptor.Descriptor(
name='bgp_advinfo_type_',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_advinfo_type_',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='is_neighbor', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_advinfo_type_.is_neighbor', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='neighbor_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_advinfo_type_.neighbor_address', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='update_group_number', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_advinfo_type_.update_group_number', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8756,
serialized_end=8989,
)
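# bgp_path_: one advertised path: the prefix and advertising neighbor, the
# route type string, and a series of booleans describing path state (valid,
# damped, history-held, internal, best/AS-best/speaker-AS-best, partial-best,
# aggregation-suppressed, import-dampened/suspect, not-advertised flags).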
_BGP_PATH_ = _descriptor.Descriptor(
name='bgp_path_',
full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bgp_prefix', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.bgp_prefix', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='neighbor_address', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.neighbor_address', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='route_type', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.route_type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_valid', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_valid', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_damped', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_damped', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_history_held', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_history_held', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_internal_path', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_internal_path', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_best_path', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_best_path', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_as_best', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_as_best', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_spkr_as_best', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_spkr_as_best', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_partial_best', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_partial_best', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_aggregation_suppressed', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_aggregation_suppressed', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_import_dampened', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_import_dampened', index=12,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_import_suspect', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_import_suspect', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_not_advertised', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_not_advertised', index=14,
number=15, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_not_advertised_to_ebg_ps', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_not_advertised_to_ebg_ps', index=15,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_advertised_local_as_only', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_advertised_local_as_only', index=16,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_from_route_reflector', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_from_route_reflector', index=17,
number=18, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_received_only', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_received_only', index=18,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_received_path_not_modified', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_received_path_not_modified', index=19,
number=20, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_locally_sourced', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_locally_sourced', index=20,
number=21, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_local_aggregate', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_local_aggregate', index=21,
number=22, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_from_network_command', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_from_network_command', index=22,
number=23, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_from_redistribute_command', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_from_redistribute_command', index=23,
number=24, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_imported', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_imported', index=24,
number=25, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_reoriginated', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_reoriginated', index=25,
number=26, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_reoriginated_stitching', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_reoriginated_stitching', index=26,
number=27, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_vpn_only', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_vpn_only', index=27,
number=28, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_from_confederation_peer', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_from_confederation_peer', index=28,
number=29, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_synced_with_igp', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_synced_with_igp', index=29,
number=30, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_multipath', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_multipath', index=30,
number=31, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_imp_candidate', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_imp_candidate', index=31,
number=32, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_stale', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_stale', index=32,
number=33, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_long_lived_stale', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_long_lived_stale', index=33,
number=34, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_backup', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_backup', index=34,
number=35, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_backup_protect_multipath', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_backup_protect_multipath', index=35,
number=36, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_best_external', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_best_external', index=36,
number=37, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_additional_path', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_additional_path', index=37,
number=38, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_nexthop_discarded', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_nexthop_discarded', index=38,
number=39, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='next_hop', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.next_hop', index=39,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nhte_tunnel', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.nhte_tunnel', index=40,
number=41, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='has_rcvd_label', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.has_rcvd_label', index=41,
number=42, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rcvd_label', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.rcvd_label', index=42,
number=43, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='has_local_net_label', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.has_local_net_label', index=43,
number=44, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='local_label', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.local_label', index=44,
number=45, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='igp_metric', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.igp_metric', index=45,
number=46, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path_weight', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.path_weight', index=46,
number=47, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='neighbor_router_id', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.neighbor_router_id', index=47,
number=48, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='has_mdt_group_addr', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.has_mdt_group_addr', index=48,
number=49, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mdt_group_addr', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.mdt_group_addr', index=49,
number=50, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='l2_vpn_size', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.l2_vpn_size', index=50,
number=51, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='has_esi', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.has_esi', index=51,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='l2_vpn_evpn_esi', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.l2_vpn_evpn_esi', index=52,
number=53, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='has_gw_addr', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.has_gw_addr', index=53,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gw_addr', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.gw_addr', index=54,
number=55, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='has_second_label', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.has_second_label', index=55,
number=56, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_label', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.second_label', index=56,
number=57, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='l2vpn_circuit_status_value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.l2vpn_circuit_status_value', index=57,
number=58, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path_flap_count', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.path_flap_count', index=58,
number=59, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='seconds_since_first_flap', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.seconds_since_first_flap', index=59,
number=60, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='time_to_unsuppress', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.time_to_unsuppress', index=60,
number=61, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dampen_penalty', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.dampen_penalty', index=61,
number=62, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='halflife_time', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.halflife_time', index=62,
number=63, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='suppress_penalty', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.suppress_penalty', index=63,
number=64, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reuse_value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.reuse_value', index=64,
number=65, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='maximum_suppress_time', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.maximum_suppress_time', index=65,
number=66, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='best_path_comp_stage', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.best_path_comp_stage', index=66,
number=67, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='best_path_comp_winner', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.best_path_comp_winner', index=67,
number=68, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='best_path_id_comp_winner', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.best_path_id_comp_winner', index=68,
number=69, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path_flags', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.path_flags', index=69,
number=70, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path_import_flags', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.path_import_flags', index=70,
number=71, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='best_path_id', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.best_path_id', index=71,
number=72, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='local_path_id', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.local_path_id', index=72,
number=73, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rcvd_path_id', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.rcvd_path_id', index=73,
number=74, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path_table_version', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.path_table_version', index=74,
number=75, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='local_peers_advertised_to', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.local_peers_advertised_to', index=75,
number=76, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pe_peers_advertised_to', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.pe_peers_advertised_to', index=76,
number=77, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='best_path_orr_bitfields', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.best_path_orr_bitfields', index=77,
number=78, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='add_path_orr_bitfields', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.add_path_orr_bitfields', index=78,
number=79, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='advertisedto_pe', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.advertisedto_pe', index=79,
number=80, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rib_failed', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.rib_failed', index=80,
number=81, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sn_rpki_origin_as_validity', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.sn_rpki_origin_as_validity', index=81,
number=82, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='show_rpki_origin_as_validity', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.show_rpki_origin_as_validity', index=82,
number=83, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ibgp_signaled_validity', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.ibgp_signaled_validity', index=83,
number=84, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rpki_origin_as_validation_disabled', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.rpki_origin_as_validation_disabled', index=84,
number=85, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='accept_own_path', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.accept_own_path', index=85,
number=86, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='accept_own_self_originated_p_ath', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.accept_own_self_originated_p_ath', index=86,
number=87, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aigp_metric', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.aigp_metric', index=87,
number=88, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mvpn_sfs_path', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.mvpn_sfs_path', index=88,
number=89, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fspec_invalid_path', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.fspec_invalid_path', index=89,
number=90, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='has_mvpn_nbr_addr', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.has_mvpn_nbr_addr', index=90,
number=91, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mvpn_nbr_addr', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.mvpn_nbr_addr', index=91,
number=92, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='has_mvpn_nexthop_addr', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.has_mvpn_nexthop_addr', index=92,
number=93, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mvpn_nexthop_addr', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.mvpn_nexthop_addr', index=93,
number=94, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='has_mvpn_pmsi', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.has_mvpn_pmsi', index=94,
number=95, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mvpn_pmsi_type', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.mvpn_pmsi_type', index=95,
number=96, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mvpn_pmsi_flags', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.mvpn_pmsi_flags', index=96,
number=97, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mvpn_pmsi_label', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.mvpn_pmsi_label', index=97,
number=98, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mvpn_pmsi_value', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.mvpn_pmsi_value', index=98,
number=99, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='has_mvpn_extcomm', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.has_mvpn_extcomm', index=99,
number=100, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extended_community', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.extended_community', index=100,
number=101, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mvpn_path_flags', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.mvpn_path_flags', index=101,
number=102, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='local_nh', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.local_nh', index=102,
number=103, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rt_set_limit_enabled', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.rt_set_limit_enabled', index=103,
number=104, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path_rt_set_id', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.path_rt_set_id', index=104,
number=105, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path_rt_set_route_count', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.path_rt_set_route_count', index=105,
number=106, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_path_af_install_eligible', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_path_af_install_eligible', index=106,
number=107, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_permanent_path', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.is_permanent_path', index=107,
number=108, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='graceful_shutdown', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.graceful_shutdown', index=108,
number=109, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='labeled_unicast_safi_path', full_name='cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_.labeled_unicast_safi_path', index=109,
number=110, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8992,
serialized_end=14064,
)
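# Resolve cross-references: attach the message-type descriptors to the message
# fields declared above. protoc emits these assignments only after every
# descriptor in the file exists, so that messages that reference each other
# (e.g. bgp_path_ -> bgp_addrtype) can be linked without forward declarations.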
_BGP_PATH_BAG.fields_by_name['neighbor_address'].message_type = _BGP_ADDRTYPE
_BGP_PATH_BAG.fields_by_name['path_information'].message_type = _BGP_PATH_
_BGP_PATH_BAG.fields_by_name['attributes_after_policy_in'].message_type = _BGP_ATTR_
_BGP_ATTR_RNH_ADDR_T.fields_by_name['ipv6_address'].message_type = _IPV6ADDRESSTYPE
_BGP_ATTR_RNH_ADDR_T.fields_by_name['mac_address'].message_type = _MACADDRESSTYPE
_BGP_EVPN_GW_ADDR_T.fields_by_name['ipv6_address'].message_type = _IPV6ADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ipv4_tunnel_address'].message_type = _IPV4TUNNELADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ipv4_mdt_address'].message_type = _IPV4MDTADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ipv6_address'].message_type = _IPV6ADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ipv6_mcast_address'].message_type = _IPV6ADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ipv6_label_address'].message_type = _IPV6ADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ipv6_vpn_address'].message_type = _IPV6ADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ipv6_vpn_mcast_address'].message_type = _IPV6ADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['l2_vpnvpls_address'].message_type = _BGP_L2VPN_ADDR_T
_BGP_ADDRTYPE.fields_by_name['rt_constraint_address'].message_type = _RTCONSTRAINTADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ipv6_mvpn_address'].message_type = _IPV6MVPNADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ipv4_mvpn_address'].message_type = _IPV4MVPNADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['l2_vpn_evpn_address'].message_type = _L2VPNEVPNADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ls_ls_address'].message_type = _LS_LSADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['l2_vpn_mspw_address'].message_type = _BGP_L2VPN_MSPW_ADDR_T
_BGP_ADDRTYPE.fields_by_name['ipv4_flowspec_address'].message_type = _IPV4FLOWSPECADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ipv6_flowspec_address'].message_type = _IPV6FLOWSPECADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ipv4_vpn_flowspec_address'].message_type = _IPV4FLOWSPECADDRESSTYPE
_BGP_ADDRTYPE.fields_by_name['ipv6_vpn_flowspec_address'].message_type = _IPV6FLOWSPECADDRESSTYPE
_BGP_PREFIXTYPE.fields_by_name['prefix'].message_type = _BGP_ADDRTYPE
_BGP_COMMON_ATTR_ENTRY_.fields_by_name['pe_distinguisher_label'].message_type = _BGP_PEDISTLBL_ENTRY_
_BGP_ATTR_.fields_by_name['common_attributes'].message_type = _BGP_COMMON_ATTR_ENTRY_
_BGP_ATTR_.fields_by_name['attr_set'].message_type = _BGP_COMMON_ATTR_ENTRY_
_BGP_ATTR_.fields_by_name['rnh_addr'].message_type = _BGP_ATTR_RNH_ADDR_T
_BGP_ATTR_.fields_by_name['ribrnh_ip'].message_type = _BGP_ADDRTYPE
_BGP_ADVINFO_TYPE_.fields_by_name['neighbor_address'].message_type = _BGP_ADDRTYPE
_BGP_PATH_.fields_by_name['bgp_prefix'].message_type = _BGP_PREFIXTYPE
_BGP_PATH_.fields_by_name['neighbor_address'].message_type = _BGP_ADDRTYPE
_BGP_PATH_.fields_by_name['next_hop'].message_type = _BGP_ADDRTYPE
_BGP_PATH_.fields_by_name['nhte_tunnel'].message_type = _BGP_TE_TUNNEL
_BGP_PATH_.fields_by_name['mdt_group_addr'].message_type = _BGP_ADDRTYPE
_BGP_PATH_.fields_by_name['l2_vpn_evpn_esi'].message_type = _BGP_L2VPN_EVPN_ESI_T
_BGP_PATH_.fields_by_name['gw_addr'].message_type = _BGP_EVPN_GW_ADDR_T
_BGP_PATH_.fields_by_name['best_path_comp_winner'].message_type = _BGP_ADDRTYPE
_BGP_PATH_.fields_by_name['local_peers_advertised_to'].message_type = _BGP_ADVINFO_TYPE_
_BGP_PATH_.fields_by_name['pe_peers_advertised_to'].message_type = _BGP_ADVINFO_TYPE_
_BGP_PATH_.fields_by_name['best_path_orr_bitfields'].message_type = _BGP_ADVINFO_TYPE_
_BGP_PATH_.fields_by_name['add_path_orr_bitfields'].message_type = _BGP_ADVINFO_TYPE_
_BGP_PATH_.fields_by_name['mvpn_nbr_addr'].message_type = _BGP_ADDRTYPE
_BGP_PATH_.fields_by_name['mvpn_nexthop_addr'].message_type = _BGP_ADDRTYPE
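# Register each top-level message descriptor in the file descriptor by name.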
DESCRIPTOR.message_types_by_name['bgp_path_bag_KEYS'] = _BGP_PATH_BAG_KEYS
DESCRIPTOR.message_types_by_name['bgp_path_bag'] = _BGP_PATH_BAG
DESCRIPTOR.message_types_by_name['IPV4TunnelAddressType'] = _IPV4TUNNELADDRESSTYPE
DESCRIPTOR.message_types_by_name['IPV4MDTAddressType'] = _IPV4MDTADDRESSTYPE
DESCRIPTOR.message_types_by_name['RTConstraintAddressType'] = _RTCONSTRAINTADDRESSTYPE
DESCRIPTOR.message_types_by_name['IPV6AddressType'] = _IPV6ADDRESSTYPE
DESCRIPTOR.message_types_by_name['MACAddressType'] = _MACADDRESSTYPE
DESCRIPTOR.message_types_by_name['bgp_attr_rnh_addr_t'] = _BGP_ATTR_RNH_ADDR_T
DESCRIPTOR.message_types_by_name['bgp_evpn_gw_addr_t'] = _BGP_EVPN_GW_ADDR_T
DESCRIPTOR.message_types_by_name['bgp_l2vpn_addr_t'] = _BGP_L2VPN_ADDR_T
DESCRIPTOR.message_types_by_name['bgp_l2vpn_evpn_esi_t'] = _BGP_L2VPN_EVPN_ESI_T
DESCRIPTOR.message_types_by_name['L2VPNEVPNAddressType'] = _L2VPNEVPNADDRESSTYPE
DESCRIPTOR.message_types_by_name['bgp_l2vpn_mspw_addr_t'] = _BGP_L2VPN_MSPW_ADDR_T
DESCRIPTOR.message_types_by_name['IPV6MVPNAddressType'] = _IPV6MVPNADDRESSTYPE
DESCRIPTOR.message_types_by_name['IPV4MVPNAddressType'] = _IPV4MVPNADDRESSTYPE
DESCRIPTOR.message_types_by_name['LS_LSAddressType'] = _LS_LSADDRESSTYPE
DESCRIPTOR.message_types_by_name['IPv4FlowspecAddressType'] = _IPV4FLOWSPECADDRESSTYPE
DESCRIPTOR.message_types_by_name['IPv6FlowspecAddressType'] = _IPV6FLOWSPECADDRESSTYPE
DESCRIPTOR.message_types_by_name['bgp_addrtype'] = _BGP_ADDRTYPE
DESCRIPTOR.message_types_by_name['bgp_prefixtype'] = _BGP_PREFIXTYPE
DESCRIPTOR.message_types_by_name['bgp_te_tunnel'] = _BGP_TE_TUNNEL
DESCRIPTOR.message_types_by_name['bgp_pedistlbl_entry_'] = _BGP_PEDISTLBL_ENTRY_
DESCRIPTOR.message_types_by_name['bgp_common_attr_entry_'] = _BGP_COMMON_ATTR_ENTRY_
DESCRIPTOR.message_types_by_name['bgp_attr_'] = _BGP_ATTR_
DESCRIPTOR.message_types_by_name['bgp_advinfo_type_'] = _BGP_ADVINFO_TYPE_
DESCRIPTOR.message_types_by_name['bgp_path_'] = _BGP_PATH_
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
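# Generate the concrete Python message classes from the descriptors via the
# GeneratedProtocolMessageType metaclass, then register each class with the
# symbol database so it can be looked up by its fully qualified name.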
bgp_path_bag_KEYS = _reflection.GeneratedProtocolMessageType('bgp_path_bag_KEYS', (_message.Message,), dict(
DESCRIPTOR = _BGP_PATH_BAG_KEYS,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_KEYS)
))
_sym_db.RegisterMessage(bgp_path_bag_KEYS)
bgp_path_bag = _reflection.GeneratedProtocolMessageType('bgp_path_bag', (_message.Message,), dict(
DESCRIPTOR = _BGP_PATH_BAG,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag)
))
_sym_db.RegisterMessage(bgp_path_bag)
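# --- Illustrative only; not part of the protoc output. ---
# A minimal sketch of how the generated classes above can be used once the
# module is fully imported. The field values are assumptions chosen to match
# the descriptors (route_type: string field 3, is_best_path: bool field 8,
# path_weight: uint32 field 47); real values come from decoded telemetry.
def _example_round_trip():
    bag = bgp_path_bag()
    bag.path_information.route_type = 'external'  # assumed sample value
    bag.path_information.is_best_path = True      # assumed sample value
    bag.path_information.path_weight = 100        # assumed sample value
    wire = bag.SerializeToString()                # encode to protobuf wire format
    parsed = bgp_path_bag()
    parsed.ParseFromString(wire)                  # decode back into a new message
    return parsed.path_information.path_weight   # -> 100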
IPV4TunnelAddressType = _reflection.GeneratedProtocolMessageType('IPV4TunnelAddressType', (_message.Message,), dict(
DESCRIPTOR = _IPV4TUNNELADDRESSTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV4TunnelAddressType)
))
_sym_db.RegisterMessage(IPV4TunnelAddressType)
IPV4MDTAddressType = _reflection.GeneratedProtocolMessageType('IPV4MDTAddressType', (_message.Message,), dict(
DESCRIPTOR = _IPV4MDTADDRESSTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV4MDTAddressType)
))
_sym_db.RegisterMessage(IPV4MDTAddressType)
RTConstraintAddressType = _reflection.GeneratedProtocolMessageType('RTConstraintAddressType', (_message.Message,), dict(
DESCRIPTOR = _RTCONSTRAINTADDRESSTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.RTConstraintAddressType)
))
_sym_db.RegisterMessage(RTConstraintAddressType)
IPV6AddressType = _reflection.GeneratedProtocolMessageType('IPV6AddressType', (_message.Message,), dict(
DESCRIPTOR = _IPV6ADDRESSTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6AddressType)
))
_sym_db.RegisterMessage(IPV6AddressType)
MACAddressType = _reflection.GeneratedProtocolMessageType('MACAddressType', (_message.Message,), dict(
DESCRIPTOR = _MACADDRESSTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.MACAddressType)
))
_sym_db.RegisterMessage(MACAddressType)
bgp_attr_rnh_addr_t = _reflection.GeneratedProtocolMessageType('bgp_attr_rnh_addr_t', (_message.Message,), dict(
DESCRIPTOR = _BGP_ATTR_RNH_ADDR_T,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_rnh_addr_t)
))
_sym_db.RegisterMessage(bgp_attr_rnh_addr_t)
bgp_evpn_gw_addr_t = _reflection.GeneratedProtocolMessageType('bgp_evpn_gw_addr_t', (_message.Message,), dict(
DESCRIPTOR = _BGP_EVPN_GW_ADDR_T,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_evpn_gw_addr_t)
))
_sym_db.RegisterMessage(bgp_evpn_gw_addr_t)
bgp_l2vpn_addr_t = _reflection.GeneratedProtocolMessageType('bgp_l2vpn_addr_t', (_message.Message,), dict(
DESCRIPTOR = _BGP_L2VPN_ADDR_T,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_l2vpn_addr_t)
))
_sym_db.RegisterMessage(bgp_l2vpn_addr_t)
bgp_l2vpn_evpn_esi_t = _reflection.GeneratedProtocolMessageType('bgp_l2vpn_evpn_esi_t', (_message.Message,), dict(
DESCRIPTOR = _BGP_L2VPN_EVPN_ESI_T,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_l2vpn_evpn_esi_t)
))
_sym_db.RegisterMessage(bgp_l2vpn_evpn_esi_t)
L2VPNEVPNAddressType = _reflection.GeneratedProtocolMessageType('L2VPNEVPNAddressType', (_message.Message,), dict(
DESCRIPTOR = _L2VPNEVPNADDRESSTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.L2VPNEVPNAddressType)
))
_sym_db.RegisterMessage(L2VPNEVPNAddressType)
bgp_l2vpn_mspw_addr_t = _reflection.GeneratedProtocolMessageType('bgp_l2vpn_mspw_addr_t', (_message.Message,), dict(
DESCRIPTOR = _BGP_L2VPN_MSPW_ADDR_T,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_l2vpn_mspw_addr_t)
))
_sym_db.RegisterMessage(bgp_l2vpn_mspw_addr_t)
IPV6MVPNAddressType = _reflection.GeneratedProtocolMessageType('IPV6MVPNAddressType', (_message.Message,), dict(
DESCRIPTOR = _IPV6MVPNADDRESSTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV6MVPNAddressType)
))
_sym_db.RegisterMessage(IPV6MVPNAddressType)
IPV4MVPNAddressType = _reflection.GeneratedProtocolMessageType('IPV4MVPNAddressType', (_message.Message,), dict(
DESCRIPTOR = _IPV4MVPNADDRESSTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPV4MVPNAddressType)
))
_sym_db.RegisterMessage(IPV4MVPNAddressType)
LS_LSAddressType = _reflection.GeneratedProtocolMessageType('LS_LSAddressType', (_message.Message,), dict(
DESCRIPTOR = _LS_LSADDRESSTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.LS_LSAddressType)
))
_sym_db.RegisterMessage(LS_LSAddressType)
IPv4FlowspecAddressType = _reflection.GeneratedProtocolMessageType('IPv4FlowspecAddressType', (_message.Message,), dict(
DESCRIPTOR = _IPV4FLOWSPECADDRESSTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPv4FlowspecAddressType)
))
_sym_db.RegisterMessage(IPv4FlowspecAddressType)
IPv6FlowspecAddressType = _reflection.GeneratedProtocolMessageType('IPv6FlowspecAddressType', (_message.Message,), dict(
DESCRIPTOR = _IPV6FLOWSPECADDRESSTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.IPv6FlowspecAddressType)
))
_sym_db.RegisterMessage(IPv6FlowspecAddressType)
bgp_addrtype = _reflection.GeneratedProtocolMessageType('bgp_addrtype', (_message.Message,), dict(
DESCRIPTOR = _BGP_ADDRTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_addrtype)
))
_sym_db.RegisterMessage(bgp_addrtype)
bgp_prefixtype = _reflection.GeneratedProtocolMessageType('bgp_prefixtype', (_message.Message,), dict(
DESCRIPTOR = _BGP_PREFIXTYPE,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_prefixtype)
))
_sym_db.RegisterMessage(bgp_prefixtype)
bgp_te_tunnel = _reflection.GeneratedProtocolMessageType('bgp_te_tunnel', (_message.Message,), dict(
DESCRIPTOR = _BGP_TE_TUNNEL,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_te_tunnel)
))
_sym_db.RegisterMessage(bgp_te_tunnel)
bgp_pedistlbl_entry_ = _reflection.GeneratedProtocolMessageType('bgp_pedistlbl_entry_', (_message.Message,), dict(
DESCRIPTOR = _BGP_PEDISTLBL_ENTRY_,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_pedistlbl_entry_)
))
_sym_db.RegisterMessage(bgp_pedistlbl_entry_)
bgp_common_attr_entry_ = _reflection.GeneratedProtocolMessageType('bgp_common_attr_entry_', (_message.Message,), dict(
DESCRIPTOR = _BGP_COMMON_ATTR_ENTRY_,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_common_attr_entry_)
))
_sym_db.RegisterMessage(bgp_common_attr_entry_)
bgp_attr_ = _reflection.GeneratedProtocolMessageType('bgp_attr_', (_message.Message,), dict(
DESCRIPTOR = _BGP_ATTR_,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_attr_)
))
_sym_db.RegisterMessage(bgp_attr_)
bgp_advinfo_type_ = _reflection.GeneratedProtocolMessageType('bgp_advinfo_type_', (_message.Message,), dict(
DESCRIPTOR = _BGP_ADVINFO_TYPE_,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_advinfo_type_)
))
_sym_db.RegisterMessage(bgp_advinfo_type_)
bgp_path_ = _reflection.GeneratedProtocolMessageType('bgp_path_', (_message.Message,), dict(
DESCRIPTOR = _BGP_PATH_,
__module__ = 'cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_bag_pb2'
# @@protoc_insertion_point(class_scope:cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_standby.default_vrf.afs.af.advertised_path_xr.advertised_path.bgp_path_)
))
_sym_db.RegisterMessage(bgp_path_)
# @@protoc_insertion_point(module_scope)
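
# Usage sketch: the classes registered above behave like ordinary protobuf
# messages. Guarded so importing this generated module stays side-effect
# free; field names live in the .proto and are not assumed here.
if __name__ == '__main__':
    msg = bgp_addrtype()
    wire = msg.SerializePartialToString()  # partial: tolerates unset fields
    parsed = bgp_addrtype()
    parsed.MergeFromString(wire)
    assert parsed == msg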
| [
"[email protected]"
]
| |
621c24156116cefc947d5504a6bd59729de62186 | 5d74051293a4740c597abb016870a56a58cecf5b | /modules/persons/application/controllers/v1/delete_user_api.py | 76a6e1ba2a6489b962b624f13f7d278ed3e95ec2 | [
"BSD-3-Clause"
]
| permissive | eduardolujan/hexagonal_architecture_django | 98e707148745f5a36f166c0584cfba21cca473f0 | 8055927cb460bc40f3a2651c01a9d1da696177e8 | refs/heads/develop | 2023-02-21T22:46:20.614779 | 2021-01-16T02:48:37 | 2021-01-16T02:48:37 | 305,813,872 | 5 | 2 | BSD-3-Clause | 2021-01-16T18:00:26 | 2020-10-20T19:32:46 | Python | UTF-8 | Python | false | false | 2,043 | py | # -*- coding: utf-8 -*-
from modules.shared.infrastructure.log import LoggerDecorator, PyLoggerService
from modules.shared.domain.http import status as http_status
from modules.shared.domain.requests import Request
from modules.shared.domain.responses import Response
from modules.shared.domain.serializers import SerializerManager
from modules.users.domain.repository import UserRepository
from modules.users.application.delete import UserDeleter as DeleteUserService
@LoggerDecorator(logger=PyLoggerService(file_path=__file__))
class DeleteUserApi:
"""
Delete User API
"""
def __init__(self,
request: Request,
response: Response,
request_serializer_manager: SerializerManager,
user_repository: UserRepository):
# Http objects
self.__request = request
self.__response = response
self.__request_serializer_manager = request_serializer_manager
# Delete user
self.__user_repository = user_repository
def __call__(self, id: int) -> None:
"""
Delete user by id
@param id: user id
@type id: int
"""
try:
delete_user_data = dict(id=id)
delete_user_dto = self.__request_serializer_manager.get_dto_from_dict(delete_user_data)
delete_user = DeleteUserService(self.__user_repository)
delete_user(**delete_user_dto)
response_data = dict(
success=True,
message='All ok',
)
return self.__response(response_data, status=http_status.HTTP_200_OK)
except Exception as err:
self.log.exception(f"Error in {__class__}::post, err:{err}")
response_data = dict(
success=False,
message=f"{err}"
)
if hasattr(err, 'errors'):
response_data.update(errors=err.errors)
return self.__response(response_data, status=http_status.HTTP_400_BAD_REQUEST)
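
# Hypothetical wiring sketch (every name below is an assumption, not part of
# this module; the concrete adapters come from the composition root):
#
#   api = DeleteUserApi(
#       request=django_request_adapter,
#       response=json_response_adapter,
#       request_serializer_manager=delete_user_serializer_manager,
#       user_repository=django_user_repository,
#   )
#   api(id=42)  # deletes user 42 and returns an HTTP response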
| [
"[email protected]"
]
| |
d5b778e30438fb5003e9ab8f5be37d0e342c02cc | 380712a4d3436b5997cebdaf2d6bdd5227ffef99 | /06_using_classess.py | d142dc0ceb8bdaa3387069624f66825710ea5553 | []
| no_license | rifqirosyidi/tkinter-basic | 76b80095a0063a5e184fa12a1fb9193f3ea91fb6 | 75d9ae83af4b555335b95ac177bdd361529550ed | refs/heads/master | 2020-08-30T08:52:43.162243 | 2019-11-01T13:32:50 | 2019-11-01T13:32:50 | 218,325,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | from tkinter import *


class SomeClass:
    def __init__(self, master):
        self.frame = Frame(master)
        self.frame.pack()

        self.print_button = Button(self.frame, text="Print Me", command=self.print_message)
        self.print_button.pack(side=LEFT)

        self.close_button = Button(self.frame, text="Close Me", command=self.frame.quit)
        self.close_button.pack(side=LEFT)

    def print_message(self):
        print("Hi, you clicked Print Me")


root = Tk()
b = SomeClass(root)
root.mainloop() | [
"[email protected]"
]
| |
ed7e12c276248207dcadefe405fbe058b20652dd | 258e47d8e55db0fb12437aa1e7f9860a8bef6623 | /agilex/configuracion_agilex/doctype/tipo_de_documento/tipo_de_documento_dashboard.py | 6f3e46469731e8b52ecba7c28fb32c310398f215 | [
"MIT"
]
| permissive | Nirchains/agilex | 003894bed211c71004f37beb22fd96fc1df6576f | 04470873abdea5d0023a1ccadf02a932fb3e834b | refs/heads/master | 2021-06-12T11:23:48.027599 | 2021-05-28T21:48:00 | 2021-05-28T21:48:00 | 166,990,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | from frappe import _
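
# Frappe imports this module and calls get_data() to build the
# linked-documents dashboard shown on the Tipo de Documento form.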
def get_data():
	return {
		'heatmap': False,
		'heatmap_message': _('This is based on the Time Sheets created against this project'),
		'fieldname': 'tipo_de_documento',
		'transactions': [
			{
				'label': _('Ver expedientes'),
				'items': ['Expediente']
			}
		]
} | [
"[email protected]"
]
| |
51fbd3042c1ab812d5c5f8d4532d7226469186bd | d0d845cc5c77ec62cb5f5268527efadc5ff68e12 | /tests/linsys_test.py | 01b8acbc61b5dff2c53bf5ee4ce03f50f6297486 | [
"MIT"
]
| permissive | madhavajay/ud953 | 2134a267ccf15ff95d717b9d76633bfd83ea5e40 | 6c101ae15adefa98ad4950275b52ef03419a0f40 | refs/heads/master | 2021-01-21T04:44:51.628018 | 2016-06-18T08:58:20 | 2016-06-18T08:58:20 | 50,235,584 | 2 | 6 | null | 2016-06-18T09:02:22 | 2016-01-23T11:29:14 | Python | UTF-8 | Python | false | false | 6,192 | py | # -*- coding: utf-8 -*-
# Author: github.com/madhavajay
"""This is a test for the Linear System Class"""
from decimal import Decimal, getcontext
from vector import Vector
from line import Line
from plane import Plane
from linsys import LinearSystem
# set the decimal precision
getcontext().prec = 30
def test_linsys_basepoint():
    """Test LinearSystem assignment and the Line base point"""
    plane_1 = Plane(Vector([1, 1, 1]), 1)
    plane_2 = Plane(Vector([0, 1, 0]), 2)
    plane_3 = Plane(Vector([1, 1, -1]), 3)
    plane_4 = Plane(Vector([1, 0, -2]), 2)

    system = LinearSystem([plane_1, plane_2, plane_3, plane_4])
    # exercise item assignment on the system
    system[0] = plane_1

    # the base point of the line x + 2y = 2 is (2, 0)
    vector1 = Vector([1, 2])
    constant = 2
    answer = Vector([2, 0])

    line = Line(vector1, constant)
    basepoint = line.basepoint
    assert basepoint == answer


def test_linsys_swap_row():
    """Test Linear System Swap Row"""
    plane_1 = Plane(Vector([1, 1, 1]), 1)
    plane_2 = Plane(Vector([0, 1, 0]), 2)
    plane_3 = Plane(Vector([1, 1, -1]), 3)
    plane_4 = Plane(Vector([1, 0, -2]), 2)

    lin_sys = LinearSystem([plane_1, plane_2, plane_3, plane_4])

    lin_sys.swap_rows(0, 1)
    assert lin_sys[0] == plane_2  # swapped
    assert lin_sys[1] == plane_1  # swapped
    assert lin_sys[2] == plane_3
    assert lin_sys[3] == plane_4

    lin_sys.swap_rows(1, 3)
    assert lin_sys[0] == plane_2
    assert lin_sys[1] == plane_4  # swapped
    assert lin_sys[2] == plane_3
    assert lin_sys[3] == plane_1  # swapped

    lin_sys.swap_rows(3, 1)
    assert lin_sys[0] == plane_2
    assert lin_sys[1] == plane_1  # swapped
    assert lin_sys[2] == plane_3
    assert lin_sys[3] == plane_4  # swapped


def test_linsys_multiply_row():
    """Test Linear System Multiply Coefficient and Row"""
    plane_1 = Plane(Vector([1, 1, 1]), 1)
    plane_2 = Plane(Vector([0, 1, 0]), 2)
    plane_3 = Plane(Vector([1, 1, -1]), 3)
    plane_4 = Plane(Vector([1, 0, -2]), 2)

    # same as the end of the last test
    lin_sys = LinearSystem([plane_2, plane_1, plane_3, plane_4])

    lin_sys.multiply_coefficient_and_row(1, 0)
    assert lin_sys[0] == plane_2
    assert lin_sys[1] == plane_1
    assert lin_sys[2] == plane_3
    assert lin_sys[3] == plane_4

    lin_sys.multiply_coefficient_and_row(-1, 2)
    new_plane_3 = Plane(Vector([-1, -1, 1]), -3)
    assert lin_sys[0] == plane_2
    assert lin_sys[1] == plane_1
    assert lin_sys[2] == new_plane_3
    assert lin_sys[3] == plane_4

    lin_sys.multiply_coefficient_and_row(10, 1)
    new_plane_1 = Plane(Vector([10, 10, 10]), 10)
    assert lin_sys[0] == plane_2
    assert lin_sys[1] == new_plane_1
    assert lin_sys[2] == new_plane_3
    assert lin_sys[3] == plane_4


def test_linsys_multiply_row_add():
    """Test Linear System Multiply Times Row and add to Row"""
    plane_2 = Plane(Vector([0, 1, 0]), 2)
    new_plane_1 = Plane(Vector([10, 10, 10]), 10)
    new_plane_3 = Plane(Vector([-1, -1, 1]), -3)
    plane_4 = Plane(Vector([1, 0, -2]), 2)

    # same as the end of the last test
    lin_sys = LinearSystem([plane_2, new_plane_1, new_plane_3, plane_4])

    # multiply the first row by 0 and add to the second row;
    # this should have no effect
    lin_sys.add_multiple_times_row_to_row(0, 0, 1)
    assert lin_sys[0] == plane_2
    assert lin_sys[1] == new_plane_1
    assert lin_sys[2] == new_plane_3
    assert lin_sys[3] == plane_4

    # multiply the first row by 1 and add it to the second row
    lin_sys.add_multiple_times_row_to_row(1, 0, 1)
    plane_1_added = Plane(Vector([10, 11, 10]), 12)
    assert lin_sys[0] == plane_2
    assert lin_sys[1] == plane_1_added
    assert lin_sys[2] == new_plane_3
    assert lin_sys[3] == plane_4

    # multiply the second row by -1 and add to the first row
    lin_sys.add_multiple_times_row_to_row(-1, 1, 0)
    plane_2_subtracted = Plane(Vector([-10, -10, -10]), -10)
    assert lin_sys[0] == plane_2_subtracted
    assert lin_sys[1] == plane_1_added
    assert lin_sys[2] == new_plane_3
    assert lin_sys[3] == plane_4


def test_triangular_form():
    """Test for Triangular Form"""
    plane_1 = Plane(Vector([0, 1, 1]), 1)
    plane_2 = Plane(Vector([1, -1, 1]), 2)
    plane_3 = Plane(Vector([1, 2, -5]), 3)

    lin_sys = LinearSystem([plane_1, plane_2, plane_3])
    triangular = lin_sys.compute_triangular_form()
    assert triangular[0] == Plane(Vector([1, -1, 1]), 2)
    assert triangular[1] == Plane(Vector([0, 1, 1]), 1)
    assert triangular[2] == Plane(Vector([0, 0, -9]), -2)


def test_rref_form():
    """Test for RREF Reduced Row Echelon Form"""
    plane_1 = Plane(Vector([0, 1, 1]), 1)
    plane_2 = Plane(Vector([1, -1, 1]), 2)
    plane_3 = Plane(Vector([1, 2, -5]), 3)

    lin_sys = LinearSystem([plane_1, plane_2, plane_3])
    rref = lin_sys.compute_rref_form()
    assert rref[0] == Plane(Vector([1, 0, 0]), Decimal(23) / Decimal(9))
    assert rref[1] == Plane(Vector([0, 1, 0]), Decimal(7) / Decimal(9))
    assert rref[2] == Plane(Vector([0, 0, 1]), Decimal(2) / Decimal(9))


def test_no_consistent_solutions():
    """Test the system has no solutions"""
    plane_1 = Plane(Vector([1, 1, -1]), 2)
    plane_2 = Plane(Vector([2, 3, -1]), 0)
    plane_3 = Plane(Vector([3, 4, -2]), 1)

    lin_sys_1 = LinearSystem([plane_1, plane_2, plane_3])
    solutions_1 = lin_sys_1.system_solutions()
    assert solutions_1 == 'system has no consistent solutions'


def test_infinite_solutions():
    """Test the system has infinite solutions"""
    plane_4 = Plane(Vector([1, 1, 1]), 3)
    plane_5 = Plane(Vector([2, 4, 1]), 8)
    plane_6 = Plane(Vector([6, 10, 4]), 22)

    lin_sys_2 = LinearSystem([plane_4, plane_5, plane_6])
    solutions_2 = lin_sys_2.system_solutions()
    assert solutions_2 == 'system has infinite solutions'


def test_single_solution():
    """Test the system has a single solution"""
    plane_7 = Plane(Vector([1, 1, 1]), 1)
    plane_8 = Plane(Vector([0, 1, 0]), 2)
    plane_9 = Plane(Vector([1, 1, -1]), 3)
    plane_10 = Plane(Vector([1, 0, -2]), 2)

    lin_sys_3 = LinearSystem([plane_7, plane_8, plane_9, plane_10])
    solutions_3 = lin_sys_3.system_solutions()
    assert solutions_3 == 'solution is: a = 0.000, b = 2.000, c = -1.000'
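
# The suite can be driven with pytest from the repository root, e.g.
# (assuming pytest is installed):
#
#   pytest tests/linsys_test.py -v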
| [
"[email protected]"
]
| |
41a53bbfa73c42d13714aa95f8a6f780a4bd9f0f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/299/66464/submittedfiles/testes.py | 83cd0f4164df46ab44cfb4ae691bbced548efa8a | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | # -*- coding: utf-8 -*-
print("Nikolas Sivini Borges Galvão")
print("20")
print(11+1037)
print((9*35+160)/5)
print(3.14159*5**2*3)
print((2+5)**2)

# print the even numbers from 2 to 100
x = 0
y = 2
while x < 100:
    x = x + y
    print(x)
| [
"[email protected]"
]
| |
48a03867f4833bc8161bc39682ab3974887a8612 | d0fe1112743cc36b2089b695fb7c527a3b8bb9f7 | /LifeCycleAnalyzer/Simulators/__init__.py | da272a632315bffa45e44941c0af211cb81b23f6 | [
"MIT"
]
| permissive | vd1371/GIAMS | bfff465c69f02a5dd1a2544bfe7170087a8e181d | cf8b7cb028b6cc6cd7facd6f45dd288067e9ff65 | refs/heads/master | 2023-04-14T07:09:00.801898 | 2022-08-01T01:16:06 | 2022-08-01T01:16:06 | 278,012,609 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from .MainSimulator import MainSimulator
from .DummyRiskAnalyzer import DummyRiskAnalyzer
from .EnvSimulator import EnvSimulator | [
"[email protected]"
]
| |
f421a6af40ae5171cceff2d4962cb7c99889310d | fe87192240c3d5ffe7deb5c9f2b7f02f347a2c00 | /peptide-permable/analyze_result.py | 69ef531b3d8aa7f477fdaf44fe9133b385513008 | []
| no_license | leexa90/dl_dev_course | ccfae0bbef4790b0b75fc9da0679f23c1da3bcf5 | 10a9e826cd7e752ce607deadc63826b313de39d2 | refs/heads/master | 2022-08-17T05:07:35.280305 | 2017-12-07T09:04:14 | 2017-12-07T09:04:14 | 105,847,852 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,368 | py | import pandas as pd
import os
files = sorted([x for x in os.listdir('results') if ('.csv' in x and 'results5' in x)])
dict_files = {}
data = pd.read_csv('results/'+files[0])
data['diff'] = 0
dict_files[0] = files[0]
counter = 1
for i in files[1:]:
    print i
    counter += 1
    dict_files[counter] = i
    temp = pd.read_csv('results/' + i)
    temp['diff'] = counter
    data = pd.concat([data, temp])

import numpy as np
import matplotlib.pyplot as plt

size = np.log10(data.prob)
plt.hist(size, bins=100)

dictt_inv = {0: 'A', 1: 'C', 2: 'E', 3: 'D', 4: 'G', 5: 'F', 6: 'I', 7: 'H', 8: 'K', 9: 'M',
             10: 'L', 11: 'N', 12: 'Q', 13: 'P', 14: 'S', 15: 'R', 16: 'T', 17: 'W', 18: 'V', 19: 'Y'}


def string(arr):
    """Convert a vector of amino-acid indices back into a sequence string."""
    result = ''
    for i in arr:
        result += dictt_inv[i]
    return result


p53_seq = 'ETFSDLWKLLPEN'
p53_seq_vec = np.array([2., 16., 5., 14., 3., 10., 17., 8., 10., 10., 13., 2., 11.])

# standard error of the mean over the 60 per-fold probabilities
data['var'] = map(np.std, np.array(data[['fold' + str(x) for x in range(0, 60)]]))
data['var'] = data['var'] / (59 ** .5)

best = data.sort_values('prob')[list(data.keys()[0:13]) + ['diff', 'prob']].reset_index(drop=True)


def get_diff(x):
    return np.argsort(p53_seq_vec != x[[str(y) for y in range(13)]].values)[-3:]


for i in range(1, 10):
    print p53_seq
    #print best.iloc[-i][range(0,13)].values, best.iloc[-i].prob,'\n'
    #print np.argsort(p53_seq_vec != best.iloc[-i][range(0,13)].values)[-3:],'\n'
    print string(best.iloc[-i][range(0, 13)].values), best.iloc[-i].prob, '\n'

#best['prob'] = np.log10(best['prob']+0.01)
for diff in pd.unique(data[data['prob'] - data['var'] >= 0.60]['diff']):
    above_30 = data[data['prob'] - data['var'] >= 0.60]
    above_30 = above_30[above_30['diff'] == diff]
    score = np.zeros((13, 20))
    float_formatter = lambda x: "%.3f" % x
    np.set_printoptions(formatter={'float_kind': float_formatter})
    # position-specific score: probability-weighted amino-acid frequency
    for aa in range(0, 20):
        for pos in range(0, 13):
            score[pos, aa] = np.sum(above_30[above_30[str(pos)] == aa].prob) / np.sum(above_30.prob)

    import matplotlib as mpl
    from matplotlib.text import TextPath
    from matplotlib.patches import PathPatch
    from matplotlib.font_manager import FontProperties

    fp = FontProperties(family="monospace", weight="bold")
    globscale = 1.35
    LETTERS = {"A": TextPath((-0.35, 0), "A", size=1, prop=fp),
               "C": TextPath((-0.35, 0), "C", size=1, prop=fp),
               "E": TextPath((-0.35, 0), "E", size=1, prop=fp),
               "D": TextPath((-0.35, 0), "D", size=1, prop=fp),
               "G": TextPath((-0.35, 0), "G", size=1, prop=fp),
               "F": TextPath((-0.35, 0), "F", size=1, prop=fp),
               "I": TextPath((-0.35, 0), "I", size=1, prop=fp),
               "H": TextPath((-0.35, 0), "H", size=1, prop=fp),
               "K": TextPath((-0.35, 0), "K", size=1, prop=fp),
               "M": TextPath((-0.35, 0), "M", size=1, prop=fp),
               "L": TextPath((-0.35, 0.003), "L", size=1, prop=fp),
               "N": TextPath((-0.35, 0), "N", size=1, prop=fp),
               "Q": TextPath((-0.35, 0.01), "Q", size=1, prop=fp),
               "P": TextPath((-0.35, 0), "P", size=1, prop=fp),
               "S": TextPath((-0.35, 0.01), "S", size=1, prop=fp),
               "R": TextPath((-0.35, 0), "R", size=1, prop=fp),
               "T": TextPath((-0.35, 0), "T", size=1, prop=fp),
               "W": TextPath((-0.35, 0), "W", size=1, prop=fp),
               "V": TextPath((-0.35, 0), "V", size=1, prop=fp),
               "Y": TextPath((-0.35, 0), "Y", size=1, prop=fp)}
    COLOR_SCHEME = {'A': 'grey', 'C': 'lightBlue', 'E': 'red', 'D': 'red',
                    'G': 'grey', 'F': 'green', 'I': 'grey', 'H': 'blue', 'K': 'blue',
                    'M': 'grey', 'L': 'grey', 'N': 'lightBlue', 'Q': 'lightBlue', 'P': 'orange',
                    'S': 'lightBlue', 'R': 'blue', 'T': 'lightBlue', 'W': 'green', 'V': 'grey',
                    'Y': 'green'}

    def letterAt(letter, x, y, yscale=1, ax=None):
        text = LETTERS[letter]
        t = mpl.transforms.Affine2D().scale(1 * globscale, yscale * globscale) + \
            mpl.transforms.Affine2D().translate(x, y) + ax.transData
        p = PathPatch(text, lw=0, fc=COLOR_SCHEME[letter], transform=t)
        if ax != None:
            ax.add_artist(p)
        return p

    def plot(thres=0.05, name='temp'):
        fig, ax = plt.subplots(figsize=(10, 8))
        for i in range(0, 13):
            y = 0
            # stack letters from the lowest to the highest score
            for aa in np.argsort(score[i, :]):
                temp_score = score[i, aa]
                if temp_score >= thres:
                    letter = dictt_inv[aa]
                    a = letterAt(letter, i + 1, y, temp_score, ax)
                    y += temp_score
        plt.xlim((0, 14))
        plt.ylim((-0.1, 1))
        plt.title(dict_files[diff] + ',num samples:' + str(len(above_30)))
        plt.xlabel('peptide position')
        plt.ylabel('probabilities')
        plt.tight_layout()
        plt.xticks(range(1, 14), ['E1', 'T2', 'F3', 'S4', 'D5', 'L6', 'W7', 'K8', 'L9', 'L10', 'P11', 'E12', 'N13'])
        # draw the wild-type p53 sequence under the axis
        for i in range(0, 13):
            a = letterAt(p53_seq[i], i + 1, -0.1, 0.09, ax)
        plt.plot((0, 14), (0, 0), color='black', linewidth='5')
        plt.savefig(name + '.png', dpi=300)
        #plt.show()
        plt.close()

    for i in (5,):
        plot(i * 1.0 / 100, 'Fig_60percent%s_thres%s_var' % (diff, i))
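
# Each diff processed above should leave a sequence-logo style figure named
# Fig_60percent<diff>_thres5_var.png beside this script (assuming matplotlib
# has a PNG backend available).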
| [
"[email protected]"
]
| |
98adc2f86d297b8a79c2eb2c4ad0528d7e435bc9 | a056e699bb03614563dc9090c4c3bc65479fc2d9 | /buffered_normal.py | 052155b79cc21ef651f9092a14a4e1be4c1a18a0 | [
"BSD-3-Clause"
]
| permissive | iandees/marblecutter | 278890faaf7a4d7b604bf2520aff8adb3c5d1b95 | 779b9c597bbd69ca3044f2c246721dc4eeeef61d | refs/heads/mapzen | 2021-01-25T04:36:13.210028 | 2017-06-15T19:35:53 | 2017-06-15T19:35:53 | 93,455,208 | 0 | 0 | null | 2017-06-05T23:13:17 | 2017-06-05T23:13:17 | null | UTF-8 | Python | false | false | 1,243 | py | # noqa
# coding=utf-8
from __future__ import division
import logging
from StringIO import StringIO
import numpy as np
from PIL import Image
from normal import render_normal
LOG = logging.getLogger(__name__)
BUFFER = 4
COLLAR = 2
CONTENT_TYPE = 'image/png'
EXT = 'png'
NAME = 'Buffered Normal'
def render(tile, (data, buffers)):  # noqa
    buffers = map(lambda x: max(0, x - COLLAR), buffers)
    data = data[0][buffers[3]:data.shape[1] - buffers[1],
                   buffers[0]:data.shape[2] - buffers[2]]

    if buffers[0] == 0:
        # empty left buffer; repeat the leftmost columns
        cols = data[:, :COLLAR]
        data = np.hstack((cols, data))
    if buffers[2] == 0:
        # empty right buffer; repeat the rightmost columns
        cols = data[:, -COLLAR:]
        data = np.hstack((data, cols))
    if buffers[3] == 0:
        # empty top buffer; repeat the top rows
        rows = data[:COLLAR]
        data = np.vstack((rows, data))
        buffers[3] = COLLAR
    if buffers[1] == 0:
        # empty bottom buffer; repeat the bottom rows
        rows = data[-COLLAR:]
        data = np.vstack((data, rows))
        buffers[1] = COLLAR

    imgarr = render_normal(tile, data, buffers)

    out = StringIO()
    im = Image.fromarray(imgarr, 'RGBA')
    im.save(out, 'png')

    return (CONTENT_TYPE, out.getvalue())
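
# Hypothetical invocation (the tile and the windowed DEM come from the
# caller; the buffer order (left, bottom, right, top) is inferred from the
# slicing above):
#
#   content_type, png_bytes = render(tile, (dem_window, [0, 0, 0, 0]))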
| [
"[email protected]"
]
| |
855f7cf66e2f45a2fe4d5bc6c25db3575a14ec1d | a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea | /airflow/providers/amazon/aws/example_dags/example_redshift_to_s3.py | 8116e02dc165ce82f017a21ede850dece6254ec9 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
]
| permissive | ishiis/airflow | 4305794e36b611d01f49e3f2401be3dc49782670 | 292440d54f4db84aaf0c5a98cf5fcf34303f2fa8 | refs/heads/master | 2022-07-30T00:51:28.806940 | 2022-07-14T12:07:11 | 2022-07-14T12:07:11 | 209,801,072 | 1 | 0 | Apache-2.0 | 2019-09-20T13:47:26 | 2019-09-20T13:47:26 | null | UTF-8 | Python | false | false | 1,575 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from os import getenv
from airflow import DAG
from airflow.providers.amazon.aws.transfers.redshift_to_s3 import RedshiftToS3Operator
S3_BUCKET_NAME = getenv("S3_BUCKET_NAME", "s3_bucket_name")
S3_KEY = getenv("S3_KEY", "s3_key")
REDSHIFT_TABLE = getenv("REDSHIFT_TABLE", "redshift_table")
with DAG(
    dag_id="example_redshift_to_s3",
    start_date=datetime(2021, 1, 1),
    schedule_interval=None,
    catchup=False,
    tags=['example'],
) as dag:
    # [START howto_transfer_redshift_to_s3]
    task_transfer_redshift_to_s3 = RedshiftToS3Operator(
        task_id='transfer_redshift_to_s3',
        s3_bucket=S3_BUCKET_NAME,
        s3_key=S3_KEY,
        schema='PUBLIC',
        table=REDSHIFT_TABLE,
    )
    # [END howto_transfer_redshift_to_s3]
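
# One way to exercise this DAG locally (assuming a configured Airflow
# installation and valid AWS/Redshift connections):
#
#   airflow dags test example_redshift_to_s3 2021-01-01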
| [
"[email protected]"
]
|